author    | Stephen Warren <swarren@nvidia.com>        | 2015-10-05 12:09:01 -0600
committer | Albert ARIBAUD <albert.u.boot@aribaud.net> | 2015-11-10 18:04:19 +0100
commit    | 376cb1a4531538a031f3875f0aab21f31f8f8c90 (patch)
tree      | 83572915e7c471bbcb40c5e9400e79ab4e949dd7
parent    | 3c6af3bad4f644e050f67146f4a6e177d5e39e76 (diff)
ARM: tegra: add custom MMU setup on ARMv8
This sets up a fine-grained page table, which is a requirement for
noncached_init() to operate correctly.
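For background: noncached_init() reserves a MMU_SECTION_SIZE-aligned region and remaps it uncached (roughly, via the mmu_set_region_dcache_behaviour() path), which works by rewriting the attribute index of each individual 2MB section descriptor; that is only possible when the live table actually contains one descriptor per 2MB section rather than huge blocks. The stand-alone sketch below (not U-Boot code; set_region_attr() and the simplified descriptor format are invented for illustration) models that per-section rewrite:

/*
 * Illustrative only -- not U-Boot code. set_region_attr() and the
 * simplified descriptor format are invented to model the per-2MB-section
 * attribute rewrite that noncached_init() depends on.
 */
#include <stdint.h>
#include <stdio.h>

#define SECTION_SHIFT	21UL			/* 2MB sections */
#define ATTRINDX_MASK	(7ULL << 2)		/* AttrIndx[2:0] lives in bits [4:2] */
#define ATTRINDX(i)	((uint64_t)(i) << 2)

/* Rewrite the MAIR attribute index of every 2MB section in [start, start + size). */
static void set_region_attr(uint64_t *l2, uint64_t start, uint64_t size,
			    unsigned int attr)
{
	uint64_t idx;

	for (idx = start >> SECTION_SHIFT;
	     idx < (start + size) >> SECTION_SHIFT; idx++) {
		l2[idx] &= ~ATTRINDX_MASK;
		l2[idx] |= ATTRINDX(attr);
	}
}

int main(void)
{
	static uint64_t l2[512];	/* one 4KB table: 512 x 2MB = 1GB of mappings */
	uint64_t i;

	for (i = 0; i < 512; i++)	/* identity-map 1GB as "normal" (index 0) */
		l2[i] = (i << SECTION_SHIFT) | ATTRINDX(0);

	/* Remap a 4MB window as "uncached" (attribute index 1): touches 2 entries. */
	set_region_attr(l2, 0x10000000ULL, 4 << 20, 1);
	printf("entry 128 = 0x%llx\n", (unsigned long long)l2[128]);
	return 0;
}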
MMU setup code currently exists in a number of places:
- A version in the core ARMv8 support code that sets up page tables that
use very large block sizes that CONFIG_SYS_NONCACHED_MEMORY doesn't
support.
- Enhanced versions for fsl-lsch3 and zynqmp that set up finer-grained
page tables.
Ideally, rather than duplicating the MMU setup code yet again, this patch
would instead consolidate all the different routines into the core ARMv8
code so that it supported all use-cases. However, this will require
significant effort since there appear to be a number of discrepancies[1]
between different versions of the code, and between the defines/values
some copies of the MMU setup code use and the architectural MMU
documentation. Some reverse engineering will be required to determine the
intent of the current code.
[1] For example, in the core ARMv8 MMU setup code, three defines named
TCR_EL[123]_IPS_BITS exist, but only one of them sets the IPS field and
the others set a different field (T1SZ) in the TCR. As far as I
can tell so far, there should be no need to set different values per
exception level nor to modify the T1SZ field at all, since TTBR1 shouldn't
be enabled anyway. Another example is inconsistent values for *_VA_BITS
between the current core ARMv8 MMU setup code and the various SoC-
specific MMU setup code. Another example is that asm/armv8/mmu.h's value
for SECTION_SHIFT doesn't match asm/system.h's MMU_SECTION_SHIFT;
research is needed to determine which code relies on which of those
values and why, and whether fixing the incorrect value will cause any
regression.
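To make the IPS/T0SZ/T1SZ distinction concrete: TxSZ encodes the size of the virtual region translated through TTBRx as 64 minus the number of VA bits, so the 40-bit space this patch configures needs T0SZ = 24, and once TTBR1 walks are disabled T1SZ is irrelevant. A small stand-alone check of that arithmetic (assuming the usual 64 - VA_BITS encoding used by TCR_T0SZ() in asm/armv8/mmu.h):

/* Stand-alone arithmetic check only; TCR_T0SZ() here assumes the 64 - VA_BITS encoding. */
#include <stdint.h>
#include <stdio.h>

#define TEGRA_VA_BITS	40			/* matches the value in the patch below */
#define TCR_T0SZ(bits)	(64 - (bits))		/* assumed encoding of the T0SZ field */

int main(void)
{
	unsigned int t0sz = TCR_T0SZ(TEGRA_VA_BITS);
	uint64_t span = 1ULL << (64 - t0sz);	/* bytes of VA space TTBR0 translates */

	printf("T0SZ = %u -> TTBR0 covers %llu GiB of VA space\n",
	       t0sz, (unsigned long long)(span >> 30));
	return 0;
}

The resulting 1TB span matches the two level-1 tables in the patch below, which together describe 0 to 1TB.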
Signed-off-by: Stephen Warren <swarren@nvidia.com>
-rw-r--r-- | arch/arm/mach-tegra/Makefile    |   1 |
-rw-r--r-- | arch/arm/mach-tegra/arm64-mmu.c | 131 |
2 files changed, 132 insertions, 0 deletions
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 75924ad848..98431a91f8 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_CMD_ENTERRCM) += cmd_enterrcm.o
 obj-$(CONFIG_PWM_TEGRA) += pwm.o
 endif
 
+obj-$(CONFIG_ARM64) += arm64-mmu.o
 obj-y += ap.o
 obj-y += board.o board2.o
 obj-y += cache.o
diff --git a/arch/arm/mach-tegra/arm64-mmu.c b/arch/arm/mach-tegra/arm64-mmu.c
new file mode 100644
index 0000000000..c2276523cb
--- /dev/null
+++ b/arch/arm/mach-tegra/arm64-mmu.c
@@ -0,0 +1,131 @@
+/*
+ * (C) Copyright 2014 - 2015 Xilinx, Inc.
+ * Michal Simek <michal.simek@xilinx.com>
+ * (This file derived from arch/arm/cpu/armv8/zynqmp/cpu.c)
+ *
+ * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <asm/system.h>
+#include <asm/armv8/mmu.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#define SECTION_SHIFT_L1	30UL
+#define SECTION_SHIFT_L2	21UL
+#define BLOCK_SIZE_L0		0x8000000000UL
+#define BLOCK_SIZE_L1		(1 << SECTION_SHIFT_L1)
+#define BLOCK_SIZE_L2		(1 << SECTION_SHIFT_L2)
+
+#define TCR_TG1_4K		(1 << 31)
+#define TCR_EPD1_DISABLE	(1 << 23)
+#define TEGRA_VA_BITS		40
+#define TEGRA_TCR		TCR_TG1_4K | \
+				TCR_EPD1_DISABLE | \
+				TCR_SHARED_OUTER | \
+				TCR_SHARED_INNER | \
+				TCR_IRGN_WBWA | \
+				TCR_ORGN_WBWA | \
+				TCR_T0SZ(TEGRA_VA_BITS)
+
+#define MEMORY_ATTR	PMD_SECT_AF | PMD_SECT_INNER_SHARE | \
+			PMD_ATTRINDX(MT_NORMAL) | \
+			PMD_TYPE_SECT
+#define DEVICE_ATTR	PMD_SECT_AF | PMD_SECT_PXN | \
+			PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_NGNRNE) | \
+			PMD_TYPE_SECT
+
+/* 4K size is required to place 512 entries in each level */
+#define TLB_TABLE_SIZE	0x1000
+
+/*
+ * This mmu table looks as below
+ * Level 0 table contains two entries to 512GB sizes. One is Level1 Table 0
+ * and other Level1 Table1.
+ * Level1 Table0 contains entries for each 1GB from 0 to 511GB.
+ * Level1 Table1 contains entries for each 1GB from 512GB to 1TB.
+ * Level2 Table0, Level2 Table1, Level2 Table2 and Level2 Table3 contains
+ * entries for each 2MB starting from 0GB, 1GB, 2GB and 3GB respectively.
+ */
+void mmu_setup(void)
+{
+	int el;
+	u64 i, section_l1t0, section_l1t1;
+	u64 section_l2t0, section_l2t1, section_l2t2, section_l2t3;
+	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
+	u64 *level1_table_0 = (u64 *)(gd->arch.tlb_addr + TLB_TABLE_SIZE);
+	u64 *level1_table_1 = (u64 *)(gd->arch.tlb_addr + (2 * TLB_TABLE_SIZE));
+	u64 *level2_table_0 = (u64 *)(gd->arch.tlb_addr + (3 * TLB_TABLE_SIZE));
+	u64 *level2_table_1 = (u64 *)(gd->arch.tlb_addr + (4 * TLB_TABLE_SIZE));
+	u64 *level2_table_2 = (u64 *)(gd->arch.tlb_addr + (5 * TLB_TABLE_SIZE));
+	u64 *level2_table_3 = (u64 *)(gd->arch.tlb_addr + (6 * TLB_TABLE_SIZE));
+
+	/* Invalidate all table entries */
+	memset(level0_table, 0, PGTABLE_SIZE);
+
+	level0_table[0] =
+		(u64)level1_table_0 | PMD_TYPE_TABLE;
+	level0_table[1] =
+		(u64)level1_table_1 | PMD_TYPE_TABLE;
+
+	/*
+	 * set level 1 table 0, covering 0 to 512GB
+	 * set level 1 table 1, covering 512GB to 1TB
+	 */
+	section_l1t0 = 0;
+	section_l1t1 = BLOCK_SIZE_L0;
+
+	for (i = 0; i < 512; i++) {
+		level1_table_0[i] = section_l1t0;
+		if (i >= 4)
+			level1_table_0[i] |= MEMORY_ATTR;
+		level1_table_1[i] = section_l1t1;
+		level1_table_1[i] |= MEMORY_ATTR;
+		section_l1t0 += BLOCK_SIZE_L1;
+		section_l1t1 += BLOCK_SIZE_L1;
+	}
+
+	level1_table_0[0] =
+		(u64)level2_table_0 | PMD_TYPE_TABLE;
+	level1_table_0[1] =
+		(u64)level2_table_1 | PMD_TYPE_TABLE;
+	level1_table_0[2] =
+		(u64)level2_table_2 | PMD_TYPE_TABLE;
+	level1_table_0[3] =
+		(u64)level2_table_3 | PMD_TYPE_TABLE;
+
+	section_l2t0 = 0;
+	section_l2t1 = section_l2t0 + BLOCK_SIZE_L1; /* 1GB */
+	section_l2t2 = section_l2t1 + BLOCK_SIZE_L1; /* 2GB */
+	section_l2t3 = section_l2t2 + BLOCK_SIZE_L1; /* 3GB */
+
+	for (i = 0; i < 512; i++) {
+		level2_table_0[i] = section_l2t0 | DEVICE_ATTR;
+		level2_table_1[i] = section_l2t1 | DEVICE_ATTR;
+		level2_table_2[i] = section_l2t2 | MEMORY_ATTR;
+		level2_table_3[i] = section_l2t3 | MEMORY_ATTR;
+		section_l2t0 += BLOCK_SIZE_L2;
+		section_l2t1 += BLOCK_SIZE_L2;
+		section_l2t2 += BLOCK_SIZE_L2;
+		section_l2t3 += BLOCK_SIZE_L2;
+	}
+
+	/* flush new MMU table */
+	flush_dcache_range(gd->arch.tlb_addr,
+			   gd->arch.tlb_addr + gd->arch.tlb_size);
+
+	/* point TTBR to the new table */
+	el = current_el();
+	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
+			  TEGRA_TCR, MEMORY_ATTRIBUTES);
+
+	set_sctlr(get_sctlr() | CR_M);
+}
+
+u64 *arch_get_page_table(void)
+{
+	return (u64 *)(gd->arch.tlb_addr + (3 * TLB_TABLE_SIZE));
+}
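One property of this layout worth spelling out: the four level-2 tables sit back to back at tlb_addr + 3..6 * TLB_TABLE_SIZE, so the pointer returned by arch_get_page_table() can be indexed with a flat 2MB-section number and any address below 4GB lands in the correct table. A stand-alone sketch of that index arithmetic (illustrative values only; the table base address is hypothetical):

/*
 * Illustrative only: reproduces the table spacing from arm64-mmu.c to show
 * which level-2 table and entry a sub-4GB address selects when the tables
 * are indexed flatly from the arch_get_page_table() pointer.
 */
#include <stdint.h>
#include <stdio.h>

#define TLB_TABLE_SIZE	 0x1000ULL	/* 4KB: 512 eight-byte entries per table */
#define SECTION_SHIFT_L2 21ULL		/* 2MB level-2 blocks */

int main(void)
{
	uint64_t tlb_addr = 0x80000000ULL;			/* hypothetical table base */
	uint64_t l2_base = tlb_addr + 3 * TLB_TABLE_SIZE;	/* arch_get_page_table() */
	uint64_t addr = 0xC0000000ULL;				/* example address: 3GB */

	uint64_t section = addr >> SECTION_SHIFT_L2;	/* flat 2MB section number */
	uint64_t table = section / 512;			/* level2_table_0..3 */
	uint64_t entry = section % 512;			/* entry within that table */
	uint64_t entry_addr = l2_base + section * sizeof(uint64_t);

	printf("%#llx -> level2_table_%llu[%llu] (descriptor at %#llx)\n",
	       (unsigned long long)addr, (unsigned long long)table,
	       (unsigned long long)entry, (unsigned long long)entry_addr);
	return 0;
}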