Diffstat (limited to 'arch/arm/cpu/armv8/cache_v8.c')

-rw-r--r--  arch/arm/cpu/armv8/cache_v8.c  112
1 file changed, 62 insertions(+), 50 deletions(-)
diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index 1615542a99..ac909a15ff 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -35,7 +35,7 @@ DECLARE_GLOBAL_DATA_PTR;
* off: FFF
*/
-static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
+u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
u64 max_addr = 0;
u64 ips, va_bits;
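
Dropping the static qualifier here exports get_tcr() from this file. A
hypothetical caller, assuming a matching declaration is made visible in a
header and that NULL is accepted for outputs the caller does not need:

    u64 ips, va_bits;
    u64 tcr;

    /* Query the EL2 translation control value plus IPS and VA width */
    tcr = get_tcr(2, &ips, &va_bits);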
@@ -44,7 +44,7 @@ static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
/* Find the largest address we need to support */
for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
- max_addr = max(max_addr, mem_map[i].base + mem_map[i].size);
+ max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);
/* Calculate the maximum physical (and thus virtual) address */
if (max_addr > (1ULL << 44)) {
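
The hunk above now sizes the address space from each region's virtual end
rather than the old base field, which matters once virt and phys are
allowed to differ. A minimal sketch of a board-provided map with the split
fields (field names follow U-Boot's struct mm_region; the addresses and
attributes are invented for illustration, and the terminating entry has
both size and attrs zero to match the loop condition above):

    static struct mm_region example_mem_map[] = {
            {
                    /* DRAM: identity-mapped, cacheable */
                    .virt = 0x80000000UL,
                    .phys = 0x80000000UL,
                    .size = 0x80000000UL,
                    .attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
                             PTE_BLOCK_INNER_SHARE,
            }, {
                    /* Peripherals: virtual address differs from physical */
                    .virt = 0x10000000UL,
                    .phys = 0x3f000000UL,
                    .size = 0x01000000UL,
                    .attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
                             PTE_BLOCK_NON_SHARE,
            }, {
                    /* List terminator */
                    0,
            }
    };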
@@ -167,49 +167,6 @@ static void set_pte_table(u64 *pte, u64 *table)
*pte = PTE_TYPE_TABLE | (ulong)table;
}
-/* Add one mm_region map entry to the page tables */
-static void add_map(struct mm_region *map)
-{
- u64 *pte;
- u64 addr = map->base;
- u64 size = map->size;
- u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
- u64 blocksize;
- int level;
- u64 *new_table;
-
- while (size) {
- pte = find_pte(addr, 0);
- if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
- debug("Creating table for addr 0x%llx\n", addr);
- new_table = create_table();
- set_pte_table(pte, new_table);
- }
-
- for (level = 1; level < 4; level++) {
- pte = find_pte(addr, level);
- blocksize = 1ULL << level2shift(level);
- debug("Checking if pte fits for addr=%llx size=%llx "
- "blocksize=%llx\n", addr, size, blocksize);
- if (size >= blocksize && !(addr & (blocksize - 1))) {
- /* Page fits, create block PTE */
- debug("Setting PTE %p to block addr=%llx\n",
- pte, addr);
- *pte = addr | attrs;
- addr += blocksize;
- size -= blocksize;
- break;
- } else if ((pte_type(pte) == PTE_TYPE_FAULT)) {
- /* Page doesn't fit, create subpages */
- debug("Creating subtable for addr 0x%llx "
- "blksize=%llx\n", addr, blocksize);
- new_table = create_table();
- set_pte_table(pte, new_table);
- }
- }
- }
-}
-
/* Splits a block PTE into table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
@@ -241,6 +198,58 @@ static void split_block(u64 *pte, int level)
set_pte_table(pte, new_table);
}
+/* Add one mm_region map entry to the page tables */
+static void add_map(struct mm_region *map)
+{
+ u64 *pte;
+ u64 virt = map->virt;
+ u64 phys = map->phys;
+ u64 size = map->size;
+ u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
+ u64 blocksize;
+ int level;
+ u64 *new_table;
+
+ while (size) {
+ pte = find_pte(virt, 0);
+ if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
+ debug("Creating table for virt 0x%llx\n", virt);
+ new_table = create_table();
+ set_pte_table(pte, new_table);
+ }
+
+ for (level = 1; level < 4; level++) {
+ pte = find_pte(virt, level);
+ if (!pte)
+ panic("pte not found\n");
+
+ blocksize = 1ULL << level2shift(level);
+ debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
+ virt, size, blocksize);
+ if (size >= blocksize && !(virt & (blocksize - 1))) {
+ /* Page fits, create block PTE */
+ debug("Setting PTE %p to block virt=%llx\n",
+ pte, virt);
+ *pte = phys | attrs;
+ virt += blocksize;
+ phys += blocksize;
+ size -= blocksize;
+ break;
+ } else if (pte_type(pte) == PTE_TYPE_FAULT) {
+ /* Page doesn't fit, create subpages */
+ debug("Creating subtable for virt 0x%llx blksize=%llx\n",
+ virt, blocksize);
+ new_table = create_table();
+ set_pte_table(pte, new_table);
+ } else if (pte_type(pte) == PTE_TYPE_BLOCK) {
+ debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
+ virt, blocksize);
+ split_block(pte, level);
+ }
+ }
+ }
+}
+
enum pte_type {
PTE_INVAL,
PTE_BLOCK,
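
The relocated add_map() above walks virt and phys in lockstep, and the new
PTE_TYPE_BLOCK branch splits an existing block mapping via split_block()
instead of silently overwriting it. A worked example of the level walk,
assuming a 4 KiB granule where level2shift() yields 30/21/12 for levels
1/2/3 (1 GiB / 2 MiB / 4 KiB blocks):

    /*
     * Mapping virt = 0x40100000, size = 0x300000:
     *
     *   0x40100000..0x401fffff: size fits 2 MiB but virt is not
     *       2 MiB aligned -> descend; 256 level-3 entries are
     *       written, one per pass of the outer while loop
     *   0x40200000..0x403fffff: now 2 MiB aligned with 0x200000
     *       left -> a single level-2 block PTE finishes the map
     */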
@@ -265,7 +274,7 @@ static int count_required_pts(u64 addr, int level, u64 maxaddr)
for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
struct mm_region *map = &mem_map[i];
- u64 start = map->base;
+ u64 start = map->virt;
u64 end = start + map->size;
/* Check if the PTE would overlap with the map */
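
The overlap test that follows (trimmed from this hunk) is ordinary
half-open interval intersection; a generic sketch of the idea, not the
literal committed code:

    /* [addr, addr + blocksize) overlaps [start, end) iff: */
    if (addr < end && start < addr + blocksize)
            /* this map contributes entries at this level */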
@@ -349,10 +358,13 @@ __weak u64 get_page_table_size(void)
return size;
}
-static void setup_pgtables(void)
+void setup_pgtables(void)
{
int i;
+ if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
+ panic("Page table pointer not setup.");
+
/*
* Allocate the first level we're on with invalidate entries.
* If the starting level is 0 (va_bits >= 39), then this is our
@@ -363,9 +375,6 @@ static void setup_pgtables(void)
/* Now add all MMU table entries one after another to the table */
for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
add_map(&mem_map[i]);
-
- /* Create the same thing once more for our emergency page table */
- create_table();
}
static void setup_all_pgtables(void)
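
With the trailing create_table() call removed from setup_pgtables(), the
emergency copy becomes the caller's job. A sketch of how
setup_all_pgtables() might drive two passes, assuming the gd->arch.tlb_*
fields used elsewhere in this file (an illustration of the scheme, not
necessarily the committed body):

    static void setup_all_pgtables(void)
    {
            u64 tlb_addr = gd->arch.tlb_addr;

            /* Reset the fill pointer and build the primary tables */
            gd->arch.tlb_fillptr = tlb_addr;
            setup_pgtables();

            /* Build a second, identical set for emergency use */
            gd->arch.tlb_addr = gd->arch.tlb_fillptr;
            setup_pgtables();
            gd->arch.tlb_emerg = gd->arch.tlb_addr;

            /* Point back at the primary tables */
            gd->arch.tlb_addr = tlb_addr;
    }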
@@ -527,6 +536,9 @@ void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);
+ if (!gd->arch.tlb_emerg)
+ panic("Emergency page table not setup.");
+
/*
* We can not modify page tables that we're currently running on,
* so we first need to switch to the "emergency" page tables where
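
For context, the function guarded by this new panic is the public hook for
changing cache attributes of a region at runtime. A usage sketch with an
invented address:

    /* e.g. make a 2 MiB DMA buffer region non-cacheable */
    mmu_set_region_dcache_behaviour(0x88000000, 0x200000, DCACHE_OFF);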