Diffstat (limited to 'arch/arm/cpu/armv8')
-rw-r--r--  arch/arm/cpu/armv8/Makefile                      1
-rw-r--r--  arch/arm/cpu/armv8/bcmns3/Makefile               5
-rw-r--r--  arch/arm/cpu/armv8/bcmns3/lowlevel.S            98
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/Kconfig        9
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/Makefile       2
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/fdt.c          9
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S   165
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/mp.c          80
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/soc.c         43
-rw-r--r--  arch/arm/cpu/armv8/fsl-layerscape/spintable.S  118
10 files changed, 300 insertions(+), 230 deletions(-)
diff --git a/arch/arm/cpu/armv8/Makefile b/arch/arm/cpu/armv8/Makefile
index 2e48df0eb9..7e33a183d5 100644
--- a/arch/arm/cpu/armv8/Makefile
+++ b/arch/arm/cpu/armv8/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_S32V234) += s32v234/
obj-$(CONFIG_TARGET_HIKEY) += hisilicon/
obj-$(CONFIG_ARMV8_PSCI) += psci.o
obj-$(CONFIG_ARCH_SUNXI) += lowlevel_init.o
+obj-$(CONFIG_TARGET_BCMNS3) += bcmns3/
diff --git a/arch/arm/cpu/armv8/bcmns3/Makefile b/arch/arm/cpu/armv8/bcmns3/Makefile
new file mode 100644
index 0000000000..a35e29d11a
--- /dev/null
+++ b/arch/arm/cpu/armv8/bcmns3/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Copyright 2020 Broadcom.
+
+obj-y += lowlevel.o
diff --git a/arch/arm/cpu/armv8/bcmns3/lowlevel.S b/arch/arm/cpu/armv8/bcmns3/lowlevel.S
new file mode 100644
index 0000000000..bf1a17ab03
--- /dev/null
+++ b/arch/arm/cpu/armv8/bcmns3/lowlevel.S
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2020 Broadcom.
+ *
+ */
+
+#include <asm/macro.h>
+#include <linux/linkage.h>
+
+hnf_pstate_poll:
+ /* x0 has the desired status, return 0 for success, 1 for timeout
+ * clobber x1, x2, x3, x4, x6, x7
+ */
+ mov x1, x0
+ mov x7, #0 /* flag for timeout */
+ mrs x3, cntpct_el0 /* read timer */
+ mov w0, #600
+ mov w6, #1000
+ mul w0, w0, w6
+ add x3, x3, x0 /* timeout after 100 microseconds */
+ mov x0, #0x18
+ movk x0, #0x6120, lsl #16 /* HNF0_PSTATE_STATUS */
+ mov w6, #4 /* HN-F node count */
+1:
+ ldr x2, [x0]
+ cmp x2, x1 /* check status */
+ b.eq 2f
+ mrs x4, cntpct_el0
+ cmp x4, x3
+ b.ls 1b
+ mov x7, #1 /* timeout */
+ b 3f
+2:
+ add x0, x0, #0x10000 /* move to next node */
+ subs w6, w6, #1
+ cbnz w6, 1b
+3:
+ mov x0, x7
+ ret
+
+hnf_set_pstate:
+ /* x0 has the desired state, clobber x1, x2, x6 */
+ mov x1, x0
+ /* write the requested power state to each HN-F node */
+ mov w6, #4 /* HN-F node count */
+ mov x0, #0x10
+ movk x0, #0x6120, lsl #16 /* HNF0_PSTATE_REQ */
+1: /* request the state on this node */
+ ldr x2, [x0]
+ and x2, x2, #0xfffffffffffffffc /* & HNFPSTAT_MASK */
+ orr x2, x2, x1
+ str x2, [x0]
+ add x0, x0, #0x10000 /* move to next node */
+ subs w6, w6, #1
+ cbnz w6, 1b
+
+ ret
+
+ENTRY(__asm_flush_l3_dcache)
+ /*
+ * Return status in x0
+ * success 0
+ * timeout 1 for setting SFONLY, 2 for FAM, 3 for both
+ */
+ mov x29, lr
+ mov x8, #0
+
+ dsb sy
+ mov x0, #0x1 /* HNFPSTAT_SFONLY */
+ bl hnf_set_pstate
+
+ mov x0, #0x4 /* SFONLY status */
+ bl hnf_pstate_poll
+ cbz x0, 1f
+ mov x8, #1 /* timeout */
+1:
+ dsb sy
+ mov x0, #0x3 /* HNFPSTAT_FAM */
+ bl hnf_set_pstate
+
+ mov x0, #0xc /* FAM status */
+ bl hnf_pstate_poll
+ cbz x0, 1f
+ add x8, x8, #0x2
+1:
+ mov x0, x8
+ mov lr, x29
+ ret
+ENDPROC(__asm_flush_l3_dcache)
+
+ENTRY(save_boot_params)
+/*
+ * void save_boot_params(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3)
+ */
+ adr x4, bl33_info
+ str x0, [x4]
+ b save_boot_params_ret
+ENDPROC(save_boot_params)
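For reference, the HN-F flush protocol above is just a request/poll pair repeated over the CCN nodes. The C sketch below restates it; the 0x61200000 base, the 0x10000 node stride, the 0x10/0x18 register offsets and the four-node count come from the assembly, while read_cntpct() and the function shapes are illustrative only.

#include <stdint.h>

#define HNF_BASE           0x61200000UL /* HN-F node 0, from the asm above */
#define HNF_NODE_STRIDE    0x10000UL    /* distance between HN-F nodes */
#define HNF_PSTATE_REQ     0x10         /* pstate request register */
#define HNF_PSTATE_STATUS  0x18         /* pstate status register */
#define HNF_NODE_COUNT     4

/* hypothetical helper standing in for "mrs xN, cntpct_el0" */
static inline uint64_t read_cntpct(void)
{
	uint64_t v;

	asm volatile("mrs %0, cntpct_el0" : "=r" (v));
	return v;
}

/* request the given power state on every HN-F node */
static void hnf_set_pstate(uint64_t state)
{
	int i;

	for (i = 0; i < HNF_NODE_COUNT; i++) {
		volatile uint64_t *req = (volatile uint64_t *)
			(HNF_BASE + i * HNF_NODE_STRIDE + HNF_PSTATE_REQ);

		*req = (*req & ~0x3ULL) | state; /* & HNFPSTAT_MASK */
	}
}

/* poll every node for the desired status: 0 on success, 1 on timeout */
static int hnf_pstate_poll(uint64_t status, uint64_t timeout_ticks)
{
	uint64_t deadline = read_cntpct() + timeout_ticks;
	int i;

	for (i = 0; i < HNF_NODE_COUNT; i++) {
		volatile uint64_t *st = (volatile uint64_t *)
			(HNF_BASE + i * HNF_NODE_STRIDE + HNF_PSTATE_STATUS);

		while (*st != status)
			if (read_cntpct() > deadline)
				return 1;
	}
	return 0;
}
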
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/Kconfig b/arch/arm/cpu/armv8/fsl-layerscape/Kconfig
index 2f75b2cdd3..be51b7d856 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/Kconfig
+++ b/arch/arm/cpu/armv8/fsl-layerscape/Kconfig
@@ -23,6 +23,7 @@ config ARCH_LS1012A
config ARCH_LS1028A
bool
select ARMV8_SET_SMPEN
+ select FSL_LAYERSCAPE
select FSL_LSCH3
select NXP_LSCH3_2
select SYS_FSL_HAS_CCI400
@@ -67,7 +68,6 @@ config ARCH_LS1043A
select SYS_FSL_ERRATUM_A009660 if !TFABOOT
select SYS_FSL_ERRATUM_A009663 if !TFABOOT
select SYS_FSL_ERRATUM_A009798
- select SYS_FSL_ERRATUM_A009929
select SYS_FSL_ERRATUM_A009942 if !TFABOOT
select SYS_FSL_ERRATUM_A010315
select SYS_FSL_ERRATUM_A010539
@@ -75,7 +75,7 @@ config ARCH_LS1043A
select SYS_FSL_HAS_DDR4
select ARCH_EARLY_INIT_R
select BOARD_EARLY_INIT_F
- select SYS_I2C_MXC if !DM_I2C
+ select SYS_I2C_MXC
select SYS_I2C_MXC_I2C1 if !DM_I2C
select SYS_I2C_MXC_I2C2 if !DM_I2C
select SYS_I2C_MXC_I2C3 if !DM_I2C
@@ -108,7 +108,7 @@ config ARCH_LS1046A
select SYS_FSL_SRDS_2
select ARCH_EARLY_INIT_R
select BOARD_EARLY_INIT_F
- select SYS_I2C_MXC if !DM_I2C
+ select SYS_I2C_MXC
select SYS_I2C_MXC_I2C1 if !DM_I2C
select SYS_I2C_MXC_I2C2 if !DM_I2C
select SYS_I2C_MXC_I2C3 if !DM_I2C
@@ -591,9 +591,6 @@ config SYS_FSL_ERRATUM_A009635
config SYS_FSL_ERRATUM_A009660
bool
-config SYS_FSL_ERRATUM_A009929
- bool
-
config SYS_FSL_ERRATUM_A050382
bool
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/Makefile b/arch/arm/cpu/armv8/fsl-layerscape/Makefile
index e398aecd12..9ecb372b4e 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/Makefile
+++ b/arch/arm/cpu/armv8/fsl-layerscape/Makefile
@@ -6,7 +6,7 @@ obj-y += cpu.o
obj-y += lowlevel.o
obj-y += soc.o
ifndef CONFIG_SPL_BUILD
-obj-$(CONFIG_MP) += mp.o
+obj-$(CONFIG_MP) += mp.o spintable.o
obj-$(CONFIG_OF_LIBFDT) += fdt.o
endif
obj-$(CONFIG_SPL) += spl.o
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/fdt.c b/arch/arm/cpu/armv8/fsl-layerscape/fdt.c
index 67764ee83d..7400b2cf29 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/fdt.c
+++ b/arch/arm/cpu/armv8/fsl-layerscape/fdt.c
@@ -54,7 +54,6 @@ void ft_fixup_cpu(void *blob)
fdt32_t *reg;
int addr_cells;
u64 val, core_id;
- size_t *boot_code_size = &(__secondary_boot_code_size);
u32 mask = cpu_pos_mask();
int off_prev = -1;
@@ -145,11 +144,11 @@ remove_psci_node:
"cpu", 4);
}
- fdt_add_mem_rsv(blob, (uintptr_t)&secondary_boot_code,
- *boot_code_size);
+ fdt_add_mem_rsv(blob, (uintptr_t)secondary_boot_code_start,
+ secondary_boot_code_size);
#if CONFIG_IS_ENABLED(EFI_LOADER)
- efi_add_memory_map((uintptr_t)&secondary_boot_code, *boot_code_size,
- EFI_RESERVED_MEMORY_TYPE);
+ efi_add_memory_map((uintptr_t)secondary_boot_code_start,
+ secondary_boot_code_size, EFI_RESERVED_MEMORY_TYPE);
#endif
}
#endif
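The reservation added in this hunk is what keeps the OS from reclaiming the spin-table region while secondary cores still sit in it. A minimal sketch of checking such a reservation from the consumer side, using the standard libfdt calls (spin_table_is_reserved() itself is a hypothetical helper):

#include <libfdt.h>
#include <stdint.h>

/* return 1 if [start, start + size) is covered by a /memreserve/ entry */
static int spin_table_is_reserved(const void *blob, uint64_t start,
				  uint64_t size)
{
	int i, n = fdt_num_mem_rsv(blob);

	for (i = 0; i < n; i++) {
		uint64_t addr, sz;

		if (fdt_get_mem_rsv(blob, i, &addr, &sz))
			continue;
		if (addr <= start && start + size <= addr + sz)
			return 1;
	}
	return 0;
}
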
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
index 711ab87556..a519f6ed67 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
+++ b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
@@ -11,14 +11,16 @@
#include <asm/gic.h>
#include <asm/macro.h>
#include <asm/arch-fsl-layerscape/soc.h>
-#ifdef CONFIG_MP
-#include <asm/arch/mp.h>
-#endif
#ifdef CONFIG_FSL_LSCH3
#include <asm/arch-fsl-layerscape/immap_lsch3.h>
#endif
#include <asm/u-boot.h>
+ .align 3
+ .weak secondary_boot_addr
+secondary_boot_addr:
+ .quad 0
+
/* Get GIC offset
* For LS1043a rev1.0, GIC base address align with 4k.
* For LS1043a rev1.1, if DCFG_GIC400_ALIGN[GIC_ADDR_BIT]
@@ -208,8 +210,13 @@ ENTRY(lowlevel_init)
branch_if_master x0, x1, 2f
#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
- ldr x0, =secondary_boot_func
- blr x0
+ /*
+ * Formerly, this jumped straight to secondary_boot_func. Now we
+ * return early and let the generic code in start.S do that jump,
+ * using the weak secondary_boot_addr defined above.
+ */
+ mov lr, x29 /* Restore LR */
+ ret
#endif
2:
@@ -419,151 +426,3 @@ ENTRY(__asm_flush_l3_dcache)
ret
ENDPROC(__asm_flush_l3_dcache)
#endif /* CONFIG_SYS_FSL_HAS_CCN504 */
-
-#ifdef CONFIG_MP
- /* Keep literals not used by the secondary boot code outside it */
- .ltorg
-
- /* Using 64 bit alignment since the spin table is accessed as data */
- .align 4
- .global secondary_boot_code
- /* Secondary Boot Code starts here */
-secondary_boot_code:
- .global __spin_table
-__spin_table:
- .space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
-
- .align 2
-ENTRY(secondary_boot_func)
- /*
- * MPIDR_EL1 Fields:
- * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
- * MPIDR[7:2] = AFF0_RES
- * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
- * MPIDR[23:16] = AFF2_CLUSTERID
- * MPIDR[24] = MT
- * MPIDR[29:25] = RES0
- * MPIDR[30] = U
- * MPIDR[31] = ME
- * MPIDR[39:32] = AFF3
- *
- * Linear Processor ID (LPID) calculation from MPIDR_EL1:
- * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
- * until AFF2_CLUSTERID and AFF3 have non-zero values)
- *
- * LPID = MPIDR[15:8] | MPIDR[1:0]
- */
- mrs x0, mpidr_el1
- ubfm x1, x0, #8, #15
- ubfm x2, x0, #0, #1
- orr x10, x2, x1, lsl #2 /* x10 has LPID */
- ubfm x9, x0, #0, #15 /* x9 contains MPIDR[15:0] */
- /*
- * offset of the spin table element for this core from start of spin
- * table (each elem is padded to 64 bytes)
- */
- lsl x1, x10, #6
- ldr x0, =__spin_table
- /* physical address of this cpus spin table element */
- add x11, x1, x0
-
- ldr x0, =__real_cntfrq
- ldr x0, [x0]
- msr cntfrq_el0, x0 /* set with real frequency */
- str x9, [x11, #16] /* LPID */
- mov x4, #1
- str x4, [x11, #8] /* STATUS */
- dsb sy
-#if defined(CONFIG_GICV3)
- gic_wait_for_interrupt_m x0
-#elif defined(CONFIG_GICV2)
- bl get_gic_offset
- mov x0, x1
- gic_wait_for_interrupt_m x0, w1
-#endif
-
-slave_cpu:
- wfe
- ldr x0, [x11]
- cbz x0, slave_cpu
-#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
- mrs x1, sctlr_el2
-#else
- mrs x1, sctlr_el1
-#endif
- tbz x1, #25, cpu_is_le
- rev x0, x0 /* BE to LE conversion */
-cpu_is_le:
- ldr x5, [x11, #24]
- cbz x5, 1f
-
-#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
- adr x4, secondary_switch_to_el1
- ldr x5, =ES_TO_AARCH64
-#else
- ldr x4, [x11]
- ldr x5, =ES_TO_AARCH32
-#endif
- bl secondary_switch_to_el2
-
-1:
-#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
- adr x4, secondary_switch_to_el1
-#else
- ldr x4, [x11]
-#endif
- ldr x5, =ES_TO_AARCH64
- bl secondary_switch_to_el2
-
-ENDPROC(secondary_boot_func)
-
-ENTRY(secondary_switch_to_el2)
- switch_el x6, 1f, 0f, 0f
-0: ret
-1: armv8_switch_to_el2_m x4, x5, x6
-ENDPROC(secondary_switch_to_el2)
-
-ENTRY(secondary_switch_to_el1)
- mrs x0, mpidr_el1
- ubfm x1, x0, #8, #15
- ubfm x2, x0, #0, #1
- orr x10, x2, x1, lsl #2 /* x10 has LPID */
-
- lsl x1, x10, #6
- ldr x0, =__spin_table
- /* physical address of this cpus spin table element */
- add x11, x1, x0
-
- ldr x4, [x11]
-
- ldr x5, [x11, #24]
- cbz x5, 2f
-
- ldr x5, =ES_TO_AARCH32
- bl switch_to_el1
-
-2: ldr x5, =ES_TO_AARCH64
-
-switch_to_el1:
- switch_el x6, 0f, 1f, 0f
-0: ret
-1: armv8_switch_to_el1_m x4, x5, x6
-ENDPROC(secondary_switch_to_el1)
-
- /* Ensure that the literals used by the secondary boot code are
- * assembled within it (this is required so that we can protect
- * this area with a single memreserve region
- */
- .ltorg
-
- /* 64 bit alignment for elements accessed as data */
- .align 4
- .global __real_cntfrq
-__real_cntfrq:
- .quad COUNTER_FREQUENCY
- .globl __secondary_boot_code_size
- .type __secondary_boot_code_size, %object
- /* Secondary Boot Code ends here */
-__secondary_boot_code_size:
- .quad .-secondary_boot_code
-#endif
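Note the interplay with the weak secondary_boot_addr added near the top of this file: it defaults to 0, and the strong definition in the new spintable.S (below) overrides it whenever the spin-table code is linked in, so start.S always has something safe to load. The same pattern in C, sketched as two translation units:

#include <stdint.h>

/* TU 1, always linked: a weak default of 0 (e.g. SPL, no spin table) */
uint64_t __attribute__((weak)) secondary_boot_addr;

/* TU 2, only in full U-Boot: the strong definition wins at link time */
extern void __secondary_boot_func(void);
uint64_t secondary_boot_addr = (uint64_t)__secondary_boot_func;
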
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/mp.c b/arch/arm/cpu/armv8/fsl-layerscape/mp.c
index 1ea887b331..bd85351705 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/mp.c
+++ b/arch/arm/cpu/armv8/fsl-layerscape/mp.c
@@ -6,6 +6,7 @@
#include <common.h>
#include <cpu_func.h>
#include <image.h>
+#include <log.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/system.h>
@@ -14,17 +15,14 @@
#include <linux/delay.h>
#include "cpu.h"
#include <asm/arch-fsl-layerscape/soc.h>
+#include <efi_loader.h>
DECLARE_GLOBAL_DATA_PTR;
void *get_spin_tbl_addr(void)
{
- return &__spin_table;
-}
-
-phys_addr_t determine_mp_bootpg(void)
-{
- return (phys_addr_t)&secondary_boot_code;
+ /* the spin table sits at the beginning of the secondary boot code */
+ return secondary_boot_code_start;
}
void update_os_arch_secondary_cores(uint8_t os_arch)
@@ -43,7 +41,7 @@ void update_os_arch_secondary_cores(uint8_t os_arch)
}
#ifdef CONFIG_FSL_LSCH3
-void wake_secondary_core_n(int cluster, int core, int cluster_cores)
+static void wake_secondary_core_n(int cluster, int core, int cluster_cores)
{
struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
struct ccsr_reset __iomem *rst = (void *)(CONFIG_SYS_FSL_RST_ADDR);
@@ -80,7 +78,11 @@ int fsl_layerscape_wake_seconday_cores(void)
#endif
u32 cores, cpu_up_mask = 1;
int i, timeout = 10;
- u64 *table = get_spin_tbl_addr();
+ u64 *table;
+#ifdef CONFIG_EFI_LOADER
+ u64 reloc_addr = U32_MAX;
+ efi_status_t ret;
+#endif
#ifdef COUNTER_FREQUENCY_REAL
/* update for secondary cores */
@@ -89,16 +91,49 @@ int fsl_layerscape_wake_seconday_cores(void)
(unsigned long)&__real_cntfrq + 8);
#endif
+#ifdef CONFIG_EFI_LOADER
+ /*
+ * EFI will reserve 64 KiB for its runtime services, which would
+ * probably overlap with our spin table code; that is why we have to
+ * relocate it. Keep this after the __real_cntfrq update, so the
+ * updated value is copied along with the rest of the section.
+ */
+ ret = efi_allocate_pages(EFI_ALLOCATE_MAX_ADDRESS,
+ EFI_RESERVED_MEMORY_TYPE,
+ efi_size_in_pages(secondary_boot_code_size),
+ &reloc_addr);
+ if (ret == EFI_SUCCESS) {
+ debug("Relocating spin table from %llx to %llx (size %lx)\n",
+ (u64)secondary_boot_code_start, reloc_addr,
+ secondary_boot_code_size);
+ memcpy((void *)reloc_addr, secondary_boot_code_start,
+ secondary_boot_code_size);
+ flush_dcache_range(reloc_addr,
+ reloc_addr + secondary_boot_code_size);
+
+ /* set new entry point for secondary cores */
+ secondary_boot_addr += (void *)reloc_addr -
+ secondary_boot_code_start;
+ flush_dcache_range((unsigned long)&secondary_boot_addr,
+ (unsigned long)&secondary_boot_addr + 8);
+
+ /* this will be used to reserve the memory */
+ secondary_boot_code_start = (void *)reloc_addr;
+ }
+#endif
+
cores = cpu_mask();
/* Clear spin table so that secondary processors
* observe the correct value after waking up from wfe.
*/
+ table = get_spin_tbl_addr();
memset(table, 0, CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE);
flush_dcache_range((unsigned long)table,
(unsigned long)table +
(CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE));
- printf("Waking secondary cores to start from %lx\n", gd->relocaddr);
+ debug("Waking secondary cores to start from %lx\n", gd->relocaddr);
#ifdef CONFIG_FSL_LSCH3
gur_out32(&gur->bootlocptrh, (u32)(gd->relocaddr >> 32));
@@ -168,11 +203,11 @@ int fsl_layerscape_wake_seconday_cores(void)
udelay(10);
}
if (timeout <= 0) {
- printf("Not all cores (0x%x) are up (0x%x)\n",
- cores, cpu_up_mask);
+ printf("CPU: Failed to bring up some cores (mask 0x%x)\n",
+ cores ^ cpu_up_mask);
return 1;
}
- printf("All (%d) cores are up.\n", hweight32(cores));
+ printf("CPU: %d cores online\n", hweight32(cores));
return 0;
}
@@ -189,9 +224,9 @@ static int is_pos_valid(unsigned int pos)
int is_core_online(u64 cpu_id)
{
- u64 *table;
+ u64 *table = get_spin_tbl_addr();
int pos = id_to_core(cpu_id);
- table = (u64 *)get_spin_tbl_addr() + pos * WORDS_PER_SPIN_TABLE_ENTRY;
+ table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
return table[SPIN_TABLE_ELEM_STATUS_IDX] == 1;
}
@@ -237,18 +272,16 @@ static int core_to_pos(int nr)
int cpu_status(u32 nr)
{
- u64 *table;
+ u64 *table = get_spin_tbl_addr();
int pos;
if (nr == 0) {
- table = (u64 *)get_spin_tbl_addr();
printf("table base @ 0x%p\n", table);
} else {
pos = core_to_pos(nr);
if (pos < 0)
return -1;
- table = (u64 *)get_spin_tbl_addr() + pos *
- WORDS_PER_SPIN_TABLE_ENTRY;
+ table += pos * WORDS_PER_SPIN_TABLE_ENTRY;
printf("table @ 0x%p\n", table);
printf(" addr - 0x%016llx\n",
table[SPIN_TABLE_ELEM_ENTRY_ADDR_IDX]);
@@ -264,7 +297,7 @@ int cpu_status(u32 nr)
int cpu_release(u32 nr, int argc, char *const argv[])
{
u64 boot_addr;
- u64 *table = (u64 *)get_spin_tbl_addr();
+ u64 *table = get_spin_tbl_addr();
int pos;
pos = core_to_pos(nr);
@@ -277,11 +310,12 @@ int cpu_release(u32 nr, int argc, char *const argv[])
flush_dcache_range((unsigned long)table,
(unsigned long)table + SPIN_TABLE_ELEM_SIZE);
asm volatile("dsb st");
- smp_kick_all_cpus(); /* only those with entry addr set will run */
+
/*
- * When the first release command runs, all cores are set to go. Those
- * without a valid entry address will be trapped by "wfe". "sev" kicks
- * them off to check the address again. When set, they continue to run.
+ * The secondary CPUs poll the spin table above for a non-zero entry
+ * address. To save power they wait in "wfe", so call "sev" here to
+ * wake them and let them check the spin table again (see the wfe
+ * loop in spintable.S).
*/
asm volatile("sev");
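For clarity, the 64-byte spin-table element manipulated throughout this file has the layout below (field names are illustrative; the offsets match the str/ldr instructions in spintable.S), and cpu_release() boils down to the short release sequence that follows:

#include <stdint.h>

void flush_dcache_range(unsigned long start, unsigned long stop); /* U-Boot */

struct spin_table_elem {
	uint64_t entry_addr;	/* +0:  0 = keep waiting in wfe */
	uint64_t status;	/* +8:  set to 1 once the core is parked */
	uint64_t lpid;		/* +16: MPIDR[15:0] of the parked core */
	uint64_t arch_comp;	/* +24: non-zero = release in AArch32 */
	uint64_t pad[4];	/* padded to SPIN_TABLE_ELEM_SIZE (64) */
};

/* a sketch of the release path: publish the entry address, then wake */
static void release_core(struct spin_table_elem *e, uint64_t boot_addr)
{
	e->entry_addr = boot_addr;
	flush_dcache_range((unsigned long)e, (unsigned long)e + sizeof(*e));
	asm volatile("dsb st");	/* order the table update ... */
	asm volatile("sev");	/* ... before waking the cores out of wfe */
}
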
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/soc.c b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
index ad7ea05935..fde893e8c9 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/soc.c
+++ b/arch/arm/cpu/armv8/fsl-layerscape/soc.c
@@ -41,37 +41,11 @@ DECLARE_GLOBAL_DATA_PTR;
#endif
#ifdef CONFIG_GIC_V3_ITS
-#define PENDTABLE_MAX_SZ ALIGN(BIT(ITS_MAX_LPI_NRBITS), SZ_64K)
-#define PROPTABLE_MAX_SZ ALIGN(BIT(ITS_MAX_LPI_NRBITS) / 8, SZ_64K)
-#define GIC_LPI_SIZE ALIGN(cpu_numcores() * PENDTABLE_MAX_SZ + \
- PROPTABLE_MAX_SZ, SZ_1M)
-static int fdt_add_resv_mem_gic_rd_tables(void *blob, u64 base, size_t size)
-{
- u32 phandle;
- int err;
- struct fdt_memory gic_rd_tables;
-
- gic_rd_tables.start = base;
- gic_rd_tables.end = base + size - 1;
- err = fdtdec_add_reserved_memory(blob, "gic-rd-tables", &gic_rd_tables,
- &phandle);
- if (err < 0)
- debug("%s: failed to add reserved memory: %d\n", __func__, err);
-
- return err;
-}
-
int ls_gic_rd_tables_init(void *blob)
{
- u64 gic_lpi_base;
int ret;
- gic_lpi_base = ALIGN(gd->arch.resv_ram - GIC_LPI_SIZE, SZ_64K);
- ret = fdt_add_resv_mem_gic_rd_tables(blob, gic_lpi_base, GIC_LPI_SIZE);
- if (ret)
- return ret;
-
- ret = gic_lpi_tables_init(gic_lpi_base, cpu_numcores());
+ ret = gic_lpi_tables_init();
if (ret)
debug("%s: failed to init gic-lpi-tables\n", __func__);
@@ -445,20 +419,6 @@ int get_core_volt_from_fuse(void)
}
#elif defined(CONFIG_FSL_LSCH2)
-
-static void erratum_a009929(void)
-{
-#ifdef CONFIG_SYS_FSL_ERRATUM_A009929
- struct ccsr_gur *gur = (void *)CONFIG_SYS_FSL_GUTS_ADDR;
- u32 __iomem *dcsr_cop_ccp = (void *)CONFIG_SYS_DCSR_COP_CCP_ADDR;
- u32 rstrqmr1 = gur_in32(&gur->rstrqmr1);
-
- rstrqmr1 |= 0x00000400;
- gur_out32(&gur->rstrqmr1, rstrqmr1);
- writel(0x01000000, dcsr_cop_ccp);
-#endif
-}
-
/*
* This erratum requires setting a value to eddrtqcr1 to optimal
* the DDR performance. The eddrtqcr1 register is in SCFG space
@@ -724,7 +684,6 @@ void fsl_lsch2_early_init_f(void)
#endif
/* Erratum */
erratum_a008850_early(); /* part 1 of 2 */
- erratum_a009929();
erratum_a009660();
erratum_a010539();
erratum_a009008();
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/spintable.S b/arch/arm/cpu/armv8/fsl-layerscape/spintable.S
new file mode 100644
index 0000000000..363ded03e6
--- /dev/null
+++ b/arch/arm/cpu/armv8/fsl-layerscape/spintable.S
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * (C) Copyright 2014-2015 Freescale Semiconductor
+ * Copyright 2019 NXP
+ */
+
+#include <config.h>
+#include <linux/linkage.h>
+#include <asm/macro.h>
+#include <asm/system.h>
+#include <asm/arch/mp.h>
+
+.align 3
+.global secondary_boot_addr
+secondary_boot_addr:
+ .quad __secondary_boot_func
+
+.global secondary_boot_code_start
+secondary_boot_code_start:
+ .quad __secondary_boot_code_start
+
+.global secondary_boot_code_size
+secondary_boot_code_size:
+ .quad __secondary_boot_code_end - __secondary_boot_code_start
+
+ /* Using 64 bit alignment since the spin table is accessed as data */
+ .align 3
+ /* Secondary Boot Code starts here */
+__secondary_boot_code_start:
+__spin_table:
+ .space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
+
+ .align 2
+__secondary_boot_func:
+ /*
+ * MPIDR_EL1 Fields:
+ * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
+ * MPIDR[7:2] = AFF0_RES
+ * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
+ * MPIDR[23:16] = AFF2_CLUSTERID
+ * MPIDR[24] = MT
+ * MPIDR[29:25] = RES0
+ * MPIDR[30] = U
+ * MPIDR[31] = ME
+ * MPIDR[39:32] = AFF3
+ *
+ * Linear Processor ID (LPID) calculation from MPIDR_EL1:
+ * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
+ * until AFF2_CLUSTERID and AFF3 have non-zero values)
+ *
+ * LPID = MPIDR[15:8] | MPIDR[1:0]
+ */
+ mrs x0, mpidr_el1
+ ubfm x1, x0, #8, #15
+ ubfm x2, x0, #0, #1
+ orr x10, x2, x1, lsl #2 /* x10 has LPID */
+ ubfm x9, x0, #0, #15 /* x9 contains MPIDR[15:0] */
+ /*
+ * offset of the spin table element for this core from start of spin
+ * table (each elem is padded to 64 bytes)
+ */
+ lsl x1, x10, #6
+ adr x0, __spin_table
+ /* physical address of this cpus spin table element */
+ add x11, x1, x0
+
+ adr x0, __real_cntfrq
+ ldr x0, [x0]
+ msr cntfrq_el0, x0 /* set with real frequency */
+ str x9, [x11, #16] /* LPID */
+ mov x4, #1
+ str x4, [x11, #8] /* STATUS */
+ dsb sy
+
+1:
+ wfe
+ ldr x4, [x11]
+ cbz x4, 1b
+ mrs x1, sctlr_el2
+ tbz x1, #25, 2f
+ rev x4, x4 /* BE to LE conversion */
+2:
+ ldr x6, =ES_TO_AARCH64
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
+ adr x5, 3f
+ switch_el x7, 0f, _dead_loop, _dead_loop
+0: armv8_switch_to_el2_m x5, x6, x7
+#endif
+3:
+ ldr x7, [x11, #24] /* ARCH_COMP */
+ cbz x7, 4f
+ ldr x6, =ES_TO_AARCH32
+4:
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
+ switch_el x7, _dead_loop, 0f, _dead_loop
+0: armv8_switch_to_el1_m x4, x6, x7
+#else
+ switch_el x7, 0f, _dead_loop, _dead_loop
+0: armv8_switch_to_el2_m x4, x6, x7
+#endif
+
+_dead_loop:
+ wfe
+ b _dead_loop
+
+ /* Ensure that the literals used by the secondary boot code are
+ * assembled within it (this is required so that we can protect
+ * this area with a single memreserve region
+ */
+ .ltorg
+
+ /* 64 bit alignment for elements accessed as data */
+ .align 3
+ .global __real_cntfrq
+__real_cntfrq:
+ .quad COUNTER_FREQUENCY
+ /* Secondary Boot Code ends here */
+__secondary_boot_code_end:
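The LPID arithmetic at the top of __secondary_boot_func packs AFF1 and AFF0 into a dense index; restated in C (mpidr_to_lpid() and spin_table_offset() are illustrative names):

#include <stdint.h>

/* LPID = MPIDR[15:8] (cluster) << 2 | MPIDR[1:0] (core) */
static inline unsigned int mpidr_to_lpid(uint64_t mpidr)
{
	unsigned int cluster = (mpidr >> 8) & 0xff;	/* ubfm x1, x0, #8, #15 */
	unsigned int core = mpidr & 0x3;		/* ubfm x2, x0, #0, #1 */

	return (cluster << 2) | core;			/* orr x10, x2, x1, lsl #2 */
}

/* each element is padded to 64 bytes, hence "lsl x1, x10, #6" */
static inline uint64_t spin_table_offset(uint64_t mpidr)
{
	return (uint64_t)mpidr_to_lpid(mpidr) * 64;
}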