From 3d3fe8b12d1973b207ee0406709ff521eec83bf7 Mon Sep 17 00:00:00 2001
From: Michael Walle
Date: Mon, 1 Jun 2020 21:53:26 +0200
Subject: armv8: layerscape: properly use CPU_RELEASE_ADDR

The generic armv8 code already has support to bring up the secondary
cores. Thus, don't hardcode the jump in the layerscape lowlevel_init to
the spin table code; instead just return early and let the common armv8
code handle the jump. This way we can actually use the CPU_RELEASE_ADDR
feature.

Signed-off-by: Michael Walle
[Rebased, Removed kontron_sl28.h change as file does not exist]
Signed-off-by: Priyanka Jain
---
 arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
index 2a8d592cc5..d75013eb9c 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
+++ b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
@@ -208,8 +208,13 @@ ENTRY(lowlevel_init)
 	branch_if_master x0, x1, 2f
 
 #if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
-	ldr	x0, =secondary_boot_func
-	blr	x0
+	/*
+	 * Formerly, here was a jump to secondary_boot_func, but we just
+	 * return early here and let the generic code in start.S handle
+	 * the jump to secondary_boot_func.
+	 */
+	mov	lr, x29			/* Restore LR */
+	ret
 #endif
 
 2:
@@ -421,6 +426,11 @@ ENDPROC(__asm_flush_l3_dcache)
 #endif /* CONFIG_SYS_FSL_HAS_CCN504 */
 
 #ifdef CONFIG_MP
+	.align 3
+	.global secondary_boot_addr
+secondary_boot_addr:
+	.quad secondary_boot_func
+
 	/* Keep literals not used by the secondary boot code outside it */
 	.ltorg
 
--
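
Note on the mechanism (not part of the patch): with this change a secondary
core simply returns from lowlevel_init, and the generic armv8 startup code
parks it until an entry point is published at CPU_RELEASE_ADDR. The snippet
below is a minimal sketch of that spin-table wait loop, assuming the usual
CPU_RELEASE_ADDR scheme; the label names are invented for illustration and
this is not a verbatim excerpt from U-Boot's start.S.

	.global secondary_spin_wait
secondary_spin_wait:
	ldr	x1, =CPU_RELEASE_ADDR	/* location where the entry point is published */
0:
	wfe				/* sleep until an event (sev) arrives */
	ldr	x0, [x1]		/* read the published entry point */
	cbz	x0, 0b			/* still zero: keep spinning */
	br	x0			/* jump to the released address */

Only the secondary cores take this path; the master core continues with the
normal boot flow, which is why returning early from lowlevel_init is enough
to hand the secondaries back to the common code.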