author | Alison Wang <b18965@freescale.com> | 2016-11-10 10:49:03 +0800
---|---|---
committer | York Sun <york.sun@nxp.com> | 2016-11-22 11:40:24 -0800
commit | ec6617c39741adc6c54952564579e32c3c09c66f (patch) |
tree | 2c65fb7ab999e39eb6acc0be8890eced30c950a1 /arch/arm/cpu/armv8 |
parent | 95e74a3df75bf01eaf69f5c28f9aa2db6568e901 (diff) |
armv8: Support loading 32-bit OS in AArch32 execution state
To support loading a 32-bit OS, the execution state will change from
AArch64 to AArch32 when jumping to the kernel.
The architecture information is obtained by checking the FIT image,
so U-Boot loads a 32-bit or a 64-bit OS automatically.
Signed-off-by: Ebony Zhu <ebony.zhu@nxp.com>
Signed-off-by: Alison Wang <alison.wang@nxp.com>
Signed-off-by: Chenhui Zhao <chenhui.zhao@nxp.com>
Reviewed-by: York Sun <york.sun@nxp.com>
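
For context on the mechanism described in the message: on an AArch64 build, IH_ARCH_DEFAULT is the 64-bit architecture value, so comparing the arch field read from the FIT image against it decides the target execution state. Below is a minimal C sketch of that decision, assuming a prototype in which, per the AArch64 procedure call standard, the fourth argument arrives in x3 (kernel entry point) and the fifth in x4 (execution-state flag), which is what the reworked armv8_switch_to_el2 in the diff consumes. The helper name jump_to_os and the zeroed leading arguments are illustrative, not the commit's actual bootm code:

    #include <common.h>
    #include <image.h>              /* IH_ARCH_DEFAULT */
    #include <asm/system.h>         /* ES_TO_AARCH32 / ES_TO_AARCH64 */

    /*
     * Assumed prototype: per AAPCS64 the 4th argument lands in x3
     * (kernel entry point) and the 5th in x4 (execution-state flag),
     * matching the register use of armv8_switch_to_el2 in the diff.
     */
    void armv8_switch_to_el2(u64 arg0, u64 arg1, u64 arg2,
                             u64 entry_point, u64 es_flag);

    /* Illustrative helper: os_arch is the IH_ARCH_* value read from FIT. */
    static void jump_to_os(u64 entry_point, u8 os_arch)
    {
            if (os_arch == IH_ARCH_DEFAULT)
                    /* 64-bit OS: drop to EL2, stay in AArch64 */
                    armv8_switch_to_el2(0, 0, 0, entry_point, ES_TO_AARCH64);
            else
                    /* 32-bit OS: drop to EL2 in AArch32 */
                    armv8_switch_to_el2(0, 0, 0, entry_point, ES_TO_AARCH32);
    }

Neither call returns: as the transition.S hunk below shows, the switch code ends by branching to the address in x3, either straight into the OS or into the EL1 switch path when CONFIG_ARMV8_SWITCH_TO_EL1 is set.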
Diffstat (limited to 'arch/arm/cpu/armv8')
-rw-r--r-- | arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S | 61
-rw-r--r-- | arch/arm/cpu/armv8/start.S | 8
-rw-r--r-- | arch/arm/cpu/armv8/transition.S | 23
3 files changed, 76 insertions, 16 deletions
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
index f7b49cb9fe..72f2c11baf 100644
--- a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
+++ b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
@@ -17,6 +17,7 @@
 #include <asm/arch-fsl-layerscape/immap_lsch3.h>
 #include <asm/arch-fsl-layerscape/soc.h>
 #endif
+#include <asm/u-boot.h>
 
 ENTRY(lowlevel_init)
 	mov	x29, lr			/* Save LR */
@@ -359,11 +360,6 @@ ENTRY(secondary_boot_func)
 	gic_wait_for_interrupt_m x0, w1
 #endif
 
-	bl	secondary_switch_to_el2
-#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
-	bl	secondary_switch_to_el1
-#endif
-
 slave_cpu:
 	wfe
 	ldr	x0, [x11]
@@ -376,19 +372,64 @@ slave_cpu:
 	tbz	x1, #25, cpu_is_le
 	rev	x0, x0			/* BE to LE conversion */
 cpu_is_le:
-	br	x0			/* branch to the given address */
+	ldr	x5, [x11, #24]
+	ldr	x6, =IH_ARCH_DEFAULT
+	cmp	x6, x5
+	b.eq	1f
+
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
+	adr	x3, secondary_switch_to_el1
+	ldr	x4, =ES_TO_AARCH64
+#else
+	ldr	x3, [x11]
+	ldr	x4, =ES_TO_AARCH32
+#endif
+	bl	secondary_switch_to_el2
+
+1:
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
+	adr	x3, secondary_switch_to_el1
+#else
+	ldr	x3, [x11]
+#endif
+	ldr	x4, =ES_TO_AARCH64
+	bl	secondary_switch_to_el2
+
 ENDPROC(secondary_boot_func)
 
 ENTRY(secondary_switch_to_el2)
-	switch_el x0, 1f, 0f, 0f
+	switch_el x5, 1f, 0f, 0f
 0:	ret
-1:	armv8_switch_to_el2_m x0
+1:	armv8_switch_to_el2_m x3, x4, x5
 ENDPROC(secondary_switch_to_el2)
 
 ENTRY(secondary_switch_to_el1)
-	switch_el x0, 0f, 1f, 0f
+	mrs	x0, mpidr_el1
+	ubfm	x1, x0, #8, #15
+	ubfm	x2, x0, #0, #1
+	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
+
+	lsl	x1, x10, #6
+	ldr	x0, =__spin_table
+	/* physical address of this cpus spin table element */
+	add	x11, x1, x0
+
+	ldr	x3, [x11]
+
+	ldr	x5, [x11, #24]
+	ldr	x6, =IH_ARCH_DEFAULT
+	cmp	x6, x5
+	b.eq	2f
+
+	ldr	x4, =ES_TO_AARCH32
+	bl	switch_to_el1
+
+2:	ldr	x4, =ES_TO_AARCH64
+
+switch_to_el1:
+	switch_el x5, 0f, 1f, 0f
 0:	ret
-1:	armv8_switch_to_el1_m x0, x1
+1:	armv8_switch_to_el1_m x3, x4, x5
 ENDPROC(secondary_switch_to_el1)
 
 /* Ensure that the literals used by the secondary boot code are
diff --git a/arch/arm/cpu/armv8/start.S b/arch/arm/cpu/armv8/start.S
index 19c771dba3..4f5f6d8020 100644
--- a/arch/arm/cpu/armv8/start.S
+++ b/arch/arm/cpu/armv8/start.S
@@ -251,9 +251,17 @@ WEAK(lowlevel_init)
 	/*
 	 * All slaves will enter EL2 and optionally EL1.
 	 */
+	adr	x3, lowlevel_in_el2
+	ldr	x4, =ES_TO_AARCH64
 	bl	armv8_switch_to_el2
+
+lowlevel_in_el2:
 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
+	adr	x3, lowlevel_in_el1
+	ldr	x4, =ES_TO_AARCH64
 	bl	armv8_switch_to_el1
+
+lowlevel_in_el1:
 #endif
 
 #endif /* CONFIG_ARMV8_MULTIENTRY */
diff --git a/arch/arm/cpu/armv8/transition.S b/arch/arm/cpu/armv8/transition.S
index 253a39bd11..bbccf2b395 100644
--- a/arch/arm/cpu/armv8/transition.S
+++ b/arch/arm/cpu/armv8/transition.S
@@ -11,13 +11,24 @@
 #include <asm/macro.h>
 
 ENTRY(armv8_switch_to_el2)
-	switch_el x0, 1f, 0f, 0f
-0:	ret
-1:	armv8_switch_to_el2_m x0
+	switch_el x5, 1f, 0f, 0f
+0:
+	/*
+	 * x3 is kernel entry point or switch_to_el1
+	 * if CONFIG_ARMV8_SWITCH_TO_EL1 is defined.
+	 * When running in EL2 now, jump to the
+	 * address saved in x3.
+	 */
+	br	x3
+1:	armv8_switch_to_el2_m x3, x4, x5
 ENDPROC(armv8_switch_to_el2)
 
 ENTRY(armv8_switch_to_el1)
-	switch_el x0, 0f, 1f, 0f
-0:	ret
-1:	armv8_switch_to_el1_m x0, x1
+	switch_el x5, 0f, 1f, 0f
+0:
+	/* x3 is kernel entry point. When running in EL1
+	 * now, jump to the address saved in x3.
+	 */
+	br	x3
+1:	armv8_switch_to_el1_m x3, x4, x5
 ENDPROC(armv8_switch_to_el1)
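
A note on the spin-table accesses in secondary_boot_func and secondary_switch_to_el1 above: each per-CPU element is 64 bytes (lsl x1, x10, #6), the jump target sits at offset 0 (ldr x3, [x11]) and the OS architecture at offset 24 (ldr x5, [x11, #24]). The following is a hypothetical C view of that layout; only the offsets and the stride come from the assembly, the field names are invented for illustration:

    #include <linux/types.h>

    /*
     * Hypothetical layout of one __spin_table element as consumed by
     * the secondary-core code above. Field names are illustrative;
     * the offsets and the 64-byte stride come from the assembly
     * (ldr x3, [x11]; ldr x5, [x11, #24]; lsl x1, x10, #6).
     */
    struct spin_table_entry {
            u64 entry_addr;         /* +0:  address the core branches to */
            u64 reserved[2];        /* +8, +16: not touched by this patch */
            u64 os_arch;            /* +24: IH_ARCH_* of the OS to boot */
            u64 pad[4];             /* pad element to the 64-byte stride */
    };

    _Static_assert(sizeof(struct spin_table_entry) == 64,
                   "stride must match 'lsl x1, x10, #6' above");

Each secondary core spins in the wfe loop until entry_addr becomes non-zero (ldr x0, [x11]; cbz ...), then uses os_arch, presumably written by the boot CPU before the secondaries are released, to pick between ES_TO_AARCH32 and ES_TO_AARCH64.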