Diffstat (limited to 'arch/arm/cpu')
-rw-r--r-- | arch/arm/cpu/armv7/ls102xa/cpu.c        | 231
-rw-r--r-- | arch/arm/cpu/armv8/cache.S              |   6
-rw-r--r-- | arch/arm/cpu/armv8/cache_v8.c           |  18
-rw-r--r-- | arch/arm/cpu/armv8/fsl-lsch3/cpu.c      |  62
-rw-r--r-- | arch/arm/cpu/armv8/fsl-lsch3/fdt.c      |  28
-rw-r--r-- | arch/arm/cpu/armv8/fsl-lsch3/lowlevel.S | 132
-rw-r--r-- | arch/arm/cpu/armv8/fsl-lsch3/mp.c       |   8
-rw-r--r-- | arch/arm/cpu/armv8/fsl-lsch3/mp.h       |   1
-rw-r--r-- | arch/arm/cpu/armv8/fsl-lsch3/speed.c    |  16
9 files changed, 417 insertions, 85 deletions
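A note on the first hunk below: the ls102xa change replaces the generic icache_enable()/dcache_enable() path in enable_caches() with a local LPAE setup. The table the patch calls level0_table holds four 1GB entries indexed by VA[31:30]; the level1_table it points to holds 512 2MB block entries indexed by VA[29:21], which is how the two 192MB PCIe windows are remapped to 40-bit physical addresses. The following host-side sketch is illustration only (not part of the patch); the sample address is simply one that falls inside the PCIe1 window:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* A VA inside the 192MB PCIe1 window that mmu_setup() maps below */
		uint32_t va = 0x24000000;

		unsigned int l1_idx = va >> 30;           /* entry in level0_table (1GB granules) */
		unsigned int l2_idx = (va >> 21) & 0x1ff; /* entry in level1_table (2MB granules) */

		printf("VA 0x%08x -> level0 index %u, level1 index %u\n",
		       (unsigned int)va, l1_idx, l2_idx);
		return 0;
	}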
diff --git a/arch/arm/cpu/armv7/ls102xa/cpu.c b/arch/arm/cpu/armv7/ls102xa/cpu.c index ce2d92f5a6..1a640bbb9c 100644 --- a/arch/arm/cpu/armv7/ls102xa/cpu.c +++ b/arch/arm/cpu/armv7/ls102xa/cpu.c @@ -8,14 +8,214 @@ #include <asm/arch/clock.h> #include <asm/io.h> #include <asm/arch/immap_ls102xa.h> +#include <asm/cache.h> +#include <asm/system.h> #include <tsec.h> #include <netdev.h> #include <fsl_esdhc.h> #include "fsl_epu.h" +#define DCSR_RCPM2_BLOCK_OFFSET 0x223000 +#define DCSR_RCPM2_CPMFSMCR0 0x400 +#define DCSR_RCPM2_CPMFSMSR0 0x404 +#define DCSR_RCPM2_CPMFSMCR1 0x414 +#define DCSR_RCPM2_CPMFSMSR1 0x418 +#define CPMFSMSR_FSM_STATE_MASK 0x7f + DECLARE_GLOBAL_DATA_PTR; +#ifndef CONFIG_SYS_DCACHE_OFF + +/* + * Bit[1] of the descriptor indicates the descriptor type, + * and bit[0] indicates whether the descriptor is valid. + */ +#define PMD_TYPE_TABLE 0x3 +#define PMD_TYPE_SECT 0x1 + +/* AttrIndx[2:0] */ +#define PMD_ATTRINDX(t) ((t) << 2) + +/* Section */ +#define PMD_SECT_AF (1 << 10) + +#define BLOCK_SIZE_L1 (1UL << 30) +#define BLOCK_SIZE_L2 (1UL << 21) + +/* TTBCR flags */ +#define TTBCR_EAE (1 << 31) +#define TTBCR_T0SZ(x) ((x) << 0) +#define TTBCR_T1SZ(x) ((x) << 16) +#define TTBCR_USING_TTBR0 (TTBCR_T0SZ(0) | TTBCR_T1SZ(0)) +#define TTBCR_IRGN0_NC (0 << 8) +#define TTBCR_IRGN0_WBWA (1 << 8) +#define TTBCR_IRGN0_WT (2 << 8) +#define TTBCR_IRGN0_WBNWA (3 << 8) +#define TTBCR_IRGN0_MASK (3 << 8) +#define TTBCR_ORGN0_NC (0 << 10) +#define TTBCR_ORGN0_WBWA (1 << 10) +#define TTBCR_ORGN0_WT (2 << 10) +#define TTBCR_ORGN0_WBNWA (3 << 10) +#define TTBCR_ORGN0_MASK (3 << 10) +#define TTBCR_SHARED_NON (0 << 12) +#define TTBCR_SHARED_OUTER (2 << 12) +#define TTBCR_SHARED_INNER (3 << 12) +#define TTBCR_EPD0 (0 << 7) +#define TTBCR (TTBCR_SHARED_NON | \ + TTBCR_ORGN0_NC | \ + TTBCR_IRGN0_NC | \ + TTBCR_USING_TTBR0 | \ + TTBCR_EAE) + +/* + * Memory region attributes for LPAE (defined in pgtable): + * + * n = AttrIndx[2:0] + * + * n MAIR + * UNCACHED 000 00000000 + * BUFFERABLE 001 01000100 + * DEV_WC 001 01000100 + * WRITETHROUGH 010 10101010 + * WRITEBACK 011 11101110 + * DEV_CACHED 011 11101110 + * DEV_SHARED 100 00000100 + * DEV_NONSHARED 100 00000100 + * unused 101 + * unused 110 + * WRITEALLOC 111 11111111 + */ +#define MT_MAIR0 0xeeaa4400 +#define MT_MAIR1 0xff000004 +#define MT_STRONLY_ORDER 0 +#define MT_NORMAL_NC 1 +#define MT_DEVICE_MEM 4 +#define MT_NORMAL 7 + +/* The phy_addr must be aligned to 4KB */ +static inline void set_pgtable(u32 *page_table, u32 index, u32 phy_addr) +{ + u32 value = phy_addr | PMD_TYPE_TABLE; + + page_table[2 * index] = value; + page_table[2 * index + 1] = 0; +} + +/* The phy_addr must be aligned to 4KB */ +static inline void set_pgsection(u32 *page_table, u32 index, u64 phy_addr, + u32 memory_type) +{ + u64 value; + + value = phy_addr | PMD_TYPE_SECT | PMD_SECT_AF; + value |= PMD_ATTRINDX(memory_type); + page_table[2 * index] = value & 0xFFFFFFFF; + page_table[2 * index + 1] = (value >> 32) & 0xFFFFFFFF; +} + +/* + * Start MMU after DDR is available, we create MMU table in DRAM. + * The base address of TTLB is gd->arch.tlb_addr. We use two + * levels of translation tables here to cover 40-bit address space. + * + * The TTLBs are located at PHY 2G~4G. 
+ * + * VA mapping: + * + * ------- <---- 0GB + * | | + * | | + * |-------| <---- 0x24000000 + * |///////| ===> 192MB VA map for PCIe1 with offset 0x40_0000_0000 + * |-------| <---- 0x300000000 + * | | + * |-------| <---- 0x34000000 + * |///////| ===> 192MB VA map for PCIe2 with offset 0x48_0000_0000 + * |-------| <---- 0x40000000 + * | | + * |-------| <---- 0x80000000 DDR0 space start + * |\\\\\\\| + *.|\\\\\\\| ===> 2GB VA map for 2GB DDR0 Memory space + * |\\\\\\\| + * ------- <---- 4GB DDR0 space end + */ +static void mmu_setup(void) +{ + u32 *level0_table = (u32 *)gd->arch.tlb_addr; + u32 *level1_table = (u32 *)(gd->arch.tlb_addr + 0x1000); + u64 va_start = 0; + u32 reg; + int i; + + /* Level 0 Table 2-3 are used to map DDR */ + set_pgsection(level0_table, 3, 3 * BLOCK_SIZE_L1, MT_NORMAL); + set_pgsection(level0_table, 2, 2 * BLOCK_SIZE_L1, MT_NORMAL); + /* Level 0 Table 1 is used to map device */ + set_pgsection(level0_table, 1, 1 * BLOCK_SIZE_L1, MT_DEVICE_MEM); + /* Level 0 Table 0 is used to map device including PCIe MEM */ + set_pgtable(level0_table, 0, (u32)level1_table); + + /* Level 1 has 512 entries */ + for (i = 0; i < 512; i++) { + /* Mapping for PCIe 1 */ + if (va_start >= CONFIG_SYS_PCIE1_VIRT_ADDR && + va_start < (CONFIG_SYS_PCIE1_VIRT_ADDR + + CONFIG_SYS_PCIE_MMAP_SIZE)) + set_pgsection(level1_table, i, + CONFIG_SYS_PCIE1_PHYS_BASE + va_start, + MT_DEVICE_MEM); + /* Mapping for PCIe 2 */ + else if (va_start >= CONFIG_SYS_PCIE2_VIRT_ADDR && + va_start < (CONFIG_SYS_PCIE2_VIRT_ADDR + + CONFIG_SYS_PCIE_MMAP_SIZE)) + set_pgsection(level1_table, i, + CONFIG_SYS_PCIE2_PHYS_BASE + va_start, + MT_DEVICE_MEM); + else + set_pgsection(level1_table, i, + va_start, + MT_DEVICE_MEM); + va_start += BLOCK_SIZE_L2; + } + + asm volatile("dsb sy;isb"); + asm volatile("mcr p15, 0, %0, c2, c0, 2" /* Write RT to TTBCR */ + : : "r" (TTBCR) : "memory"); + asm volatile("mcrr p15, 0, %0, %1, c2" /* TTBR 0 */ + : : "r" ((u32)level0_table), "r" (0) : "memory"); + asm volatile("mcr p15, 0, %0, c10, c2, 0" /* write MAIR 0 */ + : : "r" (MT_MAIR0) : "memory"); + asm volatile("mcr p15, 0, %0, c10, c2, 1" /* write MAIR 1 */ + : : "r" (MT_MAIR1) : "memory"); + + /* Set the access control to all-supervisor */ + asm volatile("mcr p15, 0, %0, c3, c0, 0" + : : "r" (~0)); + + /* Enable the mmu */ + reg = get_cr(); + set_cr(reg | CR_M); +} + +/* + * This function is called from lib/board.c. It recreates MMU + * table in main memory. MMU and i/d-cache are enabled here. + */ +void enable_caches(void) +{ + /* Invalidate all TLB */ + mmu_page_table_flush(gd->arch.tlb_addr, + gd->arch.tlb_addr + gd->arch.tlb_size); + /* Set up and enable mmu */ + mmu_setup(); + + /* Invalidate & Enable d-cache */ + invalidate_dcache_all(); + set_cr(get_cr() | CR_C); +} +#endif /* #ifndef CONFIG_SYS_DCACHE_OFF */ + #if defined(CONFIG_DISPLAY_CPUINFO) int print_cpuinfo(void) { @@ -78,16 +278,6 @@ int print_cpuinfo(void) } #endif -void enable_caches(void) -{ -#ifndef CONFIG_SYS_ICACHE_OFF - icache_enable(); -#endif -#ifndef CONFIG_SYS_DCACHE_OFF - dcache_enable(); -#endif -} - #ifdef CONFIG_FSL_ESDHC int cpu_mmc_init(bd_t *bis) { @@ -107,6 +297,27 @@ int cpu_eth_init(bd_t *bis) int arch_cpu_init(void) { void *epu_base = (void *)(CONFIG_SYS_DCSRBAR + EPU_BLOCK_OFFSET); + void *rcpm2_base = + (void *)(CONFIG_SYS_DCSRBAR + DCSR_RCPM2_BLOCK_OFFSET); + u32 state; + + /* + * The RCPM FSM state may not be reset after power-on. + * So, reset them. 
+ */ + state = in_be32(rcpm2_base + DCSR_RCPM2_CPMFSMSR0) & + CPMFSMSR_FSM_STATE_MASK; + if (state != 0) { + out_be32(rcpm2_base + DCSR_RCPM2_CPMFSMCR0, 0x80); + out_be32(rcpm2_base + DCSR_RCPM2_CPMFSMCR0, 0x0); + } + + state = in_be32(rcpm2_base + DCSR_RCPM2_CPMFSMSR1) & + CPMFSMSR_FSM_STATE_MASK; + if (state != 0) { + out_be32(rcpm2_base + DCSR_RCPM2_CPMFSMCR1, 0x80); + out_be32(rcpm2_base + DCSR_RCPM2_CPMFSMCR1, 0x0); + } /* * After wakeup from deep sleep, Clear EPU registers diff --git a/arch/arm/cpu/armv8/cache.S b/arch/arm/cpu/armv8/cache.S index 9c6e8243bb..fa447bce16 100644 --- a/arch/arm/cpu/armv8/cache.S +++ b/arch/arm/cpu/armv8/cache.S @@ -155,3 +155,9 @@ ENTRY(__asm_invalidate_icache_all) isb sy ret ENDPROC(__asm_invalidate_icache_all) + +ENTRY(__asm_flush_l3_cache) + mov x0, #0 /* return status as success */ + ret +ENDPROC(__asm_flush_l3_cache) + .weak __asm_flush_l3_cache diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c index 9dbcdf22af..c5ec5297cd 100644 --- a/arch/arm/cpu/armv8/cache_v8.c +++ b/arch/arm/cpu/armv8/cache_v8.c @@ -73,17 +73,21 @@ void invalidate_dcache_all(void) __asm_invalidate_dcache_all(); } -void __weak flush_l3_cache(void) -{ -} - /* - * Performs a clean & invalidation of the entire data cache at all levels + * Performs a clean & invalidation of the entire data cache at all levels. + * This function needs to be inline to avoid using stack. + * __asm_flush_l3_cache return status of timeout */ -void flush_dcache_all(void) +inline void flush_dcache_all(void) { + int ret; + __asm_flush_dcache_all(); - flush_l3_cache(); + ret = __asm_flush_l3_cache(); + if (ret) + debug("flushing dcache returns 0x%x\n", ret); + else + debug("flushing dcache successfully.\n"); } /* diff --git a/arch/arm/cpu/armv8/fsl-lsch3/cpu.c b/arch/arm/cpu/armv8/fsl-lsch3/cpu.c index 47b947f44f..49974878b9 100644 --- a/arch/arm/cpu/armv8/fsl-lsch3/cpu.c +++ b/arch/arm/cpu/armv8/fsl-lsch3/cpu.c @@ -10,10 +10,10 @@ #include <asm/armv8/mmu.h> #include <asm/io.h> #include <asm/arch-fsl-lsch3/immap_lsch3.h> +#include <fsl-mc/fsl_mc.h> #include "cpu.h" #include "mp.h" #include "speed.h" -#include <fsl_mc.h> DECLARE_GLOBAL_DATA_PTR; @@ -150,7 +150,7 @@ static inline void final_mmu_setup(void) * set level 2 table 0 to cache-inhibit, covering 0 to 1GB */ section_l1t0 = 0; - section_l1t1 = BLOCK_SIZE_L0; + section_l1t1 = BLOCK_SIZE_L0 | PMD_SECT_OUTER_SHARE; section_l2 = 0; for (i = 0; i < 512; i++) { set_pgtable_section(level1_table_0, i, section_l1t0, @@ -168,10 +168,10 @@ static inline void final_mmu_setup(void) (u64)level2_table_0 | PMD_TYPE_TABLE; level1_table_0[2] = 0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT | - PMD_ATTRINDX(MT_NORMAL); + PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL); level1_table_0[3] = 0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT | - PMD_ATTRINDX(MT_NORMAL); + PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL); /* Rewrite table to enable cache */ set_pgtable_section(level2_table_0, @@ -243,59 +243,6 @@ int arch_cpu_init(void) } /* - * flush_l3_cache - * Dickens L3 cache can be flushed by transitioning from FAM to SFONLY power - * state, by writing to HP-F P-state request register. - * Fixme: This function should moved to a common file if other SoCs also use - * the same Dickens. 
- */ -#define HNF0_PSTATE_REQ 0x04200010 -#define HNF1_PSTATE_REQ 0x04210010 -#define HNF2_PSTATE_REQ 0x04220010 -#define HNF3_PSTATE_REQ 0x04230010 -#define HNF4_PSTATE_REQ 0x04240010 -#define HNF5_PSTATE_REQ 0x04250010 -#define HNF6_PSTATE_REQ 0x04260010 -#define HNF7_PSTATE_REQ 0x04270010 -#define HNFPSTAT_MASK (0xFFFFFFFFFFFFFFFC) -#define HNFPSTAT_FAM 0x3 -#define HNFPSTAT_SFONLY 0x01 - -static void hnf_pstate_req(u64 *ptr, u64 state) -{ - int timeout = 1000; - out_le64(ptr, (in_le64(ptr) & HNFPSTAT_MASK) | (state & 0x3)); - ptr++; - /* checking if the transition is completed */ - while (timeout > 0) { - if (((in_le64(ptr) & 0x0c) >> 2) == (state & 0x3)) - break; - udelay(100); - timeout--; - } -} - -void flush_l3_cache(void) -{ - hnf_pstate_req((u64 *)HNF0_PSTATE_REQ, HNFPSTAT_SFONLY); - hnf_pstate_req((u64 *)HNF1_PSTATE_REQ, HNFPSTAT_SFONLY); - hnf_pstate_req((u64 *)HNF2_PSTATE_REQ, HNFPSTAT_SFONLY); - hnf_pstate_req((u64 *)HNF3_PSTATE_REQ, HNFPSTAT_SFONLY); - hnf_pstate_req((u64 *)HNF4_PSTATE_REQ, HNFPSTAT_SFONLY); - hnf_pstate_req((u64 *)HNF5_PSTATE_REQ, HNFPSTAT_SFONLY); - hnf_pstate_req((u64 *)HNF6_PSTATE_REQ, HNFPSTAT_SFONLY); - hnf_pstate_req((u64 *)HNF7_PSTATE_REQ, HNFPSTAT_SFONLY); - hnf_pstate_req((u64 *)HNF0_PSTATE_REQ, HNFPSTAT_FAM); - hnf_pstate_req((u64 *)HNF1_PSTATE_REQ, HNFPSTAT_FAM); - hnf_pstate_req((u64 *)HNF2_PSTATE_REQ, HNFPSTAT_FAM); - hnf_pstate_req((u64 *)HNF3_PSTATE_REQ, HNFPSTAT_FAM); - hnf_pstate_req((u64 *)HNF4_PSTATE_REQ, HNFPSTAT_FAM); - hnf_pstate_req((u64 *)HNF5_PSTATE_REQ, HNFPSTAT_FAM); - hnf_pstate_req((u64 *)HNF6_PSTATE_REQ, HNFPSTAT_FAM); - hnf_pstate_req((u64 *)HNF7_PSTATE_REQ, HNFPSTAT_FAM); -} - -/* * This function is called from lib/board.c. * It recreates MMU table in main memory. MMU and d-cache are enabled earlier. * There is no need to disable d-cache for this operation. 
@@ -420,6 +367,7 @@ int print_cpuinfo(void) printf("\n Bus: %-4s MHz ", strmhz(buf, sysinfo.freq_systembus)); printf("DDR: %-4s MHz", strmhz(buf, sysinfo.freq_ddrbus)); + printf(" DP-DDR: %-4s MHz", strmhz(buf, sysinfo.freq_ddrbus2)); puts("\n"); return 0; diff --git a/arch/arm/cpu/armv8/fsl-lsch3/fdt.c b/arch/arm/cpu/armv8/fsl-lsch3/fdt.c index e392eb9149..7eb9b6aa4b 100644 --- a/arch/arm/cpu/armv8/fsl-lsch3/fdt.c +++ b/arch/arm/cpu/armv8/fsl-lsch3/fdt.c @@ -16,7 +16,7 @@ void ft_fixup_cpu(void *blob) __maybe_unused u64 spin_tbl_addr = (u64)get_spin_tbl_addr(); fdt32_t *reg; int addr_cells; - u64 val; + u64 val, core_id; size_t *boot_code_size = &(__secondary_boot_code_size); off = fdt_path_offset(blob, "/cpus"); @@ -29,15 +29,20 @@ void ft_fixup_cpu(void *blob) off = fdt_node_offset_by_prop_value(blob, -1, "device_type", "cpu", 4); while (off != -FDT_ERR_NOTFOUND) { reg = (fdt32_t *)fdt_getprop(blob, off, "reg", 0); + core_id = of_read_number(reg, addr_cells); if (reg) { - val = spin_tbl_addr; - val += id_to_core(of_read_number(reg, addr_cells)) - * SPIN_TABLE_ELEM_SIZE; - val = cpu_to_fdt64(val); - fdt_setprop_string(blob, off, "enable-method", - "spin-table"); - fdt_setprop(blob, off, "cpu-release-addr", - &val, sizeof(val)); + if (core_id == 0 || (is_core_online(core_id))) { + val = spin_tbl_addr; + val += id_to_core(core_id) * + SPIN_TABLE_ELEM_SIZE; + val = cpu_to_fdt64(val); + fdt_setprop_string(blob, off, "enable-method", + "spin-table"); + fdt_setprop(blob, off, "cpu-release-addr", + &val, sizeof(val)); + } else { + debug("skipping offline core\n"); + } } else { puts("Warning: found cpu node without reg property\n"); } @@ -55,4 +60,9 @@ void ft_cpu_setup(void *blob, bd_t *bd) #ifdef CONFIG_MP ft_fixup_cpu(blob); #endif + +#ifdef CONFIG_SYS_NS16550 + do_fixup_by_compat_u32(blob, "ns16550", + "clock-frequency", CONFIG_SYS_NS16550_CLK, 1); +#endif } diff --git a/arch/arm/cpu/armv8/fsl-lsch3/lowlevel.S b/arch/arm/cpu/armv8/fsl-lsch3/lowlevel.S index 2a88aab283..886576ef99 100644 --- a/arch/arm/cpu/armv8/fsl-lsch3/lowlevel.S +++ b/arch/arm/cpu/armv8/fsl-lsch3/lowlevel.S @@ -42,10 +42,142 @@ ENTRY(lowlevel_init) ldr x0, =secondary_boot_func blr x0 2: + +#ifdef CONFIG_FSL_TZPC_BP147 + /* Set Non Secure access for all devices protected via TZPC */ + ldr x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */ + orr w0, w0, #1 << 3 /* DCFG_RESET is accessible from NS world */ + str w0, [x1] + + isb + dsb sy +#endif + +#ifdef CONFIG_FSL_TZASC_400 + /* Set TZASC so that: + * a. We use only Region0 whose global secure write/read is EN + * b. We use only Region0 whose NSAID write/read is EN + * + * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just + * placeholders. 
+ */ + ldr x1, =TZASC_GATE_KEEPER(0) + ldr x0, [x1] /* Filter 0 Gate Keeper Register */ + orr x0, x0, #1 << 0 /* Set open_request for Filter 0 */ + str x0, [x1] + + ldr x1, =TZASC_GATE_KEEPER(1) + ldr x0, [x1] /* Filter 0 Gate Keeper Register */ + orr x0, x0, #1 << 0 /* Set open_request for Filter 0 */ + str x0, [x1] + + ldr x1, =TZASC_REGION_ATTRIBUTES_0(0) + ldr x0, [x1] /* Region-0 Attributes Register */ + orr x0, x0, #1 << 31 /* Set Sec global write en, Bit[31] */ + orr x0, x0, #1 << 30 /* Set Sec global read en, Bit[30] */ + str x0, [x1] + + ldr x1, =TZASC_REGION_ATTRIBUTES_0(1) + ldr x0, [x1] /* Region-1 Attributes Register */ + orr x0, x0, #1 << 31 /* Set Sec global write en, Bit[31] */ + orr x0, x0, #1 << 30 /* Set Sec global read en, Bit[30] */ + str x0, [x1] + + ldr x1, =TZASC_REGION_ID_ACCESS_0(0) + ldr w0, [x1] /* Region-0 Access Register */ + mov w0, #0xFFFFFFFF /* Set nsaid_wr_en and nsaid_rd_en */ + str w0, [x1] + + ldr x1, =TZASC_REGION_ID_ACCESS_0(1) + ldr w0, [x1] /* Region-1 Attributes Register */ + mov w0, #0xFFFFFFFF /* Set nsaid_wr_en and nsaid_rd_en */ + str w0, [x1] + + isb + dsb sy +#endif mov lr, x29 /* Restore LR */ ret ENDPROC(lowlevel_init) +hnf_pstate_poll: + /* x0 has the desired status, return 0 for success, 1 for timeout + * clobber x1, x2, x3, x4, x6, x7 + */ + mov x1, x0 + mov x7, #0 /* flag for timeout */ + mrs x3, cntpct_el0 /* read timer */ + add x3, x3, #1200 /* timeout after 100 microseconds */ + mov x0, #0x18 + movk x0, #0x420, lsl #16 /* HNF0_PSTATE_STATUS */ + mov w6, #8 /* HN-F node count */ +1: + ldr x2, [x0] + cmp x2, x1 /* check status */ + b.eq 2f + mrs x4, cntpct_el0 + cmp x4, x3 + b.ls 1b + mov x7, #1 /* timeout */ + b 3f +2: + add x0, x0, #0x10000 /* move to next node */ + subs w6, w6, #1 + cbnz w6, 1b +3: + mov x0, x7 + ret + +hnf_set_pstate: + /* x0 has the desired state, clobber x1, x2, x6 */ + mov x1, x0 + /* power state to SFONLY */ + mov w6, #8 /* HN-F node count */ + mov x0, #0x10 + movk x0, #0x420, lsl #16 /* HNF0_PSTATE_REQ */ +1: /* set pstate to sfonly */ + ldr x2, [x0] + and x2, x2, #0xfffffffffffffffc /* & HNFPSTAT_MASK */ + orr x2, x2, x1 + str x2, [x0] + add x0, x0, #0x10000 /* move to next node */ + subs w6, w6, #1 + cbnz w6, 1b + + ret + +ENTRY(__asm_flush_l3_cache) + /* + * Return status in x0 + * success 0 + * tmeout 1 for setting SFONLY, 2 for FAM, 3 for both + */ + mov x29, lr + mov x8, #0 + + dsb sy + mov x0, #0x1 /* HNFPSTAT_SFONLY */ + bl hnf_set_pstate + + mov x0, #0x4 /* SFONLY status */ + bl hnf_pstate_poll + cbz x0, 1f + mov x8, #1 /* timeout */ +1: + dsb sy + mov x0, #0x3 /* HNFPSTAT_FAM */ + bl hnf_set_pstate + + mov x0, #0xc /* FAM status */ + bl hnf_pstate_poll + cbz x0, 1f + add x8, x8, #0x2 +1: + mov x0, x8 + mov lr, x29 + ret +ENDPROC(__asm_flush_l3_cache) + /* Keep literals not used by the secondary boot code outside it */ .ltorg diff --git a/arch/arm/cpu/armv8/fsl-lsch3/mp.c b/arch/arm/cpu/armv8/fsl-lsch3/mp.c index 94998bf37b..ce9c0c1bdb 100644 --- a/arch/arm/cpu/armv8/fsl-lsch3/mp.c +++ b/arch/arm/cpu/armv8/fsl-lsch3/mp.c @@ -83,6 +83,14 @@ int is_core_valid(unsigned int core) return !!((1 << core) & cpu_mask()); } +int is_core_online(u64 cpu_id) +{ + u64 *table; + int pos = id_to_core(cpu_id); + table = (u64 *)get_spin_tbl_addr() + pos * WORDS_PER_SPIN_TABLE_ENTRY; + return table[SPIN_TABLE_ELEM_STATUS_IDX] == 1; +} + int cpu_reset(int nr) { puts("Feature is not implemented.\n"); diff --git a/arch/arm/cpu/armv8/fsl-lsch3/mp.h b/arch/arm/cpu/armv8/fsl-lsch3/mp.h index 06ac0bcf36..66144d6101 100644 --- 
a/arch/arm/cpu/armv8/fsl-lsch3/mp.h +++ b/arch/arm/cpu/armv8/fsl-lsch3/mp.h @@ -32,5 +32,6 @@ int fsl_lsch3_wake_seconday_cores(void); void *get_spin_tbl_addr(void); phys_addr_t determine_mp_bootpg(void); void secondary_boot_func(void); +int is_core_online(u64 cpu_id); #endif #endif /* _FSL_CH3_MP_H */ diff --git a/arch/arm/cpu/armv8/fsl-lsch3/speed.c b/arch/arm/cpu/armv8/fsl-lsch3/speed.c index dc4a34bce5..72cd999c5f 100644 --- a/arch/arm/cpu/armv8/fsl-lsch3/speed.c +++ b/arch/arm/cpu/armv8/fsl-lsch3/speed.c @@ -77,8 +77,10 @@ void get_sys_info(struct sys_info *sys_info) sys_info->freq_systembus = sysclk; #ifdef CONFIG_DDR_CLK_FREQ sys_info->freq_ddrbus = CONFIG_DDR_CLK_FREQ; + sys_info->freq_ddrbus2 = CONFIG_DDR_CLK_FREQ; #else sys_info->freq_ddrbus = sysclk; + sys_info->freq_ddrbus2 = sysclk; #endif sys_info->freq_systembus *= (in_le32(&gur->rcwsr[0]) >> @@ -87,6 +89,9 @@ void get_sys_info(struct sys_info *sys_info) sys_info->freq_ddrbus *= (in_le32(&gur->rcwsr[0]) >> FSL_CHASSIS3_RCWSR0_MEM_PLL_RAT_SHIFT) & FSL_CHASSIS3_RCWSR0_MEM_PLL_RAT_MASK; + sys_info->freq_ddrbus2 *= (in_le32(&gur->rcwsr[0]) >> + FSL_CHASSIS3_RCWSR0_MEM2_PLL_RAT_SHIFT) & + FSL_CHASSIS3_RCWSR0_MEM2_PLL_RAT_MASK; for (i = 0; i < CONFIG_SYS_FSL_NUM_CC_PLLS; i++) { /* @@ -129,7 +134,7 @@ int get_clocks(void) gd->cpu_clk = sys_info.freq_processor[0]; gd->bus_clk = sys_info.freq_systembus; gd->mem_clk = sys_info.freq_ddrbus; - + gd->arch.mem2_clk = sys_info.freq_ddrbus2; #if defined(CONFIG_FSL_ESDHC) gd->arch.sdhc_clk = gd->bus_clk / 2; #endif /* defined(CONFIG_FSL_ESDHC) */ @@ -156,11 +161,18 @@ ulong get_bus_freq(ulong dummy) * get_ddr_freq * return ddr bus freq in Hz *********************************************/ -ulong get_ddr_freq(ulong dummy) +ulong get_ddr_freq(ulong ctrl_num) { if (!gd->mem_clk) get_clocks(); + /* + * DDR controller 0 & 1 are on memory complex 0 + * DDR controler 2 is on memory complext 1 + */ + if (ctrl_num >= 2) + return gd->arch.mem2_clk; + return gd->mem_clk; } |
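For readers following the new __asm_flush_l3_cache routine in lowlevel.S: it replaces the C flush_l3_cache() removed from fsl-lsch3/cpu.c, requesting the SFONLY p-state on the eight HN-F nodes, polling the status field, and then returning the nodes to FAM. A rough C equivalent, mirroring the removed hnf_pstate_req() helper, is sketched below; this is illustration only, not the patch's implementation. It assumes U-Boot's in_le64()/out_le64() accessors and udelay(); hnf_set_and_wait(), HNF_NODE_STRIDE, HNF_NODE_COUNT and the retry count are names and values introduced here for the sketch, while the base address and bit layout come from the HNFx_PSTATE_REQ defines removed from cpu.c.

	#include <common.h>
	#include <asm/io.h>

	#define HNF0_PSTATE_REQ		0x04200010ULL	/* status reg sits 8 bytes above */
	#define HNF_NODE_STRIDE		0x10000
	#define HNF_NODE_COUNT		8
	#define HNFPSTAT_SFONLY		0x1
	#define HNFPSTAT_FAM		0x3

	static int hnf_set_and_wait(u64 state)
	{
		int node, timeout;

		for (node = 0; node < HNF_NODE_COUNT; node++) {
			u64 *req = (u64 *)(HNF0_PSTATE_REQ + node * HNF_NODE_STRIDE);
			u64 *status = req + 1;		/* HNFx_PSTATE_STATUS */

			/* Request the new p-state, preserving the other bits */
			out_le64(req, (in_le64(req) & ~0x3ULL) | state);

			/* Wait for the status field (bits [3:2]) to reflect it */
			for (timeout = 1000; timeout > 0; timeout--) {
				if (((in_le64(status) >> 2) & 0x3) == state)
					break;
				udelay(1);
			}
			if (!timeout)
				return -1;	/* handshake timed out on this node */
		}
		return 0;
	}

	/* Flush the L3 by bouncing the HN-Fs through SFONLY and back to FAM */
	int l3_flush_sketch(void)
	{
		if (hnf_set_and_wait(HNFPSTAT_SFONLY))
			return 1;
		return hnf_set_and_wait(HNFPSTAT_FAM) ? 2 : 0;
	}

The assembly version performs the same handshake but keeps everything in registers and reports timeouts through the return value, since it has to run while the stack may not be usable; that is also why flush_dcache_all() in cache_v8.c is marked inline in this series.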