/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch-fsl-lsch3/immap_lsch3.h>
#include <fsl_debug_server.h>
#include <fsl-mc/fsl_mc.h>
#include "cpu.h"
#include "mp.h"
#include "speed.h"

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start MMU before DDR is available, we create the MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40 bits of physical address, T0SZ=24
 *   Level 0 IA[39], table address @0
 *   Level 1 IA[38:30], table address @0x1000, 0x2000
 *   Level 2 IA[29:21], table address @0x3000, 0x4000
 * Addresses above 0x5000 are free for other purposes.
 */

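/*
 * Worked example (hypothetical address, arithmetic only): a virtual
 * address of 0x8040000000 has IA[39] = 1, so level0_table[1] selects
 * level 1 table 1; IA[38:30] = 1 then selects the 1GB block entry
 * covering 512GB + 1GB. The five 4KB tables occupy OCRAM offsets
 * 0x0-0x4fff, which is why 0x5000 and above remains free.
 */
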
#define SECTION_SHIFT_L0	39UL
#define SECTION_SHIFT_L1	30UL
#define SECTION_SHIFT_L2	21UL
#define BLOCK_SIZE_L0		0x8000000000UL
#define BLOCK_SIZE_L1		(1 << SECTION_SHIFT_L1)
#define BLOCK_SIZE_L2		(1 << SECTION_SHIFT_L2)
#define CONFIG_SYS_IFC_BASE	0x30000000
#define CONFIG_SYS_IFC_SIZE	0x10000000
#define CONFIG_SYS_IFC_BASE2	0x500000000
#define CONFIG_SYS_IFC_SIZE2	0x100000000
#define TCR_EL2_PS_40BIT	(2 << 16)
#define LSCH3_VA_BITS		(40)
#define LSCH3_TCR	(TCR_TG0_4K		| \
			TCR_EL2_PS_40BIT	| \
			TCR_SHARED_NON		| \
			TCR_ORGN_NC		| \
			TCR_IRGN_NC		| \
			TCR_T0SZ(LSCH3_VA_BITS))

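/*
 * Note (assuming U-Boot's TCR_* encodings from asm/armv8/mmu.h):
 * TCR_T0SZ(40) programs T0SZ = 64 - 40 = 24, giving the 2^40-byte VA
 * space described above; TCR_TG0_4K selects the 4KB granule; and
 * TCR_EL2_PS_40BIT (2 << 16) sets a 40-bit physical address size in
 * TCR_EL2/EL3. Walks are non-shareable and non-cacheable because the
 * early tables are written while the data cache is still off.
 */
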
/*
 * Final MMU
 * Let's start from the same layout as the early MMU and modify as needed.
 * IFC regions will be cache-inhibited.
 */
#define FINAL_QBMAN_CACHED_MEM	0x818000000UL
#define FINAL_QBMAN_CACHED_SIZE	0x4000000

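/*
 * How final_mmu_setup() carves this out (values follow from the macros
 * above): 0x818000000 & ~(BLOCK_SIZE_L1 - 1) gives a 1GB-aligned
 * section base of 0x800000000, i.e. level 1 index 32. Within that 1GB,
 * the 0x4000000 (64MB) QBMan region spans level 2 entries
 * 0x18000000 >> 21 = 192 up to (but excluding) 0x1c000000 >> 21 = 224,
 * so 32 2MB blocks get remapped as cacheable normal memory.
 */
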
static inline void early_mmu_setup(void)
{
	int el;
	u64 i;
	u64 section_l1t0, section_l1t1, section_l2t0, section_l2t1;
	u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
	u64 *level1_table_0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
	u64 *level1_table_1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
	u64 *level2_table_0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);
	u64 *level2_table_1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x4000);

	level0_table[0] =
		(u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] =
		(u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * set level 1 table 0 to cache-inhibit, covering 0 to 512GB
	 * set level 1 table 1 to cache enabled, covering 512GB to 1TB
	 * set level 2 table to cache-inhibit, covering 0 to 1GB
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0;
	section_l2t0 = 0;
	section_l2t1 = CONFIG_SYS_FLASH_BASE;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level1_table_0, i, section_l1t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level1_table_1, i, section_l1t1,
				    MT_NORMAL);
		set_pgtable_section(level2_table_0, i, section_l2t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level2_table_1, i, section_l2t1,
				    MT_DEVICE_NGNRNE);
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
		section_l2t0 += BLOCK_SIZE_L2;
		section_l2t1 += BLOCK_SIZE_L2;
	}

	level1_table_0[0] =
		(u64)level2_table_0 | PMD_TYPE_TABLE;
	level1_table_0[1] =
		0x40000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_DEVICE_NGNRNE);
	level1_table_0[2] =
		0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_NORMAL);
	level1_table_0[3] =
		0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_NORMAL);

	/* Rewrite table to enable cache for OCRAM */
	set_pgtable_section(level2_table_0,
			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
			    CONFIG_SYS_FSL_OCRAM_BASE,
			    MT_NORMAL);

#if defined(CONFIG_SYS_NOR0_CSPR_EARLY) && defined(CONFIG_SYS_NOR_AMASK_EARLY)
	/* Rewrite table to enable cache for two entries (4MB) */
	section_l2t1 = CONFIG_SYS_IFC_BASE;
	set_pgtable_section(level2_table_0,
			    section_l2t1 >> SECTION_SHIFT_L2,
			    section_l2t1,
			    MT_NORMAL);
	section_l2t1 += BLOCK_SIZE_L2;
	set_pgtable_section(level2_table_0,
			    section_l2t1 >> SECTION_SHIFT_L2,
			    section_l2t1,
			    MT_NORMAL);
#endif

	/* Create a mapping for the 256MB IFC region to the final flash location */
	level1_table_0[CONFIG_SYS_FLASH_BASE >> SECTION_SHIFT_L1] =
		(u64)level2_table_1 | PMD_TYPE_TABLE;
	section_l2t1 = CONFIG_SYS_IFC_BASE;
	for (i = 0; i < 0x10000000 >> SECTION_SHIFT_L2; i++) {
		set_pgtable_section(level2_table_1, i,
				    section_l2t1, MT_DEVICE_NGNRNE);
		section_l2t1 += BLOCK_SIZE_L2;
	}

	el = current_el();
	set_ttbr_tcr_mair(el, (u64)level0_table, LSCH3_TCR, MEMORY_ATTRIBUTES);
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * The final table looks similar to the early table, but differs in detail.
 * These tables are in regular memory. Cache on IFC is disabled. One sub table
 * is added to enable cache for QBMan.
 */
static inline void final_mmu_setup(void)
{
	int el;
	u64 i, tbl_base, tbl_limit, section_base;
	u64 section_l1t0, section_l1t1, section_l2;
	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
	u64 *level1_table_0 = (u64 *)(gd->arch.tlb_addr + 0x1000);
	u64 *level1_table_1 = (u64 *)(gd->arch.tlb_addr + 0x2000);
	u64 *level2_table_0 = (u64 *)(gd->arch.tlb_addr + 0x3000);
	u64 *level2_table_1 = (u64 *)(gd->arch.tlb_addr + 0x4000);

	level0_table[0] =
		(u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] =
		(u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * set level 1 table 0 to cache-inhibit, covering 0 to 512GB
	 * set level 1 table 1 to cache enabled, covering 512GB to 1TB
	 * set level 2 table 0 to cache-inhibit, covering 0 to 1GB
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0 | PMD_SECT_OUTER_SHARE;
	section_l2 = 0;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level1_table_0, i, section_l1t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level1_table_1, i, section_l1t1,
				    MT_NORMAL);
		set_pgtable_section(level2_table_0, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
		section_l2 += BLOCK_SIZE_L2;
	}

	level1_table_0[0] =
		(u64)level2_table_0 | PMD_TYPE_TABLE;
	level1_table_0[2] =
		0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);
	level1_table_0[3] =
		0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);

	/* Rewrite table to enable cache */
	set_pgtable_section(level2_table_0,
			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
			    CONFIG_SYS_FSL_OCRAM_BASE,
			    MT_NORMAL);

	/*
	 * Fill in other parts of the tables if cache is needed.
	 * If finer granularity than 1GB is needed, a sub table
	 * should be created.
	 */
	section_base = FINAL_QBMAN_CACHED_MEM & ~(BLOCK_SIZE_L1 - 1);
	i = section_base >> SECTION_SHIFT_L1;
	level1_table_0[i] = (u64)level2_table_1 | PMD_TYPE_TABLE;
	section_l2 = section_base;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level2_table_1, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l2 += BLOCK_SIZE_L2;
	}
	tbl_base = FINAL_QBMAN_CACHED_MEM & (BLOCK_SIZE_L1 - 1);
	tbl_limit = (FINAL_QBMAN_CACHED_MEM + FINAL_QBMAN_CACHED_SIZE) &
		    (BLOCK_SIZE_L1 - 1);
	for (i = tbl_base >> SECTION_SHIFT_L2;
	     i < tbl_limit >> SECTION_SHIFT_L2; i++) {
		section_l2 = section_base + (i << SECTION_SHIFT_L2);
		set_pgtable_section(level2_table_1, i,
				    section_l2, MT_NORMAL);
	}

	/* flush new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* point TTBR to the new table */
	el = current_el();
	asm volatile("dsb sy");
	if (el == 1) {
		asm volatile("msr ttbr0_el1, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else if (el == 2) {
		asm volatile("msr ttbr0_el2, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else if (el == 3) {
		asm volatile("msr ttbr0_el3, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else {
		hang();
	}
	asm volatile("isb");

	/*
	 * MMU is already enabled; we just need to invalidate the TLB to
	 * load the new table. The new table is compatible with the current
	 * table: even if the MMU somehow walks through the new table before
	 * the TLB is invalidated, it still works. So we don't need to turn
	 * off the MMU here.
	 */
}

int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);

	return 0;
}

/*
 * This function is called from lib/board.c.
 * It recreates the MMU table in main memory. MMU and d-cache are enabled
 * earlier, so there is no need to disable d-cache for this operation.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
}
#endif

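/*
 * Decode one initiator from a topology cluster register: each cluster
 * word carries TP_INIT_PER_CLUSTER 8-bit indexes into gur->tp_ityp[].
 * Returns the initiator type word when it is flagged available
 * (TP_ITYP_AV), otherwise 0.
 */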
static inline u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = in_le32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

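/*
 * Build a bitmap of the ARM cores found in the topology registers,
 * walking clusters until the end-of-cluster (EOC) flag is seen. Bit
 * positions follow the order in which available initiators are found;
 * a bit is set only when the initiator is an ARM core.
 */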
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return mask;
}

/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

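/*
 * Map a logical core number to the index of the cluster containing it,
 * counting available initiators in topology order.
 */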
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the cluster */
}

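/*
 * Return the raw initiator type word for a logical core number; callers
 * extract the version field with TP_ITYP_VER(). Returns (u32)-1 when the
 * core cannot be found.
 */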
u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the core */
}

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type;

	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
			(type == TY_ITYP_VER_A57 ? "A57" : "   ")),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n       Bus:    %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR:    %-4s MHz", strmhz(buf, sysinfo.freq_ddrbus));
	printf("     DP-DDR: %-4s MHz", strmhz(buf, sysinfo.freq_ddrbus2));
	puts("\n");

	return 0;
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif

	return error;
}

int arch_early_init_r(void)
{
	int rv;

	rv = fsl_lsch3_wake_seconday_cores();
	if (rv)
		printf("Did not wake secondary cores\n");

	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with the accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

	/*
	 * Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);

	/*
	 * Enable clock for the timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

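/*
 * Request a SoC reset by raising RESET_REQ_B in the reset control
 * register. The addr argument is part of the reset_cpu() interface but
 * is not used here; the 0x02 written below is assumed to be the
 * RESET_REQ_B bit of RSTCR.
 */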
void reset_cpu(ulong addr)
{
	u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
	u32 val;

	/* Raise RESET_REQ_B */
	val = in_le32(rstcr);
	val |= 0x02;
	out_le32(rstcr, val);
}