arch/arm/cpu/armv8/fsl-lsch3/cpu.c (karo-tx-uboot.git: "armv8/fsl-lsch3: Add support to print SoC personality")
/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:     GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch-fsl-lsch3/soc.h>
#include <asm/arch-fsl-lsch3/immap_lsch3.h>
#include <fsl_debug_server.h>
#include <fsl-mc/fsl_mc.h>
#include <asm/arch/fsl_serdes.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include "cpu.h"
#include "mp.h"
#include "speed.h"

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
#ifdef CONFIG_LS2085A
        CPU_TYPE_ENTRY(LS2085, LS2085, 8),
        CPU_TYPE_ENTRY(LS2080, LS2080, 8),
        CPU_TYPE_ENTRY(LS2045, LS2045, 4),
#endif
};

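/*
 * Look up the human-readable name for this SoC personality. SVR_SOC_VER()
 * extracts the SoC version field from the SVR, and masking the table
 * entries with SVR_WO_E drops the "E" bit so one entry covers both
 * personalities; IS_E_PROCESSOR() then decides whether the "E"
 * (crypto-enabled) suffix is appended.
 */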
void cpu_name(char *name)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        unsigned int i, svr, ver;

        svr = in_le32(&gur->svr);
        ver = SVR_SOC_VER(svr);

        for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
                if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
                        strcpy(name, cpu_type_list[i].name);

                        if (IS_E_PROCESSOR(svr))
                                strcat(name, "E");
                        break;
                }

        if (i == ARRAY_SIZE(cpu_type_list))
                strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create the MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40 bits of physical address, T0SZ=24.
 * Level 0 IA[39],    table address @0
 * Level 1 IA[38:30], table address @0x1000, 0x2000
 * Level 2 IA[29:21], table address @0x3000, 0x4000
 * Addresses above 0x5000 are free for other purposes.
 */
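/*
 * With a 4KB granule each table holds 512 64-bit entries, so a level-1
 * entry maps a 1GB block (1 << 30) and a level-2 entry maps a 2MB block
 * (1 << 21). As an illustrative example (the actual value of
 * CONFIG_SYS_FSL_OCRAM_BASE comes from the SoC configuration), an OCRAM
 * base of 0x18000000 would land in level-2 entry 0x18000000 >> 21 = 192
 * of the table covering the first 1GB.
 */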

#define SECTION_SHIFT_L0        39UL
#define SECTION_SHIFT_L1        30UL
#define SECTION_SHIFT_L2        21UL
#define BLOCK_SIZE_L0           0x8000000000UL
#define BLOCK_SIZE_L1           (1 << SECTION_SHIFT_L1)
#define BLOCK_SIZE_L2           (1 << SECTION_SHIFT_L2)
#define CONFIG_SYS_IFC_BASE     0x30000000
#define CONFIG_SYS_IFC_SIZE     0x10000000
#define CONFIG_SYS_IFC_BASE2    0x500000000
#define CONFIG_SYS_IFC_SIZE2    0x100000000
#define TCR_EL2_PS_40BIT        (2 << 16)
#define LSCH3_VA_BITS           (40)
#define LSCH3_TCR       (TCR_TG0_4K             | \
                        TCR_EL2_PS_40BIT        | \
                        TCR_SHARED_NON          | \
                        TCR_ORGN_NC             | \
                        TCR_IRGN_NC             | \
                        TCR_T0SZ(LSCH3_VA_BITS))
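/*
 * TCR_T0SZ(40) yields T0SZ = 64 - 40 = 24, matching the comment above,
 * and PS = 2 selects a 40-bit physical address range (the PS field
 * occupies the same bits in TCR_EL3, so the EL2-named macro works there
 * too). Table walks are non-cacheable and non-shareable because the
 * early tables sit in OCRAM before the data cache is enabled.
 */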

/*
 * Final MMU
 * Let's start from the same layout as the early MMU and modify as needed.
 * IFC regions will be cache-inhibited.
 */
#define FINAL_QBMAN_CACHED_MEM  0x818000000UL
#define FINAL_QBMAN_CACHED_SIZE 0x4000000

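/*
 * Build the early translation tables in OCRAM and turn the MMU on.
 * The map is an identity map: the low 512GB defaults to device memory
 * (refined below for OCRAM and, optionally, early NOR), the high 512GB
 * is normal memory, and a level-2 table remaps the 256MB window at
 * CONFIG_SYS_FLASH_BASE onto the IFC flash at CONFIG_SYS_IFC_BASE.
 */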
static inline void early_mmu_setup(void)
{
        int el;
        u64 i;
        u64 section_l1t0, section_l1t1, section_l2t0, section_l2t1;
        u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
        u64 *level1_table_0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
        u64 *level1_table_1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
        u64 *level2_table_0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);
        u64 *level2_table_1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x4000);

        level0_table[0] =
                (u64)level1_table_0 | PMD_TYPE_TABLE;
        level0_table[1] =
                (u64)level1_table_1 | PMD_TYPE_TABLE;

        /*
         * set level 1 table 0 to cache-inhibited, covering 0 to 512GB
         * set level 1 table 1 to cache-enabled, covering 512GB to 1TB
         * set level 2 tables to cache-inhibited, covering 0 to 1GB
         */
        section_l1t0 = 0;
        section_l1t1 = BLOCK_SIZE_L0;
        section_l2t0 = 0;
        section_l2t1 = CONFIG_SYS_FLASH_BASE;
        for (i = 0; i < 512; i++) {
                set_pgtable_section(level1_table_0, i, section_l1t0,
                                    MT_DEVICE_NGNRNE);
                set_pgtable_section(level1_table_1, i, section_l1t1,
                                    MT_NORMAL);
                set_pgtable_section(level2_table_0, i, section_l2t0,
                                    MT_DEVICE_NGNRNE);
                set_pgtable_section(level2_table_1, i, section_l2t1,
                                    MT_DEVICE_NGNRNE);
                section_l1t0 += BLOCK_SIZE_L1;
                section_l1t1 += BLOCK_SIZE_L1;
                section_l2t0 += BLOCK_SIZE_L2;
                section_l2t1 += BLOCK_SIZE_L2;
        }

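        /*
         * Overwrite the first level-1 entries: entry 0 points at the
         * level-2 table so the first 1GB can be refined at 2MB
         * granularity; entry 1 keeps 1GB-2GB as device memory; entries
         * 2 and 3 map 2GB-4GB (the start of DDR) as normal memory.
         */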
        level1_table_0[0] =
                (u64)level2_table_0 | PMD_TYPE_TABLE;
        level1_table_0[1] =
                0x40000000 | PMD_SECT_AF | PMD_TYPE_SECT |
                PMD_ATTRINDX(MT_DEVICE_NGNRNE);
        level1_table_0[2] =
                0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
                PMD_ATTRINDX(MT_NORMAL);
        level1_table_0[3] =
                0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
                PMD_ATTRINDX(MT_NORMAL);

        /* Rewrite table to enable cache for OCRAM */
        set_pgtable_section(level2_table_0,
                            CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
                            CONFIG_SYS_FSL_OCRAM_BASE,
                            MT_NORMAL);

#if defined(CONFIG_SYS_NOR0_CSPR_EARLY) && defined(CONFIG_SYS_NOR_AMASK_EARLY)
        /* Rewrite table to enable cache for two entries (4MB) */
        section_l2t1 = CONFIG_SYS_IFC_BASE;
        set_pgtable_section(level2_table_0,
                            section_l2t1 >> SECTION_SHIFT_L2,
                            section_l2t1,
                            MT_NORMAL);
        section_l2t1 += BLOCK_SIZE_L2;
        set_pgtable_section(level2_table_0,
                            section_l2t1 >> SECTION_SHIFT_L2,
                            section_l2t1,
                            MT_NORMAL);
#endif

        /* Create a mapping of the 256MB IFC region at the final flash location */
        level1_table_0[CONFIG_SYS_FLASH_BASE >> SECTION_SHIFT_L1] =
                (u64)level2_table_1 | PMD_TYPE_TABLE;
        section_l2t1 = CONFIG_SYS_IFC_BASE;
        for (i = 0; i < 0x10000000 >> SECTION_SHIFT_L2; i++) {
                set_pgtable_section(level2_table_1, i,
                                    section_l2t1, MT_DEVICE_NGNRNE);
                section_l2t1 += BLOCK_SIZE_L2;
        }

        el = current_el();
        set_ttbr_tcr_mair(el, (u64)level0_table, LSCH3_TCR, MEMORY_ATTRIBUTES);
        set_sctlr(get_sctlr() | CR_M);
}

/*
 * This final table looks similar to the early table, but differs in detail.
 * These tables are in regular memory. Cache on IFC is disabled. One sub-table
 * is added to enable cache for QBMan.
 */
static inline void final_mmu_setup(void)
{
        int el;
        u64 i, tbl_base, tbl_limit, section_base;
        u64 section_l1t0, section_l1t1, section_l2;
        u64 *level0_table = (u64 *)gd->arch.tlb_addr;
        u64 *level1_table_0 = (u64 *)(gd->arch.tlb_addr + 0x1000);
        u64 *level1_table_1 = (u64 *)(gd->arch.tlb_addr + 0x2000);
        u64 *level2_table_0 = (u64 *)(gd->arch.tlb_addr + 0x3000);
        u64 *level2_table_1 = (u64 *)(gd->arch.tlb_addr + 0x4000);

        level0_table[0] =
                (u64)level1_table_0 | PMD_TYPE_TABLE;
        level0_table[1] =
                (u64)level1_table_1 | PMD_TYPE_TABLE;

        /*
         * set level 1 table 0 to cache-inhibited, covering 0 to 512GB
         * set level 1 table 1 to cache-enabled, covering 512GB to 1TB
         * set level 2 table 0 to cache-inhibited, covering 0 to 1GB
         */
        section_l1t0 = 0;
        section_l1t1 = BLOCK_SIZE_L0 | PMD_SECT_OUTER_SHARE;
        section_l2 = 0;
        for (i = 0; i < 512; i++) {
                set_pgtable_section(level1_table_0, i, section_l1t0,
                                    MT_DEVICE_NGNRNE);
                set_pgtable_section(level1_table_1, i, section_l1t1,
                                    MT_NORMAL);
                set_pgtable_section(level2_table_0, i, section_l2,
                                    MT_DEVICE_NGNRNE);
                section_l1t0 += BLOCK_SIZE_L1;
                section_l1t1 += BLOCK_SIZE_L1;
                section_l2 += BLOCK_SIZE_L2;
        }

        level1_table_0[0] =
                (u64)level2_table_0 | PMD_TYPE_TABLE;
        level1_table_0[2] =
                0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
                PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);
        level1_table_0[3] =
                0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
                PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);

        /* Rewrite table to enable cache for OCRAM */
        set_pgtable_section(level2_table_0,
                            CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
                            CONFIG_SYS_FSL_OCRAM_BASE,
                            MT_NORMAL);

        /*
         * Fill in other parts of the tables if cache is needed.
         * If finer granularity than 1GB is needed, a sub-table
         * should be created.
         */
        section_base = FINAL_QBMAN_CACHED_MEM & ~(BLOCK_SIZE_L1 - 1);
        i = section_base >> SECTION_SHIFT_L1;
        level1_table_0[i] = (u64)level2_table_1 | PMD_TYPE_TABLE;
        section_l2 = section_base;
        for (i = 0; i < 512; i++) {
                set_pgtable_section(level2_table_1, i, section_l2,
                                    MT_DEVICE_NGNRNE);
                section_l2 += BLOCK_SIZE_L2;
        }
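
        /*
         * Re-mark just the QBMan window inside that 1GB block as
         * cacheable. For example, FINAL_QBMAN_CACHED_MEM = 0x818000000
         * falls in the block based at 0x800000000 (level-1 entry 32)
         * with offset 0x18000000, so level-2 entries 0x18000000 >> 21 =
         * 192 through 223 (32 x 2MB = 0x4000000) are set to MT_NORMAL.
         */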
        tbl_base = FINAL_QBMAN_CACHED_MEM & (BLOCK_SIZE_L1 - 1);
        tbl_limit = (FINAL_QBMAN_CACHED_MEM + FINAL_QBMAN_CACHED_SIZE) &
                    (BLOCK_SIZE_L1 - 1);
        for (i = tbl_base >> SECTION_SHIFT_L2;
             i < tbl_limit >> SECTION_SHIFT_L2; i++) {
                section_l2 = section_base + (i << SECTION_SHIFT_L2);
                set_pgtable_section(level2_table_1, i,
                                    section_l2, MT_NORMAL);
        }

        /* flush the new MMU table */
        flush_dcache_range(gd->arch.tlb_addr,
                           gd->arch.tlb_addr + gd->arch.tlb_size);

        /* point TTBR to the new table */
        el = current_el();
        asm volatile("dsb sy");
        if (el == 1) {
                asm volatile("msr ttbr0_el1, %0"
                             : : "r" ((u64)level0_table) : "memory");
        } else if (el == 2) {
                asm volatile("msr ttbr0_el2, %0"
                             : : "r" ((u64)level0_table) : "memory");
        } else if (el == 3) {
                asm volatile("msr ttbr0_el3, %0"
                             : : "r" ((u64)level0_table) : "memory");
        } else {
                hang();
        }
        asm volatile("isb");

        /*
         * The MMU is already enabled; we only need to invalidate the TLB
         * to load the new table. The new table is compatible with the
         * current table: if the MMU somehow walks through the new table
         * before the TLB is invalidated, it still works. So we don't
         * need to turn off the MMU here.
         */
}

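/*
 * Early CPU setup: enable the i-cache right away, invalidate any stale
 * d-cache and TLB contents, install the early page tables from OCRAM,
 * and only then enable the d-cache.
 */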
int arch_cpu_init(void)
{
        icache_enable();
        __asm_invalidate_dcache_all();
        __asm_invalidate_tlb_all();
        early_mmu_setup();
        set_sctlr(get_sctlr() | CR_C);
        return 0;
}

/*
 * This function is called from lib/board.c. It recreates the MMU table
 * in main memory. The MMU and d-cache are enabled earlier, so there is
 * no need to disable the d-cache for this operation.
 */
void enable_caches(void)
{
        final_mmu_setup();
        __asm_invalidate_tlb_all();
}
#endif

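/*
 * The topology (TP) registers in the GUR block describe the cores on
 * this SoC: gur->tp_cluster[] lists the initiators in each cluster,
 * 8 bits per initiator, and each entry indexes gur->tp_ityp[], whose
 * TP_ITYP_AV bit says whether that initiator is actually available on
 * this personality.
 */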
static inline u32 initiator_type(u32 cluster, int init_id)
{
        struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
        u32 type = in_le32(&gur->tp_ityp[idx]);

        if (type & TP_ITYP_AV)
                return type;

        return 0;
}

u32 cpu_mask(void)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        int i = 0, count = 0;
        u32 cluster, type, mask = 0;

        do {
                int j;
                cluster = in_le32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        type = initiator_type(cluster, j);
                        if (type) {
                                if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
                                        mask |= 1 << count;
                                count++;
                        }
                }
                i++;
        } while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

        return mask;
}

/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
        return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
        struct ccsr_gur __iomem *gur =
                (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
        int i = 0, count = 0;
        u32 cluster;

        do {
                int j;
                cluster = in_le32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        if (initiator_type(cluster, j)) {
                                if (count == core)
                                        return i;
                                count++;
                        }
                }
                i++;
        } while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

        return -1;      /* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
        struct ccsr_gur __iomem *gur =
                (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
        int i = 0, count = 0;
        u32 cluster, type;

        do {
                int j;
                cluster = in_le32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        type = initiator_type(cluster, j);
                        if (type) {
                                if (count == core)
                                        return type;
                                count++;
                        }
                }
                i++;
        } while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

        return -1;      /* cannot identify the core type */
}

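/*
 * Print the SoC personality and SVR, each core's type and frequency
 * (iterating only over the ARM cores reported by cpu_mask()), the bus,
 * DDR and DP-DDR clocks, and the Reset Configuration Word this boot is
 * actually using, four 32-bit words per row prefixed by byte offset.
 */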
#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        struct sys_info sysinfo;
        char buf[32];
        unsigned int i, core;
        u32 type;

        puts("SoC: ");

        cpu_name(buf);
        printf("%s (0x%x)\n", buf, in_le32(&gur->svr));

        memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));

        get_sys_info(&sysinfo);
        puts("Clock Configuration:");
        for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
                if (!(i % 3))
                        puts("\n       ");
                type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
                printf("CPU%d(%s):%-4s MHz  ", core,
                       type == TY_ITYP_VER_A7 ? "A7 " :
                       (type == TY_ITYP_VER_A53 ? "A53" :
                        (type == TY_ITYP_VER_A57 ? "A57" : "   ")),
                       strmhz(buf, sysinfo.freq_processor[core]));
        }
        printf("\n       Bus:      %-4s MHz  ",
               strmhz(buf, sysinfo.freq_systembus));
        printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
        printf("     DP-DDR:   %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus2));
        puts("\n");

        /*
         * Display the RCW, so that no one gets confused as to what RCW
         * we're actually using for this boot.
         */
        puts("Reset Configuration Word (RCW):");
        for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
                u32 rcw = in_le32(&gur->rcwsr[i]);

                if ((i % 4) == 0)
                        printf("\n       %02x:", i * 4);
                printf(" %08x", rcw);
        }
        puts("\n");

        return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
        return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
        int error = 0;

#ifdef CONFIG_FSL_MC_ENET
        error = fsl_mc_ldpaa_init(bis);
#endif
        return error;
}

int arch_early_init_r(void)
{
        int rv;

        rv = fsl_lsch3_wake_seconday_cores();
        if (rv)
                printf("Did not wake secondary cores\n");

#ifdef CONFIG_SYS_HAS_SERDES
        fsl_serdes_init();
#endif
        return 0;
}

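/*
 * Generic timer setup: optionally program the true timebase frequency
 * into cntfrq_el0 (COUNTER_FREQUENCY_REAL is provided by the board
 * configuration), enable the timebase for all clusters through the PMU
 * CLTBENR register, then start the system counter via CNTCR.
 */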
int timer_init(void)
{
        u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
        u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#ifdef COUNTER_FREQUENCY_REAL
        unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

        /* Update with the accurate clock frequency */
        asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

        /*
         * Enable the timebase for all clusters.
         * It is safe to do so even if some clusters are not enabled.
         */
        out_le32(cltbenr, 0xf);

        /*
         * Enable the clock for the timer.
         * This is a global setting.
         */
        out_le32(cntcr, 0x1);

        return 0;
}

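/*
 * Assert RESET_REQ_B in the reset control register (RSTCR) to request
 * a full SoC reset; the addr argument is unused on this platform.
 */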
void reset_cpu(ulong addr)
{
        u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
        u32 val;

        /* Raise RESET_REQ_B */
        val = in_le32(rstcr);
        val |= 0x02;
        out_le32(rstcr, val);
}