/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch-fsl-lsch3/soc.h>
#include <asm/arch-fsl-lsch3/immap_lsch3.h>
#include <fsl_debug_server.h>
#include <fsl-mc/fsl_mc.h>
#include <asm/arch/fsl_serdes.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include "cpu.h"
#include "mp.h"
#include "speed.h"

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
#ifdef CONFIG_LS2085A
	CPU_TYPE_ENTRY(LS2085, LS2085, 8),
	CPU_TYPE_ENTRY(LS2080, LS2080, 8),
	CPU_TYPE_ENTRY(LS2045, LS2045, 4),
#endif
};

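/*
 * Match the SVR version field against cpu_type_list: SVR_WO_E masks
 * the "E" (crypto-enabled) bit out of the comparison, and
 * IS_E_PROCESSOR() then decides whether to append the E suffix to
 * the name.
 */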
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = in_le32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create the MMU table
 * in SRAM. The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We
 * use three levels of translation tables here to cover the 40-bit
 * address space. We use a 4KB granule size, with 40-bit physical
 * addresses and T0SZ=24.
 * Level 0 IA[39], table address @0
 * Level 1 IA[38:30], table address @0x1000, 0x2000
 * Level 2 IA[29:21], table address @0x3000, 0x4000
 * Addresses above 0x5000 are free for other purposes.
 */
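/*
 * Index math implied by the layout above: with a 4KB granule each
 * level-2 block maps 2MB (1 << 21), each level-1 block maps 1GB
 * (1 << 30), and each level-0 entry covers 512GB (1 << 39). For
 * example, address 0x80000000 resolves to level-0 entry 0 and
 * level-1 entry 0x80000000 >> 30 = 2.
 */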

#define SECTION_SHIFT_L0	39UL
#define SECTION_SHIFT_L1	30UL
#define SECTION_SHIFT_L2	21UL
#define BLOCK_SIZE_L0		0x8000000000UL
#define BLOCK_SIZE_L1		(1 << SECTION_SHIFT_L1)
#define BLOCK_SIZE_L2		(1 << SECTION_SHIFT_L2)
#define CONFIG_SYS_IFC_BASE	0x30000000
#define CONFIG_SYS_IFC_SIZE	0x10000000
#define CONFIG_SYS_IFC_BASE2	0x500000000
#define CONFIG_SYS_IFC_SIZE2	0x100000000
#define TCR_EL2_PS_40BIT	(2 << 16)
#define LSCH3_VA_BITS		(40)
#define LSCH3_TCR	(TCR_TG0_4K		| \
			TCR_EL2_PS_40BIT	| \
			TCR_SHARED_NON		| \
			TCR_ORGN_NC		| \
			TCR_IRGN_NC		| \
			TCR_T0SZ(LSCH3_VA_BITS))
#define LSCH3_TCR_FINAL	(TCR_TG0_4K		| \
			TCR_EL2_PS_40BIT	| \
			TCR_SHARED_OUTER	| \
			TCR_ORGN_WBWA		| \
			TCR_IRGN_WBWA		| \
			TCR_T0SZ(LSCH3_VA_BITS))
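
/*
 * The two TCR values differ only in the walk attributes: LSCH3_TCR is
 * used while the translation tables live in SRAM with the d-cache
 * still off, so table walks are non-cacheable and non-shareable;
 * LSCH3_TCR_FINAL switches to write-back write-allocate,
 * outer-shareable walks once the final tables sit in cached memory.
 * TCR_T0SZ(40) encodes the 40-bit input address range (T0SZ = 24).
 */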

/*
 * Final MMU
 * Let's start from the same layout as the early MMU and modify it as
 * needed. IFC regions will be cache-inhibited.
 */
#define FINAL_QBMAN_CACHED_MEM	0x818000000UL
#define FINAL_QBMAN_CACHED_SIZE	0x4000000

static inline void early_mmu_setup(void)
{
	int el;
	u64 i;
	u64 section_l1t0, section_l1t1, section_l2t0, section_l2t1;
	u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
	u64 *level1_table_0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
	u64 *level1_table_1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
	u64 *level2_table_0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);
	u64 *level2_table_1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x4000);

	level0_table[0] =
		(u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] =
		(u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * Set level 1 table 0 to cache-inhibited, covering 0 to 512GB.
	 * Set level 1 table 1 to cache-enabled, covering 512GB to 1TB.
	 * Set level 2 table 0 to cache-inhibited, covering 0 to 1GB.
	 * Set level 2 table 1 to device memory, covering 1GB starting
	 * at CONFIG_SYS_FLASH_BASE.
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0;
	section_l2t0 = 0;
	section_l2t1 = CONFIG_SYS_FLASH_BASE;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level1_table_0, i, section_l1t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level1_table_1, i, section_l1t1,
				    MT_NORMAL);
		set_pgtable_section(level2_table_0, i, section_l2t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level2_table_1, i, section_l2t1,
				    MT_DEVICE_NGNRNE);
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
		section_l2t0 += BLOCK_SIZE_L2;
		section_l2t1 += BLOCK_SIZE_L2;
	}

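	/*
	 * Carve up the first 4GB of level 1 table 0: 0..1GB goes
	 * through level2_table_0 so OCRAM can be made cacheable below,
	 * 1GB..2GB stays device memory, and 2GB..4GB (presumably the
	 * low DDR window on this SoC) is mapped as normal memory.
	 */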
	level1_table_0[0] =
		(u64)level2_table_0 | PMD_TYPE_TABLE;
	level1_table_0[1] =
		0x40000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_DEVICE_NGNRNE);
	level1_table_0[2] =
		0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_NORMAL);
	level1_table_0[3] =
		0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_NORMAL);

	/* Rewrite the table to enable cache for OCRAM */
	set_pgtable_section(level2_table_0,
			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
			    CONFIG_SYS_FSL_OCRAM_BASE,
			    MT_NORMAL);

#if defined(CONFIG_SYS_NOR0_CSPR_EARLY) && defined(CONFIG_SYS_NOR_AMASK_EARLY)
	/* Rewrite the table to enable cache for two entries (4MB) */
	section_l2t1 = CONFIG_SYS_IFC_BASE;
	set_pgtable_section(level2_table_0,
			    section_l2t1 >> SECTION_SHIFT_L2,
			    section_l2t1,
			    MT_NORMAL);
	section_l2t1 += BLOCK_SIZE_L2;
	set_pgtable_section(level2_table_0,
			    section_l2t1 >> SECTION_SHIFT_L2,
			    section_l2t1,
			    MT_NORMAL);
#endif

	/* Create a mapping of the 256MB IFC region at the final flash location */
	level1_table_0[CONFIG_SYS_FLASH_BASE >> SECTION_SHIFT_L1] =
		(u64)level2_table_1 | PMD_TYPE_TABLE;
	section_l2t1 = CONFIG_SYS_IFC_BASE;
	for (i = 0; i < 0x10000000 >> SECTION_SHIFT_L2; i++) {
		set_pgtable_section(level2_table_1, i,
				    section_l2t1, MT_DEVICE_NGNRNE);
		section_l2t1 += BLOCK_SIZE_L2;
	}

	el = current_el();
	set_ttbr_tcr_mair(el, (u64)level0_table, LSCH3_TCR, MEMORY_ATTRIBUTES);
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * This final table looks similar to the early table, but differs in
 * detail. These tables are in regular memory. Cache on IFC is
 * disabled. One sub-table is added to enable cache for QBMan.
 */
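/*
 * FINAL_QBMAN_CACHED_MEM is the QBMan portal window that needs a
 * cacheable view; the extra level-2 table below exists solely to
 * flip that one region to MT_NORMAL at 2MB granularity.
 */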
static inline void final_mmu_setup(void)
{
	int el;
	u64 i, tbl_base, tbl_limit, section_base;
	u64 section_l1t0, section_l1t1, section_l2;
	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
	u64 *level1_table_0 = (u64 *)(gd->arch.tlb_addr + 0x1000);
	u64 *level1_table_1 = (u64 *)(gd->arch.tlb_addr + 0x2000);
	u64 *level2_table_0 = (u64 *)(gd->arch.tlb_addr + 0x3000);
	u64 *level2_table_1 = (u64 *)(gd->arch.tlb_addr + 0x4000);

	level0_table[0] =
		(u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] =
		(u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * Set level 1 table 0 to cache-inhibited, covering 0 to 512GB.
	 * Set level 1 table 1 to cache-enabled, covering 512GB to 1TB.
	 * Set level 2 table 0 to cache-inhibited, covering 0 to 1GB.
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0 | PMD_SECT_OUTER_SHARE;
	section_l2 = 0;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level1_table_0, i, section_l1t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level1_table_1, i, section_l1t1,
				    MT_NORMAL);
		set_pgtable_section(level2_table_0, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
		section_l2 += BLOCK_SIZE_L2;
	}

	level1_table_0[0] =
		(u64)level2_table_0 | PMD_TYPE_TABLE;
	level1_table_0[2] =
		0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);
	level1_table_0[3] =
		0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);

	/* Rewrite the table to enable cache for OCRAM */
	set_pgtable_section(level2_table_0,
			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
			    CONFIG_SYS_FSL_OCRAM_BASE,
			    MT_NORMAL);

	/*
	 * Fill in other parts of the tables if cache is needed.
	 * If finer granularity than 1GB is needed, a sub-table
	 * should be created.
	 */
	section_base = FINAL_QBMAN_CACHED_MEM & ~(BLOCK_SIZE_L1 - 1);
	i = section_base >> SECTION_SHIFT_L1;
	level1_table_0[i] = (u64)level2_table_1 | PMD_TYPE_TABLE;
	section_l2 = section_base;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level2_table_1, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l2 += BLOCK_SIZE_L2;
	}
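
	/*
	 * Worked example (a sketch of the math below): with
	 * FINAL_QBMAN_CACHED_MEM at 0x818000000 and a 64MB size,
	 * section_base is 0x800000000, tbl_base is 0x18000000 and
	 * tbl_limit is 0x1c000000, so entries 192..223 of
	 * level2_table_1 (32 x 2MB = 64MB) are remapped to MT_NORMAL.
	 */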
	tbl_base = FINAL_QBMAN_CACHED_MEM & (BLOCK_SIZE_L1 - 1);
	tbl_limit = (FINAL_QBMAN_CACHED_MEM + FINAL_QBMAN_CACHED_SIZE) &
		    (BLOCK_SIZE_L1 - 1);
	for (i = tbl_base >> SECTION_SHIFT_L2;
	     i < tbl_limit >> SECTION_SHIFT_L2; i++) {
		section_l2 = section_base + (i << SECTION_SHIFT_L2);
		set_pgtable_section(level2_table_1, i,
				    section_l2, MT_NORMAL);
	}

	/* flush the new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* point TTBR to the new table */
	el = current_el();
	set_ttbr_tcr_mair(el, (u64)level0_table, LSCH3_TCR_FINAL,
			  MEMORY_ATTRIBUTES);
	/*
	 * The MMU is already enabled, so we just need to invalidate the
	 * TLB to load the new table. The new table is compatible with
	 * the current one; if the MMU somehow walks through the new
	 * table before the TLB is invalidated, it still works. So we
	 * don't need to turn off the MMU here.
	 */
}

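/*
 * Note the ordering below: the i-cache comes on and stale d-cache/TLB
 * contents are invalidated before the early tables are built in OCRAM;
 * the d-cache (CR_C) is only enabled after early_mmu_setup() has
 * turned the MMU on.
 */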
int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

/*
 * This function is called from lib/board.c. It recreates the MMU
 * table in main memory. The MMU and d-cache were enabled earlier, so
 * there is no need to disable the d-cache for this operation.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
}
#endif

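/*
 * Topology register sketch: each entry of gur->tp_cluster packs one
 * initiator index per byte; the index selects an entry in
 * gur->tp_ityp[], whose TP_ITYP_AV bit indicates that the initiator
 * is actually present on this part.
 */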
static inline u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = in_le32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

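/*
 * cpu_mask() builds a bitmap with one bit per ARM core, in the order
 * initiators appear in the topology registers; non-ARM initiators
 * (accelerators, for instance) advance the bit position without
 * setting a bit.
 */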
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return mask;
}

/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

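/*
 * The two walkers below repeat the cpu_mask() iteration, so a core's
 * number here is its position among all present initiators; bit N of
 * cpu_mask() corresponds to core N in these lookups.
 */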
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the core type */
}

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type;

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, in_le32(&gur->svr));

	memset(buf, 0, sizeof(buf));

	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
			(type == TY_ITYP_VER_A57 ? "A57" : "   ")),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
	printf("     DP-DDR:   %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus2));
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
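	/*
	 * rcwsr[] holds 32-bit words; every fourth word starts a new
	 * row labelled with its byte offset (00:, 10:, 20:, ...).
	 */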
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		u32 rcw = in_le32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %02x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
	return error;
}

int arch_early_init_r(void)
{
	int rv;

	rv = fsl_lsch3_wake_seconday_cores();
	if (rv)
		printf("Did not wake secondary cores\n");

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
	return 0;
}

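/*
 * Timer bring-up notes (from the code below): cntfrq_el0 is
 * reprogrammed when COUNTER_FREQUENCY_REAL is defined so the generic
 * timer reports the real counter clock; cltbenr gates the timebase
 * per cluster, and cntcr is the global counter enable.
 */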
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with the accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

	/*
	 * Enable the timebase for all clusters. It is safe to do so
	 * even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);

	/*
	 * Enable the clock for the timer. This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

void reset_cpu(ulong addr)
{
	u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
	u32 val;

	/* Raise RESET_REQ_B */
	val = in_le32(rstcr);
	val |= 0x02;
	out_le32(rstcr, val);
}