/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"
struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	void (*configure)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static const struct l2c_init_data *l2x0_data;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;
/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}
/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}
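/*
 * For illustration only: a platform booting non-secure might route these
 * writes through its secure monitor by installing a write_sec hook before
 * the cache is initialised.  A minimal sketch, with made-up my_soc_* names
 * (real platforms install the hook from their machine/SoC code):
 *
 *	static void my_soc_l2c_write_sec(unsigned long val, unsigned reg)
 *	{
 *		my_soc_smc(MY_SOC_SMC_L2C_WRITE, reg, val);
 *	}
 *	...
 *	outer_cache.write_sec = my_soc_l2c_write_sec;
 */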
/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
static void l2c_configure(void __iomem *base)
{
	if (outer_cache.configure) {
		outer_cache.configure(&l2x0_saved_regs);
		return;
	}

	if (l2x0_data->configure)
		l2x0_data->configure(base);

	l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Do not touch the controller if already enabled. */
	if (readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)
		return;

	l2x0_saved_regs.aux_ctrl = aux;
	l2c_configure(base);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}
static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	l2c_write_sec(0, l2x0_base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c_save(void __iomem *base)
{
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void l2c_resume(void)
{
	l2c_enable(l2x0_base, l2x0_saved_regs.aux_ctrl, l2x0_data->num_lock);
}
/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * Never has a sync register other than L2X0_CACHE_SYNC, but we use
 * sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}
static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}
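/*
 * Worked example (added for clarity): with CACHE_LINE_SIZE = 32,
 * __l2c210_op_pa_range(reg, 0x1000, 0x1080) issues the operation for the
 * four lines at 0x1000, 0x1020, 0x1040 and 0x1060; the end address itself
 * is exclusive.
 */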
static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
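/*
 * Worked example (added for clarity): l2c210_inv_range(0x1010, 0x1070)
 * clean+invalidates the partially covered lines at 0x1000 and 0x1060
 * (so unrelated dirty data sharing those lines is not lost), then plain
 * invalidates the fully covered lines at 0x1020 and 0x1040.
 */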
static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}
static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};
/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}
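/*
 * Worked example (added for clarity): a 12 KiB range such as
 * l2c220_op_pa_range(reg, 0x10000, 0x13000, flags) is processed as three
 * 4096-byte blocks starting at 0x10000, 0x11000 and 0x12000, with the
 * l2x0_lock dropped and re-taken between blocks so the lock (and the
 * caller's interrupt-off time) is not held across the whole range.
 */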
static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	aux |= L220_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, aux, num_lock);
}

static const struct l2c_init_data l2c220_data = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c220_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};
/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
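/*
 * Note added for clarity: in the errata paths above, writing 0x03 to the
 * debug control register sets (to my reading of the workaround sequences)
 * the DWB and DCL bits, temporarily forcing write-through behaviour and
 * disabling linefills for the duration of the operation; writing 0x00
 * restores normal operation.
 */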
static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
							L310_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
							L310_POWER_CTRL);
}

static void l2c310_configure(void __iomem *base)
{
	unsigned revision;

	/* restore pl310 setup */
	l2c_write_sec(l2x0_saved_regs.tag_latency, base,
		      L310_TAG_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.data_latency, base,
		      L310_DATA_LATENCY_CTRL);
	l2c_write_sec(l2x0_saved_regs.filter_end, base,
		      L310_ADDR_FILTER_END);
	l2c_write_sec(l2x0_saved_regs.filter_start, base,
		      L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
				 L2X0_CACHE_ID_RTL_MASK;

	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
			      L310_PREFETCH_CTRL);
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
			      L310_POWER_CTRL);
}

static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
{
	switch (act & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
		break;
	case CPU_DYING:
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
		break;
	}

	return NOTIFY_OK;
}
static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;

	if (rev >= L310_CACHE_ID_RTL_R2P0) {
		if (cortex_a9) {
			aux |= L310_AUX_CTRL_EARLY_BRESP;
			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
		}
	}

	if (cortex_a9) {
		u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
		u32 acr = get_auxcr();

		pr_debug("Cortex-A9 ACR=0x%08x\n", acr);

		if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
			pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");

		if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
			pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");

		if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
			aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
			pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
		}
	} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
		pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN |
					   L310_STNDBY_MODE_EN;

	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	aux |= L310_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, aux, num_lock);

	/* Read back resulting AUX_CTRL value as it could have been altered. */
	aux = readl_relaxed(base + L2X0_AUX_CTRL);

	if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
		u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);

		pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
			aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
			aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
			1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0) {
		u32 power_ctrl;

		power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
		pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
			power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
	}

	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
		cpu_notifier(l2c310_cpu_enable_flz, 0);
	}
}
static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[8];
	unsigned n = 0;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
	    revision < L310_CACHE_ID_RTL_R3P2) {
		u32 val = l2x0_saved_regs.prefetch_ctrl;
		/* I don't think bit23 is required here... but iMX6 does so */
		if (val & (BIT(30) | BIT(23))) {
			val &= ~(BIT(30) | BIT(23));
			l2x0_saved_regs.prefetch_ctrl = val;
			errata[n++] = "752271";
		}
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}
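/*
 * Example of the resulting log line (added for clarity): with 727915 and
 * 753970 both applicable, the loop above prints
 * "L2C-310 errata 727915 753970 enabled".
 */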
static void l2c310_disable(void)
{
	/*
	 * If full-line-of-zeros is enabled, we must first disable it in the
	 * Cortex-A9 auxiliary control register before disabling the L2 cache.
	 */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));

	l2c_disable();
}

static void l2c310_resume(void)
{
	l2c_resume();

	/* Re-enable full-line-of-zeros for Cortex-A9 */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};
static int __init __l2c_init(const struct l2c_init_data *data,
			     u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux, old_aux;

	/*
	 * Save the pointer globally so that callbacks which do not receive
	 * context from callers can access the structure.
	 */
	l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
	if (!l2x0_data)
		return -ENOMEM;

	/*
	 * Sanity check the aux values.  aux_mask is the bits we preserve
	 * from reading the hardware register, and aux_val is the bits we
	 * set.
	 */
	if (aux_val & aux_mask)
		pr_alert("L2C: platform provided aux values permit register corruption.\n");

	old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	aux &= aux_mask;
	aux |= aux_val;

	if (old_aux != aux)
		pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, aux);

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
			pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size that a way_size value of zero would be
	 * given the calculation: way_size = way_size_0 << way_size_bits.
	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
	 * then way_size_0 would be 8k.
	 *
	 * L2 cache size = number of ways * way size.
	 */
	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);
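	/*
	 * Worked example (added for clarity): an L2C-310 has
	 * way_size_0 = SZ_8K, so way_size_bits = 3 gives a way size of
	 * 8K << 3 = 64 KiB; with 16 ways that is a 1 MiB cache.
	 */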
	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	fns.configure = outer_cache.configure;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	outer_cache = fns;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);

	return 0;
}
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	__l2c_init(data, aux_val, aux_mask, cache_id);
}
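/*
 * Typical usage (sketch only, not taken from any specific board file):
 * a non-DT platform maps the controller and calls, from its
 * ->init_machine():
 *
 *	l2x0_init(my_mapped_l2c_base, 0, ~0UL);
 *
 * where aux_val = 0 with aux_mask = ~0UL keeps the hardware defaults.
 * "my_mapped_l2c_base" is a made-up name for the ioremap()ed base.
 */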
#ifdef CONFIG_OF
static int l2_wt_override;

/* The Aurora cache doesn't have the cache ID register available, so we
 * have to pass it through the device tree */
static u32 cache_id_part_number_from_dt;

/**
 * l2x0_cache_size_of_parse() - read cache size parameters from DT
 * @np: the device tree node for the l2 cache
 * @aux_val: pointer to machine-supplied auxiliary register value, to
 * be augmented by the call (bits to be set to 1)
 * @aux_mask: pointer to machine-supplied auxiliary register mask, to
 * be augmented by the call (bits to be set to 0)
 * @associativity: variable to return the calculated associativity in
 * @max_way_size: the maximum size in bytes for the cache ways
 */
static int __init l2x0_cache_size_of_parse(const struct device_node *np,
					   u32 *aux_val, u32 *aux_mask,
					   u32 *associativity,
					   u32 max_way_size)
{
	u32 mask = 0, val = 0;
	u32 cache_size = 0, sets = 0;
	u32 way_size_bits = 1;
	u32 way_size = 0;
	u32 block_size = 0;
	u32 line_size = 0;

	of_property_read_u32(np, "cache-size", &cache_size);
	of_property_read_u32(np, "cache-sets", &sets);
	of_property_read_u32(np, "cache-block-size", &block_size);
	of_property_read_u32(np, "cache-line-size", &line_size);

	if (!cache_size || !sets)
		return -ENODEV;

	/* All these l2 caches have the same line = block size actually */
	if (!line_size) {
		if (block_size) {
			/* If linesize is not given, it is equal to blocksize */
			line_size = block_size;
		} else {
			/* Fall back to known size */
			pr_warn("L2C OF: no cache block/line size given: "
				"falling back to default size %d bytes\n",
				CACHE_LINE_SIZE);
			line_size = CACHE_LINE_SIZE;
		}
	}

	if (line_size != CACHE_LINE_SIZE)
		pr_warn("L2C OF: DT supplied line size %d bytes does "
			"not match hardware line size of %d bytes\n",
			line_size, CACHE_LINE_SIZE);

	/*
	 * Since:
	 * set size = cache size / sets
	 * ways = cache size / (sets * line size)
	 * way size = cache size / (cache size / (sets * line size))
	 * way size = sets * line size
	 * associativity = ways = cache size / way size
	 */
	way_size = sets * line_size;
	*associativity = cache_size / way_size;
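	/*
	 * Worked example (added for clarity): cache-size = 0x80000 (512 KiB)
	 * with cache-sets = 2048 and a 32-byte line gives
	 * way_size = 2048 * 32 = 64 KiB and associativity = 512K / 64K = 8.
	 */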
	if (way_size > max_way_size) {
		pr_err("L2C OF: set size %dKB is too large\n", way_size >> 10);
		return -EINVAL;
	}

	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
		cache_size, cache_size >> 10);
	pr_info("L2C OF: override line size: %d bytes\n", line_size);
	pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
		way_size, way_size >> 10);
	pr_info("L2C OF: override associativity: %d\n", *associativity);

	/*
	 * Calculates the way size bits to set (AUX_CTRL bits 19:17):
	 * 512KB -> 6, 256KB -> 5, ... 16KB -> 1
	 */
	way_size_bits = ilog2(way_size >> 10) - 3;
	if (way_size_bits < 1 || way_size_bits > 6) {
		pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
		       way_size >> 10);
		return -EINVAL;
	}

	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
	val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;

	return 0;
}
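/*
 * Hypothetical DT fragment (for illustration only; node name and unit
 * address are made up) that this parser accepts:
 *
 *	l2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		cache-unified;
 *		cache-size = <0x80000>;		// 512 KiB
 *		cache-sets = <2048>;
 *		cache-line-size = <32>;
 *	};
 *
 * For these values way_size is 64 KiB, so way_size_bits = ilog2(64) - 3 = 3,
 * which is the value programmed into AUX_CTRL bits 19:17.
 */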
static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;
	u32 assoc;
	int ret;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
	if (ret)
		return;

	if (assoc > 8) {
		pr_err("l2x0 of: cache setting yields too high associativity\n");
		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
	} else {
		mask |= L2X0_AUX_CTRL_ASSOC_MASK;
		val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c220_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c_resume,
	},
};
static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };
	u32 assoc;
	u32 prefetch;
	u32 val;
	int ret;

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		l2x0_saved_regs.tag_latency =
			L310_LATENCY_CTRL_RD(tag[0] - 1) |
			L310_LATENCY_CTRL_WR(tag[1] - 1) |
			L310_LATENCY_CTRL_SETUP(tag[2] - 1);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		l2x0_saved_regs.data_latency =
			L310_LATENCY_CTRL_RD(data[0] - 1) |
			L310_LATENCY_CTRL_WR(data[1] - 1) |
			L310_LATENCY_CTRL_SETUP(data[2] - 1);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		l2x0_saved_regs.filter_end =
					ALIGN(filter[0] + filter[1], SZ_1M);
		l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
					| L310_ADDR_FILTER_EN;
	}

	ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
	if (!ret) {
		switch (assoc) {
		case 16:
			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			break;
		case 8:
			*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
			break;
		default:
			pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
			       assoc);
			break;
		}
	}

	prefetch = l2x0_saved_regs.prefetch_ctrl;

	ret = of_property_read_u32(np, "arm,double-linefill", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
	if (ret == 0) {
		if (!val)
			prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
	if (ret == 0) {
		if (val)
			prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
		else
			prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
	}

	ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
	if (ret == 0) {
		prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
		prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
	} else if (ret != -EINVAL) {
		pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
	}

	l2x0_saved_regs.prefetch_ctrl = prefetch;
}
static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

/*
 * This is a variant of the of_l2c310_data with .sync set to
 * NULL. Outer sync operations are not needed when the system is I/O
 * coherent, and potentially harmful in certain situations (PCIe/PL310
 * deadlock on Armada 375/38x due to hardware I/O coherency). The
 * other operations are kept because they are infrequent (therefore do
 * not cause the deadlock in practice) and needed for secondary CPU
 * boot and other power management activities.
 */
static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
	.type = "L2C-310 Coherent",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.resume = l2c310_resume,
	},
};
/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
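/*
 * Worked example (added for clarity, assuming 4 KiB pages and that
 * MAX_RANGE_SIZE is at least one page): calc_range_end(0x10f80, 0x11100)
 * returns 0x11000, clipping the range at the next page boundary; the
 * caller then continues from 0x11000 on its next iteration.
 */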
/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}
static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Align start and end addresses to the cache line size.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}
static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2c_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = l2c_resume,
	},
};
/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, to invalidate a range that starts at 0xBFFF0000 and ends
 * at 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because of
 * that the code does not need to handle section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
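/*
 * Worked example (added for clarity): a section 2 (SYS EMI) address such
 * as 0xBFFF0000 maps to 0xBFFF0000 + BCM_SYS_EMI_OFFSET = 0xFFFF0000.
 * Section 3 addresses take the VC offset instead, the 32-bit sum wrapping
 * modulo 2^32 (e.g. 0xC0000000 + 0x80000000 -> 0x40000000).
 */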
static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}
/* Broadcom L2C-310 controllers are ARM's r3p2 or later, and require no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.configure = l2c310_configure,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};
static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}

static void tauros3_configure(void __iomem *base)
{
	writel_relaxed(l2x0_saved_regs.aux2_ctrl,
		       base + TAUROS3_AUX2_CTRL);
	writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
		       base + L310_PREFETCH_CTRL);
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	.configure = tauros3_configure,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = l2c_resume,
	},
};
#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{ }
};
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id, old_aux;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	if (of_device_is_compatible(np, "arm,pl310-cache") &&
	    of_property_read_bool(np, "arm,io-coherent"))
		data = &of_l2c310_coherent_data;

	old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	if (old_aux != ((old_aux & aux_mask) | aux_val)) {
		pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
			old_aux, (old_aux & aux_mask) | aux_val);
	} else if (aux_mask != ~0U && aux_val != 0) {
		pr_alert("L2C: platform provided aux values match the hardware, so have no effect.  Please remove them.\n");
	}

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	/* Read back current (default) hardware configuration */
	if (data->save)
		data->save(l2x0_base);

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	return __l2c_init(data, aux_val, aux_mask, cache_id);
}
#endif