]> git.kernelconcepts.de Git - karo-tx-uboot.git/blob - arch/sh/cpu/sh3/cache.c
sh: Move cpu/$CPU to arch/sh/cpu/$CPU
[karo-tx-uboot.git] / arch / sh / cpu / sh3 / cache.c
1 /*
2  * (C) Copyright 2007
3  * Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
4  *
5  * (C) Copyright 2007
6  * Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
7  *
8  * See file CREDITS for list of people who contributed to this
9  * project.
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License as
13  * published by the Free Software Foundation; either version 2 of
14  * the License, or (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19  * GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with this program; if not, write to the Free Software
23  * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
24  * MA 02111-1307 USA
25  */
26
27 #include <common.h>
28 #include <command.h>
29 #include <asm/processor.h>
30 #include <asm/io.h>
31
32 /*
33  * Jump to P2 area.
34  * When handling TLB or caches, we need to do it from P2 area.
35  */
36 #define jump_to_P2()                    \
37   do {                                    \
38     unsigned long __dummy;              \
39     __asm__ __volatile__(                       \
40                 "mov.l  1f, %0\n\t"     \
41                 "or     %1, %0\n\t"     \
42                 "jmp    @%0\n\t"        \
43                 " nop\n\t"              \
44                 ".balign 4\n"           \
45                 "1:     .long 2f\n"     \
46                 "2:"                    \
47                 : "=&r" (__dummy)       \
48                 : "r" (0x20000000));    \
49   } while (0)
50
51 /*
52  * Back to P1 area.
53  */
54 #define back_to_P1()                                    \
55   do {                                                    \
56     unsigned long __dummy;                          \
57     __asm__ __volatile__(                           \
58                 "nop;nop;nop;nop;nop;nop;nop\n\t"       \
59                 "mov.l  1f, %0\n\t"                     \
60                 "jmp    @%0\n\t"                        \
61                 " nop\n\t"                              \
62                 ".balign 4\n"                           \
63                 "1:     .long 2f\n"                     \
64                 "2:"                                    \
65                 : "=&r" (__dummy));                     \
66   } while (0)
67
68 #define CACHE_VALID       1
69 #define CACHE_UPDATED     2
70
71 static inline void cache_wback_all(void)
72 {
73         unsigned long addr, data, i, j;
74
75         jump_to_P2();
76         for (i = 0; i < CACHE_OC_NUM_ENTRIES; i++) {
77                 for (j = 0; j < CACHE_OC_NUM_WAYS; j++) {
78                         addr = CACHE_OC_ADDRESS_ARRAY
79                                 | (j << CACHE_OC_WAY_SHIFT)
80                                 | (i << CACHE_OC_ENTRY_SHIFT);
81                         data = inl(addr);
82                         if (data & CACHE_UPDATED) {
83                                 data &= ~CACHE_UPDATED;
84                                 outl(data, addr);
85                         }
86                 }
87         }
88         back_to_P1();
89 }
90
91
92 #define CACHE_ENABLE      0
93 #define CACHE_DISABLE     1
94
95 int cache_control(unsigned int cmd)
96 {
97         unsigned long ccr;
98
99         jump_to_P2();
100         ccr = inl(CCR);
101
102         if (ccr & CCR_CACHE_ENABLE)
103                 cache_wback_all();
104
105         if (cmd == CACHE_DISABLE)
106                 outl(CCR_CACHE_STOP, CCR);
107         else
108                 outl(CCR_CACHE_INIT, CCR);
109         back_to_P1();
110
111         return 0;
112 }