/*
 * Copyright (C) 2009 Samsung Electronics
 * Minkyu Kang <mk7.kang@samsung.com>
 *
 * based on cpu/arm_cortexa8/omap3/cache.S
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#include <asm/arch/cpu.h>

.align 5
.global invalidate_dcache
.global l2_cache_enable
.global l2_cache_disable

/*
 * invalidate_dcache()
 * Invalidate the whole D-cache.
 *
 * All working registers (r0 - r5, r7, r9 - r12) are saved on entry and
 * restored on exit, so nothing is corrupted for the caller.
 */
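/*
 * The code below is the generic ARMv7 invalidate-by-set/way sequence:
 * walk the cache levels reported in CLIDR and, for every data or
 * unified level, read CCSIDR to get the line length, number of ways
 * and number of sets, then issue DCISW (mcr p15, 0, Rt, c7, c6, 2)
 * for each set/way combination at that level.  Judging from the
 * comparison against 0xC100 below, r0 is assumed to carry the SoC
 * type on entry; on S5PC100 the whole sequence is skipped.
 */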
invalidate_dcache:
        stmfd   r13!, {r0 - r5, r7, r9 - r12, r14}

        cmp     r0, #0xC100                     @ check if the cpu is s5pc100

        beq     finished_inval                  @ s5pc100 doesn't need this
                                                @ routine
        mrc     p15, 1, r0, c0, c0, 1           @ read clidr
        ands    r3, r0, #0x7000000              @ extract loc from clidr
        mov     r3, r3, lsr #23                 @ shift loc down to bits 3:1
                                                @ (i.e. loc * 2)
        beq     finished_inval                  @ if loc is 0, then no need to
                                                @ clean
        mov     r10, #0                         @ start clean at cache level 0
inval_loop1:
        add     r2, r10, r10, lsr #1            @ work out 3x current cache
                                                @ level
        mov     r1, r0, lsr r2                  @ extract cache type bits from
                                                @ clidr
        and     r1, r1, #7                      @ mask off the bits for current
                                                @ cache only
        cmp     r1, #2                          @ see what cache we have at
                                                @ this level
        blt     skip_inval                      @ skip if no cache, or just
                                                @ i-cache
        mcr     p15, 2, r10, c0, c0, 0          @ select current cache level
                                                @ in cssr
        mov     r2, #0                          @ operand for mcr SBZ
        mcr     p15, 0, r2, c7, c5, 4           @ flush prefetch buffer to
                                                @ sync the new cssr&csidr,
                                                @ with armv7 this is 'isb',
                                                @ but we compile with armv5
        mrc     p15, 1, r1, c0, c0, 0           @ read the new csidr
        and     r2, r1, #7                      @ extract the length of the
                                                @ cache lines
        add     r2, r2, #4                      @ add 4 (line length offset)
        ldr     r4, =0x3ff
        ands    r4, r4, r1, lsr #3              @ find maximum way number
                                                @ (number of ways - 1)
        clz     r5, r4                          @ find bit position of way
                                                @ size increment
        ldr     r7, =0x7fff
        ands    r7, r7, r1, lsr #13             @ extract maximum set number
                                                @ (number of sets - 1)
inval_loop2:
        mov     r9, r4                          @ create working copy of max
                                                @ way size
inval_loop3:
        orr     r11, r10, r9, lsl r5            @ factor way and cache number
                                                @ into r11
        orr     r11, r11, r7, lsl r2            @ factor index number into r11
        mcr     p15, 0, r11, c7, c6, 2          @ invalidate by set/way
        subs    r9, r9, #1                      @ decrement the way
        bge     inval_loop3
        subs    r7, r7, #1                      @ decrement the index
        bge     inval_loop2
skip_inval:
        add     r10, r10, #2                    @ increment cache number
        cmp     r3, r10
        bgt     inval_loop1
finished_inval:
        mov     r10, #0                         @ switch back to cache level 0
        mcr     p15, 2, r10, c0, c0, 0          @ select current cache level
                                                @ in cssr
        mcr     p15, 0, r10, c7, c5, 4          @ flush prefetch buffer,
                                                @ with armv7 this is 'isb',
                                                @ but we compile with armv5

        ldmfd   r13!, {r0 - r5, r7, r9 - r12, pc}

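/*
 * l2_cache_enable() / l2_cache_disable()
 *
 * On the Cortex-A8 the internal L2 cache is controlled by the L2EN bit
 * (bit 1) of the auxiliary control register, CP15 c1, c0, 1; the two
 * routines below simply set or clear that bit.  Nothing outside the
 * AAPCS caller-saved registers r0 - r3 is clobbered, so they can be
 * called directly from C.
 */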
l2_cache_enable:
        push    {r0, r1, r2, lr}
        mrc     p15, 0, r3, c1, c0, 1           @ read auxiliary control reg
        orr     r3, r3, #2                      @ set L2EN (bit 1)
        mcr     p15, 0, r3, c1, c0, 1           @ write it back
        pop     {r1, r2, r3, pc}

l2_cache_disable:
        push    {r0, r1, r2, lr}
        mrc     p15, 0, r3, c1, c0, 1           @ read auxiliary control reg
        bic     r3, r3, #2                      @ clear L2EN (bit 1)
        mcr     p15, 0, r3, c1, c0, 1           @ write it back
        pop     {r1, r2, r3, pc}
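
/*
 * Rough usage sketch.  The C prototypes below are assumptions (they are
 * not taken from a header in this file), but they match how the routines
 * use their registers:
 *
 *      void invalidate_dcache(unsigned int soc_type);
 *      void l2_cache_enable(void);
 *      void l2_cache_disable(void);
 *
 * soc_type is the value compared against 0xC100 above, so on S5PC100 the
 * set/way invalidation loop is skipped.
 */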