2 * Copyright (C) 2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 * Based on code by Carl van Schaik <carl@ok-labs.com>.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #include <asm/arch/cpu.h>
28 * SECURE_RAM to text_end :
29 * ._secure_text section
30 * text_end to ALIGN_PAGE(text_end):
32 * ALIGN_PAGE(text_end) to (ALIGN_PAGE(text_end) + 0x1000):
33 * 1kB of stack per CPU (4 CPUs max).
36 .pushsection ._secure.text, "ax"
@ Timer tick counts for the ARMv7 generic physical timer, which runs at
@ CONFIG_SYS_CLK_FREQ Hz: ONE_MS is the number of timer ticks per ms.
40 #define ONE_MS (CONFIG_SYS_CLK_FREQ / 1000)
41 #define TEN_MS (10 * ONE_MS)
@ Physical base addresses of the GIC distributor (GICD) and CPU interface
@ (GICC); GICC sits one 4 kB page above GICD on this SoC.
42 #define GICD_BASE 0x1c81000
43 #define GICC_BASE 0x1c82000
@ timer_wait reg, ticks -- busy-wait for \ticks cycles of the ARMv7
@ generic physical timer, using \reg as scratch (its value is clobbered).
@ CP15 c14/c2/0 is CNTP_TVAL (downcounter), c14/c2/1 is CNTP_CTL.
45 .macro timer_wait reg, ticks
@ Program the downcount value, built in two 16-bit halves (movw/movt).
47 movw \reg, #(\ticks & 0xffff)
48 movt \reg, #(\ticks >> 16)
49 mcr p15, 0, \reg, c14, c2, 0
51 @ Enable physical timer, mask interrupt
53 mcr p15, 0, \reg, c14, c2, 1
54 @ Poll physical timer until ISTATUS is on
56 mrc p15, 0, \reg, c14, c2, 1
@ NOTE(review): the poll loop's ISTATUS test/branch, the value written by
@ the final CNTP_CTL store (presumably disabling the timer), and the
@ closing .endm are on lines missing from this extract -- confirm against
@ the full source.
61 mcr p15, 0, \reg, c14, c2, 1
@ --- Secure/monitor initialisation (the entry label is on a line missing
@ from this extract). Routes SGI15 to Group 0 with top priority so it is
@ delivered as a secure FIQ, opens the CPU interface to non-secure,
@ enables FIQ trapping to monitor mode via SCR, and finally sets up a
@ per-CPU secure stack carved out of the pages above text_end (see the
@ layout comment at the top of the file).
67 movw r4, #(GICD_BASE & 0xffff)
68 movt r4, #(GICD_BASE >> 16)
70 ldr r5, [r4, #GICD_IGROUPRn]
71 bic r5, r5, #(1 << 15) @ SGI15 as Group-0
72 str r5, [r4, #GICD_IGROUPRn]
74 mov r5, #0 @ Set SGI15 priority to 0
75 strb r5, [r4, #(GICD_IPRIORITYRn + 15)]
@ GICC is mapped one page above GICD on this SoC.
77 add r4, r4, #0x1000 @ GICC address
@ NOTE(review): the value stored to GICC_PMR is loaded on a missing line
@ (presumably 0xff, the most permissive priority mask) -- confirm.
80 str r5, [r4, #GICC_PMR] @ Be cool with non-secure
82 ldr r5, [r4, #GICC_CTLR]
83 orr r5, r5, #(1 << 3) @ Switch FIQEn on
84 str r5, [r4, #GICC_CTLR]
@ SCR (CP15 c1/c1/0): set the FIQ bit so FIQs are taken to monitor mode,
@ and clear NS so we stay in the secure world.
86 mrc p15, 0, r5, c1, c1, 0 @ Read SCR
87 orr r5, r5, #4 @ Enable FIQ in monitor mode
88 bic r5, r5, #1 @ Secure mode
89 mcr p15, 0, r5, c1, c1, 0 @ Write SCR
@ Per-CPU secure stack: 1 kB each (4 CPUs max), indexed by the CPU's
@ affinity-0 field from MPIDR.
92 mrc p15, 0, r4, c0, c0, 5 @ MPIDR
93 and r4, r4, #3 @ cpu number in cluster
94 mov r5, #0x400 @ 1kB of stack per CPU
@ NOTE(review): the multiply of the CPU number (r4) by the stack size
@ (r5) appears to be on a missing line.
97 adr r5, text_end @ end of text
98 add r5, r5, #0x2000 @ Skip two pages
99 lsr r5, r5, #12 @ Align to start of page
@ NOTE(review): the matching left shift back ("lsl r5, r5, #12") that
@ completes the page alignment appears to be on a missing line.
101 sub sp, r5, r4 @ here's our stack!
105 .globl psci_fiq_enter
@ psci_fiq_enter -- secure FIQ handler (the label line itself is missing
@ from this extract). Triggered by SGI15: saves SCR in r7, enters the
@ secure world, acknowledges the interrupt, then powers down the CPU
@ that asked to be turned off via the sun7i CPU-config block.
110 mrc p15, 0, r7, c1, c1, 0
@ NOTE(review): r8 is prepared on a missing line (presumably SCR with the
@ NS bit cleared) before being written back to SCR here.
112 mcr p15, 0, r8, c1, c1, 0
115 @ Validate reason based on IAR and acknowledge
116 movw r8, #(GICC_BASE & 0xffff)
117 movt r8, #(GICC_BASE >> 16)
118 ldr r9, [r8, #GICC_IAR]
@ 0x3ff = 1023 (spurious) and 0x3fe = 1022 are not real interrupt IDs;
@ both are skipped. The 0x3ff load into r10 is on a missing line.
121 cmp r9, r10 @ skip spurious interrupt 1023
123 movw r10, #0x3fe @ ...and 1022
126 str r9, [r8, #GICC_EOIR] @ acknowledge the interrupt
@ --- power down the requesting CPU via the sun7i CPU-config block ---
133 movw r8, #(SUN7I_CPUCFG_BASE & 0xffff)
134 movt r8, #(SUN7I_CPUCFG_BASE >> 16)
136 @ Wait for the core to enter WFI
@ Each CPU has a 64-byte register window in the CPUCFG block; r9 holds
@ the CPU index at this point (set up on lines missing from this extract).
137 lsl r11, r9, #6 @ x64
@ NOTE(review): r11 is presumably offset by the CPUCFG base (r8) on a
@ missing line before this per-CPU status (offset 0x48) poll.
140 1: ldr r10, [r11, #0x48]
143 timer_wait r10, ONE_MS
@ Offset 0x40 is the per-CPU reset control; the value stored comes from
@ a missing line (presumably 0 = assert reset).
148 str r10, [r11, #0x40]
@ Build a one-hot mask for the target CPU, then read-modify-write two
@ CPUCFG registers (offsets 0x1e4 and 0x1b4). NOTE(review): the exact
@ semantics of these offsets (power gating/clock gating) must be
@ confirmed against the SoC manual; the modify step of each RMW is on a
@ missing line.
152 lsl r9, r10, r9 @ r9 is now CPU mask
153 ldr r10, [r8, #0x1e4]
155 str r10, [r8, #0x1e4]
158 ldr r10, [r8, #0x1b4]
160 str r10, [r8, #0x1b4]
161 timer_wait r10, ONE_MS
163 @ Activate power clamp
@ NOTE(review): the clamp value sequence and loop exit condition are on
@ missing lines; only the store to offset 0x1b0 remains visible.
165 1: str r10, [r8, #0x1b0]
171 @ Restore security level
172 out: mcr p15, 0, r7, c1, c1, 0
@ --- CPU bring-up (the entry label, e.g. the PSCI CPU_ON handler, is on
@ a line missing from this extract). r1 = target CPU id: the boot vector
@ is pointed at _sunxi_cpu_entry, the core is held in reset while its
@ power clamp and gating are released, then reset is deasserted.
185 movw r0, #(SUN7I_CPUCFG_BASE & 0xffff)
186 movt r0, #(SUN7I_CPUCFG_BASE >> 16)
189 and r1, r1, #3 @ only care about first cluster
@ Publish the secondary's entry point through the CPUCFG private register.
193 adr r6, _sunxi_cpu_entry
194 str r6, [r0, #0x1a4] @ PRIVATE_REG (boot vector)
196 @ Assert reset on target CPU
@ NOTE(review): r6 is presumably reloaded with the reset-control value on
@ a missing line; as shown it would still hold the entry address.
198 lsl r5, r1, #6 @ 64 bytes per CPU
199 add r5, r5, #0x40 @ Offset from base
200 add r5, r5, r0 @ CPU control block
201 str r6, [r5] @ Reset CPU
213 @ Release power clamp
@ NOTE(review): the clamp-release store/loop is on missing lines; only
@ the settle delay survives in this extract.
220 timer_wait r1, TEN_MS
227 @ Deassert reset on target CPU
@ (The deassert store itself is on missing lines.)
236 mov r0, #ARM_PSCI_RET_SUCCESS @ Return PSCI_RET_SUCCESS
242 /* Imported from Linux kernel */
@ v7_flush_dcache_all: clean and invalidate the entire data/unified
@ cache hierarchy by set/way, walking every cache level reported by
@ CLIDR up to the Level of Coherency. Clobbers r0-r5, r7, r9-r11, flags.
@ NOTE(review): the entry label and the loop labels (loop1/loop2/loop3,
@ skip, finished) referenced by the branches below are on lines missing
@ from this extract.
244 dmb @ ensure ordering with previous memory accesses
245 mrc p15, 1, r0, c0, c0, 1 @ read clidr
246 ands r3, r0, #0x7000000 @ extract loc from clidr
247 mov r3, r3, lsr #23 @ left align loc bit field
248 beq finished @ if loc is 0, then no need to clean
249 mov r10, #0 @ start clean at cache level 0
251 add r2, r10, r10, lsr #1 @ work out 3x current cache level
252 mov r1, r0, lsr r2 @ extract cache type bits from clidr
253 and r1, r1, #7 @ mask of the bits for current cache only
254 cmp r1, #2 @ see what cache we have at this level
255 blt skip @ skip if no cache, or just i-cache
256 mrs r9, cpsr @ make cssr&csidr read atomic
257 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
258 isb @ isb to sync the new cssr&csidr
259 mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
261 and r2, r1, #7 @ extract the length of the cache lines
262 add r2, r2, #4 @ add 4 (line length offset)
@ NOTE(review): the mask loads for r4 (max way number) and r7 (max set
@ number) are on missing lines -- presumably 0x3ff and 0x7fff as in the
@ kernel original; confirm against the full source.
264 ands r4, r4, r1, lsr #3 @ find maximum number on the way size
265 clz r5, r4 @ find bit position of way size increment
267 ands r7, r7, r1, lsr #13 @ extract max number of the index size
269 mov r9, r7 @ create working copy of max index
271 orr r11, r10, r4, lsl r5 @ factor way and cache number into r11
272 orr r11, r11, r9, lsl r2 @ factor index number into r11
273 mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
274 subs r9, r9, #1 @ decrement the index
276 subs r4, r4, #1 @ decrement the way
279 add r10, r10, #2 @ increment cache number
283 mov r10, #0 @ switch back to cache level 0
284 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
@ ACTLR (CP15 c1/c0/1) read-modify-write fragment; the enclosing label
@ and the modification between the read and the write are on lines
@ missing from this extract (presumably setting/clearing the SMP bit).
291 mrc p15, 0, r0, c1, c0, 1
293 mcr p15, 0, r0, c1, c0, 1
@ --- CPU-off teardown (entry label on a missing line). Standard ARMv7
@ power-down sequence: disable the D-cache in SCTLR, flush the whole
@ D-cache, drop out of SMP coherency via ACTLR, then signal CPU0 (via
@ SGI15, the FIQ handled by psci_fiq_enter) to cut this core's power.
305 mrc p15, 0, r0, c1, c0, 0 @ SCTLR
306 bic r0, r0, #(1 << 2) @ Clear C bit
307 mcr p15, 0, r0, c1, c0, 0 @ SCTLR
311 bl v7_flush_dcache_all
315 mrc p15, 0, r0, c1, c0, 1 @ ACTLR
316 bic r0, r0, #(1 << 6) @ Clear SMP bit
317 mcr p15, 0, r0, c1, c0, 1 @ ACTLR
321 @ Ask CPU0 to pull the rug...
322 movw r0, #(GICD_BASE & 0xffff)
323 movt r0, #(GICD_BASE >> 16)
@ GICD_SGIR: target CPU list lives in bits [23:16], so movt #1 sets
@ bit 16 = "CPU interface 0". NOTE(review): the load of the SGI number
@ into the low half of r1 (presumably SGI15) is on a missing line.
325 movt r1, #1 @ Target is CPU0
326 str r1, [r0, #GICD_SGIR]