/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG
#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
wait:		mrc	p14, 0, pc, c0, c1, 0
		bcs	wait
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
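
		/*
		 * The v7 writeb above polls the DCC status register (the
		 * mrc with a pc destination moves the status flags straight
		 * into the CPSR), looping until the debugger has drained the
		 * channel, then writes the character. A rough C sketch of
		 * the same idea (helper names here are illustrative, not
		 * real kernel functions):
		 *
		 *	while (dcc_status() & DCC_TX_FULL)
		 *		;			// bcs wait
		 *	dcc_write(ch);			// mcr p14, 0, ch, c0, c5, 0
		 */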
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif
#else

#include <mach/debug-macro.S>

		.macro	writeb,	ch, rb
		senduart \ch, \rb
		.endm
#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#elif defined(CONFIG_ARCH_S3C2410)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x50000000
		add	\rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
		.endm
#else
		.macro	loadsp,	rb, tmp
		addruart \rb, \tmp
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm
		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm
		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
		.rept	7
		mov	r0, r0
		.endr
   ARM(		mov	r0, r0		)
   ARM(		b	1f		)
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)

		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
 THUMB(		.thumb			)
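
		/*
		 * A note on the three words above: bootloaders typically
		 * recognise a zImage by the 0x016f2818 magic word, which
		 * sits at a fixed offset right after the entry nops, and
		 * can then read the link-time start and end addresses that
		 * follow it. In C terms (an illustrative sketch):
		 *
		 *	struct zimage_mark {
		 *		u32 magic;	// 0x016f2818
		 *		u32 start;	// absolute load/run address
		 *		u32 end;	// _edata, i.e. image end
		 *	};
		 */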
1:		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

#ifndef __ARM_ARCH_2__
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		mrs	r2, cpsr		@ turn off interrupts to
		orr	r2, r2, #0xc0		@ prevent angel from running
		msr	cpsr_c, r2
#else
		teqp	pc, #0x0c000003		@ turn off interrupts
#endif

		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */
		.text

#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
		and	r4, r4, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif
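
		/*
		 * In C terms the CONFIG_AUTO_ZRELADDR path above computes
		 * (an illustrative sketch):
		 *
		 *	r4 = (pc & 0xf8000000) + TEXT_OFFSET;
		 *
		 * i.e. the final kernel address is TEXT_OFFSET bytes above
		 * the start of the 128MB region the decompressor is
		 * currently executing in.
		 */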
		bl	cache_on

restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r9, r11, r12}
		ldr	sp, [r0, #28]

/*
 * We might be running at a different address.  We need
 * to fix up various pointers.
 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference point is _edata.
		 */
		mov	r10, r6
#endif
		/*
		 * Check to see if we will overwrite ourselves.
		 *   r4  = final kernel address
		 *   r9  = size of decompressed image
		 *   r10 = end of this image, including bss/stack/malloc space if non XIP
		 * We basically want:
		 *   r4 - 16k page directory >= r10 -> OK
		 *   r4 + image length <= current position (pc) -> OK
		 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
 ARM(		cmp	r10, pc		)
 THUMB(		mov	lr, pc		)
 THUMB(		cmp	r10, lr		)
		bls	wont_overwrite
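
		/*
		 * The two tests above, as C (an illustrative sketch; pc is
		 * the current execution address):
		 *
		 *	if (r4 - 16384 >= r10 ||  // kernel + its page dir
		 *	    r4 + r9 <= pc)	  //   sit clear of this image
		 *		goto wont_overwrite;  // decompress in place
		 */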
		/*
		 * Relocate ourselves past the end of the decompressed kernel.
		 *   r6  = _edata
		 *   r10 = end of the decompressed kernel
		 * Because we always copy ahead, we need to do it from the end and go
		 * backward in case the source and destination overlap.
		 */

		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added. This avoids overwriting
		 * ourself when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b
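
		/*
		 * In C, the copy loop above is essentially (an illustrative
		 * sketch; the real loop moves 32 bytes per ldm/stm pair):
		 *
		 *	u32 *src = code_end;		// r6, aligned up
		 *	u32 *dst = code_end + delta;	// r9
		 *	while (src > code_start)	// r5, aligned down
		 *		*--dst = *--src;
		 *
		 * Copying backward is what makes the overlapping source and
		 * destination windows safe.
		 */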
		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		bl	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0
wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		teq	r0, #0
		beq	not_relocated
		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif
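
		/*
		 * Both GOT fixup loops above amount to the following C (an
		 * illustrative sketch; delta is r0):
		 *
		 *	for (u32 *e = got_start; e < got_end; e++) {
		 *		// ZBOOT_ROM only: entries already pointing
		 *		// into the relocated BSS are left alone
		 *		if (zboot_rom && *e >= bss_start && *e <= bss_end)
		 *			continue;
		 *		*e += delta;
		 *	}
		 */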
not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

/*
 * The C runtime environment should now be set up sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
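
		/*
		 * The register setup above matches the C entry point in
		 * misc.c; roughly (a sketch of the calling convention, not
		 * a verbatim prototype):
		 *
		 *	decompress_kernel(output_start,		// r0 = r4
		 *			  free_mem_ptr,		// r1 = sp
		 *			  free_mem_ptr_end,	// r2 = sp + 64k
		 *			  arch_id);		// r3 = r7
		 */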
		mov	r0, #0			@ must be zero
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
		mov	pc, r4			@ call kernel
		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	_image_size		@ r9
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	user_stack_end		@ sp
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif
/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7
 * to cover the whole 32-bit address space, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3

		mov	pc, lr

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12
		orr	r1, r1, #3 << 10
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
		cmp	r1, r10			@ if virt > end of RAM
		bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
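
		/*
		 * In C, __setup_mmu so far builds 4096 one-to-one section
		 * entries (an illustrative sketch; like the assembly, the
		 * comparisons are done on the descriptor with its low flag
		 * bits already set):
		 *
		 *	u32 *pgd = (u32 *)((r4 - 16384) & ~0x3fff);
		 *	u32 desc = 0x12 | (3 << 10);	// section, AP=11
		 *	for (int i = 0; i < 4096; i++) {
		 *		if (desc >= ram_start && desc < ram_end)
		 *			desc |= 0x0c;	// C+B inside RAM
		 *		else
		 *			desc &= ~0x0c;
		 *		pgd[i] = desc;		// 1:1 mapping
		 *		desc += 1 << 20;	// next 1MB section
		 *	}
		 */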
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		mov	r1, #0x1e
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #-1
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
#endif
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__fa526_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__arm6_mmu_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	r0, #0x30
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif
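
		/*
		 * Note on "sub pc, lr, r0, lsr #32" above: a logical shift
		 * right by 32 yields 0, so this is effectively "mov pc, lr";
		 * making the return address depend on r0 forces the mrc
		 * read-back (and thus the control register write before it)
		 * to complete before execution continues at the caller.
		 */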
/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */
call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3	)	@ call cache function
 THUMB(		addeq	r12, r3		)
 THUMB(		moveq	pc, r12		)	@ call cache function
		add	r12, r12, #4*5
		b	1b
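
		/*
		 * The lookup above, sketched in C (each table entry is five
		 * words; r3 selects the method slot: 8 = on, 12 = off,
		 * 16 = flush; jump() stands in for "addeq pc, r12, r3"):
		 *
		 *	for (u32 *e = proc_types; ; e += 5)
		 *		if (((cpu_id ^ e[0]) & e[1]) == 0)
		 *			jump((char *)e + r3);
		 *
		 * The catch-all entry at the end of the table has mask 0,
		 * so the walk always terminates.
		 */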
/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41560600		@ ARM6/610
		.word	0xffffffe0
		W(b)	__arm6_mmu_cache_off	@ works, but slow
		W(b)	__arm6_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)
@		b	__arm6_mmu_cache_on		@ untested
@		b	__arm6_mmu_cache_off
@		b	__armv3_mmu_cache_flush

		.word	0x00000000		@ old ARM ID
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		W(b)	__arm7_mmu_cache_off
		W(b)	__arm7_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00fff0
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00fff0
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x560f5810		@ Marvell PJ4 ARMv6
		.word	0xff0ffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types
/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__arm6_mmu_cache_off:
		mov	r0, #0x00000030		@ ARM6 control reg.
		b	__armv3_mmu_cache_off

__arm7_mmu_cache_off:
		mov	r0, #0x00000070		@ ARM7 control reg.
		b	__armv3_mmu_cache_off

__armv3_mmu_cache_off:
		mcr	p15, 0, r0, c1, c0, 0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	pc, lr
/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr

__fa526_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv6_mmu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
__armv7_mmu_cache_flush:
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask of the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum way number
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract maximum set/index number
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr
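
		/*
		 * The set/way walk above, in C (an illustrative sketch; the
		 * field layouts are the architected CLIDR/CCSIDR encodings):
		 *
		 *	levels = (clidr >> 24) & 7;		// LoC
		 *	for (level = 0; level < levels; level++) {
		 *		type = (clidr >> (3 * level)) & 7;
		 *		if (type < 2)
		 *			continue;		// none, or I-only
		 *		write_csselr(level << 1); isb();
		 *		line = (ccsidr & 7) + 4;	// log2(bytes/line)
		 *		ways = (ccsidr >> 3) & 0x3ff;	// max way index
		 *		sets = (ccsidr >> 13) & 0x7fff;	// max set index
		 *		shift = clz(ways);		// way field position
		 *		for (s = sets; s >= 0; s--)
		 *			for (w = ways; w >= 0; w--)
		 *				dccisw((w << shift) |
		 *				       (s << line) | (level << 1));
		 *	}
		 */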
__armv5tej_mmu_cache_flush:
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr
__armv4_mmu_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11	)	@ s/w flush D cache
 THUMB(		ldr	r3, [r1]	)	@ s/w flush D cache
 THUMB(		add	r1, r1, r11	)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
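
		/*
		 * The sizing logic above decodes the pre-ARMv7 cache type
		 * register (r9 still holds the CPU ID from call_cache_fn);
		 * roughly, in C (an illustrative sketch):
		 *
		 *	if (ctr != cpu_id) {			// CTR exists
		 *		dsize2 = 1024 << ((ctr >> 18) & 7); // 2x D-size
		 *		if (ctr & (1 << 14))		// M bit
		 *			dsize2 += dsize2 / 2;
		 *		line = 8 << ((ctr >> 12) & 3);	// bytes
		 *	}
		 *
		 * It then reads one word per line across twice the D-cache
		 * size, evicting (and writing back) every dirty line before
		 * the final I/D flush and write buffer drain.
		 */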
__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf
@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r1
1:		ldrb	r2, [r0], #1
@ putc corrupts {r0, r1, r2, r3}
@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
#endif

		.ltorg
reloc_code_end:

		.align
		.section ".stack", "aw", %nobits
user_stack:	.space	4096
user_stack_end: