Merge remote-tracking branch 'cortex/for-next'
author	Stephen Rothwell <sfr@canb.auug.org.au>
	Wed, 14 Nov 2012 04:25:02 +0000 (15:25 +1100)
committer	Stephen Rothwell <sfr@canb.auug.org.au>
	Wed, 14 Nov 2012 04:25:02 +0000 (15:25 +1100)
25 files changed:
arch/arm/include/asm/assembler.h
arch/arm/include/asm/cp15.h
arch/arm/include/asm/cputype.h
arch/arm/include/asm/glue-cache.h
arch/arm/include/asm/glue-df.h
arch/arm/include/asm/glue-proc.h
arch/arm/include/asm/irqflags.h
arch/arm/include/asm/processor.h
arch/arm/include/asm/ptrace.h
arch/arm/include/asm/system_info.h
arch/arm/include/uapi/asm/ptrace.h
arch/arm/kernel/asm-offsets.c
arch/arm/kernel/entry-common.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/entry-v7m.S [new file with mode: 0644]
arch/arm/kernel/head-common.S
arch/arm/kernel/head-nommu.S
arch/arm/kernel/process.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/setup.c
arch/arm/kernel/traps.c
arch/arm/mm/alignment.c
arch/arm/mm/mmu.c
arch/arm/mm/nommu.c
arch/arm/mm/proc-v7m.S [new file with mode: 0644]

index 2ef95813fce00b887dbd05020b35675870a7c929..ab7c02c92ed352a9fb31e3c8c7c2bf06dc3d6d38 100644 (file)
  * assumes FIQs are enabled, and that the processor is in SVC mode.
  */
        .macro  save_and_disable_irqs, oldcpsr
+#ifdef CONFIG_CPU_V7M
+       mrs     \oldcpsr, primask
+#else
        mrs     \oldcpsr, cpsr
+#endif
        disable_irq
        .endm
 
  * guarantee that this will preserve the flags.
  */
        .macro  restore_irqs_notrace, oldcpsr
+#ifdef CONFIG_CPU_V7M
+       msr     primask, \oldcpsr
+#else
        msr     cpsr_c, \oldcpsr
+#endif
        .endm
 
        .macro restore_irqs, oldcpsr
 #endif
        .endm
 
-#ifdef CONFIG_THUMB2_KERNEL
+#if defined(CONFIG_CPU_V7M)
+       .macro  setmode, mode, reg
+       .endm
+#elif defined(CONFIG_THUMB2_KERNEL)
        .macro  setmode, mode, reg
        mov     \reg, #\mode
        msr     cpsr_c, \reg
index 5ef4d8015a6043432bbc86f8905e0d635deb1f23..d81443557617e1032f85a4a542a6153d2d97b320 100644 (file)
@@ -42,6 +42,8 @@
 #define vectors_high() (0)
 #endif
 
+#ifdef CONFIG_CPU_CP15
+
 extern unsigned long cr_no_alignment;  /* defined in entry-armv.S */
 extern unsigned long cr_alignment;     /* defined in entry-armv.S */
 
@@ -82,6 +84,13 @@ static inline void set_copro_access(unsigned int val)
        isb();
 }
 
-#endif
+#else /* ifdef CONFIG_CPU_CP15 */
+
+#define cr_no_alignment        UL(0)
+#define cr_alignment   UL(0)
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
+#endif /* ifndef __ASSEMBLY__ */
 
 #endif
index cb47d28cbe1f81c4cfb43de4d1f08dbcec9f0cf7..5bd8cb6d93ffc7fd531569b970626a9626bab055 100644 (file)
@@ -46,6 +46,9 @@ extern unsigned int processor_id;
                    : "cc");                                            \
                __val;                                                  \
        })
+#elif defined(CONFIG_CPU_V7M)
+#define read_cpuid(reg) (*(unsigned int *)0xe000ed00)
+#define read_cpuid_ext(reg) 0
 #else
 #define read_cpuid(reg) (processor_id)
 #define read_cpuid_ext(reg) 0
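
On ARMv7-M there is no CP15, so the ID is read from the memory-mapped CPUID register at 0xe000ed00; the value has the same field layout as the classic MIDR. A minimal decode sketch follows — the struct and helper names are illustrative, not part of the patch:

/* Illustrative decode of the v7-M CPUID register at 0xe000ed00
 * (field layout per the ARMv7-M ARM; not part of the patch).
 */
struct v7m_cpuid {
        unsigned char  implementer;     /* [31:24], 0x41 = ARM */
        unsigned char  variant;         /* [23:20] */
        unsigned char  architecture;    /* [19:16], 0xf = ARMv7-M */
        unsigned short partno;          /* [15:4], e.g. 0xc23 = Cortex-M3 */
        unsigned char  revision;        /* [3:0] */
};

static inline struct v7m_cpuid v7m_decode_cpuid(unsigned int id)
{
        struct v7m_cpuid c = {
                .implementer  = id >> 24,
                .variant      = (id >> 20) & 0xf,
                .architecture = (id >> 16) & 0xf,
                .partno       = (id >> 4) & 0xfff,
                .revision     = id & 0xf,
        };
        return c;
}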
index cca9f15704ed82bb2726ce8898611be42d28b5ff..ea986586068a5e003c112e91d71b45dc6ab9fa00 100644 (file)
 # endif
 #endif
 
+#if defined(CONFIG_CPU_V7M)
+# ifdef _CACHE
+#  error "Multi-cache not supported on ARMv7-M"
+# else
+#  define _CACHE nop
+# endif
+#endif
+
 #if !defined(_CACHE) && !defined(MULTI_CACHE)
 #error Unknown cache maintenance model
 #endif
 
+#ifndef __ASSEMBLER__
+static inline void nop_flush_icache_all(void) { }
+static inline void nop_flush_kern_cache_all(void) { }
+static inline void nop_flush_kern_cache_louis(void) { }
+static inline void nop_flush_user_cache_all(void) { }
+static inline void nop_flush_user_cache_range(unsigned long a, unsigned long b, unsigned int c) { }
+
+static inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { }
+static inline int nop_coherent_user_range(unsigned long a, unsigned long b) { return 0; }
+static inline void nop_flush_kern_dcache_area(void *a, size_t s) { }
+
+static inline void nop_dma_flush_range(const void *a, const void *b) { }
+
+static inline void nop_dma_map_area(const void *s, size_t l, int f) { }
+static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
+#endif
+
 #ifndef MULTI_CACHE
 #define __cpuc_flush_icache_all                __glue(_CACHE,_flush_icache_all)
 #define __cpuc_flush_kern_all          __glue(_CACHE,_flush_kern_cache_all)
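
With _CACHE defined to nop, the __glue() paste resolves every __cpuc_* entry point to one of the empty inlines above, so cache maintenance compiles away on cacheless cores such as the Cortex-M3. A sketch of the effect (the wrapper function is illustrative only):

/* sketch: on ARMv7-M this resolves to nop_coherent_kern_range()
 * via __glue(nop, _coherent_kern_range) and compiles to nothing
 */
static inline void v7m_sync_icache_example(unsigned long start, unsigned long end)
{
        __cpuc_coherent_kern_range(start, end);
}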
index 8cacbcda76da986d6cf0c95d6b604eb50c574b0c..1f2339cdac0b5134b57a918e9ad2164ce891e479 100644 (file)
 # endif
 #endif
 
+#ifdef CONFIG_CPU_ABRT_NOMMU
+# ifdef CPU_DABORT_HANDLER
+#  define MULTI_DABORT 1
+# else
+#  define CPU_DABORT_HANDLER nommu_early_abort
+# endif
+#endif
+
 #ifndef CPU_DABORT_HANDLER
 #error Unknown data abort handler type
 #endif
index ac1dd54724b6a073415cc6a6704284c81556d6da..f2f39bcf7945a29de90f59d6afdd79e67bd06f43 100644 (file)
 # endif
 #endif
 
+#ifdef CONFIG_CPU_V7M
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_v7m
+# endif
+#endif
+
 #ifndef MULTI_CPU
 #define cpu_proc_init                  __glue(CPU_NAME,_proc_init)
 #define cpu_proc_fin                   __glue(CPU_NAME,_proc_fin)
index 1e6cca55c750486b98ef85ca5a04c9af471d64ea..3b763d6652a0aee4ba7a81bc591715c2084d1b10 100644 (file)
@@ -8,6 +8,16 @@
 /*
  * CPU interrupt mask handling.
  */
+#ifdef CONFIG_CPU_V7M
+#define IRQMASK_REG_NAME_R "primask"
+#define IRQMASK_REG_NAME_W "primask"
+#define IRQMASK_I_BIT  1
+#else
+#define IRQMASK_REG_NAME_R "cpsr"
+#define IRQMASK_REG_NAME_W "cpsr_c"
+#define IRQMASK_I_BIT  PSR_I_BIT
+#endif
+
 #if __LINUX_ARM_ARCH__ >= 6
 
 static inline unsigned long arch_local_irq_save(void)
@@ -15,7 +25,7 @@ static inline unsigned long arch_local_irq_save(void)
        unsigned long flags;
 
        asm volatile(
-               "       mrs     %0, cpsr        @ arch_local_irq_save\n"
+               "       mrs     %0, " IRQMASK_REG_NAME_R "      @ arch_local_irq_save\n"
                "       cpsid   i"
                : "=r" (flags) : : "memory", "cc");
        return flags;
@@ -129,7 +139,7 @@ static inline unsigned long arch_local_save_flags(void)
 {
        unsigned long flags;
        asm volatile(
-               "       mrs     %0, cpsr        @ local_save_flags"
+               "       mrs     %0, " IRQMASK_REG_NAME_R "      @ local_save_flags"
                : "=r" (flags) : : "memory", "cc");
        return flags;
 }
@@ -140,7 +150,7 @@ static inline unsigned long arch_local_save_flags(void)
 static inline void arch_local_irq_restore(unsigned long flags)
 {
        asm volatile(
-               "       msr     cpsr_c, %0      @ local_irq_restore"
+               "       msr     " IRQMASK_REG_NAME_W ", %0      @ local_irq_restore"
                :
                : "r" (flags)
                : "memory", "cc");
@@ -148,8 +158,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-       return flags & PSR_I_BIT;
+       return flags & IRQMASK_I_BIT;
 }
 
-#endif
-#endif
+#endif /* ifdef __KERNEL__ */
+#endif /* ifndef __ASM_ARM_IRQFLAGS_H */
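
The IRQMASK_* indirection keeps the generic kernel idiom unchanged; only the register behind it differs (PRIMASK on v7-M, CPSR elsewhere). A minimal usage sketch, with an illustrative function name:

static void example_critical_section(void)
{
        unsigned long flags;

        flags = arch_local_irq_save();  /* v7-M: mrs flags, primask; cpsid i */
        /* ... work that must not be interrupted ... */
        arch_local_irq_restore(flags);  /* v7-M: msr primask, flags */
}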
index 06e7d509eaac218864cc9d089ce9e6f4177c22f9..5e61b88867963d65f8aad66b7b80aca784827fce 100644 (file)
@@ -49,7 +49,14 @@ struct thread_struct {
 #ifdef CONFIG_MMU
 #define nommu_start_thread(regs) do { } while (0)
 #else
+#ifndef CONFIG_CPU_V7M
 #define nommu_start_thread(regs) regs->ARM_r10 = current->mm->start_data
+#else
+#define nommu_start_thread(regs) do {                                  \
+       regs->ARM_r10 = current->mm->start_data;                        \
+       regs->ARM_EXC_RET = 0xfffffffdUL;                               \
+} while (0)
+#endif
 #endif
 
 #define start_thread(regs,pc,sp)                                       \
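
The literal 0xfffffffd stored in ARM_EXC_RET is one of the architectural EXC_RETURN encodings. For reference — the names below are illustrative, the patch itself uses the raw literals:

/* ARMv7-M EXC_RETURN values (bit 4 set: basic, non-FP frame) */
#define V7M_EXC_RET_HANDLER_MSP 0xfffffff1UL    /* handler mode, main stack */
#define V7M_EXC_RET_THREAD_MSP  0xfffffff9UL    /* thread mode, main stack */
#define V7M_EXC_RET_THREAD_PSP  0xfffffffdUL    /* thread mode, process stack */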
index 3d52ee1bfb3113e113f231807dba71bc739ceac0..67661e8003ee2684f0a62e914cdd7b65b57912fe 100644 (file)
 
 #ifndef __ASSEMBLY__
 struct pt_regs {
+#ifdef CONFIG_CPU_V7M
+       unsigned long uregs[20];
+#else
        unsigned long uregs[18];
+#endif
 };
 
 #define user_mode(regs)        \
@@ -45,6 +49,7 @@ struct pt_regs {
  */
 static inline int valid_user_regs(struct pt_regs *regs)
 {
+#ifndef CONFIG_CPU_V7M
        unsigned long mode = regs->ARM_cpsr & MODE_MASK;
 
        /*
@@ -67,6 +72,9 @@ static inline int valid_user_regs(struct pt_regs *regs)
                regs->ARM_cpsr |= USR_MODE;
 
        return 0;
+#else /* ifndef CONFIG_CPU_V7M */
+       return 1;
+#endif
 }
 
 static inline long regs_return_value(struct pt_regs *regs)
index dfd386d0c022189ad0b9574c21db22039122d405..720ea0320a6d58a28189911ed714a4e3262569e2 100644 (file)
@@ -11,6 +11,7 @@
 #define CPU_ARCH_ARMv5TEJ      7
 #define CPU_ARCH_ARMv6         8
 #define CPU_ARCH_ARMv7         9
+#define CPU_ARCH_ARMv7M                10
 
 #ifndef __ASSEMBLY__
 
index 96ee0929790f5d5ad2156f2262c9efb809f7cc3e..2ae7d1be24670ae3e16836163b0b6b6aa5e5a2b4 100644 (file)
 
 /*
  * PSR bits
+ * Note: on V7M the PSR contains no mode field
  */
 #define USR26_MODE     0x00000000
 #define FIQ26_MODE     0x00000001
 #define IRQ26_MODE     0x00000002
 #define SVC26_MODE     0x00000003
+#if defined(__KERNEL__) && defined(CONFIG_CPU_V7M)
+/*
+ * Use 0 here so that code creating a user-space or kernel-space
+ * thread works unchanged.
+ */
+#define USR_MODE       0x00000000
+#define SVC_MODE       0x00000000
+#else
 #define USR_MODE       0x00000010
+#define SVC_MODE       0x00000013
+#endif
 #define FIQ_MODE       0x00000011
 #define IRQ_MODE       0x00000012
-#define SVC_MODE       0x00000013
 #define ABT_MODE       0x00000017
 #define HYP_MODE       0x0000001a
 #define UND_MODE       0x0000001b
 #define SYSTEM_MODE    0x0000001f
 #define MODE32_BIT     0x00000010
 #define MODE_MASK      0x0000001f
-#define PSR_T_BIT      0x00000020
-#define PSR_F_BIT      0x00000040
-#define PSR_I_BIT      0x00000080
-#define PSR_A_BIT      0x00000100
-#define PSR_E_BIT      0x00000200
-#define PSR_J_BIT      0x01000000
-#define PSR_Q_BIT      0x08000000
+
+#define V4_PSR_T_BIT   0x00000020      /* >= V4T, but not V7M */
+#define V7M_PSR_T_BIT  0x01000000
+#if defined(__KERNEL__) && defined(CONFIG_CPU_V7M)
+#define PSR_T_BIT      V7M_PSR_T_BIT
+#else
+/* for compatibility */
+#define PSR_T_BIT      V4_PSR_T_BIT
+#endif
+
+#define PSR_F_BIT      0x00000040      /* >= V4, but not V7M */
+#define PSR_I_BIT      0x00000080      /* >= V4, but not V7M */
+#define PSR_A_BIT      0x00000100      /* >= V6, but not V7M */
+#define PSR_E_BIT      0x00000200      /* >= V6, but not V7M */
+#define PSR_J_BIT      0x01000000      /* >= V5J, but not V7M */
+#define PSR_Q_BIT      0x08000000      /* >= V5E, including V7M */
 #define PSR_V_BIT      0x10000000
 #define PSR_C_BIT      0x20000000
 #define PSR_Z_BIT      0x40000000
@@ -125,6 +144,7 @@ struct pt_regs {
 #define ARM_r1         uregs[1]
 #define ARM_r0         uregs[0]
 #define ARM_ORIG_r0    uregs[17]
+#define ARM_EXC_RET    uregs[18]
 
 /*
  * The size of the user-visible VFP state as seen by PTRACE_GET/SETVFPREGS
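
With the EXC_RET slot, the v7-M pt_regs grows from 18 to 20 words. The slot assignment implied by the ARM_* defines, as a sketch (the enum is illustrative; slot 19 appears to be padding that keeps the 20-word frame 64-bit aligned):

enum v7m_pt_regs_slot {
        /* slots 0-12 hold r0-r12 */
        V7M_REG_SP      = 13,
        V7M_REG_LR      = 14,
        V7M_REG_PC      = 15,
        V7M_REG_PSR     = 16,   /* xPSR, accessed as ARM_cpsr */
        V7M_REG_ORIG_R0 = 17,
        V7M_REG_EXC_RET = 18,
        /* slot 19: unused, pads the frame to 8-byte alignment */
};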
index c985b481192c2643e938425a57e3e6fc9866ba90..5fe9ace8a6683bdb75d3360201fcc2aab0d29ea4 100644 (file)
@@ -93,6 +93,9 @@ int main(void)
   DEFINE(S_PC,                 offsetof(struct pt_regs, ARM_pc));
   DEFINE(S_PSR,                        offsetof(struct pt_regs, ARM_cpsr));
   DEFINE(S_OLD_R0,             offsetof(struct pt_regs, ARM_ORIG_r0));
+#ifdef CONFIG_CPU_V7M
+  DEFINE(S_EXC_RET,            offsetof(struct pt_regs, ARM_EXC_RET));
+#endif
   DEFINE(S_FRAME_SIZE,         sizeof(struct pt_regs));
   BLANK();
 #ifdef CONFIG_CACHE_L2X0
index 34711757ba59a958592d31ea527170c1356c282c..48d8dc033871fc3858366d446163780e5a9e74e6 100644 (file)
@@ -339,6 +339,9 @@ ENDPROC(ftrace_stub)
 
        .align  5
 ENTRY(vector_swi)
+#ifdef CONFIG_CPU_V7M
+       v7m_exception_entry
+#else
        sub     sp, sp, #S_FRAME_SIZE
        stmia   sp, {r0 - r12}                  @ Calling r0 - r12
  ARM(  add     r8, sp, #S_PC           )
@@ -349,6 +352,7 @@ ENTRY(vector_swi)
        str     lr, [sp, #S_PC]                 @ Save calling PC
        str     r8, [sp, #S_PSR]                @ Save CPSR
        str     r0, [sp, #S_OLD_R0]             @ Save OLD_R0
+#endif
        zero_fp
 
        /*
index 9a8531eadd3da437ccadd6f5b60eac6a16808607..33d9900d04ee13e06d13ee5c3e4094d39704bb58 100644 (file)
 #endif
        .endm
 
+#ifdef CONFIG_CPU_V7M
+/*
+ * ARMv7-M exception entry/exit macros.
+ *
+ * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
+ * automatically saved on the current stack (8 words, 32 bytes) before
+ * switching to the exception stack (SP_main).
+ *
+ * If the exception is taken while in user mode, SP_main is
+ * empty. Otherwise, SP_main is automatically aligned to 64 bits
+ * (CCR.STKALIGN set).
+ *
+ * Linux assumes that interrupts are disabled when entering an
+ * exception handler and may BUG if this is not the case. Interrupts
+ * are disabled on entry and re-enabled in the exit macros.
+ *
+ * v7m_exception_fast_exit is used when returning from interrupts.
+ *
+ * v7m_exception_slow_exit is used when returning from SVC or PendSV.
+ * When returning to kernel mode, we don't return from exception.
+ */
+       .macro  v7m_exception_entry
+       @ Determine the location of the registers saved by the core during
+       @ exception entry. Depending on the mode the CPU was in when the
+       @ exception happened, that is either the main or the process stack.
+       @ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
+       @ was used.
+       tst     lr, #0x4
+       mrsne   r12, psp
+       moveq   r12, sp
+
+       @ We cannot rely on r0-r3 and r12 matching the values saved in the
+       @ exception frame because of tail-chaining, so they have to be
+       @ reloaded.
+       ldmia   r12!, {r0-r3}
+
+       @ Linux expects to have irqs off. Do it here before taking stack space
+       cpsid   i
+
+       sub     sp, #S_FRAME_SIZE-S_IP
+       stmdb   sp!, {r0-r11}
+
+       @ load saved r12, lr, return address and xPSR.
+       @ r0-r7 are used for signals and never touched from now on. Clobbering
+       @ r8-r12 is OK.
+       mov     r9, r12
+       ldmia   r9!, {r8, r10-r12}
+
+       @ calculate the original stack pointer value.
+       @ r9 currently points to the memory location just above the auto saved
+       @ xPSR. If the FP extension is implemented and bit 4 of EXC_RETURN is 0
+       @ then space was allocated for FP state. That is space for 18 32-bit
+       @ values. (If FP extension is unimplemented, bit 4 is 1.)
+       @ Additionally the cpu might automatically 8-byte align the stack. Bit 9
+       @ of the saved xPSR specifies if stack aligning took place. In this case
+       @ another 32-bit value is included in the stack.
+
+       tst     lr, #0x10
+       addeq   r9, r9, #72             @ 18 words = 72 bytes of FP state
+
+       tst     r12, #0x100
+       addne   r9, r9, #4
+
+       @ store saved r12 using str to have a register to hold the base for stm
+       str     r8, [sp, #S_IP]
+       add     r8, sp, #S_SP
+       @ store r13-r15, xPSR
+       stmia   r8!, {r9-r12}
+       @ store r0 once more and EXC_RETURN
+       stmia   r8, {r0, lr}
+       .endm
+
+       .macro  v7m_exception_fast_exit
+       @ registers r0-r3 and r12 are automatically restored on exception
+       @ return. r4-r7 were not clobbered in v7m_exception_entry so for
+       @ correctness they don't need to be restored. So only r8-r11 must be
+       @ restored here. The easiest way to do so is to restore r0-r7, too.
+       ldmia   sp!, {r0-r11}
+       add     sp, #S_FRAME_SIZE-S_IP
+       cpsie   i
+       bx      lr
+       .endm
+
+       .macro  v7m_exception_slow_exit ret_r0
+       cpsid   i
+       ldr     lr, [sp, #S_EXC_RET]    @ read exception LR
+       tst     lr, #0x8
+       bne     1f                      @ go to thread mode using exception return
+
+       /*
+        * return to kernel thread
+        * sp is already set up (and might be unset in pt_regs), so only
+        * restore r0-r12 and pc
+        */
+       ldmia   sp, {r0-r12}
+       ldr     lr, [sp, #S_PC]
+       add     sp, sp, #S_FRAME_SIZE
+       cpsie   i
+       bx      lr
+
+1:     /*
+        * return to userspace
+        */
+
+       @ read original r12, sp, lr, pc and xPSR
+       add     r12, sp, #S_IP
+       ldmia   r12, {r1-r5}
+
+       @ handle stack aligning
+       tst     r5, #0x100
+       subne   r2, r2, #4
+
+       @ skip over stack space for fp saving
+       tst     lr, #0x10
+       subeq   r2, r2, #72             @ 18 words = 72 bytes of FP state
+
+       @ write basic exception frame
+       stmdb   r2!, {r1, r3-r5}
+       ldmia   sp, {r1, r3-r5}
+       .if     \ret_r0
+       stmdb   r2!, {r0, r3-r5}
+       .else
+       stmdb   r2!, {r1, r3-r5}
+       .endif
+
+       @ restore process sp
+       msr     psp, r2
+
+       @ restore original r4-r11
+       ldmia   sp!, {r0-r11}
+
+       @ restore main sp
+       add     sp, sp, #S_FRAME_SIZE-S_IP
+
+       cpsie   i
+       bx      lr
+       .endm
+#endif /* CONFIG_CPU_V7M */
+
        @
        @ Store/load the USER SP and LR registers by switching to the SYS
        @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
        rfeia   sp!
        .endm
 
+#ifdef CONFIG_CPU_V7M
+       .macro  restore_user_regs, fast = 0, offset = 0
+       .if     \offset
+       add     sp, #\offset
+       .endif
+       v7m_exception_slow_exit ret_r0 = \fast
+       .endm
+#else  /* !CONFIG_CPU_V7M */
        .macro  restore_user_regs, fast = 0, offset = 0
        clrex                                   @ clear the exclusive monitor
        mov     r2, sp
        add     sp, sp, #S_FRAME_SIZE - S_SP
        movs    pc, lr                          @ return & move spsr_svc into cpsr
        .endm
+#endif /* CONFIG_CPU_V7M */
 
        .macro  get_thread_info, rd
        mov     \rd, sp
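
For reference, the words the v7-M core stacks automatically on exception entry — which v7m_exception_entry walks with raw offsets — form this frame (hypothetical struct, not used by the code):

struct v7m_hw_exc_frame {
        unsigned long r0, r1, r2, r3;
        unsigned long r12;
        unsigned long lr;               /* r14 at the point of the exception */
        unsigned long ret_addr;         /* ReturnAddress(): where to resume */
        unsigned long xpsr;             /* bit 9: core inserted an aligner word */
        /* with the FP extension and EXC_RETURN bit 4 clear, 18 more
         * words (s0-s15, FPSCR, reserved) follow: 72 extra bytes
         */
};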
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
new file mode 100644 (file)
index 0000000..a0991dc
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * linux/arch/arm/kernel/entry-v7m.S
+ *
+ * Copyright (C) 2008 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Low-level vector interface routines for the ARMv7-M architecture
+ */
+#include <asm/memory.h>
+#include <asm/glue.h>
+#include <asm/thread_notify.h>
+
+#include <mach/entry-macro.S>
+
+#include "entry-header.S"
+
+#ifdef CONFIG_PREEMPT
+#error "CONFIG_PREEMPT is not supported by the current ARMv7-M implementation"
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+#error "CONFIG_TRACE_IRQFLAGS is not supported by the current ARMv7-M implementation"
+#endif
+
+__invalid_entry:
+       v7m_exception_entry
+       adr     r0, strerr
+       mrs     r1, ipsr
+       mov     r2, lr
+       bl      printk
+       mov     r0, sp
+       bl      show_regs
+1:     b       1b
+ENDPROC(__invalid_entry)
+
+strerr:        .asciz  "\nUnhandled exception: IPSR = %08lx LR = %08lx\n"
+
+       .align  2
+__irq_entry:
+       v7m_exception_entry
+
+       @
+       @ Invoke the IRQ handler
+       @
+       mrs     r0, ipsr
+       and     r0, #0xff
+       sub     r0, #16                 @ IRQ number
+       mov     r1, sp
+       @ routine called with r0 = irq number, r1 = struct pt_regs *
+       bl      asm_do_IRQ
+
+       @
+       @ Check for any pending work if returning to user
+       @
+       ldr     lr, [sp, #S_EXC_RET]
+       tst     lr, #0x8                @ check the return stack
+       beq     2f                      @ returning to handler mode
+       get_thread_info tsk
+       ldr     r1, [tsk, #TI_FLAGS]
+       tst     r1, #_TIF_WORK_MASK
+       beq     2f                      @ no work pending
+       ldr     r1, =0xe000ed04         @ ICSR
+       mov     r0, #1 << 28            @ ICSR.PENDSVSET
+       str     r0, [r1]                @ raise PendSV
+
+2:
+       v7m_exception_fast_exit
+ENDPROC(__irq_entry)
+
+__pendsv_entry:
+       v7m_exception_entry
+
+       ldr     r1, =0xe000ed04         @ ICSR
+       mov     r0, #1 << 27            @ ICSR.PENDSVCLR
+       str     r0, [r1]                @ clear PendSV
+
+       @ execute the pending work, including reschedule
+       get_thread_info tsk
+       mov     why, #0
+       b       ret_to_user
+ENDPROC(__pendsv_entry)
+
+/*
+ * Register switch for ARMv7-M processors.
+ * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
+ * previous and next are guaranteed not to be the same.
+ */
+ENTRY(__switch_to)
+       .fnstart
+       .cantunwind
+       add     ip, r1, #TI_CPU_SAVE
+       stmia   ip!, {r4 - r11}         @ Store most regs on stack
+       str     sp, [ip], #4
+       str     lr, [ip], #4
+       mov     r5, r0
+       add     r4, r2, #TI_CPU_SAVE
+       ldr     r0, =thread_notify_head
+       mov     r1, #THREAD_NOTIFY_SWITCH
+       bl      atomic_notifier_call_chain
+       mov     ip, r4
+       mov     r0, r5
+       ldmia   ip!, {r4 - r11}         @ Load all regs saved previously
+       ldr     sp, [ip], #4
+       ldr     pc, [ip]
+       .fnend
+ENDPROC(__switch_to)
+
+       .data
+       .align  8
+/*
+ * Vector table (64 words => 256 bytes natural alignment)
+ */
+ENTRY(vector_table)
+       .long   0                       @ 0 - Reset stack pointer
+       .long   __invalid_entry         @ 1 - Reset
+       .long   __invalid_entry         @ 2 - NMI
+       .long   __invalid_entry         @ 3 - HardFault
+       .long   __invalid_entry         @ 4 - MemManage
+       .long   __invalid_entry         @ 5 - BusFault
+       .long   __invalid_entry         @ 6 - UsageFault
+       .long   __invalid_entry         @ 7 - Reserved
+       .long   __invalid_entry         @ 8 - Reserved
+       .long   __invalid_entry         @ 9 - Reserved
+       .long   __invalid_entry         @ 10 - Reserved
+       .long   vector_swi              @ 11 - SVCall
+       .long   __invalid_entry         @ 12 - Debug Monitor
+       .long   __invalid_entry         @ 13 - Reserved
+       .long   __pendsv_entry          @ 14 - PendSV
+       .long   __invalid_entry         @ 15 - SysTick
+       .rept   64 - 16
+       .long   __irq_entry             @ 16..63 - External Interrupts
+       .endr
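
__irq_entry derives the IRQ number from IPSR. The same mapping in C, as a sketch with an illustrative helper name:

/* exception numbers 0-15 are system exceptions; external IRQ n
 * enters as exception 16 + n
 */
static inline int v7m_irq_from_ipsr(unsigned int ipsr)
{
        return (int)(ipsr & 0xff) - 16; /* mirrors the masking in the asm */
}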
index 854bd22380d335dba0e6761c317ccf6f45d3b447..2f560c575e0d67d926259846d8b0568d30f2ded1 100644 (file)
@@ -98,8 +98,9 @@ __mmap_switched:
        str     r9, [r4]                        @ Save processor ID
        str     r1, [r5]                        @ Save machine type
        str     r2, [r6]                        @ Save atags pointer
-       bic     r4, r0, #CR_A                   @ Clear 'A' bit
-       stmia   r7, {r0, r4}                    @ Save control register values
+       cmp     r7, #0
+       bicne   r4, r0, #CR_A                   @ Clear 'A' bit
+       stmneia r7, {r0, r4}                    @ Save control register values
        b       start_kernel
 ENDPROC(__mmap_switched)
 
@@ -113,7 +114,11 @@ __mmap_switched_data:
        .long   processor_id                    @ r4
        .long   __machine_arch_type             @ r5
        .long   __atags_pointer                 @ r6
+#ifdef CONFIG_CPU_CP15
        .long   cr_alignment                    @ r7
+#else
+       .long   0
+#endif
        .long   init_thread_union + THREAD_START_SP @ sp
        .size   __mmap_switched_data, . - __mmap_switched_data
 
index 278cfc144f448012100d594c0f81041cdfbf474c..c391c05ba69f0d46847a514e27ae8eb0b692495e 100644 (file)
@@ -44,10 +44,13 @@ ENTRY(stext)
 
        setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
                                                @ and irqs disabled
-#ifndef CONFIG_CPU_CP15
-       ldr     r9, =CONFIG_PROCESSOR_ID
-#else
+#if defined(CONFIG_CPU_CP15)
        mrc     p15, 0, r9, c0, c0              @ get processor id
+#elif defined(CONFIG_CPU_V7M)
+       ldr     r9, =0xe000ed00                 @ CPUID register address
+       ldr     r9, [r9]
+#else
+       ldr     r9, =CONFIG_PROCESSOR_ID
 #endif
        bl      __lookup_processor_type         @ r5=procinfo r9=cpuid
        movs    r10, r5                         @ invalid processor (r5=0)?
index 44bc0b327e2b62a2e13acf1713cf18a57b715e46..c912f8a43c3c3d430a2931f83a58a418af3a5742 100644 (file)
@@ -387,6 +387,10 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
                *childregs = *regs;
                childregs->ARM_r0 = 0;
                childregs->ARM_sp = stack_start;
+#ifdef CONFIG_CPU_V7M
+               /* Return to Thread mode with Process stack */
+               childregs->ARM_EXC_RET = 0xfffffffdUL;
+#endif
        } else {
                memset(childregs, 0, sizeof(struct pt_regs));
                thread->cpu_context.r4 = stk_sz;
index 739db3a1b2d279216336025d88586d3ffd07a0e0..55df1d59761f024aeb789d29d418cf2d2b51fa4b 100644 (file)
@@ -87,6 +87,9 @@ static const struct pt_regs_offset regoffset_table[] = {
        REG_OFFSET_NAME(pc),
        REG_OFFSET_NAME(cpsr),
        REG_OFFSET_NAME(ORIG_r0),
+#ifdef CONFIG_CPU_V7M
+       REG_OFFSET_NAME(EXC_RET),
+#endif
        REG_OFFSET_END,
 };
 
index da1d1aa20ad957ccd7021815014d12530de4f3a1..3cca0c80edd0c83c452919988b0d550f313db110 100644 (file)
@@ -128,7 +128,9 @@ struct stack {
        u32 und[3];
 } ____cacheline_aligned;
 
+#ifndef CONFIG_CPU_V7M
 static struct stack stacks[NR_CPUS];
+#endif
 
 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);
@@ -207,7 +209,7 @@ static const char *proc_arch[] = {
        "5TEJ",
        "6TEJ",
        "7",
-       "?(11)",
+       "7M",
        "?(12)",
        "?(13)",
        "?(14)",
@@ -216,6 +218,12 @@ static const char *proc_arch[] = {
        "?(17)",
 };
 
+#ifdef CONFIG_CPU_V7M
+static int __get_cpu_architecture(void)
+{
+       return CPU_ARCH_ARMv7M;
+}
+#else
 static int __get_cpu_architecture(void)
 {
        int cpu_arch;
@@ -248,6 +256,7 @@ static int __get_cpu_architecture(void)
 
        return cpu_arch;
 }
+#endif
 
 int __pure cpu_architecture(void)
 {
@@ -375,6 +384,7 @@ static void __init feat_v6_fixup(void)
  */
 void cpu_init(void)
 {
+#ifndef CONFIG_CPU_V7M
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];
 
@@ -419,6 +429,7 @@ void cpu_init(void)
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
+#endif
 }
 
 int __cpu_logical_map[NR_CPUS];
index b0179b89a04ce26062184aaf23f86c521fb3009c..12d976b59bd8adfde359311b4fdf0c78df139e83 100644 (file)
@@ -819,6 +819,7 @@ static void __init kuser_get_tls_init(unsigned long vectors)
 
 void __init early_trap_init(void *vectors_base)
 {
+#ifndef CONFIG_CPU_V7M
        unsigned long vectors = (unsigned long)vectors_base;
        extern char __stubs_start[], __stubs_end[];
        extern char __vectors_start[], __vectors_end[];
@@ -850,4 +851,5 @@ void __init early_trap_init(void *vectors_base)
 
        flush_icache_range(vectors, vectors + PAGE_SIZE);
        modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+#endif
 }
index b820edaf31843fb309fc29f882ce9d4e80c04c62..feeb3eaccb1c48a4bbafb1c1d232634ce893145a 100644 (file)
@@ -964,12 +964,14 @@ static int __init alignment_init(void)
                return -ENOMEM;
 #endif
 
+#ifdef CONFIG_CPU_CP15
        if (cpu_is_v6_unaligned()) {
                cr_alignment &= ~CR_A;
                cr_no_alignment &= ~CR_A;
                set_cr(cr_alignment);
                ai_usermode = safe_usermode(ai_usermode, false);
        }
+#endif
 
        hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
                        "alignment exception");
index 39719bb93caab493bc2e71ab5c86c20b955d6b13..08d04fced365e56dcea9bf811e07e57b171c1ab8 100644 (file)
@@ -97,6 +97,7 @@ static struct cachepolicy cache_policies[] __initdata = {
        }
 };
 
+#ifdef CONFIG_CPU_CP15
 /*
  * These are useful for identifying cache coherency
  * problems by allowing the cache or the cache and
@@ -195,6 +196,22 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
+#else
+
+static int __init early_cachepolicy(char *p)
+{
+       pr_warning("cachepolicy kernel parameter not supported without cp15\n");
+       return 0;
+}
+early_param("cachepolicy", early_cachepolicy);
+
+static int __init noalign_setup(char *__unused)
+{
+       pr_warning("noalign kernel parameter not supported without cp15\n");
+       return 1;
+}
+__setup("noalign", noalign_setup);
+
+#endif
+
 #define PROT_PTE_DEVICE                L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 #define PROT_SECT_DEVICE       PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
index d51225f90ae2d5d3af909ec87d29910bea4f23d5..4bc8ae5ae3ab35740025bd8deb0e0b07aeb9d08b 100644 (file)
 
 void __init arm_mm_memblock_reserve(void)
 {
+#ifndef CONFIG_CPU_V7M
        /*
         * Register the exception vector page. On some machines the
         * exception vectors live in DRAM; if this page is not reserved
         * here, alloc_page() may hand it out (the returned address is
         * not NULL, but 0).
         */
        memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
+#endif
 }
 
 void __init sanity_check_meminfo(void)
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
new file mode 100644 (file)
index 0000000..2b8eb97
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ *  linux/arch/arm/mm/proc-v7m.S
+ *
+ *  Copyright (C) 2008 ARM Ltd.
+ *  Copyright (C) 2001 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This is the "shell" of the ARMv7-M processor support.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ENTRY(cpu_v7m_proc_init)
+       mov     pc, lr
+ENDPROC(cpu_v7m_proc_init)
+
+ENTRY(cpu_v7m_proc_fin)
+       mov     pc, lr
+ENDPROC(cpu_v7m_proc_fin)
+
+/*
+ *     cpu_v7m_reset(loc)
+ *
+ *     Perform a soft reset of the system.  Put the CPU into the
+ *     same state as it would be if it had been reset, and branch
+ *     to what would be the reset vector.
+ *
+ *     - loc   - location to jump to for soft reset
+ */
+       .align  5
+ENTRY(cpu_v7m_reset)
+       mov     pc, r0
+ENDPROC(cpu_v7m_reset)
+
+/*
+ *     cpu_v7m_do_idle()
+ *
+ *     Idle the processor (eg, wait for interrupt).
+ *
+ *     IRQs are already disabled.
+ */
+ENTRY(cpu_v7m_do_idle)
+       wfi
+       mov     pc, lr
+ENDPROC(cpu_v7m_do_idle)
+
+ENTRY(cpu_v7m_dcache_clean_area)
+       mov     pc, lr
+ENDPROC(cpu_v7m_dcache_clean_area)
+
+/*
+ * There is no MMU, so there is nothing to do here.
+ */
+ENTRY(cpu_v7m_switch_mm)
+       mov     pc, lr
+ENDPROC(cpu_v7m_switch_mm)
+
+cpu_v7m_name:
+       .ascii  "ARMv7-M Processor"
+       .align
+
+       .section ".text.init", #alloc, #execinstr
+
+/*
+ *     __v7m_setup
+ *
+ *     This should be able to cover all ARMv7-M cores.
+ */
+__v7m_setup:
+       @ Configure the vector table base address
+       ldr     r0, =0xe000ed08         @ vector table base address
+       ldr     r12, =vector_table
+       str     r12, [r0]
+
+       @ Lower the priority of the SVC and PendSV exceptions
+       ldr     r0, =0xe000ed1c
+       mov     r5, #0x80000000
+       str     r5, [r0]                @ set SVC priority
+       ldr     r0, =0xe000ed20
+       mov     r5, #0x00800000
+       str     r5, [r0]                @ set PendSV priority
+
+       @ Trap via SVC so that the kernel continues in handler mode
+       adr     r0, BSYM(1f)
+       ldr     r5, [r12, #11 * 4]      @ read the SVC vector entry
+       str     r0, [r12, #11 * 4]      @ write the temporary SVC vector entry
+       mov     r6, lr                  @ save LR
+       mov     r7, sp                  @ save SP
+       ldr     sp, =__v7m_setup_stack_top
+       cpsie   i
+       svc     #0
+1:     cpsid   i
+       str     r5, [r12, #11 * 4]      @ restore the original SVC vector entry
+       mov     lr, r6                  @ restore LR
+       mov     sp, r7                  @ restore SP
+
+       @ Special-purpose control register
+       mov     r0, #1
+       msr     control, r0             @ Thread mode has unprivileged access
+
+       @ Configure the System Control Register
+       ldr     r0, =0xe000ed14         @ system control register
+       ldr     r12, [r0]
+       orr     r12, #1 << 9            @ STKALIGN
+       str     r12, [r0]
+       mov     pc, lr
+ENDPROC(__v7m_setup)
+
+       .align  2
+       .type   v7m_processor_functions, #object
+ENTRY(v7m_processor_functions)
+       .word   nommu_early_abort
+       .word   legacy_pabort
+       .word   cpu_v7m_proc_init
+       .word   cpu_v7m_proc_fin
+       .word   cpu_v7m_reset
+       .word   cpu_v7m_do_idle
+       .word   cpu_v7m_dcache_clean_area
+       .word   cpu_v7m_switch_mm
+       .word   0                       @ cpu_v7m_set_pte_ext
+       .size   v7m_processor_functions, . - v7m_processor_functions
+
+       .type   cpu_arch_name, #object
+cpu_arch_name:
+       .asciz  "armv7m"
+       .size   cpu_arch_name, . - cpu_arch_name
+
+       .type   cpu_elf_name, #object
+cpu_elf_name:
+       .asciz  "v7m"
+       .size   cpu_elf_name, . - cpu_elf_name
+       .align
+
+       .section ".proc.info.init", #alloc, #execinstr
+
+       /*
+        * Match any ARMv7-M processor core.
+        */
+       .type   __v7m_proc_info, #object
+__v7m_proc_info:
+       .long   0x000f0000              @ Required ID value
+       .long   0x000f0000              @ Mask for ID
+       .long   0                       @ proc_info_list.__cpu_mm_mmu_flags
+       .long   0                       @ proc_info_list.__cpu_io_mmu_flags
+       b       __v7m_setup             @ proc_info_list.__cpu_flush
+       .long   cpu_arch_name
+       .long   cpu_elf_name
+       .long   HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP @ no SWP on v7-M
+       .long   cpu_v7m_name
+       .long   v7m_processor_functions @ proc_info_list.proc
+       .long   0                       @ proc_info_list.tlb
+       .long   0                       @ proc_info_list.user
+       .long   0                       @ proc_info_list.cache
+       .size   __v7m_proc_info, . - __v7m_proc_info
+
+__v7m_setup_stack:
+       .space  4 * 8                           @ 8 registers
+__v7m_setup_stack_top:
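
The setup code and the entry path program the System Control Block by literal address. Collected for reference — the names are descriptive only, the code uses the literals:

/* ARMv7-M System Control Block registers used in this series */
#define V7M_SCB_CPUID   0xe000ed00      /* CPUID base (read_cpuid, head-nommu.S) */
#define V7M_SCB_ICSR    0xe000ed04      /* PENDSVSET bit 28 / PENDSVCLR bit 27 */
#define V7M_SCB_VTOR    0xe000ed08      /* vector table offset -> vector_table */
#define V7M_SCB_CCR     0xe000ed14      /* configuration control; STKALIGN is bit 9 */
#define V7M_SCB_SHPR2   0xe000ed1c      /* system handler priority: SVCall */
#define V7M_SCB_SHPR3   0xe000ed20      /* system handler priority: PendSV, SysTick */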