git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'devel-stable' into for-linus
author Russell King <rmk+kernel@arm.linux.org.uk>
Thu, 3 Sep 2015 14:28:50 +0000 (15:28 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Thu, 3 Sep 2015 14:28:50 +0000 (15:28 +0100)
Conflicts:
drivers/perf/arm_pmu.c

74 files changed:
Documentation/devicetree/bindings/arm/l2cc.txt
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/common/mcpm_platsmp.c
arch/arm/include/asm/Kbuild
arch/arm/include/asm/assembler.h
arch/arm/include/asm/barrier.h
arch/arm/include/asm/bitops.h
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/domain.h
arch/arm/include/asm/fixmap.h
arch/arm/include/asm/futex.h
arch/arm/include/asm/glue-cache.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/outercache.h
arch/arm/include/asm/pgtable-2level-hwdef.h
arch/arm/include/asm/smp.h
arch/arm/include/asm/smp_plat.h
arch/arm/include/asm/switch_to.h
arch/arm/include/asm/thread_info.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/armksyms.c
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/head.S
arch/arm/kernel/irq.c
arch/arm/kernel/process.c
arch/arm/kernel/setup.c
arch/arm/kernel/signal.c
arch/arm/kernel/smp.c
arch/arm/kernel/swp_emulate.c
arch/arm/kernel/traps.c
arch/arm/kernel/vdso.c
arch/arm/lib/clear_user.S
arch/arm/lib/copy_from_user.S
arch/arm/lib/copy_to_user.S
arch/arm/lib/csumpartialcopyuser.S
arch/arm/lib/uaccess_with_memcpy.c
arch/arm/mach-mmp/pm-pxa910.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/common.c
arch/arm/mach-omap2/common.h
arch/arm/mach-omap2/include/mach/barriers.h [deleted file]
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/omap4-common.c
arch/arm/mach-omap2/sleep44xx.S
arch/arm/mach-prima2/pm.c
arch/arm/mach-shmobile/common.h
arch/arm/mach-shmobile/platsmp.c
arch/arm/mach-shmobile/smp-r8a7790.c
arch/arm/mach-shmobile/smp-r8a7791.c
arch/arm/mach-shmobile/smp-sh73a0.c
arch/arm/mach-ux500/cache-l2x0.c
arch/arm/mm/Kconfig
arch/arm/mm/abort-ev4.S
arch/arm/mm/abort-ev5t.S
arch/arm/mm/abort-ev5tj.S
arch/arm/mm/abort-ev6.S
arch/arm/mm/abort-ev7.S
arch/arm/mm/abort-lv4t.S
arch/arm/mm/abort-macro.S
arch/arm/mm/cache-feroceon-l2.c
arch/arm/mm/cache-l2x0.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/dma.h [new file with mode: 0644]
arch/arm/mm/flush.c
arch/arm/mm/highmem.c
arch/arm/mm/mmu.c
arch/arm/mm/pgd.c
arch/arm/mm/proc-v7.S
arch/arm/vdso/Makefile
drivers/firmware/qcom_scm-32.c

Documentation/devicetree/bindings/arm/l2cc.txt
index 2251dccb141eac294d9ef8fa43ba26023bd70493..06c88a4d28aced38cdc2386a10364900266b7c6e 100644 (file)
@@ -67,6 +67,12 @@ Optional properties:
   disable if zero.
 - arm,prefetch-offset : Override prefetch offset value. Valid values are
   0-7, 15, 23, and 31.
+- arm,shared-override : The default behavior of the pl310 cache controller with
+  respect to the shareable attribute is to transform "normal memory
+  non-cacheable transactions" into "cacheable no allocate" (for reads) or
+  "write through no write allocate" (for writes).
+  On systems where this may cause DMA buffer corruption, this property must be
+  specified to indicate that such transforms are precluded.
 - prefetch-data : Data prefetch. Value: <0> (forcibly disable), <1>
   (forcibly enable), property absent (retain settings set by firmware)
 - prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
arch/arm/Kconfig
index 74fe545af38a0fc20c9594fc0618c416be1a0f8f..45e635b879431fa95f0477d838076ec20e09ed48 100644 (file)
@@ -188,6 +188,9 @@ config ARCH_HAS_ILOG2_U64
 config ARCH_HAS_BANDGAP
        bool
 
+config FIX_EARLYCON_MEM
+       def_bool y if MMU
+
 config GENERIC_HWEIGHT
        bool
        default y
@@ -1701,6 +1704,21 @@ config HIGHPTE
          consumed by page tables.  Setting this option will allow
          user-space 2nd level page tables to reside in high memory.
 
+config CPU_SW_DOMAIN_PAN
+       bool "Enable use of CPU domains to implement privileged no-access"
+       depends on MMU && !ARM_LPAE
+       default y
+       help
+         Increase kernel security by ensuring that normal kernel accesses
+         are unable to access userspace addresses.  This can help prevent
+         use-after-free bugs becoming an exploitable privilege escalation
+         by ensuring that magic values (such as LIST_POISON) will always
+         fault when dereferenced.
+
+         CPUs with low-vector mappings use a best-efforts implementation.
+         Their lower 1MB needs to remain accessible for the vectors, but
+         the remainder of userspace will become appropriately inaccessible.
+
 config HW_PERF_EVENTS
        def_bool y
        depends on ARM_PMU
arch/arm/Makefile
index 07ab3d203916732337f909ec5f903db4c7bb1294..7451b447cc2d2cb8cc68a9bf59f125f2dd2ce347 100644 (file)
@@ -312,6 +312,9 @@ INSTALL_TARGETS     = zinstall uinstall install
 
 PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
 
+bootpImage uImage: zImage
+zImage: Image
+
 $(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
arch/arm/common/mcpm_platsmp.c
index 92e54d7c6f468ebf7aa2e20b85774e885ef5aabd..2b25b6038f6694287a038f7ba5264e4f380380c9 100644 (file)
@@ -65,14 +65,10 @@ static int mcpm_cpu_kill(unsigned int cpu)
        return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
 }
 
-static int mcpm_cpu_disable(unsigned int cpu)
+static bool mcpm_cpu_can_disable(unsigned int cpu)
 {
-       /*
-        * We assume all CPUs may be shut down.
-        * This would be the hook to use for eventual Secure
-        * OS migration requests as described in the PSCI spec.
-        */
-       return 0;
+       /* We assume all CPUs may be shut down. */
+       return true;
 }
 
 static void mcpm_cpu_die(unsigned int cpu)
@@ -92,7 +88,7 @@ static struct smp_operations __initdata mcpm_smp_ops = {
        .smp_secondary_init     = mcpm_secondary_init,
 #ifdef CONFIG_HOTPLUG_CPU
        .cpu_kill               = mcpm_cpu_kill,
-       .cpu_disable            = mcpm_cpu_disable,
+       .cpu_can_disable        = mcpm_cpu_can_disable,
        .cpu_die                = mcpm_cpu_die,
 #endif
 };
arch/arm/include/asm/Kbuild
index 83c50193626ce03c1a5d590a8d8ff81fa861f180..517ef6dd22b9ec41f855def9df998f50fc105350 100644 (file)
@@ -12,7 +12,6 @@ generic-y += irq_regs.h
 generic-y += kdebug.h
 generic-y += local.h
 generic-y += local64.h
-generic-y += mcs_spinlock.h
 generic-y += msgbuf.h
 generic-y += param.h
 generic-y += parport.h
arch/arm/include/asm/assembler.h
index 4abe57279c66f0ecf2c96c116ae7ce839e646b4c..7bbf325a4f31f12c9d867381853579a495590f89 100644 (file)
        .endm
 #endif
 
-       .macro asm_trace_hardirqs_off
+       .macro asm_trace_hardirqs_off, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
+       .if \save
        stmdb   sp!, {r0-r3, ip, lr}
+       .endif
        bl      trace_hardirqs_off
+       .if \save
        ldmia   sp!, {r0-r3, ip, lr}
+       .endif
 #endif
        .endm
 
-       .macro asm_trace_hardirqs_on_cond, cond
+       .macro asm_trace_hardirqs_on, cond=al, save=1
 #if defined(CONFIG_TRACE_IRQFLAGS)
        /*
         * actually the registers should be pushed and pop'd conditionally, but
         * after bl the flags are certainly clobbered
         */
+       .if \save
        stmdb   sp!, {r0-r3, ip, lr}
+       .endif
        bl\cond trace_hardirqs_on
+       .if \save
        ldmia   sp!, {r0-r3, ip, lr}
+       .endif
 #endif
        .endm
 
-       .macro asm_trace_hardirqs_on
-       asm_trace_hardirqs_on_cond al
-       .endm
-
-       .macro disable_irq
+       .macro disable_irq, save=1
        disable_irq_notrace
-       asm_trace_hardirqs_off
+       asm_trace_hardirqs_off \save
        .endm
 
        .macro enable_irq
 
        .macro restore_irqs, oldcpsr
        tst     \oldcpsr, #PSR_I_BIT
-       asm_trace_hardirqs_on_cond eq
+       asm_trace_hardirqs_on cond=eq
        restore_irqs_notrace \oldcpsr
        .endm
 
@@ -445,6 +449,53 @@ THUMB(     orr     \reg , \reg , #PSR_T_BIT        )
 #endif
        .endm
 
+       .macro  uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       /*
+        * Whenever we re-enter userspace, the domains should always be
+        * set appropriately.
+        */
+       mov     \tmp, #DACR_UACCESS_DISABLE
+       mcr     p15, 0, \tmp, c3, c0, 0         @ Set domain register
+       .if     \isb
+       instr_sync
+       .endif
+#endif
+       .endm
+
+       .macro  uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       /*
+        * Whenever we re-enter userspace, the domains should always be
+        * set appropriately.
+        */
+       mov     \tmp, #DACR_UACCESS_ENABLE
+       mcr     p15, 0, \tmp, c3, c0, 0
+       .if     \isb
+       instr_sync
+       .endif
+#endif
+       .endm
+
+       .macro  uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       mrc     p15, 0, \tmp, c3, c0, 0
+       str     \tmp, [sp, #S_FRAME_SIZE]
+#endif
+       .endm
+
+       .macro  uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       ldr     r0, [sp, #S_FRAME_SIZE]
+       mcr     p15, 0, r0, c3, c0, 0
+#endif
+       .endm
+
+       .macro  uaccess_save_and_disable, tmp
+       uaccess_save \tmp
+       uaccess_disable \tmp
+       .endm
+
        .irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
        .macro  ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6
arch/arm/include/asm/barrier.h
index 6c2327e1c7323d79831af30bb296c55a9409e9bd..3d8f1d3ad9a76e62f3c3e6ae0e3ba35b4500bfcf 100644 (file)
@@ -2,7 +2,6 @@
 #define __ASM_BARRIER_H
 
 #ifndef __ASSEMBLY__
-#include <asm/outercache.h>
 
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
 #define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #endif
 
+#ifdef CONFIG_ARM_HEAVY_MB
+extern void (*soc_mb)(void);
+extern void arm_heavy_mb(void);
+#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
+#else
+#define __arm_heavy_mb(x...) dsb(x)
+#endif
+
 #ifdef CONFIG_ARCH_HAS_BARRIERS
 #include <mach/barriers.h>
 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#define mb()           do { dsb(); outer_sync(); } while (0)
+#define mb()           __arm_heavy_mb()
 #define rmb()          dsb()
-#define wmb()          do { dsb(st); outer_sync(); } while (0)
+#define wmb()          __arm_heavy_mb(st)
 #define dma_rmb()      dmb(osh)
 #define dma_wmb()      dmb(oshst)
 #else
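
The mb()/wmb() rewrite above folds the old "dsb(); outer_sync()" pairs into __arm_heavy_mb(), which stays a plain dsb() unless CONFIG_ARM_HEAVY_MB is selected. A sketch of the out-of-line half, assuming the definition (added elsewhere in this merge, most likely in arch/arm/mm/flush.c) simply chains the outer-cache sync with the new soc_mb hook:

void arm_heavy_mb(void)
{
#ifdef CONFIG_OUTER_CACHE_SYNC
	if (outer_cache.sync)		/* drain the outer (e.g. PL310) write buffers */
		outer_cache.sync();
#endif
	if (soc_mb)			/* SoC-specific barrier, if one was registered */
		soc_mb();
}

This is also why the outer_sync() inline helper can be deleted from outercache.h further down: the barrier code was its only user.
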
arch/arm/include/asm/bitops.h
index 56380995f4c38364c620c0055083c877f85dcff0..e943e6cee254503cb642bf95caa573eca03de758 100644 (file)
@@ -35,9 +35,9 @@
 static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
 {
        unsigned long flags;
-       unsigned long mask = 1UL << (bit & 31);
+       unsigned long mask = BIT_MASK(bit);
 
-       p += bit >> 5;
+       p += BIT_WORD(bit);
 
        raw_local_irq_save(flags);
        *p |= mask;
@@ -47,9 +47,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *
 static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
 {
        unsigned long flags;
-       unsigned long mask = 1UL << (bit & 31);
+       unsigned long mask = BIT_MASK(bit);
 
-       p += bit >> 5;
+       p += BIT_WORD(bit);
 
        raw_local_irq_save(flags);
        *p &= ~mask;
@@ -59,9 +59,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long
 static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
 {
        unsigned long flags;
-       unsigned long mask = 1UL << (bit & 31);
+       unsigned long mask = BIT_MASK(bit);
 
-       p += bit >> 5;
+       p += BIT_WORD(bit);
 
        raw_local_irq_save(flags);
        *p ^= mask;
@@ -73,9 +73,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
 {
        unsigned long flags;
        unsigned int res;
-       unsigned long mask = 1UL << (bit & 31);
+       unsigned long mask = BIT_MASK(bit);
 
-       p += bit >> 5;
+       p += BIT_WORD(bit);
 
        raw_local_irq_save(flags);
        res = *p;
@@ -90,9 +90,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
 {
        unsigned long flags;
        unsigned int res;
-       unsigned long mask = 1UL << (bit & 31);
+       unsigned long mask = BIT_MASK(bit);
 
-       p += bit >> 5;
+       p += BIT_WORD(bit);
 
        raw_local_irq_save(flags);
        res = *p;
@@ -107,9 +107,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
 {
        unsigned long flags;
        unsigned int res;
-       unsigned long mask = 1UL << (bit & 31);
+       unsigned long mask = BIT_MASK(bit);
 
-       p += bit >> 5;
+       p += BIT_WORD(bit);
 
        raw_local_irq_save(flags);
        res = *p;
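
For reference, the generic helpers that replace the open-coded expressions above are defined in <linux/bitops.h> as:

#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

With BITS_PER_LONG == 32 on ARM these reduce to exactly the old "1UL << (bit & 31)" and "bit >> 5", so the conversion does not change the generated code.
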
arch/arm/include/asm/cacheflush.h
index 4812cda8fd1759c979a8a19fe96c40a50fc70317..d5525bfc7e3e61879d278ae08b446e185c206982 100644 (file)
@@ -140,8 +140,6 @@ extern struct cpu_cache_fns cpu_cache;
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-#define dmac_map_area                  cpu_cache.dma_map_area
-#define dmac_unmap_area                        cpu_cache.dma_unmap_area
 #define dmac_flush_range               cpu_cache.dma_flush_range
 
 #else
@@ -161,8 +159,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
  * is visible to DMA, or data written by DMA to system memory is
  * visible to the CPU.
  */
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
 extern void dmac_flush_range(const void *, const void *);
 
 #endif
@@ -506,4 +502,21 @@ static inline void set_kernel_text_ro(void) { }
 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
                             void *kaddr, unsigned long len);
 
+/**
+ * secure_flush_area - ensure coherency across the secure boundary
+ * @addr: virtual address
+ * @size: size of region
+ *
+ * Ensure that the specified area of memory is coherent across the secure
+ * boundary from the non-secure side.  This is used when calling secure
+ * firmware where the secure firmware does not ensure coherency.
+ */
+static inline void secure_flush_area(const void *addr, size_t size)
+{
+       phys_addr_t phys = __pa(addr);
+
+       __cpuc_flush_dcache_area((void *)addr, size);
+       outer_flush_range(phys, phys + size);
+}
+
 #endif
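
A minimal usage sketch for the new helper; smc_invoke() is a hypothetical placeholder for a platform's secure-monitor call, not an interface from this diff:

/* Flush a buffer out to RAM before handing it to secure firmware that
 * does not snoop the non-secure caches. */
static int send_to_secure_firmware(void *buf, size_t len)
{
	secure_flush_area(buf, len);
	return smc_invoke(virt_to_phys(buf), len);	/* hypothetical SMC wrapper */
}

drivers/firmware/qcom_scm-32.c in the file list above is the sort of caller this helper was introduced for.
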
arch/arm/include/asm/dma-mapping.h
index b52101d37ec74d7db81f0b33a12b051f7aca5527..a68b9d8a71fed8ee2357d833a023d2bed9696d69 100644 (file)
@@ -14,7 +14,7 @@
 #include <xen/xen.h>
 #include <asm/xen/hypervisor.h>
 
-#define DMA_ERROR_CODE (~0)
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
 extern struct dma_map_ops arm_dma_ops;
 extern struct dma_map_ops arm_coherent_dma_ops;
 
arch/arm/include/asm/domain.h
index 6ddbe446425e11524d927b5cd8b479bcd2419238..e878129f2fee5dfec3d36bec2e9fcb7d8ef046e4 100644 (file)
  */
 #ifndef CONFIG_IO_36
 #define DOMAIN_KERNEL  0
-#define DOMAIN_TABLE   0
 #define DOMAIN_USER    1
 #define DOMAIN_IO      2
 #else
 #define DOMAIN_KERNEL  2
-#define DOMAIN_TABLE   2
 #define DOMAIN_USER    1
 #define DOMAIN_IO      0
 #endif
+#define DOMAIN_VECTORS 3
 
 /*
  * Domain types
 #define DOMAIN_MANAGER 1
 #endif
 
-#define domain_val(dom,type)   ((type) << (2*(dom)))
+#define domain_mask(dom)       ((3) << (2 * (dom)))
+#define domain_val(dom,type)   ((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+       (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+        domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+        domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+        domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+       (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+        domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+        domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+        domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+       domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+       domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+       domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE   \
+       (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE    \
+       (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+       unsigned int domain;
+
+       asm(
+       "mrc    p15, 0, %0, c3, c0      @ get domain"
+        : "=r" (domain));
+
+       return domain;
+}
+
 static inline void set_domain(unsigned val)
 {
        asm volatile(
@@ -68,17 +102,16 @@ static inline void set_domain(unsigned val)
        isb();
 }
 
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)                                        \
        do {                                                    \
-       struct thread_info *thread = current_thread_info();     \
-       unsigned int domain = thread->cpu_domain;               \
-       domain &= ~domain_val(dom, DOMAIN_MANAGER);             \
-       thread->cpu_domain = domain | domain_val(dom, type);    \
-       set_domain(thread->cpu_domain);                         \
+               unsigned int domain = get_domain();             \
+               domain &= ~domain_mask(dom);                    \
+               domain = domain | domain_val(dom, type);        \
+               set_domain(domain);                             \
        } while (0)
 
 #else
-static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type)  { }
 #endif
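
Each domain is a 2-bit field in the DACR, so domain_val() shifts an access type into its field and domain_mask() isolates it. A standalone worked example of the resulting register values, using the non-CONFIG_IO_36 domain numbers above and the existing access types (DOMAIN_NOACCESS = 0, DOMAIN_CLIENT = 1):

#include <stdio.h>

#define domain_mask(dom)	((3) << (2 * (dom)))
#define domain_val(dom, type)	((type) << (2 * (dom)))

int main(void)
{
	enum { KERNEL = 0, USER = 1, IO = 2, VECTORS = 3 };	/* domain numbers */
	enum { NOACCESS = 0, CLIENT = 1 };			/* access types */

	unsigned int dflt = domain_val(KERNEL, CLIENT) |	/* __DACR_DEFAULT */
			    domain_val(IO, CLIENT) |
			    domain_val(VECTORS, CLIENT);

	printf("DACR_UACCESS_DISABLE = 0x%08x\n", dflt | domain_val(USER, NOACCESS));	/* 0x00000051 */
	printf("DACR_UACCESS_ENABLE  = 0x%08x\n", dflt | domain_val(USER, CLIENT));	/* 0x00000055 */
	return 0;
}

The uaccess_enable/uaccess_disable macros in assembler.h simply write these two constants into the domain access control register.
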
 
arch/arm/include/asm/fixmap.h
index 0415eae1df27419755c5cff78170ea3f6d029535..58cfe9f1a687e6e1c8794f5ff5f37c6b4fcd7cb9 100644 (file)
@@ -6,9 +6,13 @@
 #define FIXADDR_TOP            (FIXADDR_END - PAGE_SIZE)
 
 #include <asm/kmap_types.h>
+#include <asm/pgtable.h>
 
 enum fixed_addresses {
-       FIX_KMAP_BEGIN,
+       FIX_EARLYCON_MEM_BASE,
+       __end_of_permanent_fixed_addresses,
+
+       FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
        FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
 
        /* Support writing RO kernel text via kprobes, jump labels, etc. */
@@ -18,7 +22,16 @@ enum fixed_addresses {
        __end_of_fixed_addresses
 };
 
+#define FIXMAP_PAGE_COMMON     (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
+
+#define FIXMAP_PAGE_NORMAL     (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+
+/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+#define FIXMAP_PAGE_IO         (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
+#define FIXMAP_PAGE_NOCACHE    FIXMAP_PAGE_IO
+
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);
 
 #include <asm-generic/fixmap.h>
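
The FIX_EARLYCON_MEM_BASE slot (together with FIX_EARLYCON_MEM=y from the Kconfig hunk above) lets the generic earlycon code map its UART before ioremap() is available. A rough sketch of how such a mapping can be made through the fixmap; the real call site is the generic earlycon driver, not anything in this patch:

/* Map one page of MMIO through the permanent fixmap slot and return a
 * pointer to the register at the requested physical address. */
static void __iomem *early_map_uart(phys_addr_t phys)
{
	set_fixmap_io(FIX_EARLYCON_MEM_BASE, phys & PAGE_MASK);
	return (void __iomem *)(fix_to_virt(FIX_EARLYCON_MEM_BASE) +
				(phys & ~PAGE_MASK));
}
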
 
arch/arm/include/asm/futex.h
index 5eed82809d82b7aa9c74670fd9c9624bbd930803..6795368ad0238068c55fa4c8b56e0d67d9b9a5cf 100644 (file)
 #ifdef CONFIG_SMP
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)        \
+({                                                             \
+       unsigned int __ua_flags;                                \
        smp_mb();                                               \
        prefetchw(uaddr);                                       \
+       __ua_flags = uaccess_save_and_enable();                 \
        __asm__ __volatile__(                                   \
        "1:     ldrex   %1, [%3]\n"                             \
        "       " insn "\n"                                     \
        __futex_atomic_ex_table("%5")                           \
        : "=&r" (ret), "=&r" (oldval), "=&r" (tmp)              \
        : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)              \
-       : "cc", "memory")
+       : "cc", "memory");                                      \
+       uaccess_restore(__ua_flags);                            \
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                              u32 oldval, u32 newval)
 {
+       unsigned int __ua_flags;
        int ret;
        u32 val;
 
@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        smp_mb();
        /* Prefetching cannot fault */
        prefetchw(uaddr);
+       __ua_flags = uaccess_save_and_enable();
        __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
        "1:     ldrex   %1, [%4]\n"
        "       teq     %1, %2\n"
@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        : "=&r" (ret), "=&r" (val)
        : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
        : "cc", "memory");
+       uaccess_restore(__ua_flags);
        smp_mb();
 
        *uval = val;
@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 #include <asm/domain.h>
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)        \
+({                                                             \
+       unsigned int __ua_flags = uaccess_save_and_enable();    \
        __asm__ __volatile__(                                   \
        "1:     " TUSER(ldr) "  %1, [%3]\n"                     \
        "       " insn "\n"                                     \
@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        __futex_atomic_ex_table("%5")                           \
        : "=&r" (ret), "=&r" (oldval), "=&r" (tmp)              \
        : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)              \
-       : "cc", "memory")
+       : "cc", "memory");                                      \
+       uaccess_restore(__ua_flags);                            \
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                              u32 oldval, u32 newval)
 {
+       unsigned int __ua_flags;
        int ret = 0;
        u32 val;
 
@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                return -EFAULT;
 
        preempt_disable();
+       __ua_flags = uaccess_save_and_enable();
        __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
        "1:     " TUSER(ldr) "  %1, [%4]\n"
        "       teq     %1, %2\n"
@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        : "+r" (ret), "=&r" (val)
        : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
        : "cc", "memory");
+       uaccess_restore(__ua_flags);
 
        *uval = val;
        preempt_enable();
arch/arm/include/asm/glue-cache.h
index a3c24cd5b7c8ee94e9f36a73abf00c340a747ca0..cab07f69382dea9efe9be62c0924a5b4f8d95b53 100644 (file)
@@ -158,8 +158,6 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
 #define __cpuc_coherent_user_range     __glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_area       __glue(_CACHE,_flush_kern_dcache_area)
 
-#define dmac_map_area                  __glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area                        __glue(_CACHE,_dma_unmap_area)
 #define dmac_flush_range               __glue(_CACHE,_dma_flush_range)
 #endif
 
arch/arm/include/asm/memory.h
index 6f225acc07c56bdef12a3ca68ae526d8fe836bf4..b7f6fb462ea0da21e59e67dbb4e75de729ffdfcc 100644 (file)
@@ -286,7 +286,7 @@ extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
  */
 static inline phys_addr_t __virt_to_idmap(unsigned long x)
 {
-       if (arch_virt_to_idmap)
+       if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
                return arch_virt_to_idmap(x);
        else
                return __virt_to_phys(x);
arch/arm/include/asm/outercache.h
index 563b92fc2f41c3b2dd10bd56c1dd0f396cc8474a..c2bf24f40177ddce6ce472d75735a04b26e426fd 100644 (file)
@@ -129,21 +129,4 @@ static inline void outer_resume(void) { }
 
 #endif
 
-#ifdef CONFIG_OUTER_CACHE_SYNC
-/**
- * outer_sync - perform a sync point for outer cache
- *
- * Ensure that all outer cache operations are complete and any store
- * buffers are drained.
- */
-static inline void outer_sync(void)
-{
-       if (outer_cache.sync)
-               outer_cache.sync();
-}
-#else
-static inline void outer_sync(void)
-{ }
-#endif
-
 #endif /* __ASM_OUTERCACHE_H */
arch/arm/include/asm/pgtable-2level-hwdef.h
index 5e68278e953e2513f904cc253d200705c999a673..d0131ee6f6af920d5c3bc6479c2e515e1d706539 100644 (file)
@@ -23,6 +23,7 @@
 #define PMD_PXNTABLE           (_AT(pmdval_t, 1) << 2)     /* v7 */
 #define PMD_BIT4               (_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x)          (_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK                PMD_DOMAIN(0x0f)
 #define PMD_PROTECTION         (_AT(pmdval_t, 1) << 9)         /* v5 */
 /*
  *   - section
arch/arm/include/asm/smp.h
index 2f3ac1ba6fb47329e70cfd63d40cc080edd4b63a..ef356659b4f43e6125443e7edd67aa1706bc8d2e 100644 (file)
@@ -74,7 +74,6 @@ extern void secondary_startup_arm(void);
 extern int __cpu_disable(void);
 
 extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -105,6 +104,7 @@ struct smp_operations {
 #ifdef CONFIG_HOTPLUG_CPU
        int  (*cpu_kill)(unsigned int cpu);
        void (*cpu_die)(unsigned int cpu);
+       bool  (*cpu_can_disable)(unsigned int cpu);
        int  (*cpu_disable)(unsigned int cpu);
 #endif
 #endif
arch/arm/include/asm/smp_plat.h
index 993e5224d8f7eeeb09f4c2a5358a97073c67c3a6..f9080717fc88c6dd4f3e79e40a426de22100d8db 100644 (file)
@@ -107,4 +107,13 @@ static inline u32 mpidr_hash_size(void)
 extern int platform_can_secondary_boot(void);
 extern int platform_can_cpu_hotplug(void);
 
+#ifdef CONFIG_HOTPLUG_CPU
+extern int platform_can_hotplug_cpu(unsigned int cpu);
+#else
+static inline int platform_can_hotplug_cpu(unsigned int cpu)
+{
+       return 0;
+}
+#endif
+
 #endif
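
This declaration pairs with the new cpu_can_disable hook added to struct smp_operations in the asm/smp.h hunk above. The arch/arm/kernel/smp.c side of this merge is expected to look roughly like the sketch below (not quoted from the diff):

int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_kill() and cpu_die() are both needed to unplug a CPU */
	if (!smp_ops.cpu_kill || !smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/* By default, allow unplugging everything except the boot CPU */
	return cpu != 0;
}

This is what lets mcpm_platsmp.c drop its trivial cpu_disable implementation in favour of the bool-returning cpu_can_disable hook.
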
arch/arm/include/asm/switch_to.h
index c99e259469f7de9185f394dbb3c4453ff2f0f50f..12ebfcc1d539151bd8365594befaa2e9814fb3a9 100644 (file)
@@ -10,7 +10,9 @@
  * CPU.
  */
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
-#define finish_arch_switch(prev)       dsb(ish)
+#define __complete_pending_tlbi()      dsb(ish)
+#else
+#define __complete_pending_tlbi()
 #endif
 
 /*
@@ -22,6 +24,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 
 #define switch_to(prev,next,last)                                      \
 do {                                                                   \
+       __complete_pending_tlbi();                                      \
        last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));        \
 } while (0)
 
arch/arm/include/asm/thread_info.h
index bd32eded3e5061b49048e3110902b7edc63e3638..d0a1119dcaf38ba92d99354aaeba18bbf0452004 100644 (file)
@@ -74,9 +74,6 @@ struct thread_info {
        .flags          = 0,                                            \
        .preempt_count  = INIT_PREEMPT_COUNT,                           \
        .addr_limit     = KERNEL_DS,                                    \
-       .cpu_domain     = domain_val(DOMAIN_USER, DOMAIN_MANAGER) |     \
-                         domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |   \
-                         domain_val(DOMAIN_IO, DOMAIN_CLIENT),         \
 }
 
 #define init_thread_info       (init_thread_union.thread_info)
@@ -136,22 +133,18 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
 
 /*
  * thread information flags:
- *  TIF_SYSCALL_TRACE  - syscall trace active
- *  TIF_SYSCAL_AUDIT   - syscall auditing active
- *  TIF_SIGPENDING     - signal pending
- *  TIF_NEED_RESCHED   - rescheduling necessary
- *  TIF_NOTIFY_RESUME  - callback before returning to user
  *  TIF_USEDFPU                - FPU was used by this task this quantum (SMP)
  *  TIF_POLLING_NRFLAG - true if poll_idle() is polling TIF_NEED_RESCHED
  */
-#define TIF_SIGPENDING         0
-#define TIF_NEED_RESCHED       1
+#define TIF_SIGPENDING         0       /* signal pending */
+#define TIF_NEED_RESCHED       1       /* rescheduling necessary */
 #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
-#define TIF_UPROBE             7
-#define TIF_SYSCALL_TRACE      8
-#define TIF_SYSCALL_AUDIT      9
-#define TIF_SYSCALL_TRACEPOINT 10
-#define TIF_SECCOMP            11      /* seccomp syscall filtering active */
+#define TIF_UPROBE             3       /* breakpointed or singlestepping */
+#define TIF_SYSCALL_TRACE      4       /* syscall trace active */
+#define TIF_SYSCALL_AUDIT      5       /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 6       /* syscall tracepoint instrumentation */
+#define TIF_SECCOMP            7       /* seccomp syscall filtering active */
+
 #define TIF_NOHZ               12      /* in adaptive nohz mode */
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
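
The renumbering packs every flag tested on the return-to-user path into bits 0-7. The payoff appears in the entry-common.S hunk below, where a single "tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK" replaces two separate tests, which requires the combined mask to be encodable as one 8-bit ARM immediate. Assuming the composite masks elsewhere in this header are still built from exactly these flags:

/* assumed composite masks, shown with the new bit numbering */
#define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)		/* 0x0f */
#define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)	/* 0xf0 */
/* _TIF_SYSCALL_WORK | _TIF_WORK_MASK == 0xff, a single ARM immediate */
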
arch/arm/include/asm/uaccess.h
index 74b17d09ef7aa54cb98d22ccf5f068b4c39971be..8cc85a4ebec20a1223f966bb868e5def5f70928e 100644 (file)
@@ -49,6 +49,35 @@ struct exception_table_entry
 
 extern int fixup_exception(struct pt_regs *regs);
 
+/*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel can not inadvertantly
+ * perform such accesses (eg, via list poison values) which could then
+ * be exploited for priviledge escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       unsigned int old_domain = get_domain();
+
+       /* Set the current domain access to permit user accesses */
+       set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+                  domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+       return old_domain;
+#else
+       return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       /* Restore the user access mask */
+       set_domain(flags);
+#endif
+}
+
 /*
  * These two are intentionally not defined anywhere - if the kernel
  * code generates any references to them, that's a bug.
@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
                register typeof(x) __r2 asm("r2");                      \
                register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
+               unsigned int __ua_flags = uaccess_save_and_enable();    \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
                        if (sizeof((x)) >= 8)                           \
@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
                        break;                                          \
                default: __e = __get_user_bad(); break;                 \
                }                                                       \
+               uaccess_restore(__ua_flags);                            \
                x = (typeof(*(p))) __r2;                                \
                __e;                                                    \
        })
@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
                register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
                register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
+               unsigned int __ua_flags = uaccess_save_and_enable();    \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
                        __put_user_x(__r2, __p, __e, __l, 1);           \
@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
                        break;                                          \
                default: __e = __put_user_bad(); break;                 \
                }                                                       \
+               uaccess_restore(__ua_flags);                            \
                __e;                                                    \
        })
 
@@ -300,20 +333,23 @@ static inline void set_fs(mm_segment_t fs)
 do {                                                                   \
        unsigned long __gu_addr = (unsigned long)(ptr);                 \
        unsigned long __gu_val;                                         \
+       unsigned int __ua_flags;                                        \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
+       __ua_flags = uaccess_save_and_enable();                         \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __get_user_asm_byte(__gu_val, __gu_addr, err);  break;  \
        case 2: __get_user_asm_half(__gu_val, __gu_addr, err);  break;  \
        case 4: __get_user_asm_word(__gu_val, __gu_addr, err);  break;  \
        default: (__gu_val) = __get_user_bad();                         \
        }                                                               \
+       uaccess_restore(__ua_flags);                                    \
        (x) = (__typeof__(*(ptr)))__gu_val;                             \
 } while (0)
 
-#define __get_user_asm_byte(x, addr, err)                      \
+#define __get_user_asm(x, addr, err, instr)                    \
        __asm__ __volatile__(                                   \
-       "1:     " TUSER(ldrb) " %1,[%2],#0\n"                   \
+       "1:     " TUSER(instr) " %1, [%2], #0\n"                \
        "2:\n"                                                  \
        "       .pushsection .text.fixup,\"ax\"\n"              \
        "       .align  2\n"                                    \
@@ -329,6 +365,9 @@ do {                                                                        \
        : "r" (addr), "i" (-EFAULT)                             \
        : "cc")
 
+#define __get_user_asm_byte(x, addr, err)                      \
+       __get_user_asm(x, addr, err, ldrb)
+
 #ifndef __ARMEB__
 #define __get_user_asm_half(x, __gu_addr, err)                 \
 ({                                                             \
@@ -348,22 +387,7 @@ do {                                                                       \
 #endif
 
 #define __get_user_asm_word(x, addr, err)                      \
-       __asm__ __volatile__(                                   \
-       "1:     " TUSER(ldr) "  %1,[%2],#0\n"                   \
-       "2:\n"                                                  \
-       "       .pushsection .text.fixup,\"ax\"\n"              \
-       "       .align  2\n"                                    \
-       "3:     mov     %0, %3\n"                               \
-       "       mov     %1, #0\n"                               \
-       "       b       2b\n"                                   \
-       "       .popsection\n"                                  \
-       "       .pushsection __ex_table,\"a\"\n"                \
-       "       .align  3\n"                                    \
-       "       .long   1b, 3b\n"                               \
-       "       .popsection"                                    \
-       : "+r" (err), "=&r" (x)                                 \
-       : "r" (addr), "i" (-EFAULT)                             \
-       : "cc")
+       __get_user_asm(x, addr, err, ldr)
 
 #define __put_user(x, ptr)                                             \
 ({                                                                     \
@@ -381,9 +405,11 @@ do {                                                                       \
 #define __put_user_err(x, ptr, err)                                    \
 do {                                                                   \
        unsigned long __pu_addr = (unsigned long)(ptr);                 \
+       unsigned int __ua_flags;                                        \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
+       __ua_flags = uaccess_save_and_enable();                         \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);  break;  \
        case 2: __put_user_asm_half(__pu_val, __pu_addr, err);  break;  \
@@ -391,11 +417,12 @@ do {                                                                      \
        case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break;  \
        default: __put_user_bad();                                      \
        }                                                               \
+       uaccess_restore(__ua_flags);                                    \
 } while (0)
 
-#define __put_user_asm_byte(x, __pu_addr, err)                 \
+#define __put_user_asm(x, __pu_addr, err, instr)               \
        __asm__ __volatile__(                                   \
-       "1:     " TUSER(strb) " %1,[%2],#0\n"                   \
+       "1:     " TUSER(instr) " %1, [%2], #0\n"                \
        "2:\n"                                                  \
        "       .pushsection .text.fixup,\"ax\"\n"              \
        "       .align  2\n"                                    \
@@ -410,6 +437,9 @@ do {                                                                        \
        : "r" (x), "r" (__pu_addr), "i" (-EFAULT)               \
        : "cc")
 
+#define __put_user_asm_byte(x, __pu_addr, err)                 \
+       __put_user_asm(x, __pu_addr, err, strb)
+
 #ifndef __ARMEB__
 #define __put_user_asm_half(x, __pu_addr, err)                 \
 ({                                                             \
@@ -427,21 +457,7 @@ do {                                                                       \
 #endif
 
 #define __put_user_asm_word(x, __pu_addr, err)                 \
-       __asm__ __volatile__(                                   \
-       "1:     " TUSER(str) "  %1,[%2],#0\n"                   \
-       "2:\n"                                                  \
-       "       .pushsection .text.fixup,\"ax\"\n"              \
-       "       .align  2\n"                                    \
-       "3:     mov     %0, %3\n"                               \
-       "       b       2b\n"                                   \
-       "       .popsection\n"                                  \
-       "       .pushsection __ex_table,\"a\"\n"                \
-       "       .align  3\n"                                    \
-       "       .long   1b, 3b\n"                               \
-       "       .popsection"                                    \
-       : "+r" (err)                                            \
-       : "r" (x), "r" (__pu_addr), "i" (-EFAULT)               \
-       : "cc")
+       __put_user_asm(x, __pu_addr, err, str)
 
 #ifndef __ARMEB__
 #define        __reg_oper0     "%R2"
@@ -474,11 +490,46 @@ do {                                                                      \
 
 
 #ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       unsigned int __ua_flags = uaccess_save_and_enable();
+       n = arm_copy_from_user(to, from, n);
+       uaccess_restore(__ua_flags);
+       return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       unsigned int __ua_flags = uaccess_save_and_enable();
+       n = arm_copy_to_user(to, from, n);
+       uaccess_restore(__ua_flags);
+       return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+       unsigned int __ua_flags = uaccess_save_and_enable();
+       n = arm_clear_user(addr, n);
+       uaccess_restore(__ua_flags);
+       return n;
+}
+
 #else
 #define __copy_from_user(to, from, n)  (memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to, from, n)    (memcpy((void __force *)to, from, n), 0)
@@ -511,6 +562,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
        return n;
 }
 
+/* These are from lib/ code, and use __get_user() and friends */
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
 extern __must_check long strlen_user(const char __user *str);
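
Every accessor in this header now follows the same bracketing discipline: save the current DACR, open the user domain, perform the access, then restore. A minimal standalone sketch of that pattern; probe_user_word() is a hypothetical helper, not part of the patch:

static inline int probe_user_word(const unsigned int __user *src,
				  unsigned int *dst)
{
	unsigned int ua_flags = uaccess_save_and_enable();
	int err = __get_user(*dst, src);	/* user access is legal here */
	uaccess_restore(ua_flags);
	return err;
}

Nesting is harmless: __get_user() brackets itself in the same way, and uaccess_restore() simply writes back whatever DACR value was saved.
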
arch/arm/kernel/armksyms.c
index 5e5a51a99e68ec77b38b8101611bc853eb8496af..f89811fb9a55f3a490c3633ef99ef52745c58129 100644 (file)
@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
arch/arm/kernel/entry-armv.S
index cb4fb1e69778603d41356f3ed7a98695f4cc0cdb..3e1c26eb32b43e13a5fa3e70b2d09f91a07ebd4e 100644 (file)
@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
 #define SPFIX(code...)
 #endif
 
-       .macro  svc_entry, stack_hole=0, trace=1
+       .macro  svc_entry, stack_hole=0, trace=1, uaccess=1
  UNWIND(.fnstart               )
  UNWIND(.save {r0 - pc}                )
-       sub     sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+       sub     sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 #ifdef CONFIG_THUMB2_KERNEL
  SPFIX(        str     r0, [sp]        )       @ temporarily saved
  SPFIX(        mov     r0, sp          )
@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
        ldmia   r0, {r3 - r5}
        add     r7, sp, #S_SP - 4       @ here for interlock avoidance
        mov     r6, #-1                 @  ""  ""      ""       ""
-       add     r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+       add     r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
  SPFIX(        addeq   r2, r2, #4      )
        str     r3, [sp, #-4]!          @ save the "real" r0 copied
                                        @ from the exception stack
@@ -185,6 +185,11 @@ ENDPROC(__und_invalid)
        @
        stmia   r7, {r2 - r6}
 
+       uaccess_save r0
+       .if \uaccess
+       uaccess_disable r0
+       .endif
+
        .if \trace
 #ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
@@ -194,7 +199,7 @@ ENDPROC(__und_invalid)
 
        .align  5
 __dabt_svc:
-       svc_entry
+       svc_entry uaccess=0
        mov     r2, sp
        dabt_helper
  THUMB(        ldr     r5, [sp, #S_PSR]        )       @ potentially updated CPSR
@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
 #error "sizeof(struct pt_regs) must be a multiple of 8"
 #endif
 
-       .macro  usr_entry, trace=1
+       .macro  usr_entry, trace=1, uaccess=1
  UNWIND(.fnstart       )
  UNWIND(.cantunwind    )       @ don't unwind the user space
        sub     sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
  ARM(  stmdb   r0, {sp, lr}^                   )
  THUMB(        store_user_sp_lr r0, r1, S_SP - S_PC    )
 
+       .if \uaccess
+       uaccess_disable ip
+       .endif
+
        @ Enable the alignment trap while in kernel mode
  ATRAP(        teq     r8, r7)
  ATRAP( mcrne  p15, 0, r8, c1, c0, 0)
@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)
 
        .align  5
 __dabt_usr:
-       usr_entry
+       usr_entry uaccess=0
        kuser_cmpxchg_check
        mov     r2, sp
        dabt_helper
@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)
 
        .align  5
 __und_usr:
-       usr_entry
+       usr_entry uaccess=0
 
        mov     r2, r4
        mov     r3, r5
@@ -484,6 +493,8 @@ __und_usr:
 1:     ldrt    r0, [r4]
  ARM_BE8(rev   r0, r0)                         @ little endian instruction
 
+       uaccess_disable ip
+
        @ r0 = 32-bit ARM instruction which caused the exception
        @ r2 = PC value for the following instruction (:= regs->ARM_pc)
        @ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@ __und_usr_thumb:
 2:     ldrht   r5, [r4]
 ARM_BE8(rev16  r5, r5)                         @ little endian instruction
        cmp     r5, #0xe800                     @ 32bit instruction if xx != 0
-       blo     __und_usr_fault_16              @ 16bit undefined instruction
+       blo     __und_usr_fault_16_pan          @ 16bit undefined instruction
 3:     ldrht   r0, [r2]
 ARM_BE8(rev16  r0, r0)                         @ little endian instruction
+       uaccess_disable ip
        add     r2, r2, #2                      @ r2 is PC + 2, make it PC + 4
        str     r2, [sp, #S_PC]                 @ it's a 2x16bit instr, update
        orr     r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@ ENDPROC(no_fp)
 __und_usr_fault_32:
        mov     r1, #4
        b       1f
+__und_usr_fault_16_pan:
+       uaccess_disable ip
 __und_usr_fault_16:
        mov     r1, #2
 1:     mov     r0, sp
@@ -770,6 +784,8 @@ ENTRY(__switch_to)
        ldr     r4, [r2, #TI_TP_VALUE]
        ldr     r5, [r2, #TI_TP_VALUE + 4]
 #ifdef CONFIG_CPU_USE_DOMAINS
+       mrc     p15, 0, r6, c3, c0, 0           @ Get domain register
+       str     r6, [r1, #TI_CPU_DOMAIN]        @ Save old domain register
        ldr     r6, [r2, #TI_CPU_DOMAIN]
 #endif
        switch_tls r1, r4, r5, r3, r7
arch/arm/kernel/entry-common.S
index 92828a1dec80c1c33d051d9b76063727598495d5..30a7228eaceba758fb368f9ef16511019b1cb5c5 100644 (file)
 
 
        .align  5
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
 /*
- * This is the fast syscall return path.  We do as little as
- * possible here, and this includes saving r0 back into the SVC
- * stack.
+ * This is the fast syscall return path.  We do as little as possible here,
+ * such as avoiding writing r0 to the stack.  We only use this path if we
+ * have tracing and context tracking disabled - the overheads from those
+ * features make this path too inefficient.
  */
 ret_fast_syscall:
  UNWIND(.fnstart       )
  UNWIND(.cantunwind    )
-       disable_irq                             @ disable interrupts
+       disable_irq_notrace                     @ disable interrupts
        ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
-       tst     r1, #_TIF_SYSCALL_WORK
-       bne     __sys_trace_return
-       tst     r1, #_TIF_WORK_MASK
+       tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
        bne     fast_work_pending
-       asm_trace_hardirqs_on
 
        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr
-       ct_user_enter
 
        restore_user_regs fast = 1, offset = S_OFF
  UNWIND(.fnend         )
+ENDPROC(ret_fast_syscall)
 
-/*
- * Ok, we need to do extra processing, enter the slow path.
- */
+       /* Ok, we need to do extra processing, enter the slow path. */
 fast_work_pending:
        str     r0, [sp, #S_R0+S_OFF]!          @ returned r0
-work_pending:
+       /* fall through to work_pending */
+#else
+/*
+ * The "replacement" ret_fast_syscall for when tracing or context tracking
+ * is enabled.  As we will need to call out to some C functions, we save
+ * r0 first to avoid needing to save registers around each C function call.
+ */
+ret_fast_syscall:
+ UNWIND(.fnstart       )
+ UNWIND(.cantunwind    )
+       str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
+       disable_irq_notrace                     @ disable interrupts
+       ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
+       tst     r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+       beq     no_work_pending
+ UNWIND(.fnend         )
+ENDPROC(ret_fast_syscall)
+
+       /* Slower path - fall through to work_pending */
+#endif
+
+       tst     r1, #_TIF_SYSCALL_WORK
+       bne     __sys_trace_return_nosave
+slow_work_pending:
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
        bl      do_work_pending
@@ -61,19 +81,23 @@ work_pending:
        movlt   scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
        ldmia   sp, {r0 - r6}                   @ have to reload r0 - r6
        b       local_restart                   @ ... and off we go
+ENDPROC(ret_fast_syscall)
 
 /*
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
+ * IRQs may be enabled here, so always disable them.  Note that we use the
+ * "notrace" version to avoid calling into the tracing code unnecessarily.
+ * do_work_pending() will update this state if necessary.
  */
 ENTRY(ret_to_user)
 ret_slow_syscall:
-       disable_irq                             @ disable interrupts
+       disable_irq_notrace                     @ disable interrupts
 ENTRY(ret_to_user_from_irq)
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
-       bne     work_pending
+       bne     slow_work_pending
 no_work_pending:
-       asm_trace_hardirqs_on
+       asm_trace_hardirqs_on save = 0
 
        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr
@@ -173,6 +197,8 @@ ENTRY(vector_swi)
  USER( ldr     scno, [lr, #-4]         )       @ get SWI instruction
 #endif
 
+       uaccess_disable tbl
+
        adr     tbl, sys_call_table             @ load syscall table pointer
 
 #if defined(CONFIG_OABI_COMPAT)
@@ -251,6 +277,12 @@ __sys_trace_return:
        bl      syscall_trace_exit
        b       ret_slow_syscall
 
+__sys_trace_return_nosave:
+       enable_irq_notrace
+       mov     r0, sp
+       bl      syscall_trace_exit
+       b       ret_slow_syscall
+
        .align  5
 #ifdef CONFIG_ALIGNMENT_TRAP
        .type   __cr_alignment, #object
arch/arm/kernel/entry-header.S
index 1a0045abead7562be1e27163e0aee3c6afbe9b40..0d22ad206d5230ba05a40b4101bb1dd2e4addd10 100644 (file)
        msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
        .endm
 
-#ifndef CONFIG_THUMB2_KERNEL
+
        .macro  svc_exit, rpsr, irq = 0
        .if     \irq != 0
        @ IRQs already off
        blne    trace_hardirqs_off
 #endif
        .endif
+       uaccess_restore
+
+#ifndef CONFIG_THUMB2_KERNEL
+       @ ARM mode SVC restore
        msr     spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r1, r2, [r0]                    @ clear the exclusive monitor
 #endif
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
+#else
+       @ Thumb mode SVC restore
+       ldr     lr, [sp, #S_SP]                 @ top of the stack
+       ldrd    r0, r1, [sp, #S_LR]             @ calling lr and pc
+
+       @ We must avoid clrex due to Cortex-A15 erratum #830321
+       strex   r2, r1, [sp, #S_LR]             @ clear the exclusive monitor
+
+       stmdb   lr!, {r0, r1, \rpsr}            @ calling lr and rfe context
+       ldmia   sp, {r0 - r12}
+       mov     sp, lr
+       ldr     lr, [sp], #4
+       rfeia   sp!
+#endif
        .endm
 
        @
        @ on the stack remains correct).
        @
        .macro  svc_exit_via_fiq
+       uaccess_restore
+#ifndef CONFIG_THUMB2_KERNEL
+       @ ARM mode restore
        mov     r0, sp
        ldmib   r0, {r1 - r14}  @ abort is deadly from here onward (it will
                                @ clobber state restored below)
        msr     spsr_cxsf, r9
        ldr     r0, [r0, #S_R0]
        ldmia   r8, {pc}^
+#else
+       @ Thumb mode restore
+       add     r0, sp, #S_R2
+       ldr     lr, [sp, #S_LR]
+       ldr     sp, [sp, #S_SP] @ abort is deadly from here onward (it will
+                               @ clobber state restored below)
+       ldmia   r0, {r2 - r12}
+       mov     r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
+       msr     cpsr_c, r1
+       sub     r0, #S_R2
+       add     r8, r0, #S_PC
+       ldmia   r0, {r0 - r1}
+       rfeia   r8
+#endif
        .endm
 
+
        .macro  restore_user_regs, fast = 0, offset = 0
+       uaccess_enable r1, isb=0
+#ifndef CONFIG_THUMB2_KERNEL
+       @ ARM mode restore
        mov     r2, sp
        ldr     r1, [r2, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [r2, #\offset + S_PC]!      @ get pc
                                                @ after ldm {}^
        add     sp, sp, #\offset + S_FRAME_SIZE
        movs    pc, lr                          @ return & move spsr_svc into cpsr
-       .endm
-
-#else  /* CONFIG_THUMB2_KERNEL */
-       .macro  svc_exit, rpsr, irq = 0
-       .if     \irq != 0
-       @ IRQs already off
-#ifdef CONFIG_TRACE_IRQFLAGS
-       @ The parent context IRQs must have been enabled to get here in
-       @ the first place, so there's no point checking the PSR I bit.
-       bl      trace_hardirqs_on
-#endif
-       .else
-       @ IRQs off again before pulling preserved data off the stack
-       disable_irq_notrace
-#ifdef CONFIG_TRACE_IRQFLAGS
-       tst     \rpsr, #PSR_I_BIT
-       bleq    trace_hardirqs_on
-       tst     \rpsr, #PSR_I_BIT
-       blne    trace_hardirqs_off
-#endif
-       .endif
-       ldr     lr, [sp, #S_SP]                 @ top of the stack
-       ldrd    r0, r1, [sp, #S_LR]             @ calling lr and pc
-
-       @ We must avoid clrex due to Cortex-A15 erratum #830321
-       strex   r2, r1, [sp, #S_LR]             @ clear the exclusive monitor
-
-       stmdb   lr!, {r0, r1, \rpsr}            @ calling lr and rfe context
-       ldmia   sp, {r0 - r12}
-       mov     sp, lr
-       ldr     lr, [sp], #4
-       rfeia   sp!
-       .endm
-
-       @
-       @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
-       @
-       @ For full details see non-Thumb implementation above.
-       @
-       .macro  svc_exit_via_fiq
-       add     r0, sp, #S_R2
-       ldr     lr, [sp, #S_LR]
-       ldr     sp, [sp, #S_SP] @ abort is deadly from here onward (it will
-                               @ clobber state restored below)
-       ldmia   r0, {r2 - r12}
-       mov     r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
-       msr     cpsr_c, r1
-       sub     r0, #S_R2
-       add     r8, r0, #S_PC
-       ldmia   r0, {r0 - r1}
-       rfeia   r8
-       .endm
-
-#ifdef CONFIG_CPU_V7M
-       /*
-        * Note we don't need to do clrex here as clearing the local monitor is
-        * part of each exception entry and exit sequence.
-        */
-       .macro  restore_user_regs, fast = 0, offset = 0
+#elif defined(CONFIG_CPU_V7M)
+       @ V7M restore.
+       @ Note that we don't need to do clrex here as clearing the local
+       @ monitor is part of the exception entry and exit sequence.
        .if     \offset
        add     sp, #\offset
        .endif
        v7m_exception_slow_exit ret_r0 = \fast
-       .endm
-#else  /* ifdef CONFIG_CPU_V7M */
-       .macro  restore_user_regs, fast = 0, offset = 0
+#else
+       @ Thumb mode restore
        mov     r2, sp
        load_user_sp_lr r2, r3, \offset + S_SP  @ calling sp, lr
        ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
        .endif
        add     sp, sp, #S_FRAME_SIZE - S_SP
        movs    pc, lr                          @ return & move spsr_svc into cpsr
-       .endm
-#endif /* ifdef CONFIG_CPU_V7M / else */
 #endif /* !CONFIG_THUMB2_KERNEL */
+       .endm
 
 /*
  * Context tracking subsystem.  Used to instrument transitions
arch/arm/kernel/head.S
index bd755d97e459d77ff05cc8a1264f336c58c1b598..04286fd9e09ce7a27259c4d375a05a965e3be0ea 100644 (file)
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
        sub     lr, r4, r5                      @ mmu has been enabled
        add     r3, r7, lr
        ldrd    r4, [r3, #0]                    @ get secondary_data.pgdir
+ARM_BE8(eor    r4, r4, r5)                     @ Swap r5 and r4 in BE:
+ARM_BE8(eor    r5, r4, r5)                     @ it can be done in 3 steps
+ARM_BE8(eor    r4, r4, r5)                     @ without using a temp reg.
        ldr     r8, [r3, #8]                    @ get secondary_data.swapper_pg_dir
        badr    lr, __enable_mmu                @ return address
        mov     r13, r12                        @ __secondary_switched address
@@ -461,10 +464,7 @@ __enable_mmu:
 #ifdef CONFIG_ARM_LPAE
        mcrr    p15, 0, r4, r5, c2              @ load TTBR0
 #else
-       mov     r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-                     domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-                     domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-                     domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+       mov     r5, #DACR_INIT
        mcr     p15, 0, r5, c3, c0, 0           @ load domain access register
        mcr     p15, 0, r4, c2, c0, 0           @ load page table pointer
 #endif
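
The three ARM_BE8() eor instructions above are the classic XOR swap: they exchange r4 and r5 without a scratch register so the doubleword loaded with ldrd ends up in the expected registers on a big-endian kernel. A minimal userspace model of the same arithmetic (values are illustrative, not real page-table addresses):

#include <stdio.h>

/*
 * Model of the three-eor swap used in the ARM_BE8() lines above:
 * two values are exchanged without a temporary register.
 */
static void xor_swap(unsigned int *a, unsigned int *b)
{
	*a ^= *b;	/* a' = a ^ b             */
	*b ^= *a;	/* b' = b ^ (a ^ b) = a   */
	*a ^= *b;	/* a''= (a ^ b) ^ a = b   */
}

int main(void)
{
	unsigned int r4 = 0x80004000, r5 = 0x80008000;

	xor_swap(&r4, &r5);
	printf("r4=%#x r5=%#x\n", r4, r5);
	return 0;
}

The trick only works when the two operands are distinct storage locations, which is always true for two registers.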
index 350f188c92d29447b59cbdcf62e0f98be5eed484..b96c8ed1723abf29ceb73f1771b67bf560d925ab 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/export.h>
 
 #include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
 #include <asm/exception.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/irq.h>
index f192a2a4171935720cfc702953703c78078579cc..a3089bacb8d822ded284b432f1cb37ecd8b0ff66 100644 (file)
@@ -91,13 +91,6 @@ void arch_cpu_idle_exit(void)
        ledtrig_cpu(CPU_LED_IDLE_END);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
-{
-       cpu_die();
-}
-#endif
-
 void __show_regs(struct pt_regs *regs)
 {
        unsigned long flags;
@@ -129,12 +122,36 @@ void __show_regs(struct pt_regs *regs)
        buf[4] = '\0';
 
 #ifndef CONFIG_CPU_V7M
-       printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
-               buf, interrupts_enabled(regs) ? "n" : "ff",
-               fast_interrupts_enabled(regs) ? "n" : "ff",
-               processor_modes[processor_mode(regs)],
-               isa_modes[isa_mode(regs)],
-               get_fs() == get_ds() ? "kernel" : "user");
+       {
+               unsigned int domain = get_domain();
+               const char *segment;
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+               /*
+                * Get the domain register for the parent context. In user
+                * mode, we don't save the DACR, so let's use what it should
+                * be. For other modes, we place it after the pt_regs struct.
+                */
+               if (user_mode(regs))
+                       domain = DACR_UACCESS_ENABLE;
+               else
+                       domain = *(unsigned int *)(regs + 1);
+#endif
+
+               if ((domain & domain_mask(DOMAIN_USER)) ==
+                   domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+                       segment = "none";
+               else if (get_fs() == get_ds())
+                       segment = "kernel";
+               else
+                       segment = "user";
+
+               printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
+                       buf, interrupts_enabled(regs) ? "n" : "ff",
+                       fast_interrupts_enabled(regs) ? "n" : "ff",
+                       processor_modes[processor_mode(regs)],
+                       isa_modes[isa_mode(regs)], segment);
+       }
 #else
        printk("xPSR: %08lx\n", regs->ARM_cpsr);
 #endif
@@ -146,10 +163,9 @@ void __show_regs(struct pt_regs *regs)
                buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
                {
-                       unsigned int transbase, dac;
+                       unsigned int transbase, dac = get_domain();
                        asm("mrc p15, 0, %0, c2, c0\n\t"
-                           "mrc p15, 0, %1, c3, c0\n"
-                           : "=r" (transbase), "=r" (dac));
+                           : "=r" (transbase));
                        snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
                                transbase, dac);
                }
@@ -210,6 +226,14 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
        memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
 
+       /*
+        * Copy the initial value of the domain access control register
+        * from the current thread: thread->addr_limit will have been
+        * copied from the current thread via setup_thread_stack() in
+        * kernel/fork.c
+        */
+       thread->cpu_domain = get_domain();
+
        if (likely(!(p->flags & PF_KTHREAD))) {
                *childregs = *current_pt_regs();
                childregs->ARM_r0 = 0;
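
The "Segment: none" reporting above keys off the DOMAIN_USER field of the DACR, where each domain owns a 2-bit access value. A hedged userspace model of that bit-field arithmetic follows; the domain number and access encodings mirror the style of the kernel helpers but are illustrative, not copied from asm/domain.h.

#include <stdio.h>

/* 2 bits per domain: bits [2*d+1:2*d] hold the access type for domain d. */
#define DOMAIN_NOACCESS	0
#define DOMAIN_CLIENT	1
#define DOMAIN_MANAGER	3

#define DOMAIN_USER	1	/* example domain number */

#define domain_mask(dom)	(3u << (2 * (dom)))
#define domain_val(dom, type)	((unsigned int)(type) << (2 * (dom)))

int main(void)
{
	unsigned int dacr = domain_val(DOMAIN_USER, DOMAIN_NOACCESS) |
			    domain_val(0, DOMAIN_CLIENT);

	/* same shape of test as the __show_regs() change above */
	if ((dacr & domain_mask(DOMAIN_USER)) ==
	    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
		printf("Segment: none (user domain has no access)\n");
	return 0;
}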
index 9c38bd42f04b6ecc17efc9eb62c6c301f5169410..20edd349d379f22c583438db7fbf52f46e1133ef 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
+#include <asm/fixmap.h>
 #include <asm/procinfo.h>
 #include <asm/psci.h>
 #include <asm/sections.h>
@@ -955,6 +956,9 @@ void __init setup_arch(char **cmdline_p)
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;
 
+       if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
+               early_fixmap_init();
+
        parse_early_param();
 
 #ifdef CONFIG_MMU
@@ -1016,7 +1020,7 @@ static int __init topology_init(void)
 
        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
-               cpuinfo->cpu.hotpluggable = 1;
+               cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
                register_cpu(&cpuinfo->cpu, cpu);
        }
 
index 423663e23791e1349f877990b021f8371fe85c24..b6cda06b455fc6e3b6fcde6313a93ad052bb3ada 100644 (file)
@@ -562,6 +562,12 @@ static int do_signal(struct pt_regs *regs, int syscall)
 asmlinkage int
 do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
+       /*
+        * The assembly code enters us with IRQs off, but it hasn't
+        * informed the tracing code of that for efficiency reasons.
+        * Update the trace code with the current status.
+        */
+       trace_hardirqs_off();
        do {
                if (likely(thread_flags & _TIF_NEED_RESCHED)) {
                        schedule();
index 3d6b7821cff8c952e73c72cad5a1f8d90cdf1758..ba0063c539c3fc3436f291c54a04e3cfa4b30793 100644 (file)
@@ -175,13 +175,26 @@ static int platform_cpu_disable(unsigned int cpu)
        if (smp_ops.cpu_disable)
                return smp_ops.cpu_disable(cpu);
 
+       return 0;
+}
+
+int platform_can_hotplug_cpu(unsigned int cpu)
+{
+       /* cpu_die must be specified to support hotplug */
+       if (!smp_ops.cpu_die)
+               return 0;
+
+       if (smp_ops.cpu_can_disable)
+               return smp_ops.cpu_can_disable(cpu);
+
        /*
         * By default, allow disabling all CPUs except the first one,
         * since this is special on a lot of platforms, e.g. because
         * of clock tick interrupts.
         */
-       return cpu == 0 ? -EPERM : 0;
+       return cpu != 0;
 }
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
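
The new platform_can_hotplug_cpu() turns the old error-code convention into a plain predicate: a CPU is hotpluggable only if the platform supplies cpu_die; cpu_can_disable, if present, refines the decision, otherwise everything except CPU0 may be unplugged. A hedged model of that policy (struct and names only mirror smp_operations; the bodies are stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct smp_ops_model {
	void (*cpu_die)(unsigned int cpu);
	bool (*cpu_can_disable)(unsigned int cpu);
};

static void dummy_cpu_die(unsigned int cpu)
{
	(void)cpu;	/* stand-in for a real platform hook */
}

static bool can_hotplug(const struct smp_ops_model *ops, unsigned int cpu)
{
	if (!ops->cpu_die)
		return false;		/* no way to park the CPU */
	if (ops->cpu_can_disable)
		return ops->cpu_can_disable(cpu);
	return cpu != 0;		/* CPU0 is special on many platforms */
}

int main(void)
{
	struct smp_ops_model ops = { .cpu_die = dummy_cpu_die };

	printf("cpu0 hotpluggable: %d, cpu1 hotpluggable: %d\n",
	       can_hotplug(&ops, 0), can_hotplug(&ops, 1));
	return 0;
}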
@@ -253,7 +266,7 @@ void __cpu_die(unsigned int cpu)
  * of the other hotplug-cpu capable cores, so presumably coming
  * out of idle fixes this.
  */
-void __ref cpu_die(void)
+void arch_cpu_idle_dead(void)
 {
        unsigned int cpu = smp_processor_id();
 
index 1361756782c73b49c499f50bc4f423647e2edd3f..5b26e7efa9ea415967b63ede27ab1edf2bc8e888 100644 (file)
@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 
        while (1) {
                unsigned long temp;
+               unsigned int __ua_flags;
 
+               __ua_flags = uaccess_save_and_enable();
                if (type == TYPE_SWPB)
                        __user_swpb_asm(*data, address, res, temp);
                else
                        __user_swp_asm(*data, address, res, temp);
+               uaccess_restore(__ua_flags);
 
                if (likely(res != -EAGAIN) || signal_pending(current))
                        break;
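
With software PAN, every explicit access to user memory has to be bracketed by an enable/restore pair, as the SWP emulation above now does. A hedged userspace model of that bracketing pattern (the "hardware state" is a plain variable standing in for the domain register; the _model names are stand-ins):

#include <stdio.h>

static unsigned int dacr_state;	/* 0 = user access disabled, 1 = enabled */

static unsigned int uaccess_save_and_enable_model(void)
{
	unsigned int old = dacr_state;

	dacr_state = 1;		/* grant access for the emulation window */
	return old;
}

static void uaccess_restore_model(unsigned int old)
{
	dacr_state = old;	/* put the previous policy back */
}

int main(void)
{
	unsigned int flags = uaccess_save_and_enable_model();

	/* ... the emulated user-space load/store would go here ... */
	uaccess_restore_model(flags);
	printf("user access re-disabled: %s\n", dacr_state ? "no" : "yes");
	return 0;
}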
index d358226236f2951e3b09cbc629799c963884ca49..969f9d9e665f4d49b2951ed65cfde9c279e1aa55 100644 (file)
@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base)
        kuser_init(vectors_base);
 
        flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
-       modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
        /*
         * on V7-M there is no need to copy the vector table to a dedicated
index efe17dd9b9218b7ef16299700a0f2a6d74ca61c1..54a5aeab988d3526657b8e3089942ca8cfe4fe5e 100644 (file)
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
  */
 void update_vsyscall(struct timekeeper *tk)
 {
-       struct timespec xtime_coarse;
        struct timespec64 *wtm = &tk->wall_to_monotonic;
 
        if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
 
        vdso_write_begin(vdso_data);
 
-       xtime_coarse = __current_kernel_time();
        vdso_data->tk_is_cntvct                 = tk_is_cntvct(tk);
-       vdso_data->xtime_coarse_sec             = xtime_coarse.tv_sec;
-       vdso_data->xtime_coarse_nsec            = xtime_coarse.tv_nsec;
+       vdso_data->xtime_coarse_sec             = tk->xtime_sec;
+       vdso_data->xtime_coarse_nsec            = (u32)(tk->tkr_mono.xtime_nsec >>
+                                                       tk->tkr_mono.shift);
        vdso_data->wtm_clock_sec                = wtm->tv_sec;
        vdso_data->wtm_clock_nsec               = wtm->tv_nsec;
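
The replacement above reads the coarse time straight out of the timekeeper, which stores nanoseconds in a shifted fixed-point form, so the exported value is xtime_nsec >> shift. A small model of that arithmetic; the field names mirror struct tk_read_base but the values are made up:

#include <stdio.h>
#include <stdint.h>

struct tk_read_base_model {
	uint64_t xtime_nsec;	/* nanoseconds << shift */
	uint32_t shift;
};

int main(void)
{
	struct tk_read_base_model tkr = {
		.xtime_nsec = 123456789ull << 8,
		.shift = 8,
	};
	uint32_t coarse_nsec = (uint32_t)(tkr.xtime_nsec >> tkr.shift);

	printf("coarse nsec = %u\n", (unsigned int)coarse_nsec);
	return 0;
}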
 
index 1710fd7db2d57d35ed342417336980f987db5985..970d6c0437743cda6a78620e1439eccb91398da2 100644 (file)
 
                .text
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
  * Returns  : number of bytes NOT cleared
  */
 ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(arm_clear_user)
                stmfd   sp!, {r1, lr}
                mov     r2, #0
                cmp     r1, #4
@@ -44,7 +44,7 @@ WEAK(__clear_user)
 USER(          strnebt r2, [r0])
                mov     r0, #0
                ldmfd   sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(arm_clear_user)
 ENDPROC(__clear_user_std)
 
                .pushsection .text.fixup,"ax"
index 7a235b9952be04e3ed8acd8892d5ca4d63ee27ff..1512bebfbf1b18ad317648891385a24e93d1f35f 100644 (file)
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *     size_t __copy_from_user(void *to, const void *from, size_t n)
+ *     size_t arm_copy_from_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
 
        .text
 
-ENTRY(__copy_from_user)
+ENTRY(arm_copy_from_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_from_user)
+ENDPROC(arm_copy_from_user)
 
        .pushsection .fixup,"ax"
        .align 0
index 9648b0675a3efc81dd412fa5b3be820c1a3d6242..caf5019d8161e2f1914a797a4c6800844a27d570 100644 (file)
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *     size_t __copy_to_user(void *to, const void *from, size_t n)
+ *     size_t arm_copy_to_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
        .text
 
 ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(arm_copy_to_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_to_user)
+ENDPROC(arm_copy_to_user)
 ENDPROC(__copy_to_user_std)
 
        .pushsection .text.fixup,"ax"
index 1d0957e61f898ab6ab43759496e85c8b247218d1..1712f132b80d2402d94d72ea974a0c3326fa2f52 100644 (file)
 
                .text
 
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+               .macro  save_regs
+               mrc     p15, 0, ip, c3, c0, 0
+               stmfd   sp!, {r1, r2, r4 - r8, ip, lr}
+               uaccess_enable ip
+               .endm
+
+               .macro  load_regs
+               ldmfd   sp!, {r1, r2, r4 - r8, ip, lr}
+               mcr     p15, 0, ip, c3, c0, 0
+               ret     lr
+               .endm
+#else
                .macro  save_regs
                stmfd   sp!, {r1, r2, r4 - r8, lr}
                .endm
@@ -24,6 +37,7 @@
                .macro  load_regs
                ldmfd   sp!, {r1, r2, r4 - r8, pc}
                .endm
+#endif
 
                .macro  load1b, reg1
                ldrusr  \reg1, r0, 1
index 3e58d710013c3ad9b377fc76e6dad58f377e88a7..d72b90905132487257220939a255ac7ed1d3754d 100644 (file)
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
        }
 
        /* the mmap semaphore is taken only if not in an atomic context */
-       atomic = in_atomic();
+       atomic = faulthandler_disabled();
 
        if (!atomic)
                down_read(&current->mm->mmap_sem);
@@ -136,7 +136,7 @@ out:
 }
 
 unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        /*
         * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@ out:
        return n;
 }
 
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
        /* See the rationale for this in arm_copy_to_user() above. */
        if (n < 64)
index 04c9daf9f8d767a7225631a7f36e443fae260d88..7db5870d127fc0f2fda4f06c75055b6c269cf344 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <asm/mach-types.h>
+#include <asm/outercache.h>
 #include <mach/hardware.h>
 #include <mach/cputype.h>
 #include <mach/addr-map.h>
index ecc04ff13e9595213aa57178bb7b0c40183c77c9..8427997e09c4b977531d42c59b11780aa4d5db4d 100644 (file)
@@ -29,6 +29,7 @@ config ARCH_OMAP4
        select HAVE_ARM_SCU if SMP
        select HAVE_ARM_TWD if SMP
        select OMAP_INTERCONNECT
+       select OMAP_INTERCONNECT_BARRIER
        select PL310_ERRATA_588369 if CACHE_L2X0
        select PL310_ERRATA_727915 if CACHE_L2X0
        select PM_OPP if PM
@@ -46,6 +47,7 @@ config SOC_OMAP5
        select HAVE_ARM_TWD if SMP
        select HAVE_ARM_ARCH_TIMER
        select ARM_ERRATA_798181 if SMP
+       select OMAP_INTERCONNECT_BARRIER
 
 config SOC_AM33XX
        bool "TI AM33XX"
@@ -70,6 +72,7 @@ config SOC_DRA7XX
        select HAVE_ARM_ARCH_TIMER
        select IRQ_CROSSBAR
        select ARM_ERRATA_798181 if SMP
+       select OMAP_INTERCONNECT_BARRIER
 
 config ARCH_OMAP2PLUS
        bool
@@ -91,6 +94,10 @@ config ARCH_OMAP2PLUS
        help
          Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
 
+config OMAP_INTERCONNECT_BARRIER
+       bool
+       select ARM_HEAVY_MB
+       
 
 if ARCH_OMAP2PLUS
 
index eae6a0e87c90d33649bccb350ed1f5732deae25d..484cdadfb18785ade051c29bab4d5e608dd56218 100644 (file)
@@ -30,4 +30,5 @@ int __weak omap_secure_ram_reserve_memblock(void)
 void __init omap_reserve(void)
 {
        omap_secure_ram_reserve_memblock();
+       omap_barrier_reserve_memblock();
 }
index cf3cf22ecd42696da3de4371ad11d38e46d95b33..82f88b4ec15f4d4993c0842df9d8e7fedee87fb6 100644 (file)
@@ -189,6 +189,15 @@ static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd)
 }
 #endif
 
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
+void omap_barrier_reserve_memblock(void);
+void omap_barriers_init(void);
+#else
+static inline void omap_barrier_reserve_memblock(void)
+{
+}
+#endif
+
 /* This gets called from mach-omap2/io.c, do not call this */
 void __init omap2_set_globals_tap(u32 class, void __iomem *tap);
 
diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h
deleted file mode 100644 (file)
index 1c582a8..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * OMAP memory barrier header.
- *
- * Copyright (C) 2011 Texas Instruments, Inc.
- *  Santosh Shilimkar <santosh.shilimkar@ti.com>
- *  Richard Woodruff <r-woodruff2@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __MACH_BARRIERS_H
-#define __MACH_BARRIERS_H
-
-#include <asm/outercache.h>
-
-extern void omap_bus_sync(void);
-
-#define rmb()          dsb()
-#define wmb()          do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
-#define mb()           wmb()
-
-#endif /* __MACH_BARRIERS_H */
index 820dde8b5b0453f96a6a3c53a4db8e4c3c9abca6..7743e3672f98b5b5f8a5125efd9cec73e239a48a 100644 (file)
@@ -306,6 +306,7 @@ void __init am33xx_map_io(void)
 void __init omap4_map_io(void)
 {
        iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
+       omap_barriers_init();
 }
 #endif
 
@@ -313,6 +314,7 @@ void __init omap4_map_io(void)
 void __init omap5_map_io(void)
 {
        iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
+       omap_barriers_init();
 }
 #endif
 /*
index 16350eefa66c893c0843836aa7f595e51369fb03..949696b6f17b69f7253f8f9b951fab92e5c35031 100644 (file)
@@ -51,6 +51,127 @@ static void __iomem *twd_base;
 
 #define IRQ_LOCALTIMER         29
 
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
+
+/* Used to implement memory barrier on DRAM path */
+#define OMAP4_DRAM_BARRIER_VA                  0xfe600000
+
+static void __iomem *dram_sync, *sram_sync;
+static phys_addr_t dram_sync_paddr;
+static u32 dram_sync_size;
+
+/*
+ * The OMAP4 bus structure contains asynchronous bridges which can buffer
+ * data writes from the MPU. These bridges sit on the paths between the
+ * MPU and the EMIF, and between the MPU and the L3 interconnect.
+ *
+ * We need to be careful about re-ordering which can happen as a result
+ * of different accesses being performed via different paths, and
+ * therefore different asynchronous bridges.
+ */
+
+/*
+ * OMAP4 interconnect barrier which is called for each mb() and wmb().
+ * This is to ensure that normal paths to DRAM (normal memory, cacheable
+ * accesses) are properly synchronised with writes to DMA coherent memory
+ * (normal memory, uncacheable) and device writes.
+ *
+ * The mb() and wmb() barriers operate only on the MPU->MA->EMIF
+ * path, as we need to ensure that data is visible to other system
+ * masters prior to writes to those system masters being seen.
+ *
+ * Note: the SRAM path is not synchronised via mb() and wmb().
+ */
+static void omap4_mb(void)
+{
+       if (dram_sync)
+               writel_relaxed(0, dram_sync);
+}
+
+/*
+ * OMAP4 Errata i688 - asynchronous bridge corruption when entering WFI.
+ *
+ * If data is stalled inside an asynchronous bridge because of back
+ * pressure, it may be accepted multiple times, creating a pointer
+ * misalignment that corrupts subsequent transfers on that data path
+ * until the next reset of the system. There is no recovery procedure
+ * once the issue is hit; the path remains consistently broken.
+ *
+ * Async bridges can be found on the paths between the MPU and the EMIF
+ * and between the MPU and the L3 interconnect.
+ *
+ * This situation can happen only when idle is initiated by a Master
+ * Request Disconnection (which is triggered by software when executing
+ * WFI on the CPU).
+ *
+ * The work-around for this erratum requires every initiator connected
+ * through an async bridge to ensure that the data path is properly
+ * drained before issuing WFI. This condition is met if one strongly-
+ * ordered access is performed to the target right before executing WFI.
+ *
+ * In the MPU case, the L3 T2ASYNC FIFO and the DDR T2ASYNC FIFO need to
+ * be drained. The I/O barrier ensures that there is no synchronisation
+ * loss for initiators operating on both interconnect ports simultaneously.
+ *
+ * This is a stronger version of the OMAP4 memory barrier above: it
+ * operates on the MPU->MA->EMIF path as well as the MPU->OCP path, and
+ * is necessary prior to executing a WFI.
+ */
+void omap_interconnect_sync(void)
+{
+       if (dram_sync && sram_sync) {
+               writel_relaxed(readl_relaxed(dram_sync), dram_sync);
+               writel_relaxed(readl_relaxed(sram_sync), sram_sync);
+               isb();
+       }
+}
+
+static int __init omap4_sram_init(void)
+{
+       struct device_node *np;
+       struct gen_pool *sram_pool;
+
+       np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
+       if (!np)
+               pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
+                       __func__);
+       sram_pool = of_gen_pool_get(np, "sram", 0);
+       if (!sram_pool)
+               pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
+                       __func__);
+       else
+               sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);
+
+       return 0;
+}
+omap_arch_initcall(omap4_sram_init);
+
+/* Steal one page physical memory for barrier implementation */
+void __init omap_barrier_reserve_memblock(void)
+{
+       dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M);
+       dram_sync_paddr = arm_memblock_steal(dram_sync_size, SZ_1M);
+}
+
+void __init omap_barriers_init(void)
+{
+       struct map_desc dram_io_desc[1];
+
+       dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
+       dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
+       dram_io_desc[0].length = dram_sync_size;
+       dram_io_desc[0].type = MT_MEMORY_RW_SO;
+       iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
+       dram_sync = (void __iomem *) dram_io_desc[0].virtual;
+
+       pr_info("OMAP4: Map %pa to %p for dram barrier\n",
+               &dram_sync_paddr, dram_sync);
+
+       soc_mb = omap4_mb;
+}
+
+#endif
+
 void gic_dist_disable(void)
 {
        if (gic_dist_base_addr)
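
The i688 work-around above boils down to touching each write path once before WFI: one access to the strongly-ordered DRAM sync page and one to the SRAM sync location, followed by an isb(). A compressed userspace model of omap_interconnect_sync(); the _model names are stand-ins, and two volatile variables play the role of the strongly-ordered mappings:

#include <stdio.h>

static volatile unsigned int dram_sync_model, sram_sync_model;

static void interconnect_sync_model(void)
{
	unsigned int tmp;

	tmp = dram_sync_model;		/* read back ...                    */
	dram_sync_model = tmp;		/* ... and rewrite the DRAM path    */
	tmp = sram_sync_model;
	sram_sync_model = tmp;		/* same access pair on the SRAM path */
	/* the real code ends with isb() before the caller issues WFI */
}

int main(void)
{
	interconnect_sync_model();
	puts("both write paths drained (model); safe to issue WFI");
	return 0;
}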
index ad1bb9431e941c6fd084fa5d8f0867f24667b72c..9b09d85d811a1c52b0fa735a2e2743e05346843d 100644 (file)
@@ -333,14 +333,12 @@ ENDPROC(omap4_cpu_resume)
 
 #endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */
 
-ENTRY(omap_bus_sync)
-       ret     lr
-ENDPROC(omap_bus_sync)
-
 ENTRY(omap_do_wfi)
        stmfd   sp!, {lr}
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
        /* Drain interconnect write buffers. */
-       bl omap_bus_sync
+       bl      omap_interconnect_sync
+#endif
 
        /*
         * Execute an ISB instruction to ensure that all of the
index d99d08eeb9664cff404e50bf03a2379e08d376b7..83e94c95e314414a6d85e145c9e204f186f9cc75 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/of_platform.h>
 #include <linux/io.h>
 #include <linux/rtc/sirfsoc_rtciobrg.h>
+#include <asm/outercache.h>
 #include <asm/suspend.h>
 #include <asm/hardware/cache-l2x0.h>
 
index 476092b86c6e42420e2654a8d2abe8b8aa6dcaee..8d27ec546a35f4b9ee5771adc24a331006342bd1 100644 (file)
@@ -13,7 +13,7 @@ extern void shmobile_smp_boot(void);
 extern void shmobile_smp_sleep(void);
 extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
                              unsigned long arg);
-extern int shmobile_smp_cpu_disable(unsigned int cpu);
+extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
 extern void shmobile_boot_scu(void);
 extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
 extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
index 3923e09e966d5031cab8aa82fb304305c284e9ce..b23378f3d7e1726b6f92b3cc8be0ba8264c838f7 100644 (file)
@@ -31,8 +31,8 @@ void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-int shmobile_smp_cpu_disable(unsigned int cpu)
+bool shmobile_smp_cpu_can_disable(unsigned int cpu)
 {
-       return 0; /* Hotplug of any CPU is supported */
+       return true; /* Hotplug of any CPU is supported */
 }
 #endif
index 930f45cbc08a5bb33677bbf02b5a2eb66b98d8ea..947e437cab68e3cef6f629cac6dc5ea1334494f3 100644 (file)
@@ -64,7 +64,7 @@ struct smp_operations r8a7790_smp_ops __initdata = {
        .smp_prepare_cpus       = r8a7790_smp_prepare_cpus,
        .smp_boot_secondary     = shmobile_smp_apmu_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-       .cpu_disable            = shmobile_smp_cpu_disable,
+       .cpu_can_disable        = shmobile_smp_cpu_can_disable,
        .cpu_die                = shmobile_smp_apmu_cpu_die,
        .cpu_kill               = shmobile_smp_apmu_cpu_kill,
 #endif
index 5e2d1db79afa5316152d031e7bb223d1465c6e57..b2508c0d276b46d56daeac18d2e6842202b5d8f8 100644 (file)
@@ -58,7 +58,7 @@ struct smp_operations r8a7791_smp_ops __initdata = {
        .smp_prepare_cpus       = r8a7791_smp_prepare_cpus,
        .smp_boot_secondary     = r8a7791_smp_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-       .cpu_disable            = shmobile_smp_cpu_disable,
+       .cpu_can_disable        = shmobile_smp_cpu_can_disable,
        .cpu_die                = shmobile_smp_apmu_cpu_die,
        .cpu_kill               = shmobile_smp_apmu_cpu_kill,
 #endif
index 2106d6b76a06939238ad373486e408f7df12065f..ae7c764fd6b44a13f09ce2bc1672a288e0c83277 100644 (file)
@@ -68,7 +68,7 @@ struct smp_operations sh73a0_smp_ops __initdata = {
        .smp_prepare_cpus       = sh73a0_smp_prepare_cpus,
        .smp_boot_secondary     = sh73a0_boot_secondary,
 #ifdef CONFIG_HOTPLUG_CPU
-       .cpu_disable            = shmobile_smp_cpu_disable,
+       .cpu_can_disable        = shmobile_smp_cpu_can_disable,
        .cpu_die                = shmobile_smp_scu_cpu_die,
        .cpu_kill               = shmobile_smp_scu_cpu_kill,
 #endif
index 7557bede7ae67700c6cc65e593e94210bc93408d..780bd13cd7e3d936834c10a04ef5a44ff4a0e5bb 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 
+#include <asm/outercache.h>
 #include <asm/hardware/cache-l2x0.h>
 
 #include "db8500-regs.h"
index 7c6b976ab8d39d0f5005b9f8c815a752d1ab0f8b..df7537f12469a15669b89aba42bd96445e789305 100644 (file)
@@ -883,6 +883,7 @@ config OUTER_CACHE
 
 config OUTER_CACHE_SYNC
        bool
+       select ARM_HEAVY_MB
        help
          The outer cache has a outer_cache_fns.sync function pointer
          that can be used to drain the write buffer of the outer cache.
@@ -1031,6 +1032,9 @@ config ARCH_HAS_BARRIERS
          This option allows the use of custom mandatory barriers
          included via the mach/barriers.h file.
 
+config ARM_HEAVY_MB
+       bool
+
 config ARCH_SUPPORTS_BIG_ENDIAN
        bool
        help
index 54473cd4aba951c793f25df8f3e9fb17be7f0160..b3b31e30cadd207f98991841b42f60f91d2a8097 100644 (file)
@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
        ldr     r3, [r4]                        @ read aborted ARM instruction
+       uaccess_disable ip                      @ disable userspace access
        bic     r1, r1, #1 << 11 | 1 << 10      @ clear bits 11 and 10 of FSR
        tst     r3, #1 << 20                    @ L = 1 -> write?
        orreq   r1, r1, #1 << 11                @ yes.
index a0908d4653a34a2241d95c58af16dcdde4dafa9a..a6a381a6caa5a32f6b4ad018ab10e702b01920cc 100644 (file)
@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort)
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
        do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
        ldreq   r3, [r4]                        @ read aborted ARM instruction
+       uaccess_disable ip                      @ disable user access
        bic     r1, r1, #1 << 11                @ clear bits 11 of FSR
-       do_ldrd_abort tmp=ip, insn=r3
+       teq_ldrd tmp=ip, insn=r3                @ insn was LDRD?
+       beq     do_DataAbort                    @ yes
        tst     r3, #1 << 20                    @ check write
        orreq   r1, r1, #1 << 11
        b       do_DataAbort
index 4006b7a612642b7fa4ec36b5a995ccc9bc1e3a40..00ab011bef5848cbcc750d8aa51770cea7ac9934 100644 (file)
@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort)
        bne     do_DataAbort
        do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
        ldreq   r3, [r4]                        @ read aborted ARM instruction
-       do_ldrd_abort tmp=ip, insn=r3
+       uaccess_disable ip                      @ disable userspace access
+       teq_ldrd tmp=ip, insn=r3                @ insn was LDRD?
+       beq     do_DataAbort                    @ yes
        tst     r3, #1 << 20                    @ L = 0 -> write
        orreq   r1, r1, #1 << 11                @ yes.
        b       do_DataAbort
index 8c48c5c22a331aac8f547335d6990c598457ef0b..8801a15aa10595a9288edaeca03ed434d33e86b1 100644 (file)
@@ -26,16 +26,18 @@ ENTRY(v6_early_abort)
        ldr     ip, =0x4107b36
        mrc     p15, 0, r3, c0, c0, 0           @ get processor id
        teq     ip, r3, lsr #4                  @ r0 ARM1136?
-       bne     do_DataAbort
+       bne     1f
        tst     r5, #PSR_J_BIT                  @ Java?
        tsteq   r5, #PSR_T_BIT                  @ Thumb?
-       bne     do_DataAbort
+       bne     1f
        bic     r1, r1, #1 << 11                @ clear bit 11 of FSR
        ldr     r3, [r4]                        @ read aborted ARM instruction
  ARM_BE8(rev   r3, r3)
 
-       do_ldrd_abort tmp=ip, insn=r3
+       teq_ldrd tmp=ip, insn=r3                @ insn was LDRD?
+       beq     1f                              @ yes
        tst     r3, #1 << 20                    @ L = 0 -> write
        orreq   r1, r1, #1 << 11                @ yes.
 #endif
+1:     uaccess_disable ip                      @ disable userspace access
        b       do_DataAbort
index 4812ad054214572ba6e7198247e2c190e469897d..e8d0e08c227fc5f36d864378bf5cf75dfbb00e10 100644 (file)
@@ -15,6 +15,7 @@
 ENTRY(v7_early_abort)
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
+       uaccess_disable ip                      @ disable userspace access
 
        /*
         * V6 code adjusts the returned DFSR.
index f3982580c273057b89a1c025cb52f0f54093014f..6d8e8e3365d17321f03b37fa67ab04a65b29f4ca 100644 (file)
@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
 #endif
        bne     .data_thumb_abort
        ldr     r8, [r4]                        @ read arm instruction
+       uaccess_disable ip                      @ disable userspace access
        tst     r8, #1 << 20                    @ L = 1 -> write?
        orreq   r1, r1, #1 << 11                @ yes.
        and     r7, r8, #15 << 24
@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
 
 .data_thumb_abort:
        ldrh    r8, [r4]                        @ read instruction
+       uaccess_disable ip                      @ disable userspace access
        tst     r8, #1 << 11                    @ L = 1 -> write?
        orreq   r1, r1, #1 << 8                 @ yes
        and     r7, r8, #15 << 12
index 2cbf68ef0e8321121e5ecabb55f50f95083beb1d..4509bee4e081ce78f95bcd99cd890468d97a5d8c 100644 (file)
@@ -13,6 +13,7 @@
        tst     \psr, #PSR_T_BIT
        beq     not_thumb
        ldrh    \tmp, [\pc]                     @ Read aborted Thumb instruction
+       uaccess_disable ip                      @ disable userspace access
        and     \tmp, \tmp, # 0xfe00            @ Mask opcode field
        cmp     \tmp, # 0x5600                  @ Is it ldrsb?
        orreq   \tmp, \tmp, #1 << 11            @ Set L-bit if yes
@@ -29,12 +30,9 @@ not_thumb:
  *   [7:4] == 1101
  *    [20] == 0
  */
-       .macro  do_ldrd_abort, tmp, insn
-       tst     \insn, #0x0e100000              @ [27:25,20] == 0
-       bne     not_ldrd
-       and     \tmp, \insn, #0x000000f0        @ [7:4] == 1101
-       cmp     \tmp, #0x000000d0
-       beq     do_DataAbort
-not_ldrd:
+       .macro  teq_ldrd, tmp, insn
+       mov     \tmp, #0x0e100000
+       orr     \tmp, #0x000000f0
+       and     \tmp, \insn, \tmp
+       teq     \tmp, #0x000000d0
        .endm
-
index 097181e08c25f7e924a33a51afc995b2e9baacdd..5c1b7a7b9af630002a49852652a36b6a698bf062 100644 (file)
@@ -368,7 +368,6 @@ int __init feroceon_of_init(void)
        struct device_node *node;
        void __iomem *base;
        bool l2_wt_override = false;
-       struct resource res;
 
 #if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
        l2_wt_override = true;
@@ -376,10 +375,7 @@ int __init feroceon_of_init(void)
 
        node = of_find_matching_node(NULL, feroceon_ids);
        if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
-               if (of_address_to_resource(node, 0, &res))
-                       return -ENODEV;
-
-               base = ioremap(res.start, resource_size(&res));
+               base = of_iomap(node, 0);
                if (!base)
                        return -ENOMEM;
 
index 71b3d3309024496570def04990751a634ef4ca02..493692d838c679360c1bcdb61b4ef594b66f7f95 100644 (file)
@@ -1171,6 +1171,11 @@ static void __init l2c310_of_parse(const struct device_node *np,
                }
        }
 
+       if (of_property_read_bool(np, "arm,shared-override")) {
+               *aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
+               *aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
+       }
+
        prefetch = l2x0_saved_regs.prefetch_ctrl;
 
        ret = of_property_read_u32(np, "arm,double-linefill", &val);
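
Setting the bit in *aux_val while clearing it in *aux_mask forces shared-attribute override on regardless of what firmware left in the auxiliary control register, because the driver applies the pair as (old & mask) | val. A hedged model of that masking; the bit position (22, the PL310 shared attribute override enable) is included for illustration only:

#include <stdio.h>
#include <stdint.h>

#define L2C_AUX_CTRL_SHARED_OVERRIDE_MODEL	(1u << 22)

static uint32_t l2c_apply_aux(uint32_t old, uint32_t val, uint32_t mask)
{
	return (old & mask) | val;	/* keep masked bits, force val bits */
}

int main(void)
{
	uint32_t aux_val = 0, aux_mask = ~0u;

	/* what the "arm,shared-override" property does in the parse above */
	aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE_MODEL;
	aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE_MODEL;

	printf("aux ctrl = %#x\n",
	       (unsigned int)l2c_apply_aux(0x02020000, aux_val, aux_mask));
	return 0;
}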
index 1ced8a0f7a52624cae84203eb0d3417aed5171db..9f509b2643461b7934d7db9132301ecd5dbd7b13 100644 (file)
@@ -39,6 +39,7 @@
 #include <asm/system_info.h>
 #include <asm/dma-contiguous.h>
 
+#include "dma.h"
 #include "mm.h"
 
 /*
@@ -648,14 +649,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        size = PAGE_ALIGN(size);
        want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
-       if (is_coherent || nommu())
+       if (nommu())
+               addr = __alloc_simple_buffer(dev, size, gfp, &page);
+       else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+               addr = __alloc_from_contiguous(dev, size, prot, &page,
+                                              caller, want_vaddr);
+       else if (is_coherent)
                addr = __alloc_simple_buffer(dev, size, gfp, &page);
        else if (!(gfp & __GFP_WAIT))
                addr = __alloc_from_pool(size, &page);
-       else if (!dev_get_cma_area(dev))
-               addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
        else
-               addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
+               addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
+                                           caller, want_vaddr);
 
        if (page)
                *handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -683,13 +688,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-       pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
        void *memory;
 
        if (dma_alloc_from_coherent(dev, size, handle, &memory))
                return memory;
 
-       return __dma_alloc(dev, size, handle, gfp, prot, true,
+       return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
                           attrs, __builtin_return_address(0));
 }
 
@@ -753,12 +757,12 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
        size = PAGE_ALIGN(size);
 
-       if (is_coherent || nommu()) {
+       if (nommu()) {
                __dma_free_buffer(page, size);
-       } else if (__free_from_pool(cpu_addr, size)) {
+       } else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
                return;
        } else if (!dev_get_cma_area(dev)) {
-               if (want_vaddr)
+               if (want_vaddr && !is_coherent)
                        __dma_free_remap(cpu_addr, size);
                __dma_free_buffer(page, size);
        } else {
@@ -1971,7 +1975,7 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
 {
        int next_bitmap;
 
-       if (mapping->nr_bitmaps > mapping->extensions)
+       if (mapping->nr_bitmaps >= mapping->extensions)
                return -EINVAL;
 
        next_bitmap = mapping->nr_bitmaps;
diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h
new file mode 100644 (file)
index 0000000..70ea685
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef DMA_H
+#define DMA_H
+
+#include <asm/glue-cache.h>
+
+#ifndef MULTI_CACHE
+#define dmac_map_area                  __glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area                __glue(_CACHE,_dma_unmap_area)
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
+
+#else
+
+/*
+ * These are private to the dma-mapping API.  Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_map_area                  cpu_cache.dma_map_area
+#define dmac_unmap_area                cpu_cache.dma_unmap_area
+
+#endif
+
+#endif
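
In the !MULTI_CACHE case above, dmac_map_area is produced by token pasting: _CACHE expands to the name of the single cache implementation and __glue(_CACHE,_dma_map_area) becomes, say, v7_dma_map_area, so the call binds at link time instead of going through the cpu_cache table. A small preprocessor model of that glue (the v7 name is an example, not taken from this patch):

#include <stdio.h>
#include <stddef.h>

#define ____glue(name, fn)	name##fn
#define __glue(name, fn)	____glue(name, fn)

#define _CACHE	v7

static void v7_dma_map_area(const void *start, size_t size, int dir)
{
	printf("v7_dma_map_area(%p, %zu, %d)\n", start, size, dir);
}

#define dmac_map_area	__glue(_CACHE, _dma_map_area)

int main(void)
{
	char buf[64];

	dmac_map_area(buf, sizeof(buf), 1);	/* resolves to v7_dma_map_area */
	return 0;
}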
index 34b66af516ea9ea83c8afb9ac7439467d80f9a3a..1ec8e7590fc6823bf1d1ffe87c1901f645ffcd07 100644 (file)
 
 #include "mm.h"
 
+#ifdef CONFIG_ARM_HEAVY_MB
+void (*soc_mb)(void);
+
+void arm_heavy_mb(void)
+{
+#ifdef CONFIG_OUTER_CACHE_SYNC
+       if (outer_cache.sync)
+               outer_cache.sync();
+#endif
+       if (soc_mb)
+               soc_mb();
+}
+EXPORT_SYMBOL(arm_heavy_mb);
+#endif
+
 #ifdef CONFIG_CPU_CACHE_VIPT
 
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
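
arm_heavy_mb() is the single place a heavy barrier funnels through: it drains the outer cache if one is present and then calls whatever hook the SoC registered in soc_mb (omap_barriers_init() above points it at omap4_mb()). A hedged model of that dispatch, with _model names standing in for the real symbols:

#include <stdio.h>

static void (*soc_mb_model)(void);

static void outer_sync_model(void)
{
	puts("outer_cache.sync(): drain the L2 write buffer");
}

static void arm_heavy_mb_model(void)
{
	outer_sync_model();		/* only when an outer cache exists */
	if (soc_mb_model)
		soc_mb_model();		/* SoC-specific part of the barrier */
}

static void omap4_mb_model(void)
{
	puts("omap4_mb(): strongly-ordered write to the DRAM sync page");
}

int main(void)
{
	soc_mb_model = omap4_mb_model;	/* what omap_barriers_init() does */
	arm_heavy_mb_model();		/* what a heavy mb()/wmb() ends up doing */
	return 0;
}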
index ee8dfa793989785488a306a9edd8b7899f3f1f3b..9df5f09585ca49bc48c209f8588a395ca5f8d449 100644 (file)
@@ -79,7 +79,7 @@ void *kmap_atomic(struct page *page)
 
        type = kmap_atomic_idx_push();
 
-       idx = type + KM_TYPE_NR * smp_processor_id();
+       idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
        /*
@@ -106,7 +106,7 @@ void __kunmap_atomic(void *kvaddr)
 
        if (kvaddr >= (void *)FIXADDR_START) {
                type = kmap_atomic_idx();
-               idx = type + KM_TYPE_NR * smp_processor_id();
+               idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 
                if (cache_is_vivt())
                        __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -138,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
                return page_address(page);
 
        type = kmap_atomic_idx_push();
-       idx = type + KM_TYPE_NR * smp_processor_id();
+       idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
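
The change adds FIX_KMAP_BEGIN so the per-CPU kmap_atomic slots start inside the kmap region of the fixmap rather than at index 0, where they would collide with the permanent fixmap entries. A hedged model of the index-to-address arithmetic, assuming the generic descending fixmap layout; all constants here are illustrative, not the kernel's values:

#include <stdio.h>

#define PAGE_SHIFT	12
#define FIXADDR_TOP	0xffeff000UL
#define FIX_KMAP_BEGIN	4
#define KM_TYPE_NR	16

static unsigned long fix_to_virt_model(unsigned int idx)
{
	/* one page per slot, descending from the top of the fixmap */
	return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
	unsigned int cpu = 1, type = 2;
	unsigned int idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * cpu;

	printf("cpu %u type %u -> idx %u, vaddr %#lx\n",
	       cpu, type, idx, fix_to_virt_model(idx));
	return 0;
}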
index 870838a46d524141a9cb1d65980c1ba7456465cb..7cd15143a507740155ad6dbe01e3dbef371111fe 100644 (file)
@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_RDONLY,
                .prot_l1   = PMD_TYPE_TABLE,
-               .domain    = DOMAIN_USER,
+               .domain    = DOMAIN_VECTORS,
        },
        [MT_HIGH_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_USER | L_PTE_RDONLY,
                .prot_l1   = PMD_TYPE_TABLE,
-               .domain    = DOMAIN_USER,
+               .domain    = DOMAIN_VECTORS,
        },
        [MT_MEMORY_RWX] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
@@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+       __aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+       return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+       return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+       pgd_t *pgd = pgd_offset_k(addr);
+       pud_t *pud = pud_offset(pgd, addr);
+       pmd_t *pmd = pmd_offset(pud, addr);
+
+       return pmd;
+}
+
+void __init early_fixmap_init(void)
+{
+       pmd_t *pmd;
+
+       /*
+        * The early fixmap range spans multiple pmds, for which
+        * we are not prepared:
+        */
+       BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+                    != FIXADDR_TOP >> PMD_SHIFT);
+
+       pmd = fixmap_pmd(FIXADDR_TOP);
+       pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+       pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
 /*
  * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
  * As a result, this can only be called with preemption disabled, as under
@@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type);
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 {
        unsigned long vaddr = __fix_to_virt(idx);
-       pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+       pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
 
        /* Make sure fixmap region does not exceed available allocation. */
        BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md)
        }
 
        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-           md->virtual >= PAGE_OFFSET &&
+           md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
            (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
                pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
                        (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1219,10 +1260,10 @@ void __init arm_mm_memblock_reserve(void)
 
 /*
  * Set up the device mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function.  This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except the early fixmap, we might remove
+ * debug device mappings.  This means earlycon can be used to debug this
+ * function.  Any other function or debugging method which may touch any
+ * device _will_ crash the kernel.
  */
 static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
@@ -1237,7 +1278,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 
        early_trap_init(vectors);
 
-       for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+       /*
+        * Clear the page tables, except for the top pmd used by the early fixmap
+        */
+       for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
                pmd_clear(pmd_off_k(addr));
 
        /*
@@ -1489,6 +1533,35 @@ void __init early_paging_init(const struct machine_desc *mdesc)
 
 #endif
 
+static void __init early_fixmap_shutdown(void)
+{
+       int i;
+       unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+       pte_offset_fixmap = pte_offset_late_fixmap;
+       pmd_clear(fixmap_pmd(va));
+       local_flush_tlb_kernel_page(va);
+
+       for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+               pte_t *pte;
+               struct map_desc map;
+
+               map.virtual = fix_to_virt(i);
+               pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+               /* Only I/O device mappings are supported at the moment */
+               if (pte_none(*pte) ||
+                   (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+                       continue;
+
+               map.pfn = pte_pfn(*pte);
+               map.type = MT_DEVICE;
+               map.length = PAGE_SIZE;
+
+               create_mapping(&map);
+       }
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
@@ -1502,6 +1575,7 @@ void __init paging_init(const struct machine_desc *mdesc)
        map_lowmem();
        memblock_set_current_limit(arm_lowmem_limit);
        dma_contiguous_remap();
+       early_fixmap_shutdown();
        devicemaps_init(mdesc);
        kmap_init();
        tcm_init();
index a3681f11dd9f12ceb8260df36e09a11ec0c22066..e683db1b90a3f805d1de8f92a9c0ebe544960368 100644 (file)
@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
                if (!new_pte)
                        goto no_pte;
 
+#ifndef CONFIG_ARM_LPAE
+               /*
+                * Modify the PTE pointer to have the correct domain.  This
+                * needs to be the vectors domain to avoid the low vectors
+                * being unmapped.
+                */
+               pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+               pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
                init_pud = pud_offset(init_pgd, 0);
                init_pmd = pmd_offset(init_pud, 0);
                init_pte = pte_offset_map(init_pmd, 0);
index 0716bbe198728876e81361897b69258e3489bf24..de2b246fed3808fce444b560a725ed4007464174 100644 (file)
@@ -274,7 +274,10 @@ __v7_ca15mp_setup:
 __v7_b15mp_setup:
 __v7_ca17mp_setup:
        mov     r10, #0
-1:
+1:     adr     r12, __v7_setup_stack           @ the local stack
+       stmia   r12, {r0-r5, lr}                @ v7_invalidate_l1 touches r0-r6
+       bl      v7_invalidate_l1
+       ldmia   r12, {r0-r5, lr}
 #ifdef CONFIG_SMP
        ALT_SMP(mrc     p15, 0, r0, c1, c0, 1)
        ALT_UP(mov      r0, #(1 << 6))          @ fake it for UP
@@ -283,7 +286,7 @@ __v7_ca17mp_setup:
        orreq   r0, r0, r10                     @ Enable CPU-specific SMP bits
        mcreq   p15, 0, r0, c1, c0, 1
 #endif
-       b       __v7_setup
+       b       __v7_setup_cont
 
 /*
  * Errata:
@@ -413,10 +416,11 @@ __v7_pj4b_setup:
 
 __v7_setup:
        adr     r12, __v7_setup_stack           @ the local stack
-       stmia   r12, {r0-r5, r7, r9, r11, lr}
+       stmia   r12, {r0-r5, lr}                @ v7_invalidate_l1 touches r0-r6
        bl      v7_invalidate_l1
-       ldmia   r12, {r0-r5, r7, r9, r11, lr}
+       ldmia   r12, {r0-r5, lr}
 
+__v7_setup_cont:
        and     r0, r9, #0xff000000             @ ARM?
        teq     r0, #0x41000000
        bne     __errata_finish
@@ -480,7 +484,7 @@ ENDPROC(__v7_setup)
 
        .align  2
 __v7_setup_stack:
-       .space  4 * 11                          @ 11 registers
+       .space  4 * 7                           @ 7 registers
 
        __INITDATA
 
index 9d259d94e429c4cc493542ad4cf238a513b13743..1160434eece0509c3797733b49e8fcb1262e42e7 100644 (file)
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 VDSO_LDFLAGS += -nostdlib -shared
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
 
 obj-$(CONFIG_VDSO) += vdso.o
 extra-$(CONFIG_VDSO) += vdso.lds
index 1bd6f9c3433140e84118d8e45ea0faab413576b9..29e6850665eb344cbbc946273343ec13cef81e12 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/qcom_scm.h>
 
-#include <asm/outercache.h>
 #include <asm/cacheflush.h>
 
 #include "qcom_scm.h"
@@ -219,8 +218,7 @@ static int __qcom_scm_call(const struct qcom_scm_command *cmd)
         * Flush the command buffer so that the secure world sees
         * the correct data.
         */
-       __cpuc_flush_dcache_area((void *)cmd, cmd->len);
-       outer_flush_range(cmd_addr, cmd_addr + cmd->len);
+       secure_flush_area(cmd, cmd->len);
 
        ret = smc(cmd_addr);
        if (ret < 0)