loop buffer may deliver incorrect instructions. This
workaround disables the loop buffer to avoid the erratum.
+config ARM_ERRATA_818325_852422
+ bool "ARM errata: A12: some seqs of opposed cond code instrs => deadlock or corruption"
+ depends on CPU_V7
+ help
+ This option enables the workaround for:
+ - Cortex-A12 818325: Execution of an UNPREDICTABLE STR or STM
+ instruction might deadlock. Fixed in r0p1.
+ - Cortex-A12 852422: Execution of a sequence of instructions might
+ lead to either a data corruption or a CPU deadlock. Not fixed in
+ any Cortex-A12 cores yet.
+ The workaround for both errata involves setting bit[12] of the
+ Feature Register. This bit disables an optimisation applied to a
+ sequence of 2 instructions that use opposing condition codes.
+
+config ARM_ERRATA_821420
+ bool "ARM errata: A12: sequence of VMOV to core registers might lead to a deadlock"
+ depends on CPU_V7
+ help
+ This option enables the workaround for the 821420 Cortex-A12
+ (all revs) erratum. Under very rare timing conditions, a sequence
+ of VMOV to Core registers instructions, for which the second
+ one is in the shadow of a branch or abort, can lead to a
+ deadlock when the VMOV instructions are issued out-of-order.
+
+config ARM_ERRATA_825619
+ bool "ARM errata: A12: DMB NSHST/ISHST mixed ... might cause deadlock"
+ depends on CPU_V7
+ help
+ This option enables the workaround for the 825619 Cortex-A12
+ (all revs) erratum. Under rare timing conditions, executing a
+ DMB NSHST or DMB ISHST instruction followed by a mix of Cacheable
+ and Device/Strongly-Ordered loads and stores might cause a deadlock.
+
+config ARM_ERRATA_852421
+ bool "ARM errata: A17: DMB ST might fail to create order between stores"
+ depends on CPU_V7
+ help
+ This option enables the workaround for the 852421 Cortex-A17
+ (r1p0, r1p1, r1p2) erratum. Under very rare timing conditions,
+ execution of a DMB ST instruction might fail to properly order
+ stores from GroupA and stores from GroupB.
+
+config ARM_ERRATA_852423
+ bool "ARM errata: A17: some seqs of opposed cond code instrs => deadlock or corruption"
+ depends on CPU_V7
+ help
+ This option enables the workaround for:
+ - Cortex-A17 852423: Execution of a sequence of instructions might
+ lead to either a data corruption or a CPU deadlock. Not fixed in
+ any Cortex-A17 cores yet.
+ This is identical to Cortex-A12 erratum 852422. It is a separate
+ config option from the A12 erratum due to the way errata are checked
+ for and handled.
+
endmenu
source "arch/arm/common/Kconfig"
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+ @$(kecho) ' Kernel: $(boot)/$@ is ready'
$(INSTALL_TARGETS):
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
$(obj)/xipImage: vmlinux FORCE
$(call if_changed,objcopy)
- @$(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
+ @$(kecho) ' Physical Address of xipImage: $(CONFIG_XIP_PHYS_ADDR)'
$(obj)/Image $(obj)/zImage: FORCE
@echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)'
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
- @$(kecho) ' Kernel: $@ is ready'
$(obj)/compressed/vmlinux: $(obj)/Image FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
- @$(kecho) ' Kernel: $@ is ready'
endif
$(obj)/uImage: $(obj)/zImage FORCE
@$(check_for_multiple_loadaddr)
$(call if_changed,uimage)
- @$(kecho) ' Image $@ is ready'
$(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
$(Q)$(MAKE) $(build)=$(obj)/bootp $@
$(obj)/bootpImage: $(obj)/bootp/bootp FORCE
$(call if_changed,objcopy)
- @$(kecho) ' Kernel: $@ is ready'
PHONY += initrd install zinstall uinstall
initrd:
.macro uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
mrc p15, 0, \tmp, c3, c0, 0
- str \tmp, [sp, #S_FRAME_SIZE]
+ str \tmp, [sp, #SVC_DACR]
#endif
.endm
.macro uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
- ldr r0, [sp, #S_FRAME_SIZE]
+ ldr r0, [sp, #SVC_DACR]
mcr p15, 0, r0, c3, c0, 0
#endif
.endm
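In C terms, the pair now spills the interrupted context's DACR into the dedicated SVC_DACR slot of the svc_pt_regs frame (added later in this series) rather than stashing it past the old fixed frame size. A rough equivalent, assuming the existing get_domain()/set_domain() helpers from asm/domain.h (editorial sketch, not part of the patch):

	/* on SVC exception entry (uaccess_save) */
	to_svc_pt_regs(regs)->dacr = get_domain();

	/* on SVC exception exit (uaccess_restore) */
	set_domain(to_svc_pt_regs(regs)->dacr);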
#define __arm_heavy_mb(x...) dsb(x)
#endif
-#ifdef CONFIG_ARCH_HAS_BARRIERS
-#include <mach/barriers.h>
-#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
+#if defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb() __arm_heavy_mb()
#define rmb() dsb()
#define wmb() __arm_heavy_mb(st)
#include <asm/param.h> /* HZ */
#define MAX_UDELAY_MS 2
-#define UDELAY_MULT ((UL(2199023) * HZ) >> 11)
-#define UDELAY_SHIFT 30
+#define UDELAY_MULT UL(2147 * HZ + 483648 * HZ / 1000000)
+#define UDELAY_SHIFT 31
#ifndef __ASSEMBLY__
* it, it means that you're calling udelay() with an out of range value.
*
* With currently imposed limits, this means that we support a max delay
- * of 2000us. Further limits: HZ<=1000 and bogomips<=3355
+ * of 2000us. Further limits: HZ<=1000
*/
extern void __bad_udelay(void);
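The replacement constants are easier to verify with the arithmetic written out; a short derivation for the loop-based udelay() path below (editorial note, names as in this patch):

	/*
	 * UDELAY_MULT  = HZ * 2^31 / 10^6
	 *              = 2147.483648 * HZ
	 *              = 2147 * HZ + 483648 * HZ / 1000000
	 *
	 * loops = (us * UDELAY_MULT * loops_per_jiffy) >> UDELAY_SHIFT
	 *       = us * HZ * loops_per_jiffy / 10^6
	 *
	 * which is exactly the number of delay-loop iterations in 'us'
	 * microseconds, since HZ * loops_per_jiffy is the loop rate per second.
	 */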
#define fd_outb(val,port) \
do { \
- if ((port) == FD_DOR) \
+ if ((port) == (u32)FD_DOR) \
fd_setdor((val)); \
else \
outb((val),(port)); \
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
struct pt_regs {
unsigned long uregs[18];
};
+struct svc_pt_regs {
+ struct pt_regs regs;
+ u32 dacr;
+ u32 addr_limit;
+};
+
+#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
+
#define user_mode(regs) \
(((regs)->ARM_cpsr & 0xf) == 0)
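The SVC-only state is reached from a struct pt_regs pointer with container_of() via to_svc_pt_regs(); a minimal consumer sketch (it mirrors the __show_regs() change later in this series; the helper name is hypothetical, and DACR_UACCESS_ENABLE/get_domain() come from asm/domain.h):

	static inline u32 regs_dacr(struct pt_regs *regs)
	{
		/* the DACR is only saved for kernel-mode (SVC) frames */
		if (user_mode(regs))
			return DACR_UACCESS_ENABLE;
		return to_svc_pt_regs(regs)->dacr;
	}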
#define segment_eq(a, b) ((a) == (b))
-#define __addr_ok(addr) ({ \
- unsigned long flag; \
- __asm__("cmp %2, %0; movlo %0, #0" \
- : "=&r" (flag) \
- : "0" (current_thread_info()->addr_limit), "r" (addr) \
- : "cc"); \
- (flag == 0); })
-
/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
unsigned long flag, roksum; \
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);
-#define __put_user_x(__r2, __p, __e, __l, __s) \
- __asm__ __volatile__ ( \
- __asmeq("%0", "r0") __asmeq("%2", "r2") \
- __asmeq("%3", "r1") \
- "bl __put_user_" #__s \
- : "=&r" (__e) \
- : "0" (__p), "r" (__r2), "r" (__l) \
- : "ip", "lr", "cc")
-
-#define __put_user_check(x, p) \
+#define __put_user_check(__pu_val, __ptr, __err, __s) \
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
- const typeof(*(p)) __user *__tmp_p = (p); \
- register const typeof(*(p)) __r2 asm("r2") = (x); \
- register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
+ register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
+ register const void __user *__p asm("r0") = __ptr; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
- unsigned int __ua_flags = uaccess_save_and_enable(); \
- switch (sizeof(*(__p))) { \
- case 1: \
- __put_user_x(__r2, __p, __e, __l, 1); \
- break; \
- case 2: \
- __put_user_x(__r2, __p, __e, __l, 2); \
- break; \
- case 4: \
- __put_user_x(__r2, __p, __e, __l, 4); \
- break; \
- case 8: \
- __put_user_x(__r2, __p, __e, __l, 8); \
- break; \
- default: __e = __put_user_bad(); break; \
- } \
- uaccess_restore(__ua_flags); \
- __e; \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%2", "r2") \
+ __asmeq("%3", "r1") \
+ "bl __put_user_" #__s \
+ : "=&r" (__e) \
+ : "0" (__p), "r" (__r2), "r" (__l) \
+ : "ip", "lr", "cc"); \
+ __err = __e; \
})
-#define put_user(x, p) \
- ({ \
- might_fault(); \
- __put_user_check(x, p); \
- })
-
#else /* CONFIG_MMU */
/*
}
#define get_user(x, p) __get_user(x, p)
-#define put_user(x, p) __put_user(x, p)
+#define __put_user_check __put_user_nocheck
#endif /* CONFIG_MMU */
#define __get_user_asm_word(x, addr, err) \
__get_user_asm(x, addr, err, ldr)
+
+#define __put_user_switch(x, ptr, __err, __fn) \
+ do { \
+ const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ unsigned int __ua_flags; \
+ might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
+ switch (sizeof(*(ptr))) { \
+ case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \
+ case 2: __fn(__pu_val, __pu_ptr, __err, 2); break; \
+ case 4: __fn(__pu_val, __pu_ptr, __err, 4); break; \
+ case 8: __fn(__pu_val, __pu_ptr, __err, 8); break; \
+ default: __err = __put_user_bad(); break; \
+ } \
+ uaccess_restore(__ua_flags); \
+ } while (0)
+
+#define put_user(x, ptr) \
+({ \
+ int __pu_err = 0; \
+ __put_user_switch((x), (ptr), __pu_err, __put_user_check); \
+ __pu_err; \
+})
+
#define __put_user(x, ptr) \
({ \
long __pu_err = 0; \
- __put_user_err((x), (ptr), __pu_err); \
+ __put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
__pu_err; \
})
#define __put_user_error(x, ptr, err) \
({ \
- __put_user_err((x), (ptr), err); \
+ __put_user_switch((x), (ptr), (err), __put_user_nocheck); \
(void) 0; \
})
-#define __put_user_err(x, ptr, err) \
-do { \
- unsigned long __pu_addr = (unsigned long)(ptr); \
- unsigned int __ua_flags; \
- __typeof__(*(ptr)) __pu_val = (x); \
- __chk_user_ptr(ptr); \
- might_fault(); \
- __ua_flags = uaccess_save_and_enable(); \
- switch (sizeof(*(ptr))) { \
- case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
- case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
- case 4: __put_user_asm_word(__pu_val, __pu_addr, err); break; \
- case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
- default: __put_user_bad(); \
- } \
- uaccess_restore(__ua_flags); \
-} while (0)
+#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
+ do { \
+ unsigned long __pu_addr = (unsigned long)__pu_ptr; \
+ __put_user_nocheck_##__size(x, __pu_addr, __err); \
+ } while (0)
+
+#define __put_user_nocheck_1 __put_user_asm_byte
+#define __put_user_nocheck_2 __put_user_asm_half
+#define __put_user_nocheck_4 __put_user_asm_word
+#define __put_user_nocheck_8 __put_user_asm_dword
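Callers see no change from the rework: put_user() and __put_user() both funnel through __put_user_switch(), with __put_user_check() doing the range-checked library call and __put_user_nocheck() the direct access. A minimal caller-side sketch (hypothetical ioctl handler, not from this series):

	#include <linux/fs.h>
	#include <linux/uaccess.h>

	static long foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	{
		u32 __user *uptr = (u32 __user *)arg;
		u32 val = 42;

		/* expands via __put_user_switch(..., __put_user_check) */
		if (put_user(val, uptr))
			return -EFAULT;
		return 0;
	}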
#define __put_user_asm(x, __pu_addr, err, instr) \
__asm__ __volatile__( \
DEFINE(S_PC, offsetof(struct pt_regs, ARM_pc));
DEFINE(S_PSR, offsetof(struct pt_regs, ARM_cpsr));
DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
- DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
+ DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
+ DEFINE(SVC_DACR, offsetof(struct svc_pt_regs, dacr));
+ DEFINE(SVC_ADDR_LIMIT, offsetof(struct svc_pt_regs, addr_limit));
+ DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs));
BLANK();
#ifdef CONFIG_CACHE_L2X0
DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base));
* This function calls the underlying arch specific low level PM code as
* registered at the init time.
*
- * Returns -EOPNOTSUPP if no suspend callback is defined, the result of the
- * callback otherwise.
+ * Returns the result of the suspend callback.
*/
int arm_cpuidle_suspend(int index)
{
- int ret = -EOPNOTSUPP;
int cpu = smp_processor_id();
- if (cpuidle_ops[cpu].suspend)
- ret = cpuidle_ops[cpu].suspend(index);
-
- return ret;
+ return cpuidle_ops[cpu].suspend(index);
}
/**
* process.
*
* Return 0 on success, -ENOENT if no 'enable-method' is defined, -EOPNOTSUPP if
- * no cpuidle_ops is registered for the 'enable-method'.
+ * no cpuidle_ops is registered for the 'enable-method', or if either init or
+ * suspend callback isn't defined.
*/
static int __init arm_cpuidle_read_ops(struct device_node *dn, int cpu)
{
return -EOPNOTSUPP;
}
+ if (!ops->init || !ops->suspend) {
+ pr_warn("cpuidle_ops '%s': no init or suspend callback\n",
+ enable_method);
+ return -EOPNOTSUPP;
+ }
+
cpuidle_ops[cpu] = *ops; /* structure copy */
pr_notice("cpuidle: enable-method property '%s'"
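With both callbacks now mandatory, a platform must supply init and suspend before its cpuidle_ops will be accepted. A hedged sketch of a provider (names and exact callback prototypes are illustrative, inferred from the calls above; registration uses the existing CPUIDLE_METHOD_OF_DECLARE() macro):

	#include <linux/of.h>
	#include <asm/cpuidle.h>

	static int foo_cpu_suspend(unsigned long arg)
	{
		/* enter the platform low-power state for C-state 'arg' */
		return 0;
	}

	static int foo_cpu_init(struct device_node *dn, int cpu)
	{
		/* per-CPU setup driven by the DT node */
		return 0;
	}

	static const struct cpuidle_ops foo_cpuidle_ops = {
		.init		= foo_cpu_init,
		.suspend	= foo_cpu_suspend,
	};

	CPUIDLE_METHOD_OF_DECLARE(foo_idle, "vendor,foo-method", &foo_cpuidle_ops);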
* Returns:
* 0 on success,
* -ENODEV if it fails to find the cpu node in the device tree,
- * -EOPNOTSUPP if it does not find a registered cpuidle_ops for this cpu,
+ * -EOPNOTSUPP if it does not find a registered and valid cpuidle_ops for
+ * this cpu,
* -ENOENT if it fails to find an 'enable-method' property,
* -ENXIO if the HW reports a failure or a misconfiguration,
* -ENOMEM if the HW reports a memory allocation failure
return -ENODEV;
ret = arm_cpuidle_read_ops(cpu_node, cpu);
- if (!ret && cpuidle_ops[cpu].init)
+ if (!ret)
ret = cpuidle_ops[cpu].init(cpu_node, cpu);
of_node_put(cpu_node);
#include <asm/cputype.h>
#include <asm/setup.h>
#include <asm/page.h>
+#include <asm/prom.h>
#include <asm/smp_plat.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
#if defined(CONFIG_ARCH_MULTIPLATFORM) || defined(CONFIG_ARM_SINGLE_ARMV7M)
DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
+ .l2c_aux_val = 0x0,
+ .l2c_aux_mask = ~0x0,
MACHINE_END
mdesc_best = &__mach_desc_GENERIC_DT;
* Invalid mode handlers
*/
.macro inv_entry, reason
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
ARM( stmib sp, {r1 - lr} )
THUMB( stmia sp, {r0 - r12} )
THUMB( str sp, [sp, #S_SP] )
.macro svc_entry, stack_hole=0, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
- sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
+ sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
SPFIX( str r0, [sp] ) @ temporarily saved
SPFIX( mov r0, sp )
ldmia r0, {r3 - r5}
add r7, sp, #S_SP - 4 @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
- add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
+ add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
SPFIX( addeq r2, r2, #4 )
str r3, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack
@
stmia r7, {r2 - r6}
+ get_thread_info tsk
+ ldr r0, [tsk, #TI_ADDR_LIMIT]
+ mov r1, #TASK_SIZE
+ str r1, [tsk, #TI_ADDR_LIMIT]
+ str r0, [sp, #SVC_ADDR_LIMIT]
+
uaccess_save r0
.if \uaccess
uaccess_disable r0
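The new sequence is a small hardening step: kernel-mode exceptions start out with the user address limit, and the interrupted context's limit is parked in the svc_pt_regs frame until exit. In C terms, roughly (editorial sketch; 'regs' stands for the frame just built on the SVC stack):

	/* svc_entry */
	to_svc_pt_regs(regs)->addr_limit = current_thread_info()->addr_limit;
	current_thread_info()->addr_limit = TASK_SIZE;	/* i.e. USER_DS */

	/* svc_exit / svc_exit_via_fiq */
	current_thread_info()->addr_limit = to_svc_pt_regs(regs)->addr_limit;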
irq_handler
#ifdef CONFIG_PREEMPT
- get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0
/*
* User mode handlers
*
- * EABI note: sp_svc is always 64-bit aligned here, so should S_FRAME_SIZE
+ * EABI note: sp_svc is always 64-bit aligned here, so should PT_REGS_SIZE
*/
-#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
.macro usr_entry, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
ARM( stmib sp, {r1 - r12} )
THUMB( stmia sp, {r0 - r12} )
#ifdef CONFIG_CPU_V7M
v7m_exception_entry
#else
- sub sp, sp, #S_FRAME_SIZE
+ sub sp, sp, #PT_REGS_SIZE
stmia sp, {r0 - r12} @ Calling r0 - r12
ARM( add r8, sp, #S_PC )
ARM( stmdb r8, {sp, lr}^ ) @ Calling sp, lr
@ Linux expects to have irqs off. Do it here before taking stack space
cpsid i
- sub sp, #S_FRAME_SIZE-S_IP
+ sub sp, #PT_REGS_SIZE-S_IP
stmdb sp!, {r0-r11}
@ load saved r12, lr, return address and xPSR.
ldmia sp!, {r0-r11}
@ restore main sp
- add sp, sp, #S_FRAME_SIZE-S_IP
+ add sp, sp, #PT_REGS_SIZE-S_IP
cpsie i
bx lr
blne trace_hardirqs_off
#endif
.endif
+ ldr r1, [sp, #SVC_ADDR_LIMIT]
uaccess_restore
+ str r1, [tsk, #TI_ADDR_LIMIT]
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode SVC restore
@ on the stack remains correct).
@
.macro svc_exit_via_fiq
+ ldr r1, [sp, #SVC_ADDR_LIMIT]
uaccess_restore
+ str r1, [tsk, #TI_ADDR_LIMIT]
#ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore
mov r0, sp
.endif
mov r0, r0 @ ARMv5T and earlier require a nop
@ after ldm {}^
- add sp, sp, #\offset + S_FRAME_SIZE
+ add sp, sp, #\offset + PT_REGS_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
#elif defined(CONFIG_CPU_V7M)
@ V7M restore.
.else
ldmdb sp, {r0 - r12} @ get calling r0 - r12
.endif
- add sp, sp, #S_FRAME_SIZE - S_SP
+ add sp, sp, #PT_REGS_SIZE - S_SP
movs pc, lr @ return & move spsr_svc into cpsr
#endif /* !CONFIG_THUMB2_KERNEL */
.endm
@ correctness they don't need to be restored. So only r8-r11 must be
@ restored here. The easiest way to do so is to restore r0-r7, too.
ldmia sp!, {r0-r11}
- add sp, #S_FRAME_SIZE-S_IP
+ add sp, #PT_REGS_SIZE-S_IP
cpsie i
bx lr
ENDPROC(__irq_entry)
unsigned long flags;
char buf[64];
#ifndef CONFIG_CPU_V7M
- unsigned int domain;
+ unsigned int domain, fs;
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
* Get the domain register for the parent context. In user
* mode, we don't save the DACR, so let's use what it should
* be. For other modes, we place it after the pt_regs struct.
*/
- if (user_mode(regs))
+ if (user_mode(regs)) {
domain = DACR_UACCESS_ENABLE;
- else
- domain = *(unsigned int *)(regs + 1);
+ fs = get_fs();
+ } else {
+ domain = to_svc_pt_regs(regs)->dacr;
+ fs = to_svc_pt_regs(regs)->addr_limit;
+ }
#else
domain = get_domain();
+ fs = get_fs();
#endif
#endif
if ((domain & domain_mask(DOMAIN_USER)) ==
domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
segment = "none";
- else if (get_fs() == get_ds())
+ else if (fs == get_ds())
segment = "kernel";
else
segment = "user";
struct resource *res;
kernel_code.start = virt_to_phys(_text);
- kernel_code.end = virt_to_phys(_etext - 1);
+ kernel_code.end = virt_to_phys(__init_begin - 1);
kernel_data.start = virt_to_phys(_sdata);
kernel_data.end = virt_to_phys(_end - 1);
unsigned int revidr = read_cpuid(CPUID_REVIDR);
/* Brahma-B15 r0p0..r0p2 affected
- * Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
- if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2)
+ * Cortex-A15 r0p0..r3p3 w/o ECO fix affected
+ * Fixes applied to A15 with respect to the revision and revidr are:
+ *
+ * r0p0-r2p1: No fixes applied
+ * r2p2,r2p3:
+ * REVIDR[4]: 798181 Moving a virtual page that is being accessed
+ * by an active process can lead to unexpected behavior
+ * REVIDR[9]: Not defined
+ * r2p4,r3p0,r3p1,r3p2:
+ * REVIDR[4]: 798181 Moving a virtual page that is being accessed
+ * by an active process can lead to unexpected behavior
+ * REVIDR[9]: 798181 Moving a virtual page that is being accessed
+ * by an active process can lead to unexpected behavior
+ * - This is an update to a previously released ECO.
+ * r3p3:
+ * REVIDR[4]: Reserved
+ * REVIDR[9]: 798181 Moving a virtual page that is being accessed
+ * by an active process can lead to unexpected behavior
+ * - This is an update to a previously released ECO.
+ *
+ * Handling:
+ * REVIDR[9] set -> No WA
+ * REVIDR[4] set, REVIDR[9] cleared -> Partial WA
+ * Both cleared -> Full WA
+ */
+ if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2) {
erratum_a15_798181_handler = erratum_a15_798181_broadcast;
- else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr <= 0x413fc0f2 &&
- (revidr & 0x210) != 0x210) {
+ } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f2) {
+ erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+ } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f4) {
if (revidr & 0x10)
erratum_a15_798181_handler =
erratum_a15_798181_partial;
else
erratum_a15_798181_handler =
erratum_a15_798181_broadcast;
+ } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x413fc0f3) {
+ if ((revidr & 0x210) == 0)
+ erratum_a15_798181_handler =
+ erratum_a15_798181_broadcast;
+ else if (revidr & 0x10)
+ erratum_a15_798181_handler =
+ erratum_a15_798181_partial;
+ } else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x414fc0f0) {
+ if ((revidr & 0x200) == 0)
+ erratum_a15_798181_handler =
+ erratum_a15_798181_partial;
}
}
#endif
#ifdef CONFIG_DEBUG_ALIGN_RODATA
. = ALIGN(1<<SECTION_SHIFT);
#endif
+ _etext = .; /* End of text section */
+
RO_DATA(PAGE_SIZE)
. = ALIGN(4);
NOTES
- _etext = .; /* End of text and rodata section */
-
#ifdef CONFIG_DEBUG_RODATA
. = ALIGN(1<<SECTION_SHIFT);
#else
lib-y += io-readsw-armv4.o io-writesw-armv4.o
endif
-lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o
+ifeq ($(CONFIG_ARCH_RPC),y)
+ lib-y += ecard.o io-acorn.o floppydma.o
+ AFLAGS_delay-loop.o += -march=armv4
+endif
$(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
$(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/delay.h>
+
.text
.LC0: .word loops_per_jiffy
/*
* r0 <= 2000
- * lpj <= 0x01ffffff (max. 3355 bogomips)
* HZ <= 1000
*/
ldr r2, .LC1
mul r0, r2, r0
-ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
+ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0xfffffaf0
- mov r1, #-1
ldr r2, .LC0
- ldr r2, [r2] @ max = 0x01ffffff
- add r0, r0, r1, lsr #32-14
- mov r0, r0, lsr #14 @ max = 0x0001ffff
- add r2, r2, r1, lsr #32-10
- mov r2, r2, lsr #10 @ max = 0x00007fff
- mul r0, r2, r0 @ max = 2^32-1
- add r0, r0, r1, lsr #32-6
- movs r0, r0, lsr #6
+ ldr r2, [r2] @ r2 = loops_per_jiffy
+ umull r1, r0, r2, r0 @ r0:r1 = loops_per_jiffy * r0
+ adds r1, r1, #0xffffffff @ rounding up ...
+ adcs r0, r0, r0 @ and right shifting by 31 bits
reteq lr
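For readers not fluent in the carry-flag tricks, the new scaling sequence is, in effect (a C sketch of the same arithmetic, not part of the patch; scaled_us stands for r0 = us * UDELAY_MULT on entry):

	u64 product = (u64)loops_per_jiffy * scaled_us;	/* umull r1, r0, r2, r0 */
	u32 lo = (u32)product;				/* r1 */
	u32 hi = (u32)(product >> 32);			/* r0 */
	/*
	 * adds r1, r1, #0xffffffff sets the carry iff lo != 0; adcs r0, r0, r0
	 * then yields 2 * hi + carry, i.e. (product >> 31) rounded up, accurate
	 * to within one loop iteration.
	 */
	u32 loops = 2 * hi + (lo != 0);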
/*
You are recommended to say 'Y' here and debug any affected drivers.
-config ARCH_HAS_BARRIERS
- bool
- help
- This option allows the use of custom mandatory barriers
- included via the mach/barriers.h file.
-
config ARM_HEAVY_MB
bool
pgprot_t prot;
const void *caller;
bool want_vaddr;
+ int coherent_flag;
};
struct arm_dma_free_args {
bool want_vaddr;
};
+#define NORMAL 0
+#define COHERENT 1
+
struct arm_dma_allocator {
void *(*alloc)(struct arm_dma_alloc_args *args,
struct page **ret_page);
return mask;
}
-static void __dma_clear_buffer(struct page *page, size_t size)
+static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
/*
* Ensure that the allocated pages are zeroed, and that any data
while (size > 0) {
void *ptr = kmap_atomic(page);
memset(ptr, 0, PAGE_SIZE);
- dmac_flush_range(ptr, ptr + PAGE_SIZE);
+ if (coherent_flag != COHERENT)
+ dmac_flush_range(ptr, ptr + PAGE_SIZE);
kunmap_atomic(ptr);
page++;
size -= PAGE_SIZE;
}
- outer_flush_range(base, end);
+ if (coherent_flag != COHERENT)
+ outer_flush_range(base, end);
} else {
void *ptr = page_address(page);
memset(ptr, 0, size);
- dmac_flush_range(ptr, ptr + size);
- outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ if (coherent_flag != COHERENT) {
+ dmac_flush_range(ptr, ptr + size);
+ outer_flush_range(__pa(ptr), __pa(ptr) + size);
+ }
}
}
* Allocate a DMA buffer for 'dev' of size 'size' using the
* specified gfp mask. Note that 'size' must be page aligned.
*/
-static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
+ gfp_t gfp, int coherent_flag)
{
unsigned long order = get_order(size);
struct page *page, *p, *e;
for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
__free_page(p);
- __dma_clear_buffer(page, size);
+ __dma_clear_buffer(page, size, coherent_flag);
return page;
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
- const void *caller, bool want_vaddr);
+ const void *caller, bool want_vaddr,
+ int coherent_flag);
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
pgprot_t prot, struct page **ret_page,
atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
if (!atomic_pool)
goto out;
-
+ /*
+ * The atomic pool is only used for non-coherent allocations
+ * so we must pass NORMAL for coherent_flag.
+ */
if (dev_get_cma_area(NULL))
ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
- &page, atomic_pool_init, true);
+ &page, atomic_pool_init, true, NORMAL);
else
ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
&page, atomic_pool_init, true);
{
struct page *page;
void *ptr = NULL;
- page = __dma_alloc_buffer(dev, size, gfp);
+ /*
+ * __alloc_remap_buffer is only called when the device is
+ * non-coherent
+ */
+ page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
if (!page)
return NULL;
if (!want_vaddr)
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
- const void *caller, bool want_vaddr)
+ const void *caller, bool want_vaddr,
+ int coherent_flag)
{
unsigned long order = get_order(size);
size_t count = size >> PAGE_SHIFT;
if (!page)
return NULL;
- __dma_clear_buffer(page, size);
+ __dma_clear_buffer(page, size, coherent_flag);
if (!want_vaddr)
goto out;
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL
#define __alloc_from_pool(size, ret_page) NULL
-#define __alloc_from_contiguous(dev, size, prot, ret, c, wv) NULL
+#define __alloc_from_contiguous(dev, size, prot, ret, c, wv, coherent_flag) NULL
#define __free_from_pool(cpu_addr, size) do { } while (0)
#define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0)
#define __dma_free_remap(cpu_addr, size) do { } while (0)
struct page **ret_page)
{
struct page *page;
- page = __dma_alloc_buffer(dev, size, gfp);
+ /* __alloc_simple_buffer is only called when the device is coherent */
+ page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
if (!page)
return NULL;
{
return __alloc_from_contiguous(args->dev, args->size, args->prot,
ret_page, args->caller,
- args->want_vaddr);
+ args->want_vaddr, args->coherent_flag);
}
static void cma_allocator_free(struct arm_dma_free_args *args)
.prot = prot,
.caller = caller,
.want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+ .coherent_flag = is_coherent ? COHERENT : NORMAL,
};
#ifdef CONFIG_DMA_API_DEBUG
static const int iommu_order_array[] = { 9, 8, 4, 0 };
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
- gfp_t gfp, struct dma_attrs *attrs)
+ gfp_t gfp, struct dma_attrs *attrs,
+ int coherent_flag)
{
struct page **pages;
int count = size >> PAGE_SHIFT;
if (!page)
goto error;
- __dma_clear_buffer(page, size);
+ __dma_clear_buffer(page, size, coherent_flag);
for (i = 0; i < count; i++)
pages[i] = page + i;
pages[i + j] = pages[i] + j;
}
- __dma_clear_buffer(pages[i], PAGE_SIZE << order);
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
i += 1 << order;
count -= 1 << order;
}
return NULL;
}
-static void *__iommu_alloc_atomic(struct device *dev, size_t size,
- dma_addr_t *handle)
+static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
+ dma_addr_t *handle, int coherent_flag)
{
struct page *page;
void *addr;
- addr = __alloc_from_pool(size, &page);
+ if (coherent_flag == COHERENT)
+ addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else
+ addr = __alloc_from_pool(size, &page);
if (!addr)
return NULL;
}
static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
- dma_addr_t handle, size_t size)
+ dma_addr_t handle, size_t size, int coherent_flag)
{
__iommu_remove_mapping(dev, handle, size);
- __free_from_pool(cpu_addr, size);
+ if (coherent_flag == COHERENT)
+ __dma_free_buffer(virt_to_page(cpu_addr), size);
+ else
+ __free_from_pool(cpu_addr, size);
}
-static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs,
+ int coherent_flag)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
struct page **pages;
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
- if (!gfpflags_allow_blocking(gfp))
- return __iommu_alloc_atomic(dev, size, handle);
+ if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
+ return __iommu_alloc_simple(dev, size, gfp, handle,
+ coherent_flag);
/*
* Following is a work-around (a.k.a. hack) to prevent pages
*/
gfp &= ~(__GFP_COMP);
- pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
+ pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
if (!pages)
return NULL;
return NULL;
}
-static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
+}
+
+static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
+}
+
+static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
struct dma_attrs *attrs)
{
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
-
if (!pages)
return -ENXIO;
return 0;
}
+static int arm_iommu_mmap_attrs(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
+ return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
+
+static int arm_coherent_iommu_mmap_attrs(struct device *dev,
+ struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
/*
* free a page as defined by the above mapping.
* Must not be called with IRQs disabled.
*/
-void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs)
+void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag)
{
struct page **pages;
size = PAGE_ALIGN(size);
- if (__in_atomic_pool(cpu_addr, size)) {
- __iommu_free_atomic(dev, cpu_addr, handle, size);
+ if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
+ __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
return;
}
__iommu_free_buffer(dev, pages, size, attrs);
}
+void arm_iommu_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+{
+ __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
+}
+
+void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+{
+ __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
+}
+
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
size_t size, struct dma_attrs *attrs)
};
struct dma_map_ops iommu_coherent_ops = {
- .alloc = arm_iommu_alloc_attrs,
- .free = arm_iommu_free_attrs,
- .mmap = arm_iommu_mmap_attrs,
+ .alloc = arm_coherent_iommu_alloc_attrs,
+ .free = arm_coherent_iommu_free_attrs,
+ .mmap = arm_coherent_iommu_mmap_attrs,
.get_sgtable = arm_iommu_get_sgtable,
.map_page = arm_coherent_iommu_map_page,
#endif
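For context, a device that the firmware or bus marks as cache-coherent would be handed these ops rather than the streaming variants; the per-device selection amounts to something like the line below (sketch only; the actual decision lives in the arch DMA setup code, and iommu_ops is assumed to be the existing non-coherent counterpart in this file):

	set_dma_ops(dev, coherent ? &iommu_coherent_ops : &iommu_ops);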
b __errata_finish
+__ca12_errata:
+#ifdef CONFIG_ARM_ERRATA_818325_852422
+ mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orr r10, r10, #1 << 12 @ set bit #12
+ mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_821420
+ mrc p15, 0, r10, c15, c0, 2 @ read internal feature reg
+ orr r10, r10, #1 << 1 @ set bit #1
+ mcr p15, 0, r10, c15, c0, 2 @ write internal feature reg
+#endif
+#ifdef CONFIG_ARM_ERRATA_825619
+ mrc p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orr r10, r10, #1 << 24 @ set bit #24
+ mcr p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+ b __errata_finish
+
+__ca17_errata:
+#ifdef CONFIG_ARM_ERRATA_852421
+ cmp r6, #0x12 @ only present up to r1p2
+ mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orrle r10, r10, #1 << 24 @ set bit #24
+ mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_852423
+ cmp r6, #0x12 @ only present up to r1p2
+ mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register
+ orrle r10, r10, #1 << 12 @ set bit #12
+ mcrle p15, 0, r10, c15, c0, 1 @ write diagnostic register
+#endif
+ b __errata_finish
+
__v7_pj4b_setup:
#ifdef CONFIG_CPU_PJ4B
teq r0, r10
beq __ca9_errata
+ /* Cortex-A12 Errata */
+ ldr r10, =0x00000c0d @ Cortex-A12 primary part number
+ teq r0, r10
+ beq __ca12_errata
+
+ /* Cortex-A17 Errata */
+ ldr r10, =0x00000c0e @ Cortex-A17 primary part number
+ teq r0, r10
+ beq __ca17_errata
+
/* Cortex-A15 Errata */
ldr r10, =0x00000c0f @ Cortex-A15 primary part number
teq r0, r10