git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branches 'debug-choice', 'devel-stable' and 'misc' into for-linus
author Russell King <rmk+kernel@arm.linux.org.uk>
Thu, 5 Sep 2013 09:34:15 +0000 (10:34 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Thu, 5 Sep 2013 09:34:15 +0000 (10:34 +0100)
208 files changed:
Documentation/arm/Booting
Documentation/arm/kernel_mode_neon.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/l2cc.txt
Documentation/kernel-parameters.txt
MAINTAINERS
Makefile
arch/arc/lib/strchr-700.S
arch/arm/Kconfig
arch/arm/boot/dts/at91sam9n12ek.dts
arch/arm/boot/dts/at91sam9x5ek.dtsi
arch/arm/boot/dts/tegra20-seaboard.dts
arch/arm/boot/dts/tegra20-trimslice.dts
arch/arm/boot/dts/tegra20-whistler.dts
arch/arm/common/mcpm_head.S
arch/arm/common/vlock.S
arch/arm/include/asm/assembler.h
arch/arm/include/asm/barrier.h
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/mach/arch.h
arch/arm/include/asm/memblock.h
arch/arm/include/asm/module.h
arch/arm/include/asm/neon.h [new file with mode: 0644]
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/prom.h
arch/arm/include/asm/spinlock.h
arch/arm/include/asm/switch_to.h
arch/arm/include/asm/thread_info.h
arch/arm/include/asm/tlbflush.h
arch/arm/include/asm/types.h [new file with mode: 0644]
arch/arm/include/asm/v7m.h
arch/arm/include/asm/xor.h
arch/arm/kernel/Makefile
arch/arm/kernel/atags.h
arch/arm/kernel/atags_parse.c
arch/arm/kernel/devtree.c
arch/arm/kernel/entry-common.S
arch/arm/kernel/fiq.c
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/module.c
arch/arm/kernel/perf_event_cpu.c
arch/arm/kernel/setup.c
arch/arm/kernel/smp.c
arch/arm/kernel/smp_tlb.c
arch/arm/kernel/traps.c
arch/arm/kernel/v7m.c [new file with mode: 0644]
arch/arm/kvm/coproc.c
arch/arm/kvm/coproc.h
arch/arm/kvm/coproc_a15.c
arch/arm/kvm/init.S
arch/arm/kvm/interrupts.S
arch/arm/kvm/mmio.c
arch/arm/kvm/mmu.c
arch/arm/lib/Makefile
arch/arm/lib/xor-neon.c [new file with mode: 0644]
arch/arm/mach-at91/at91sam9x5.c
arch/arm/mach-davinci/board-dm355-leopard.c
arch/arm/mach-davinci/board-dm644x-evm.c
arch/arm/mach-davinci/board-dm646x-evm.c
arch/arm/mach-davinci/board-neuros-osd2.c
arch/arm/mach-omap2/board-n8x0.c
arch/arm/mach-omap2/board-rx51.c
arch/arm/mach-omap2/usb-musb.c
arch/arm/mm/Kconfig
arch/arm/mm/cache-l2x0.c
arch/arm/mm/cache-v7.S
arch/arm/mm/context.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/hugetlbpage.c
arch/arm/mm/init.c
arch/arm/mm/mmu.c
arch/arm/mm/nommu.c
arch/arm/mm/proc-feroceon.S
arch/arm/mm/proc-v7.S
arch/arm/mm/tlb-v7.S
arch/arm/plat-samsung/init.c
arch/arm/vfp/vfphw.S
arch/arm/vfp/vfpmodule.c
arch/arm/xen/enlighten.c
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kernel/perf_event.c
arch/arm64/kvm/hyp.S
arch/arm64/kvm/sys_regs.c
arch/mips/math-emu/cp1emu.c
arch/x86/include/asm/bootparam_utils.h
arch/x86/include/asm/microcode_amd.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/microcode_amd.c
arch/x86/kernel/microcode_amd_early.c
arch/x86/kernel/sys_x86_64.c
arch/x86/mm/mmap.c
arch/x86/xen/setup.c
arch/x86/xen/smp.c
drivers/acpi/video.c
drivers/ata/libata-pmp.c
drivers/ata/sata_fsl.c
drivers/ata/sata_highbank.c
drivers/gpu/drm/gma500/psb_intel_sdvo.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/nouveau/core/core/mm.c
drivers/gpu/drm/nouveau/core/include/subdev/mc.h
drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
drivers/gpu/drm/nouveau/core/subdev/mc/base.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
drivers/gpu/drm/nouveau/dispnv04/crtc.c
drivers/gpu/drm/nouveau/dispnv04/disp.h
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nv40_pm.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/rv770.c
drivers/iio/light/adjd_s311.c
drivers/md/dm-cache-policy-mq.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/filter.c
drivers/net/irda/via-ircc.c
drivers/net/macvtap.c
drivers/net/phy/realtek.c
drivers/net/usb/hso.c
drivers/net/wireless/hostap/hostap_ioctl.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/zd1201.c
drivers/of/fdt.c
drivers/pinctrl/pinctrl-sunxi.c
drivers/pinctrl/pinctrl-sunxi.h
drivers/platform/olpc/olpc-ec.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/sony-laptop.c
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_qdio.c
drivers/s390/scsi/zfcp_sysfs.c
drivers/scsi/Kconfig
drivers/staging/comedi/drivers.c
drivers/usb/host/ohci-pci.c
drivers/usb/phy/phy-fsl-usb.h
drivers/usb/phy/phy-fsm-usb.c
drivers/xen/events.c
fs/bfs/inode.c
fs/bio.c
fs/dcache.c
fs/efs/inode.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/inode.c
fs/gfs2/main.c
fs/hugetlbfs/inode.c
fs/namespace.c
fs/nilfs2/segbuf.c
fs/proc/fd.c
fs/proc/generic.c
fs/proc/root.c
include/linux/dcache.h
include/linux/inetdevice.h
include/linux/ipv6.h
include/linux/mm_types.h
include/linux/raid/pq.h
include/linux/sched.h
include/linux/wait.h
include/net/ip6_route.h
include/uapi/linux/ip.h
init/Kconfig
kernel/cpuset.c
kernel/time/sched_clock.c
kernel/time/tick-sched.c
kernel/wait.c
lib/lz4/lz4_compress.c
lib/lz4/lz4_decompress.c
lib/lz4/lz4hc_compress.c
lib/raid6/.gitignore
lib/raid6/Makefile
lib/raid6/algos.c
lib/raid6/neon.c [new file with mode: 0644]
lib/raid6/neon.uc [new file with mode: 0644]
lib/raid6/test/Makefile
mm/memcontrol.c
mm/shmem.c
net/batman-adv/unicast.c
net/bridge/br_fdb.c
net/bridge/br_netlink.c
net/bridge/br_vlan.c
net/ipv4/tcp.c
net/ipv6/addrconf.c
net/ipv6/ndisc.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/netlink/genetlink.c
net/packet/af_packet.c
net/wireless/nl80211.c
net/wireless/sme.c

index 0c1f475fdf36e140d28bcdc26d880546f8a8bdc4..371814a36719f7979402f41c63ca3609fcdae6a9 100644 (file)
@@ -18,7 +18,8 @@ following:
 2. Initialise one serial port.
 3. Detect the machine type.
 4. Setup the kernel tagged list.
-5. Call the kernel image.
+5. Load initramfs.
+6. Call the kernel image.
 
 
 1. Setup and initialise RAM
@@ -120,12 +121,27 @@ tagged list.
 The boot loader must pass at a minimum the size and location of the
 system memory, and the root filesystem location.  The dtb must be
 placed in a region of memory where the kernel decompressor will not
-overwrite it.  The recommended placement is in the first 16KiB of RAM
-with the caveat that it may not be located at physical address 0 since
-the kernel interprets a value of 0 in r2 to mean neither a tagged list
-nor a dtb were passed.
+overwrite it, whilst remaining within the region which will be covered
+by the kernel's low-memory mapping.
 
-5. Calling the kernel image
+A safe location is just above the 128MiB boundary from start of RAM.
+
+5. Load initramfs.
+------------------
+
+Existing boot loaders:         OPTIONAL
+New boot loaders:              OPTIONAL
+
+If an initramfs is in use then, as with the dtb, it must be placed in
+a region of memory where the kernel decompressor will not overwrite it
+while also within the region which will be covered by the kernel's
+low-memory mapping.
+
+A safe location is just above the device tree blob which itself will
+be loaded just above the 128MiB boundary from the start of RAM as
+recommended above.
+
+6. Calling the kernel image
 ---------------------------
 
 Existing boot loaders:         MANDATORY
@@ -136,11 +152,17 @@ is stored in flash, and is linked correctly to be run from flash,
 then it is legal for the boot loader to call the zImage in flash
 directly.
 
-The zImage may also be placed in system RAM (at any location) and
-called there.  Note that the kernel uses 16K of RAM below the image
-to store page tables.  The recommended placement is 32KiB into RAM.
+The zImage may also be placed in system RAM and called there.  The
+kernel should be placed in the first 128MiB of RAM.  It is recommended
+that it is loaded above 32MiB in order to avoid the need to relocate
+prior to decompression, which will make the boot process slightly
+faster.
+
+When booting a raw (non-zImage) kernel the constraints are tighter.
+In this case the kernel must be loaded at an offset into system RAM
+equal to TEXT_OFFSET - PAGE_OFFSET.
 
-In either case, the following conditions must be met:
+In any case, the following conditions must be met:
 
 - Quiesce all DMA capable devices so that memory does not get
   corrupted by bogus network packets or disk data. This will save
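
As a rough illustration of the placement recommendations above, a boot
loader might derive its load addresses as follows.  This is only a sketch:
the 32MiB and 128MiB offsets come from the text above, while the structure,
the names and the 4KiB initramfs alignment are arbitrary examples.

#include <stdint.h>

#define MiB(x)  ((uint32_t)(x) << 20)

struct load_plan {
        uint32_t zimage;        /* above 32MiB, within the first 128MiB of RAM */
        uint32_t dtb;           /* just above the 128MiB boundary */
        uint32_t initramfs;     /* just above the dtb */
};

static struct load_plan plan_loads(uint32_t ram_start, uint32_t dtb_size)
{
        struct load_plan p;

        p.zimage    = ram_start + MiB(32);
        p.dtb       = ram_start + MiB(128);
        /* assumed 4KiB alignment; not mandated by the text above */
        p.initramfs = (p.dtb + dtb_size + 0xfff) & ~(uint32_t)0xfff;
        return p;
}
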
diff --git a/Documentation/arm/kernel_mode_neon.txt b/Documentation/arm/kernel_mode_neon.txt
new file mode 100644 (file)
index 0000000..5254527
--- /dev/null
@@ -0,0 +1,121 @@
+Kernel mode NEON
+================
+
+TL;DR summary
+-------------
+* Use only NEON instructions, or VFP instructions that don't rely on support
+  code
+* Isolate your NEON code in a separate compilation unit, and compile it with
+  '-mfpu=neon -mfloat-abi=softfp'
+* Put kernel_neon_begin() and kernel_neon_end() calls around the calls into your
+  NEON code
+* Don't sleep in your NEON code, and be aware that it will be executed with
+  preemption disabled
+
+
+Introduction
+------------
+It is possible to use NEON instructions (and in some cases, VFP instructions) in
+code that runs in kernel mode. However, for performance reasons, the NEON/VFP
+register file is not preserved and restored at every context switch or taken
+exception like the normal register file is, so some manual intervention is
+required. Furthermore, special care is required for code that may sleep [i.e.,
+may call schedule()], as NEON or VFP instructions will be executed in a
+non-preemptible section for reasons outlined below.
+
+
+Lazy preserve and restore
+-------------------------
+The NEON/VFP register file is managed using lazy preserve (on UP systems) and
+lazy restore (on both SMP and UP systems). This means that the register file is
+kept 'live', and is only preserved and restored when multiple tasks are
+contending for the NEON/VFP unit (or, in the SMP case, when a task migrates to
+another core). Lazy restore is implemented by disabling the NEON/VFP unit after
+every context switch, resulting in a trap when subsequently a NEON/VFP
+instruction is issued, allowing the kernel to step in and perform the restore if
+necessary.
+
+Any use of the NEON/VFP unit in kernel mode should not interfere with this, so
+it is required to do an 'eager' preserve of the NEON/VFP register file, and
+enable the NEON/VFP unit explicitly so no exceptions are generated on first
+subsequent use. This is handled by the function kernel_neon_begin(), which
+should be called before any kernel mode NEON or VFP instructions are issued.
+Likewise, the NEON/VFP unit should be disabled again after use to make sure user
+mode will hit the lazy restore trap upon next use. This is handled by the
+function kernel_neon_end().
+
+
+Interruptions in kernel mode
+----------------------------
+For reasons of performance and simplicity, it was decided that there shall be no
+preserve/restore mechanism for the kernel mode NEON/VFP register contents. This
+implies that interruptions of a kernel mode NEON section can only be allowed if
+they are guaranteed not to touch the NEON/VFP registers. For this reason, the
+following rules and restrictions apply in the kernel:
+* NEON/VFP code is not allowed in interrupt context;
+* NEON/VFP code is not allowed to sleep;
+* NEON/VFP code is executed with preemption disabled.
+
+If latency is a concern, it is possible to put back to back calls to
+kernel_neon_end() and kernel_neon_begin() in places in your code where none of
+the NEON registers are live. (Additional calls to kernel_neon_begin() should be
+reasonably cheap if no context switch occurred in the meantime)
+
+
+VFP and support code
+--------------------
+Earlier versions of VFP (prior to version 3) rely on software support for things
+like IEEE-754 compliant underflow handling etc. When the VFP unit needs such
+software assistance, it signals the kernel by raising an undefined instruction
+exception. The kernel responds by inspecting the VFP control registers and the
+current instruction and arguments, and emulates the instruction in software.
+
+Such software assistance is currently not implemented for VFP instructions
+executed in kernel mode. If such a condition is encountered, the kernel will
+fail and generate an OOPS.
+
+
+Separating NEON code from ordinary code
+---------------------------------------
+The compiler is not aware of the special significance of kernel_neon_begin() and
+kernel_neon_end(), i.e., that it is only allowed to issue NEON/VFP instructions
+between calls to these respective functions. Furthermore, GCC may generate NEON
+instructions of its own at -O3 level if -mfpu=neon is selected, and even if the
+kernel is currently compiled at -O2, future changes may result in NEON/VFP
+instructions appearing in unexpected places if no special care is taken.
+
+Therefore, the recommended and only supported way of using NEON/VFP in the
+kernel is by adhering to the following rules:
+* isolate the NEON code in a separate compilation unit and compile it with
+  '-mfpu=neon -mfloat-abi=softfp';
+* issue the calls to kernel_neon_begin(), kernel_neon_end() as well as the calls
+  into the unit containing the NEON code from a compilation unit which is *not*
+  built with the GCC flag '-mfpu=neon' set.
+
+As the kernel is compiled with '-msoft-float', the above will guarantee that
+both NEON and VFP instructions will only ever appear in designated compilation
+units at any optimization level.
+
+
+NEON assembler
+--------------
+NEON assembler is supported with no additional caveats as long as the rules
+above are followed.
+
+
+NEON code generated by GCC
+--------------------------
+The GCC option -ftree-vectorize (implied by -O3) tries to exploit implicit
+parallelism, and generates NEON code from ordinary C source code. This is fully
+supported as long as the rules above are followed.
+
+
+NEON intrinsics
+---------------
+NEON intrinsics are also supported. However, as code using NEON intrinsics
+relies on the GCC header <arm_neon.h> (which #includes <stdint.h>), you should
+observe the following in addition to the rules above:
+* Compile the unit containing the NEON intrinsics with '-ffreestanding' so GCC
+  uses its builtin version of <stdint.h> (this is a C99 header which the kernel
+  does not supply);
+* Include <arm_neon.h> last, or at least after <linux/types.h>
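
Taken together, the rules above suggest a calling pattern along these lines.
This is a sketch only: my_neon_fill() and its scalar fallback are made-up
names, and the in_interrupt() check mirrors the xor-neon glue elsewhere in
this merge.

/* caller.c -- built *without* '-mfpu=neon'; my_neon_fill() lives in a
 * separate unit built with '-mfpu=neon -mfloat-abi=softfp' (hypothetical) */
#include <linux/types.h>
#include <linux/hardirq.h>
#include <asm/neon.h>

void my_neon_fill(u32 *dst, u32 val, int count);   /* in the NEON unit */

void fill_words(u32 *dst, u32 val, int count)
{
        if (!cpu_has_neon() || in_interrupt()) {
                while (count--)
                        *dst++ = val;           /* scalar fallback */
                return;
        }

        kernel_neon_begin();    /* eager preserve, enable the NEON unit */
        my_neon_fill(dst, val, count);
        kernel_neon_end();      /* disable NEON, re-arm lazy restore */
}
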
index 69ddf9fad2dcccc36bbaa330b96170d2eb9b213d..c0c7626fd0ff760075c2c97c866c9f83cdb21819 100644 (file)
@@ -16,9 +16,11 @@ Required properties:
      performs the same operation).
        "marvell,"aurora-outer-cache: Marvell Controller designed to be
         compatible with the ARM one with outer cache mode.
-       "bcm,bcm11351-a2-pl310-cache": For Broadcom bcm11351 chipset where an
+       "brcm,bcm11351-a2-pl310-cache": For Broadcom bcm11351 chipset where an
        offset needs to be added to the address before passing down to the L2
        cache controller
+       "bcm,bcm11351-a2-pl310-cache": DEPRECATED by
+                                      "brcm,bcm11351-a2-pl310-cache"
 - cache-unified : Specifies the cache is a unified cache.
 - cache-level : Should be set to 2 for a level 2 cache.
 - reg : Physical base address and size of cache controller's memory mapped
index 15356aca938cd9a7bb2cdef09d8e7a19da36db90..7f9d4f53882c457ab8aa7f3e48c5fbbae7e903c2 100644 (file)
@@ -2953,7 +2953,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        improve throughput, but will also increase the
                        amount of memory reserved for use by the client.
 
-       swapaccount[=0|1]
+       swapaccount=[0|1]
                        [KNL] Enable accounting of swap in memory resource
                        controller if no parameter or 1 is given or disable
                        it if 0 is given (See Documentation/cgroups/memory.txt)
index 229c66b12cc21b9ebe6479932491ecd5e8784118..8197fbd70a3ef465fae3f180015cbc5ff11edf00 100644 (file)
@@ -5884,7 +5884,7 @@ F:        drivers/i2c/busses/i2c-omap.c
 F:     include/linux/i2c-omap.h
 
 OMAP DEVICE TREE SUPPORT
-M:     Benoît Cousson <b-cousson@ti.com>
+M:     Benoît Cousson <bcousson@baylibre.com>
 M:     Tony Lindgren <tony@atomide.com>
 L:     linux-omap@vger.kernel.org
 L:     devicetree@vger.kernel.org
@@ -5964,14 +5964,14 @@ S:      Maintained
 F:     drivers/char/hw_random/omap-rng.c
 
 OMAP HWMOD SUPPORT
-M:     Benoît Cousson <b-cousson@ti.com>
+M:     Benoît Cousson <bcousson@baylibre.com>
 M:     Paul Walmsley <paul@pwsan.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     arch/arm/mach-omap2/omap_hwmod.*
 
 OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
-M:     Benoît Cousson <b-cousson@ti.com>
+M:     Benoît Cousson <bcousson@baylibre.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index a5a55f4547c6e79fa17a0051dc5cce1daaec2a50..369882e4fc7718dd4e114a24be5514a43d3a1ef1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Linux for Workgroups
 
 # *DOCUMENTATION*
index 99c10475d477c73957353e5e9df751718289b5ac..9c548c7cf0014e1ce9c0823026039d60365b6192 100644 (file)
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
        ld.a    r2,[r0,4]
        sub     r12,r6,r7
        bic     r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
        and     r7,r12,r4
        breq    r7,0,.Loop ; For speed, we want this branch to be unaligned.
        b       .Lfound_char ; Likewise this one.
+#else
+       and     r12,r12,r4
+       breq    r12,0,.Loop ; For speed, we want this branch to be unaligned.
+       lsr_s   r12,r12,7
+       bic     r2,r7,r6
+       b.d     .Lfound_char_b
+       and_s   r2,r2,r12
+#endif
 ; /* We require this code address to be unaligned for speed...  */
 .Laligned:
        ld_s    r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
        lsr     r7,r7,7
 
        bic     r2,r7,r6
+.Lfound_char_b:
        norm    r2,r2
        sub_s   r0,r0,4
        asr_s   r2,r2,3
index 43594d5116efce810798b763da8f3e7ea6891c93..053c78cb0fad73731453a1a8079d352e6aeb7a57 100644 (file)
@@ -52,6 +52,7 @@ config ARM
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_UID16
+       select IRQ_FORCED_THREADING
        select KTIME_SCALAR
        select PERF_USE_VMALLOC
        select RTC_LIB
@@ -1372,6 +1373,15 @@ config ARM_ERRATA_798181
          which sends an IPI to the CPUs that are running the same ASID
          as the one being invalidated.
 
+config ARM_ERRATA_773022
+       bool "ARM errata: incorrect instructions may be executed from loop buffer"
+       depends on CPU_V7
+       help
+         This option enables the workaround for the 773022 Cortex-A15
+         (up to r0p4) erratum. In certain rare sequences of code, the
+         loop buffer may deliver incorrect instructions. This
+         workaround disables the loop buffer to avoid the erratum.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
@@ -1613,13 +1623,49 @@ config ARCH_NR_GPIO
 
 source kernel/Kconfig.preempt
 
-config HZ
+config HZ_FIXED
        int
        default 200 if ARCH_EBSA110 || ARCH_S3C24XX || ARCH_S5P64X0 || \
                ARCH_S5PV210 || ARCH_EXYNOS4
        default AT91_TIMER_HZ if ARCH_AT91
        default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
-       default 100
+
+choice
+       depends on !HZ_FIXED
+       prompt "Timer frequency"
+
+config HZ_100
+       bool "100 Hz"
+
+config HZ_200
+       bool "200 Hz"
+
+config HZ_250
+       bool "250 Hz"
+
+config HZ_300
+       bool "300 Hz"
+
+config HZ_500
+       bool "500 Hz"
+
+config HZ_1000
+       bool "1000 Hz"
+
+endchoice
+
+config HZ
+       int
+       default HZ_FIXED if HZ_FIXED
+       default 100 if HZ_100
+       default 200 if HZ_200
+       default 250 if HZ_250
+       default 300 if HZ_300
+       default 500 if HZ_500
+       default 1000
+
+config SCHED_HRTICK
+       def_bool HIGH_RES_TIMERS
 
 config SCHED_HRTICK
        def_bool HIGH_RES_TIMERS
@@ -1756,6 +1802,9 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE
        def_bool y
        depends on ARM_LPAE
 
+config ARCH_WANT_GENERAL_HUGETLB
+       def_bool y
+
 source "mm/Kconfig"
 
 config FORCE_MAX_ZONEORDER
@@ -2175,6 +2224,13 @@ config NEON
          Say Y to include support code for NEON, the ARMv7 Advanced SIMD
          Extension.
 
+config KERNEL_MODE_NEON
+       bool "Support for NEON in kernel mode"
+       default n
+       depends on NEON
+       help
+         Say Y to include support for NEON in kernel mode.
+
 endmenu
 
 menu "Userspace binary formats"
@@ -2199,7 +2255,7 @@ source "kernel/power/Kconfig"
 
 config ARCH_SUSPEND_POSSIBLE
        depends on !ARCH_S5PC100
-       depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
+       depends on CPU_ARM920T || CPU_ARM926T || CPU_FEROCEON || CPU_SA1100 || \
                CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE || CPU_MOHAWK
        def_bool y
 
index d59b70c6a6a0dbadafcded2baaba38f12270aff4..3d77dbe406f4736aacb7a1d361f4f02758225aa0 100644 (file)
        compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
 
        chosen {
-               bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
+               bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
        };
 
        memory {
-               reg = <0x20000000 0x10000000>;
+               reg = <0x20000000 0x8000000>;
        };
 
        clocks {
index b753855b20584320d00c9b38c0a4c40d972a1b1b..49e3c45818c236caf750fe5f42137b963f0eb7c2 100644 (file)
@@ -94,8 +94,9 @@
 
                usb0: ohci@00600000 {
                        status = "okay";
-                       num-ports = <2>;
-                       atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW
+                       num-ports = <3>;
+                       atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */
+                                          &pioD 19 GPIO_ACTIVE_LOW
                                           &pioD 20 GPIO_ACTIVE_LOW
                                          >;
                };
index 365760b33a26e1ea9ac7bae3a7c32f7f4bde9878..40e6fb280333ec190864a0c3ef55ba6007371ac4 100644 (file)
                        regulator-max-microvolt = <5000000>;
                        enable-active-high;
                        gpio = <&gpio 24 0>; /* PD0 */
+                       regulator-always-on;
+                       regulator-boot-on;
                };
        };
 
index ed4b901b0227405f3cd687f1a832cad4db808f22..37c93d3c4812ec65d9ef7689bb173757d37d9060 100644 (file)
                        regulator-max-microvolt = <5000000>;
                        enable-active-high;
                        gpio = <&gpio 170 0>; /* PV2 */
+                       regulator-always-on;
+                       regulator-boot-on;
                };
        };
 
index ab67c94db280cebb2fc8c5e7126977a3fee8fb40..a3d0ebad78a1137eca61fea4781a178af0d516cc 100644 (file)
                        regulator-max-microvolt = <5000000>;
                        enable-active-high;
                        gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
+                       regulator-always-on;
+                       regulator-boot-on;
                };
 
                vbus3_reg: regulator@3 {
                        regulator-max-microvolt = <5000000>;
                        enable-active-high;
                        gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
+                       regulator-always-on;
+                       regulator-boot-on;
                };
        };
 
index 80f033614a1fa07d90de18de6673ce45ec0bf91b..39c96df3477a41549d71e1a18733dd85f4a168df 100644 (file)
@@ -151,7 +151,7 @@ mcpm_setup_leave:
 
        mov     r0, #INBOUND_NOT_COMING_UP
        strb    r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
-       dsb
+       dsb     st
        sev
 
        mov     r0, r11
index ff198583f683930951e93a4503ed651e76c7d939..8b7df283fedf8e16361f9834349e5f2934fba92e 100644 (file)
@@ -42,7 +42,7 @@
        dmb
        mov     \rscratch, #0
        strb    \rscratch, [\rbase, \rcpu]
-       dsb
+       dsb     st
        sev
 .endm
 
@@ -102,7 +102,7 @@ ENTRY(vlock_unlock)
        dmb
        mov     r1, #VLOCK_OWNER_NONE
        strb    r1, [r0, #VLOCK_OWNER_OFFSET]
-       dsb
+       dsb     st
        sev
        bx      lr
 ENDPROC(vlock_unlock)
index a5fef710af3225b70a35c733335418dcbd3dc830..fcc1b5bf6979cafd4850ff88b3cf8d21bb6791a5 100644 (file)
 #ifdef CONFIG_SMP
 #if __LINUX_ARM_ARCH__ >= 7
        .ifeqs "\mode","arm"
-       ALT_SMP(dmb)
+       ALT_SMP(dmb     ish)
        .else
-       ALT_SMP(W(dmb))
+       ALT_SMP(W(dmb)  ish)
        .endif
 #elif __LINUX_ARM_ARCH__ == 6
        ALT_SMP(mcr     p15, 0, r0, c7, c10, 5) @ dmb
index 8dcd9c702d90c9c352d85595d0ea8f83a42f215e..60f15e274e6d461814eb467cc84dc8b0f0395163 100644 (file)
 #endif
 
 #if __LINUX_ARM_ARCH__ >= 7
-#define isb() __asm__ __volatile__ ("isb" : : : "memory")
-#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
-#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
+#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
+#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
 #elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
                                    : : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
                                    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
                                    : : "r" (0) : "memory")
 #elif defined(CONFIG_CPU_FA526)
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
                                    : : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
                                    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #else
-#define isb() __asm__ __volatile__ ("" : : : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define isb(x) __asm__ __volatile__ ("" : : : "memory")
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
                                    : : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
 #endif
 
 #ifdef CONFIG_ARCH_HAS_BARRIERS
@@ -42,7 +42,7 @@
 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
 #define mb()           do { dsb(); outer_sync(); } while (0)
 #define rmb()          dsb()
-#define wmb()          mb()
+#define wmb()          do { dsb(st); outer_sync(); } while (0)
 #else
 #define mb()           barrier()
 #define rmb()          barrier()
@@ -54,9 +54,9 @@
 #define smp_rmb()      barrier()
 #define smp_wmb()      barrier()
 #else
-#define smp_mb()       dmb()
-#define smp_rmb()      dmb()
-#define smp_wmb()      dmb()
+#define smp_mb()       dmb(ish)
+#define smp_rmb()      smp_mb()
+#define smp_wmb()      dmb(ishst)
 #endif
 
 #define read_barrier_depends()         do { } while(0)
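
For reference, the option argument is pasted straight into the instruction
string, so on ARMv7 the rewritten macros expand roughly as shown below; an
empty option assembles as the full-system (SY) variant.  Illustrative only:

dsb(ishst);   /* __asm__ __volatile__ ("dsb ishst" : : : "memory") */
dsb();        /* __asm__ __volatile__ ("dsb "      : : : "memory"), i.e. DSB SY */
smp_mb();     /* dmb(ish) on SMP kernels, barrier() otherwise */
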
index 17d0ae8672fa666b8d1e43ae40b0226e1bd04ac8..15f2d5bf8875636e1514d377fc61b426534b3f74 100644 (file)
@@ -268,8 +268,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  * Harvard caches are synchronised for the user space address range.
  * This is used for the ARM private sys_cacheflush system call.
  */
-#define flush_cache_user_range(start,end) \
-       __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+#define flush_cache_user_range(s,e)    __cpuc_coherent_user_range(s,e)
 
 /*
  * Perform necessary cache operations to ensure that data previously
@@ -352,7 +351,7 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
                 * set_pte_at() called from vmap_pte_range() does not
                 * have a DSB after cleaning the cache line.
                 */
-               dsb();
+               dsb(ishst);
 }
 
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
index 441efc491b50aa0f402bf60fbe6ecd6020456875..69b879ac0289fde3a1ce8776b7d3602078286be7 100644 (file)
@@ -65,12 +65,12 @@ struct machine_desc {
 /*
  * Current machine - only accessible during boot.
  */
-extern struct machine_desc *machine_desc;
+extern const struct machine_desc *machine_desc;
 
 /*
  * Machine type table - also only accessible during boot
  */
-extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
 #define for_each_machine_desc(p)                       \
        for (p = __arch_info_begin; p < __arch_info_end; p++)
 
index 00ca5f92648ea56616afe8bfbd4a25ff8f0b0e27..c2f5102ae6595b19b9e3d6494395d052ec74687d 100644 (file)
@@ -4,8 +4,7 @@
 struct meminfo;
 struct machine_desc;
 
-extern void arm_memblock_init(struct meminfo *, struct machine_desc *);
-
+void arm_memblock_init(struct meminfo *, const struct machine_desc *);
 phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);
 
 #endif
index 0d3a28dbc8e5db05823e6b83f85ccc5d04b807ac..ed690c49ef93fae344aeaec993c939e336460283 100644 (file)
@@ -12,6 +12,8 @@ enum {
        ARM_SEC_CORE,
        ARM_SEC_EXIT,
        ARM_SEC_DEVEXIT,
+       ARM_SEC_HOT,
+       ARM_SEC_UNLIKELY,
        ARM_SEC_MAX,
 };
 
diff --git a/arch/arm/include/asm/neon.h b/arch/arm/include/asm/neon.h
new file mode 100644 (file)
index 0000000..8f730fe
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * linux/arch/arm/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/hwcap.h>
+
+#define cpu_has_neon()         (!!(elf_hwcap & HWCAP_NEON))
+
+#ifdef __ARM_NEON__
+
+/*
+ * If you are affected by the BUILD_BUG below, it probably means that you are
+ * using NEON code /and/ calling the kernel_neon_begin() function from the same
+ * compilation unit. To prevent issues that may arise from GCC reordering or
+ * generating(1) NEON instructions outside of these begin/end functions, the
+ * only supported way of using NEON code in the kernel is by isolating it in a
+ * separate compilation unit, and calling it from another unit from inside a
+ * kernel_neon_begin/kernel_neon_end pair.
+ *
+ * (1) Current GCC (4.7) might generate NEON instructions at O3 level if
+ *     -mfpu=neon is set.
+ */
+
+#define kernel_neon_begin() \
+       BUILD_BUG_ON_MSG(1, "kernel_neon_begin() called from NEON code")
+
+#else
+void kernel_neon_begin(void);
+#endif
+void kernel_neon_end(void);
index 04aeb02d2e116f904e1871e416f433b249ec4086..be956dbf6baea3a7481baa11c58233d0741702d1 100644 (file)
@@ -100,7 +100,7 @@ extern pgprot_t             pgprot_s2_device;
 #define PAGE_HYP               _MOD_PROT(pgprot_kernel, L_PTE_HYP)
 #define PAGE_HYP_DEVICE                _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
 #define PAGE_S2                        _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE         _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE         _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDWR)
 
 #define __PAGE_NONE            __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
 #define __PAGE_SHARED          __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
index a219227c3e43815417887e2425d2c900d856333e..4a2985e21969ab283889935c20ad44a486ed1dcd 100644 (file)
 
 #ifdef CONFIG_OF
 
-extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
+extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
 extern void arm_dt_memblock_reserve(void);
 extern void __init arm_dt_init_cpu_maps(void);
 
 #else /* CONFIG_OF */
 
-static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
+static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
 {
        return NULL;
 }
index b07c09e5a0ac86c6ddd5f7bc9ba25425c784e147..4f2c28060c9aa227c47e73ac6557c45add91128f 100644 (file)
@@ -46,7 +46,7 @@ static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
        __asm__ __volatile__ (
-               "dsb\n"
+               "dsb ishst\n"
                SEV
        );
 #else
index fa09e6b49bf19357f7a3454fe89bc6137b9fb403..c99e259469f7de9185f394dbb3c4453ff2f0f50f 100644 (file)
@@ -3,6 +3,16 @@
 
 #include <linux/thread_info.h>
 
+/*
+ * For v7 SMP cores running a preemptible kernel we may be pre-empted
+ * during a TLB maintenance operation, so execute an inner-shareable dsb
+ * to ensure that the maintenance completes in case we migrate to another
+ * CPU.
+ */
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+#define finish_arch_switch(prev)       dsb(ish)
+#endif
+
 /*
  * switch_to(prev, next) should switch from task `prev' to `next'
  * `prev' will never be the same as `next'.  schedule() itself
index 2b8114fcba09a3c5b9d6aceefbc6e456a8c9050a..df5e13d64f2c02b28963c23b319a7634cf5dacc8 100644 (file)
@@ -43,6 +43,16 @@ struct cpu_context_save {
        __u32   extra[2];               /* Xscale 'acc' register, etc */
 };
 
+struct arm_restart_block {
+       union {
+               /* For user cache flushing */
+               struct {
+                       unsigned long start;
+                       unsigned long end;
+               } cache;
+       };
+};
+
 /*
  * low level task data that entry.S needs immediate access to.
  * __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -68,6 +78,7 @@ struct thread_info {
        unsigned long           thumbee_state;  /* ThumbEE Handler Base register */
 #endif
        struct restart_block    restart_block;
+       struct arm_restart_block        arm_restart_block;
 };
 
 #define INIT_THREAD_INFO(tsk)                                          \
index f467e9b3f8d5d5b35c1d6fce386c718eb1b21e8f..38960264040cd989068b7e897979194bd5d30bc4 100644 (file)
@@ -319,67 +319,110 @@ extern struct cpu_tlb_fns cpu_tlb;
 #define tlb_op(f, regs, arg)   __tlb_op(f, "p15, 0, %0, " regs, arg)
 #define tlb_l2_op(f, regs, arg)        __tlb_op(f, "p15, 1, %0, " regs, arg)
 
-static inline void local_flush_tlb_all(void)
+static inline void __local_flush_tlb_all(void)
 {
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;
 
-       if (tlb_flag(TLB_WB))
-               dsb();
-
        tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
        tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
        tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
-       tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
+}
+
+static inline void local_flush_tlb_all(void)
+{
+       const int zero = 0;
+       const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+       if (tlb_flag(TLB_WB))
+               dsb(nshst);
+
+       __local_flush_tlb_all();
+       tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);
 
        if (tlb_flag(TLB_BARRIER)) {
-               dsb();
+               dsb(nsh);
                isb();
        }
 }
 
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
+static inline void __flush_tlb_all(void)
 {
        const int zero = 0;
-       const int asid = ASID(mm);
        const unsigned int __tlb_flag = __cpu_tlb_flags;
 
        if (tlb_flag(TLB_WB))
-               dsb();
+               dsb(ishst);
+
+       __local_flush_tlb_all();
+       tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
+
+       if (tlb_flag(TLB_BARRIER)) {
+               dsb(ish);
+               isb();
+       }
+}
+
+static inline void __local_flush_tlb_mm(struct mm_struct *mm)
+{
+       const int zero = 0;
+       const int asid = ASID(mm);
+       const unsigned int __tlb_flag = __cpu_tlb_flags;
 
        if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
-               if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
+               if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
                        tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
                        tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
                        tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
                }
-               put_cpu();
        }
 
        tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
        tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
        tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+       const int asid = ASID(mm);
+       const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+       if (tlb_flag(TLB_WB))
+               dsb(nshst);
+
+       __local_flush_tlb_mm(mm);
+       tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);
+
+       if (tlb_flag(TLB_BARRIER))
+               dsb(nsh);
+}
+
+static inline void __flush_tlb_mm(struct mm_struct *mm)
+{
+       const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+       if (tlb_flag(TLB_WB))
+               dsb(ishst);
+
+       __local_flush_tlb_mm(mm);
 #ifdef CONFIG_ARM_ERRATA_720789
-       tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero);
+       tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);
 #else
-       tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid);
+       tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));
 #endif
 
        if (tlb_flag(TLB_BARRIER))
-               dsb();
+               dsb(ish);
 }
 
 static inline void
-local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;
 
        uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
 
-       if (tlb_flag(TLB_WB))
-               dsb();
-
        if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
            cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
@@ -392,6 +435,36 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
        tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
        tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
        tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
+}
+
+static inline void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+       const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+       uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+       if (tlb_flag(TLB_WB))
+               dsb(nshst);
+
+       __local_flush_tlb_page(vma, uaddr);
+       tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);
+
+       if (tlb_flag(TLB_BARRIER))
+               dsb(nsh);
+}
+
+static inline void
+__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+       const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+       uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+       if (tlb_flag(TLB_WB))
+               dsb(ishst);
+
+       __local_flush_tlb_page(vma, uaddr);
 #ifdef CONFIG_ARM_ERRATA_720789
        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
 #else
@@ -399,19 +472,14 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 #endif
 
        if (tlb_flag(TLB_BARRIER))
-               dsb();
+               dsb(ish);
 }
 
-static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
 {
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;
 
-       kaddr &= PAGE_MASK;
-
-       if (tlb_flag(TLB_WB))
-               dsb();
-
        tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
        tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
        tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
@@ -421,26 +489,75 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
        tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
        tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
        tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
+}
+
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+{
+       const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+       kaddr &= PAGE_MASK;
+
+       if (tlb_flag(TLB_WB))
+               dsb(nshst);
+
+       __local_flush_tlb_kernel_page(kaddr);
+       tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);
+
+       if (tlb_flag(TLB_BARRIER)) {
+               dsb(nsh);
+               isb();
+       }
+}
+
+static inline void __flush_tlb_kernel_page(unsigned long kaddr)
+{
+       const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+       kaddr &= PAGE_MASK;
+
+       if (tlb_flag(TLB_WB))
+               dsb(ishst);
+
+       __local_flush_tlb_kernel_page(kaddr);
        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
 
        if (tlb_flag(TLB_BARRIER)) {
-               dsb();
+               dsb(ish);
                isb();
        }
 }
 
+/*
+ * Branch predictor maintenance is paired with full TLB invalidation, so
+ * there is no need for any barriers here.
+ */
+static inline void __local_flush_bp_all(void)
+{
+       const int zero = 0;
+       const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+       if (tlb_flag(TLB_V6_BP))
+               asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+}
+
 static inline void local_flush_bp_all(void)
 {
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;
 
+       __local_flush_bp_all();
        if (tlb_flag(TLB_V7_UIS_BP))
-               asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
-       else if (tlb_flag(TLB_V6_BP))
                asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+}
 
-       if (tlb_flag(TLB_BARRIER))
-               isb();
+static inline void __flush_bp_all(void)
+{
+       const int zero = 0;
+       const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+       __local_flush_bp_all();
+       if (tlb_flag(TLB_V7_UIS_BP))
+               asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
 }
 
 #include <asm/cputype.h>
@@ -461,7 +578,7 @@ static inline void dummy_flush_tlb_a15_erratum(void)
         * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
         */
        asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
-       dsb();
+       dsb(ish);
 }
 #else
 static inline int erratum_a15_798181(void)
@@ -495,7 +612,7 @@ static inline void flush_pmd_entry(void *pmd)
        tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1  @ L2 flush_pmd", pmd);
 
        if (tlb_flag(TLB_WB))
-               dsb();
+               dsb(ishst);
 }
 
 static inline void clean_pmd_entry(void *pmd)
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h
new file mode 100644 (file)
index 0000000..a53cdb8
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef _ASM_TYPES_H
+#define _ASM_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+
+/*
+ * The C99 types uintXX_t that are usually defined in 'stdint.h' are not as
+ * unambiguous on ARM as you would expect. For the types below, there is a
+ * difference on ARM between GCC built for bare metal ARM, GCC built for glibc
+ * and the kernel itself, which results in build errors if you try to build with
+ * -ffreestanding and include 'stdint.h' (such as when you include 'arm_neon.h'
+ * in order to use NEON intrinsics)
+ *
+ * As the typedefs for these types in 'stdint.h' are based on builtin defines
+ * supplied by GCC, we can tweak these to align with the kernel's idea of those
+ * types, so 'linux/types.h' and 'stdint.h' can be safely included from the same
+ * source file (provided that -ffreestanding is used).
+ *
+ *                    int32_t         uint32_t               uintptr_t
+ * bare metal GCC     long            unsigned long          unsigned int
+ * glibc GCC          int             unsigned int           unsigned int
+ * kernel             int             unsigned int           unsigned long
+ */
+
+#ifdef __INT32_TYPE__
+#undef __INT32_TYPE__
+#define __INT32_TYPE__         int
+#endif
+
+#ifdef __UINT32_TYPE__
+#undef __UINT32_TYPE__
+#define __UINT32_TYPE__        unsigned int
+#endif
+
+#ifdef __UINTPTR_TYPE__
+#undef __UINTPTR_TYPE__
+#define __UINTPTR_TYPE__       unsigned long
+#endif
+
+#endif /* _ASM_TYPES_H */
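
With these overrides in place, a NEON intrinsics unit can include both kernel
and GCC headers as described in Documentation/arm/kernel_mode_neon.txt.  A
sketch, assuming the unit is built with '-ffreestanding -mfpu=neon
-mfloat-abi=softfp'; the function name is illustrative, and callers must
still wrap it in kernel_neon_begin()/kernel_neon_end() from a non-NEON unit.

#include <linux/types.h>        /* kernel types (and asm/types.h) first */
#include <arm_neon.h>           /* GCC intrinsics header last */

void my_neon_add_u32(u32 *dst, const u32 *src, int count)
{
        /* four words at a time with NEON, remainder handled by scalar code */
        for (; count >= 4; count -= 4, dst += 4, src += 4)
                vst1q_u32(dst, vaddq_u32(vld1q_u32(dst), vld1q_u32(src)));
        while (count-- > 0)
                *dst++ += *src++;
}
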
index fa88d09fa3d9f10a8ca5de8a11dbb812645df2da..615781c61627ed30bfaed0d734cdc05417f69e3e 100644 (file)
 
 #define V7M_SCB_VTOR                   0x08
 
+#define V7M_SCB_AIRCR                  0x0c
+#define V7M_SCB_AIRCR_VECTKEY                  (0x05fa << 16)
+#define V7M_SCB_AIRCR_SYSRESETREQ              (1 << 2)
+
 #define V7M_SCB_SCR                    0x10
 #define V7M_SCB_SCR_SLEEPDEEP                  (1 << 2)
 
  */
 #define EXC_RET_STACK_MASK                     0x00000004
 #define EXC_RET_THREADMODE_PROCESSSTACK                0xfffffffd
+
+#ifndef __ASSEMBLY__
+
+enum reboot_mode;
+
+void armv7m_restart(enum reboot_mode mode, const char *cmd);
+
+#endif /* __ASSEMBLY__ */
index 7604673dc4278609b4f7ba68985d8b996b49ec47..4ffb26d4cad8d9f5225b83e15b9c20c1d3ef7c00 100644 (file)
@@ -7,7 +7,10 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/hardirq.h>
 #include <asm-generic/xor.h>
+#include <asm/hwcap.h>
+#include <asm/neon.h>
 
 #define __XOR(a1, a2) a1 ^= a2
 
@@ -138,4 +141,74 @@ static struct xor_block_template xor_block_arm4regs = {
                xor_speed(&xor_block_arm4regs); \
                xor_speed(&xor_block_8regs);    \
                xor_speed(&xor_block_32regs);   \
+               NEON_TEMPLATES;                 \
        } while (0)
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+extern struct xor_block_template const xor_block_neon_inner;
+
+static void
+xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+       if (in_interrupt()) {
+               xor_arm4regs_2(bytes, p1, p2);
+       } else {
+               kernel_neon_begin();
+               xor_block_neon_inner.do_2(bytes, p1, p2);
+               kernel_neon_end();
+       }
+}
+
+static void
+xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+               unsigned long *p3)
+{
+       if (in_interrupt()) {
+               xor_arm4regs_3(bytes, p1, p2, p3);
+       } else {
+               kernel_neon_begin();
+               xor_block_neon_inner.do_3(bytes, p1, p2, p3);
+               kernel_neon_end();
+       }
+}
+
+static void
+xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+               unsigned long *p3, unsigned long *p4)
+{
+       if (in_interrupt()) {
+               xor_arm4regs_4(bytes, p1, p2, p3, p4);
+       } else {
+               kernel_neon_begin();
+               xor_block_neon_inner.do_4(bytes, p1, p2, p3, p4);
+               kernel_neon_end();
+       }
+}
+
+static void
+xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+               unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+       if (in_interrupt()) {
+               xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);
+       } else {
+               kernel_neon_begin();
+               xor_block_neon_inner.do_5(bytes, p1, p2, p3, p4, p5);
+               kernel_neon_end();
+       }
+}
+
+static struct xor_block_template xor_block_neon = {
+       .name   = "neon",
+       .do_2   = xor_neon_2,
+       .do_3   = xor_neon_3,
+       .do_4   = xor_neon_4,
+       .do_5   = xor_neon_5
+};
+
+#define NEON_TEMPLATES \
+       do { if (cpu_has_neon()) xor_speed(&xor_block_neon); } while (0)
+#else
+#define NEON_TEMPLATES
+#endif
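
The template above is picked up by the generic xor code in crypto/xor.c,
which benchmarks the registered templates via xor_speed() and routes
xor_blocks() through the fastest one.  A sketch of a caller, assuming the
standard include/linux/raid/xor.h API; nothing here is NEON-specific:

#include <linux/raid/xor.h>

static void xor_into(void *dst, void *src, unsigned int bytes)
{
        void *srcs[1] = { src };

        xor_blocks(1, bytes, dst, srcs);        /* dst ^= src, 'bytes' long */
}
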
index 86d10dd47dc46b52328436fbfee0baf88cbc87fc..5140df5f23aa485214914a8dfbfdf31dc04a5691 100644 (file)
@@ -24,7 +24,7 @@ obj-$(CONFIG_ATAGS_PROC)      += atags_proc.o
 obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
 
 ifeq ($(CONFIG_CPU_V7M),y)
-obj-y          += entry-v7m.o
+obj-y          += entry-v7m.o v7m.o
 else
 obj-y          += entry-armv.o
 endif
index 9edc9692332d1a368a293f38e0a9543c2102a751..ec4164da6e3018737c42022bef16b3ed6bcea350 100644 (file)
@@ -7,9 +7,10 @@ static inline void save_atags(struct tag *tags) { }
 void convert_to_tag_list(struct tag *tags);
 
 #ifdef CONFIG_ATAGS
-struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr);
+const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
+       unsigned int machine_nr);
 #else
-static inline struct machine_desc *
+static inline const struct machine_desc *
 setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
 {
        early_print("no ATAGS support: can't continue\n");
index 14512e6931d87957b718445dd25b31283315a222..8c14de8180c0af46fea1531261a7a276177a91a8 100644 (file)
@@ -178,11 +178,11 @@ static void __init squash_mem_tags(struct tag *tag)
                        tag->hdr.tag = ATAG_NONE;
 }
 
-struct machine_desc * __init setup_machine_tags(phys_addr_t __atags_pointer,
-                                               unsigned int machine_nr)
+const struct machine_desc * __init
+setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
 {
        struct tag *tags = (struct tag *)&default_tags;
-       struct machine_desc *mdesc = NULL, *p;
+       const struct machine_desc *mdesc = NULL, *p;
        char *from = default_command_line;
 
        default_tags.mem.start = PHYS_OFFSET;
index 5859c8bc727c4254bc7e8fa254a4271d6b214242..eae1976f859dd6ffee3da4385163c4c5ddb21cc2 100644 (file)
@@ -176,10 +176,10 @@ void __init arm_dt_init_cpu_maps(void)
  * If a dtb was passed to the kernel in r2, then use it to choose the
  * correct machine_desc and to setup the system.
  */
-struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
+const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
 {
        struct boot_param_header *devtree;
-       struct machine_desc *mdesc, *mdesc_best = NULL;
+       const struct machine_desc *mdesc, *mdesc_best = NULL;
        unsigned int score, mdesc_score = ~1;
        unsigned long dt_root;
        const char *model;
@@ -188,7 +188,7 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
        DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
        MACHINE_END
 
-       mdesc_best = (struct machine_desc *)&__mach_desc_GENERIC_DT;
+       mdesc_best = &__mach_desc_GENERIC_DT;
 #endif
 
        if (!dt_phys)
index 94104bf69719f896e51836ca8a048077086fbb0a..74ad15d1a065fba97aac1d4ac43ff33a9e994470 100644 (file)
@@ -442,10 +442,10 @@ local_restart:
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
 
        add     r1, sp, #S_OFF
-2:     mov     why, #0                         @ no longer a real syscall
        cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
        eor     r0, scno, #__NR_SYSCALL_BASE    @ put OS number back
-       bcs     arm_syscall     
+       bcs     arm_syscall
+2:     mov     why, #0                         @ no longer a real syscall
        b       sys_ni_syscall                  @ not private func
 
 #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
index fc7920288a3d90a3f9c3ca38be03ff845f84515a..918875d96d5dc598985c7dce050e0a1785637b49 100644 (file)
@@ -89,7 +89,8 @@ void set_fiq_handler(void *start, unsigned int length)
 
        memcpy(base + offset, start, length);
        if (!cache_is_vipt_nonaliasing())
-               flush_icache_range(base + offset, offset + length);
+               flush_icache_range((unsigned long)base + offset, offset +
+                                  length);
        flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
 }
 
index d7c82df692436df0248fa1a00502cf74b7fca23c..57221e349a7ce0eec03445cd56de2845e5d23358 100644 (file)
@@ -82,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
        crash_save_cpu(&regs, smp_processor_id());
        flush_cache_all();
 
+       set_cpu_online(smp_processor_id(), false);
        atomic_dec(&waiting_for_crash_ipi);
        while (1)
                cpu_relax();
index 85c3fb6c93c2e234769b17e559655d100ad923ab..084dc8896986f321247fc590b9f348cb47872eff 100644 (file)
@@ -292,12 +292,20 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
                        maps[ARM_SEC_CORE].unw_sec = s;
                else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
                        maps[ARM_SEC_EXIT].unw_sec = s;
+               else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0)
+                       maps[ARM_SEC_UNLIKELY].unw_sec = s;
+               else if (strcmp(".ARM.exidx.text.hot", secname) == 0)
+                       maps[ARM_SEC_HOT].unw_sec = s;
                else if (strcmp(".init.text", secname) == 0)
                        maps[ARM_SEC_INIT].txt_sec = s;
                else if (strcmp(".text", secname) == 0)
                        maps[ARM_SEC_CORE].txt_sec = s;
                else if (strcmp(".exit.text", secname) == 0)
                        maps[ARM_SEC_EXIT].txt_sec = s;
+               else if (strcmp(".text.unlikely", secname) == 0)
+                       maps[ARM_SEC_UNLIKELY].txt_sec = s;
+               else if (strcmp(".text.hot", secname) == 0)
+                       maps[ARM_SEC_HOT].txt_sec = s;
        }
 
        for (i = 0; i < ARM_SEC_MAX; i++)
index aebe0e99c153eaa6889efc7b36459ebdfaeb65cc..8d6147b2001f82eca02c738265f9477dc2dbaefa 100644 (file)
@@ -118,7 +118,8 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
                        continue;
                }
 
-               err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu",
+               err = request_irq(irq, handler,
+                                 IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
                                  cpu_pmu);
                if (err) {
                        pr_err("unable to request IRQ%d for ARM PMU counters\n",
index afc2489ee13bc098523f92549baa4a69728c1ced..0e1e2b3afa45864b5776c177ceb5001402e31681 100644 (file)
@@ -72,10 +72,10 @@ static int __init fpe_setup(char *line)
 __setup("fpe=", fpe_setup);
 #endif
 
-extern void paging_init(struct machine_desc *desc);
+extern void paging_init(const struct machine_desc *desc);
 extern void sanity_check_meminfo(void);
 extern enum reboot_mode reboot_mode;
-extern void setup_dma_zone(struct machine_desc *desc);
+extern void setup_dma_zone(const struct machine_desc *desc);
 
 unsigned int processor_id;
 EXPORT_SYMBOL(processor_id);
@@ -139,7 +139,7 @@ EXPORT_SYMBOL(elf_platform);
 static const char *cpu_name;
 static const char *machine_name;
 static char __initdata cmd_line[COMMAND_LINE_SIZE];
-struct machine_desc *machine_desc __initdata;
+const struct machine_desc *machine_desc __initdata;
 
 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
 #define ENDIANNESS ((char)endian_test.l)
@@ -607,7 +607,7 @@ static void __init setup_processor(void)
 
 void __init dump_machine_table(void)
 {
-       struct machine_desc *p;
+       const struct machine_desc *p;
 
        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
@@ -694,7 +694,7 @@ static int __init early_mem(char *p)
 }
 early_param("mem", early_mem);
 
-static void __init request_standard_resources(struct machine_desc *mdesc)
+static void __init request_standard_resources(const struct machine_desc *mdesc)
 {
        struct memblock_region *region;
        struct resource *res;
@@ -852,7 +852,7 @@ void __init hyp_mode_check(void)
 
 void __init setup_arch(char **cmdline_p)
 {
-       struct machine_desc *mdesc;
+       const struct machine_desc *mdesc;
 
        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
@@ -994,15 +994,6 @@ static int c_show(struct seq_file *m, void *v)
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);
 
-#if defined(CONFIG_SMP)
-               seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
-                          per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
-                          (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
-#else
-               seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
-                          loops_per_jiffy / (500000/HZ),
-                          (loops_per_jiffy / (5000/HZ)) % 100);
-#endif
                /* dump out the processor features */
                seq_puts(m, "Features\t: ");
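
The BogoMIPS lines removed here were derived from loops_per_jiffy with the fixed-point formula lpj / (500000/HZ) for the integer part and (lpj / (5000/HZ)) % 100 for two fractional digits. A small user-space illustration of that arithmetic; the HZ and loops_per_jiffy values are made up:

    #include <stdio.h>

    #define HZ 100  /* assumed tick rate, for illustration only */

    int main(void)
    {
            unsigned long lpj = 4980736;    /* made-up loops_per_jiffy */

            printf("BogoMIPS: %lu.%02lu\n",
                   lpj / (500000UL / HZ),
                   (lpj / (5000UL / HZ)) % 100);
            /* prints "BogoMIPS: 996.14" for this lpj/HZ combination */
            return 0;
    }
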
 
index 2dc19349eb19fc23feafa1a2b93db17eb5e6394a..92d10e503746b91b095276de4d234f85d3b9b7fd 100644 (file)
@@ -398,17 +398,8 @@ asmlinkage void secondary_start_kernel(void)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-       int cpu;
-       unsigned long bogosum = 0;
-
-       for_each_online_cpu(cpu)
-               bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
-
-       printk(KERN_INFO "SMP: Total of %d processors activated "
-              "(%lu.%02lu BogoMIPS).\n",
-              num_online_cpus(),
-              bogosum / (500000/HZ),
-              (bogosum / (5000/HZ)) % 100);
+       printk(KERN_INFO "SMP: Total of %d processors activated.\n",
+              num_online_cpus());
 
        hyp_mode_check();
 }
index c2edfff573c2c9e2e68193cf729a98f460ac1a2d..83ccca303df83c4a1f40dce35f324153d979ad16 100644 (file)
@@ -104,7 +104,7 @@ void flush_tlb_all(void)
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);
        else
-               local_flush_tlb_all();
+               __flush_tlb_all();
        broadcast_tlb_a15_erratum();
 }
 
@@ -113,7 +113,7 @@ void flush_tlb_mm(struct mm_struct *mm)
        if (tlb_ops_need_broadcast())
                on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
        else
-               local_flush_tlb_mm(mm);
+               __flush_tlb_mm(mm);
        broadcast_tlb_mm_a15_erratum(mm);
 }
 
@@ -126,7 +126,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
                on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
                                        &ta, 1);
        } else
-               local_flush_tlb_page(vma, uaddr);
+               __flush_tlb_page(vma, uaddr);
        broadcast_tlb_mm_a15_erratum(vma->vm_mm);
 }
 
@@ -137,7 +137,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
                ta.ta_start = kaddr;
                on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
        } else
-               local_flush_tlb_kernel_page(kaddr);
+               __flush_tlb_kernel_page(kaddr);
        broadcast_tlb_a15_erratum();
 }
 
@@ -173,5 +173,5 @@ void flush_bp_all(void)
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_bp_all, NULL, 1);
        else
-               local_flush_bp_all();
+               __flush_bp_all();
 }
index ab517fcce21b8308dc957bc5b6809039c2e5f1fe..8fcda140358d94d6056ea200b79b50bb6e7bde81 100644 (file)
@@ -497,28 +497,64 @@ static int bad_syscall(int n, struct pt_regs *regs)
        return regs->ARM_r0;
 }
 
+static long do_cache_op_restart(struct restart_block *);
+
 static inline int
-do_cache_op(unsigned long start, unsigned long end, int flags)
+__do_cache_op(unsigned long start, unsigned long end)
 {
-       struct mm_struct *mm = current->active_mm;
-       struct vm_area_struct *vma;
+       int ret;
+       unsigned long chunk = PAGE_SIZE;
+
+       do {
+               if (signal_pending(current)) {
+                       struct thread_info *ti = current_thread_info();
+
+                       ti->restart_block = (struct restart_block) {
+                               .fn     = do_cache_op_restart,
+                       };
+
+                       ti->arm_restart_block = (struct arm_restart_block) {
+                               {
+                                       .cache = {
+                                               .start  = start,
+                                               .end    = end,
+                                       },
+                               },
+                       };
+
+                       return -ERESTART_RESTARTBLOCK;
+               }
+
+               ret = flush_cache_user_range(start, start + chunk);
+               if (ret)
+                       return ret;
 
+               cond_resched();
+               start += chunk;
+       } while (start < end);
+
+       return 0;
+}
+
+static long do_cache_op_restart(struct restart_block *unused)
+{
+       struct arm_restart_block *restart_block;
+
+       restart_block = &current_thread_info()->arm_restart_block;
+       return __do_cache_op(restart_block->cache.start,
+                            restart_block->cache.end);
+}
+
+static inline int
+do_cache_op(unsigned long start, unsigned long end, int flags)
+{
        if (end < start || flags)
                return -EINVAL;
 
-       down_read(&mm->mmap_sem);
-       vma = find_vma(mm, start);
-       if (vma && vma->vm_start < end) {
-               if (start < vma->vm_start)
-                       start = vma->vm_start;
-               if (end > vma->vm_end)
-                       end = vma->vm_end;
+       if (!access_ok(VERIFY_READ, start, end - start))
+               return -EFAULT;
 
-               up_read(&mm->mmap_sem);
-               return flush_cache_user_range(start, end);
-       }
-       up_read(&mm->mmap_sem);
-       return -EINVAL;
+       return __do_cache_op(start, end);
 }
 
 /*
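
The rework above flushes the requested range one PAGE_SIZE chunk at a time and returns -ERESTART_RESTARTBLOCK when a signal is pending, so a huge range no longer runs uninterruptibly to completion. From user space this path is reached through the ARM-private cacheflush syscall; a hedged example, assuming syscall number 0x0f0002 (__ARM_NR_cacheflush on ARM EABI) and using a dummy buffer as a stand-in for freshly generated code:

    #include <stdio.h>
    #include <unistd.h>

    #define ARM_NR_cacheflush 0x0f0002      /* __ARM_NR_BASE + 2; ARM only */

    int main(void)
    {
            static unsigned char code[4096]; /* pretend this holds JIT output */
            unsigned long start = (unsigned long)code;
            unsigned long end   = start + sizeof(code);

            /* flags must be 0; the kernel restarts the flush if a signal arrives */
            long ret = syscall(ARM_NR_cacheflush, start, end, 0);
            if (ret)
                    perror("cacheflush");
            return 0;
    }
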
diff --git a/arch/arm/kernel/v7m.c b/arch/arm/kernel/v7m.c
new file mode 100644 (file)
index 0000000..4d2cba9
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2013 Uwe Kleine-Koenig for Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <linux/io.h>
+#include <linux/reboot.h>
+#include <asm/barrier.h>
+#include <asm/v7m.h>
+
+void armv7m_restart(enum reboot_mode mode, const char *cmd)
+{
+       dsb();
+       __raw_writel(V7M_SCB_AIRCR_VECTKEY | V7M_SCB_AIRCR_SYSRESETREQ,
+                       BASEADDR_V7M_SCB + V7M_SCB_AIRCR);
+       dsb();
+}
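
armv7m_restart() requests a system reset through the SCB Application Interrupt and Reset Control Register. A bare-metal sketch of the same sequence, assuming the architectural ARMv7-M values (SCB AIRCR at 0xE000ED0C, write key 0x05FA in the top half, SYSRESETREQ in bit 2); this is an illustration, not the kernel function:

    #include <stdint.h>

    #define SCB_AIRCR          ((volatile uint32_t *)0xE000ED0CUL)
    #define AIRCR_VECTKEY      (0x05FAUL << 16)   /* required write key */
    #define AIRCR_SYSRESETREQ  (1UL << 2)

    static void system_reset(void)
    {
            __asm__ volatile ("dsb" ::: "memory"); /* drain outstanding writes */
            *SCB_AIRCR = AIRCR_VECTKEY | AIRCR_SYSRESETREQ;
            __asm__ volatile ("dsb" ::: "memory");
            for (;;)
                    ;                              /* wait for the reset to happen */
    }
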
index 4a5199070430672728c91dfe610047e83b78a964..db9cf692d4dded3e2a6cc7e5622ba90ee5bef2e8 100644 (file)
@@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
 #define access_pmintenclr pm_fake
 
 /* Architected CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ *            registers preceding 32-bit ones.
  */
 static const struct coproc_reg cp15_regs[] = {
        /* CSSELR: swapped by interrupt.S. */
@@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = {
                        NULL, reset_unknown, c0_CSSELR },
 
        /* TTBR0/TTBR1: swapped by interrupt.S. */
-       { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
-       { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+       { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+       { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
 
        /* TTBCR: swapped by interrupt.S. */
        { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
@@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = {
                        NULL, reset_unknown, c6_IFAR },
 
        /* PAR swapped by interrupt.S */
-       { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
+       { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
 
        /*
         * DC{C,I,CI}SW operations:
@@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params)
                              | KVM_REG_ARM_OPC1_MASK))
                        return false;
                params->is_64bit = true;
-               params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+               /* CRm to CRn: see cp15_to_index for details */
+               params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
                               >> KVM_REG_ARM_CRM_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
                               >> KVM_REG_ARM_OPC1_SHIFT);
                params->Op2 = 0;
-               params->CRn = 0;
+               params->CRm = 0;
                return true;
        default:
                return false;
@@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg)
        if (reg->is_64) {
                val |= KVM_REG_SIZE_U64;
                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
-               val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+               /*
+                * CRn always denotes the primary coproc. reg. nr. for the
+                * in-kernel representation, but the user space API uses the
+                * CRm for the encoding, because it is modelled after the
+                * MRRC/MCRR instructions: see the ARM ARM rev. c page
+                * B3-1445
+                */
+               val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
        } else {
                val |= KVM_REG_SIZE_U32;
                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
index b7301d3e479921f4d8983a172c88ec6edababd81..0461d5c8d3de4f99c3ecfef669340ec6fa8e0411 100644 (file)
@@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
                return -1;
        if (i1->CRn != i2->CRn)
                return i1->CRn - i2->CRn;
+       if (i1->is_64 != i2->is_64)
+               return i2->is_64 - i1->is_64;
        if (i1->CRm != i2->CRm)
                return i1->CRm - i2->CRm;
        if (i1->Op1 != i2->Op1)
@@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 
 #define CRn(_x)                .CRn = _x
 #define CRm(_x)        .CRm = _x
+#define CRm64(_x)       .CRn = _x, .CRm = 0
 #define Op1(_x)        .Op1 = _x
 #define Op2(_x)        .Op2 = _x
 #define is64           .is_64 = true
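
With CRm64() storing the primary register number in CRn and zeroing CRm, the extra is_64 comparison above makes a 64-bit entry sort before any 32-bit entry that shares its CRn. A stand-alone sketch of that ordering; the struct and sample values are illustrative, not the kernel's coproc_reg table:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    struct reg { int CRn, CRm, Op1, Op2; bool is_64; };

    static int cmp_reg(const void *a, const void *b)
    {
            const struct reg *i1 = a, *i2 = b;

            if (i1->CRn != i2->CRn)
                    return i1->CRn - i2->CRn;
            if (i1->is_64 != i2->is_64)
                    return i2->is_64 - i1->is_64;   /* 64-bit entries first */
            if (i1->CRm != i2->CRm)
                    return i1->CRm - i2->CRm;
            if (i1->Op1 != i2->Op1)
                    return i1->Op1 - i2->Op1;
            return i1->Op2 - i2->Op2;
    }

    int main(void)
    {
            struct reg regs[] = {
                    { 2, 0, 0, 2, false },  /* 32-bit entry, CRn == 2 */
                    { 2, 0, 0, 0, true  },  /* 64-bit entry, CRn == 2 */
            };

            qsort(regs, 2, sizeof(regs[0]), cmp_reg);
            printf("first entry is_64=%d\n", regs[0].is_64);  /* prints 1 */
            return 0;
    }
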
index 685063a6d0cf655296aaec9713d08f19b53fa260..cf93472b9dd60daf3da620cf3a44a9ff65a6eac6 100644 (file)
@@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 
 /*
  * A15-specific CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ *            registers preceding 32-bit ones.
  */
 static const struct coproc_reg a15_regs[] = {
        /* MPIDR: we use VMPIDR for guest access. */
index f048338135f7a5b20bf52371d03a3af0b922b68f..1b9844d369cc08e6e1a88fc60b6f3fd6db0b6965 100644 (file)
@@ -142,7 +142,7 @@ target:     @ We're now in the trampoline code, switch page tables
 
        @ Invalidate the old TLBs
        mcr     p15, 4, r0, c8, c7, 0   @ TLBIALLH
-       dsb
+       dsb     ish
 
        eret
 
index 16cd4ba5d7fd6d0ff0781da759712167cbc17508..f85052facffc528fb6a1dee9049844316688afd4 100644 (file)
@@ -55,7 +55,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
        mcrr    p15, 6, r2, r3, c2      @ Write VTTBR
        isb
        mcr     p15, 0, r0, c8, c3, 0   @ TLBIALLIS (rt ignored)
-       dsb
+       dsb     ish
        isb
        mov     r2, #0
        mov     r3, #0
@@ -79,7 +79,7 @@ ENTRY(__kvm_flush_vm_context)
        mcr     p15, 4, r0, c8, c3, 4
        /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
        mcr     p15, 0, r0, c7, c1, 0
-       dsb
+       dsb     ish
        isb                             @ Not necessary if followed by eret
 
        bx      lr
index b8e06b7a28331ede0a01ce6aefcb60d7be343f6e..0c25d9487d5382d2a19a1b3399398244f3718866 100644 (file)
@@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                      struct kvm_exit_mmio *mmio)
 {
-       unsigned long rt, len;
+       unsigned long rt;
+       int len;
        bool is_write, sign_extend;
 
        if (kvm_vcpu_dabt_isextabt(vcpu)) {
index ca6bea4859b48c35e9c34d970fb02580840e23b4..b0de86b56c13006a189efba24967b81d162a56d6 100644 (file)
@@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
        return p;
 }
 
+static bool page_empty(void *ptr)
+{
+       struct page *ptr_page = virt_to_page(ptr);
+       return page_count(ptr_page) == 1;
+}
+
 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
        pmd_t *pmd_table = pmd_offset(pud, 0);
@@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
        put_page(virt_to_page(pmd));
 }
 
-static bool pmd_empty(pmd_t *pmd)
-{
-       struct page *pmd_page = virt_to_page(pmd);
-       return page_count(pmd_page) == 1;
-}
-
 static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
 {
        if (pte_present(*pte)) {
@@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
        }
 }
 
-static bool pte_empty(pte_t *pte)
-{
-       struct page *pte_page = virt_to_page(pte);
-       return page_count(pte_page) == 1;
-}
-
 static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
                        unsigned long long start, u64 size)
 {
@@ -132,37 +126,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
        pmd_t *pmd;
        pte_t *pte;
        unsigned long long addr = start, end = start + size;
-       u64 range;
+       u64 next;
 
        while (addr < end) {
                pgd = pgdp + pgd_index(addr);
                pud = pud_offset(pgd, addr);
                if (pud_none(*pud)) {
-                       addr += PUD_SIZE;
+                       addr = pud_addr_end(addr, end);
                        continue;
                }
 
                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
-                       addr += PMD_SIZE;
+                       addr = pmd_addr_end(addr, end);
                        continue;
                }
 
                pte = pte_offset_kernel(pmd, addr);
                clear_pte_entry(kvm, pte, addr);
-               range = PAGE_SIZE;
+               next = addr + PAGE_SIZE;
 
                /* If we emptied the pte, walk back up the ladder */
-               if (pte_empty(pte)) {
+               if (page_empty(pte)) {
                        clear_pmd_entry(kvm, pmd, addr);
-                       range = PMD_SIZE;
-                       if (pmd_empty(pmd)) {
+                       next = pmd_addr_end(addr, end);
+                       if (page_empty(pmd) && !page_empty(pud)) {
                                clear_pud_entry(kvm, pud, addr);
-                               range = PUD_SIZE;
+                               next = pud_addr_end(addr, end);
                        }
                }
 
-               addr += range;
+               addr = next;
        }
 }
 
@@ -495,7 +489,6 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 
        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
                pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
-               kvm_set_s2pte_writable(&pte);
 
                ret = mmu_topup_memory_cache(&cache, 2, 2);
                if (ret)
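
The unmap_range() change above advances with pud_addr_end()/pmd_addr_end() instead of fixed PUD_SIZE/PMD_SIZE steps, so the walk never runs past 'end' or skips entries when the range is not table-aligned. A user-space sketch of what such a helper does (the size is illustrative and the function mirrors, but is not, the kernel's generic macro):

    #include <stdio.h>

    #define PMD_SIZE (2UL << 20)            /* 2 MiB, illustrative */
    #define PMD_MASK (~(PMD_SIZE - 1))

    /* next PMD boundary after addr, clamped to end */
    static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
    {
            unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

            return (boundary - 1 < end - 1) ? boundary : end;
    }

    int main(void)
    {
            unsigned long addr = 0x100000, end = 0x480000;

            while (addr < end) {
                    unsigned long next = pmd_addr_end(addr, end);
                    printf("chunk [%#lx, %#lx)\n", addr, next);
                    addr = next;
            }
            return 0;
    }
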
index af72969820b4951448c9d95135383ae9d8387cde..aaf3a87311360d5c9db1531db9c3436af26237cd 100644 (file)
@@ -45,3 +45,9 @@ lib-$(CONFIG_ARCH_SHARK)      += io-shark.o
 
 $(obj)/csumpartialcopy.o:      $(obj)/csumpartialcopygeneric.S
 $(obj)/csumpartialcopyuser.o:  $(obj)/csumpartialcopygeneric.S
+
+ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
+  NEON_FLAGS                   := -mfloat-abi=softfp -mfpu=neon
+  CFLAGS_xor-neon.o            += $(NEON_FLAGS)
+  lib-$(CONFIG_XOR_BLOCKS)     += xor-neon.o
+endif
diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
new file mode 100644 (file)
index 0000000..f485e5a
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * linux/arch/arm/lib/xor-neon.c
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/raid/xor.h>
+
+#ifndef __ARM_NEON__
+#error You should compile this file with '-mfloat-abi=softfp -mfpu=neon'
+#endif
+
+/*
+ * Pull in the reference implementations while instructing GCC (through
+ * -ftree-vectorize) to attempt to exploit implicit parallelism and emit
+ * NEON instructions.
+ */
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC optimize "tree-vectorize"
+#else
+/*
+ * While older versions of GCC do not generate incorrect code, they fail to
+ * recognize the parallel nature of these functions, and emit plain ARM code,
+ * which is known to be slower than the optimized ARM code in asm-arm/xor.h.
+ */
+#warning This code requires at least version 4.6 of GCC
+#endif
+
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#include <asm-generic/xor.h>
+
+struct xor_block_template const xor_block_neon_inner = {
+       .name   = "__inner_neon__",
+       .do_2   = xor_8regs_2,
+       .do_3   = xor_8regs_3,
+       .do_4   = xor_8regs_4,
+       .do_5   = xor_8regs_5,
+};
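
The pragma above feeds the plain C xor_8regs_* helpers from <asm-generic/xor.h> through GCC's tree vectorizer, so that together with the NEON_FLAGS added to the Makefile they come out as NEON code. A stand-alone, user-space analogue of the same idea; the function and buffer names are made up and the build flags below assume an ARM toolchain:

    /* Build with: gcc -O2 -ftree-vectorize -mfpu=neon -mfloat-abi=softfp xor_demo.c
     * (on non-ARM targets plain -O2 -ftree-vectorize is enough to see the effect)
     */
    #include <stdio.h>

    #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
    #pragma GCC optimize "tree-vectorize"
    #endif

    static void xor_blocks(unsigned long bytes, unsigned long *p1,
                           const unsigned long *p2)
    {
            long lines = bytes / sizeof(unsigned long);

            /* independent element-wise work the vectorizer can pick up */
            for (long i = 0; i < lines; i++)
                    p1[i] ^= p2[i];
    }

    int main(void)
    {
            unsigned long a[1024], b[1024];

            for (int i = 0; i < 1024; i++) {
                    a[i] = i;
                    b[i] = ~0UL;
            }
            xor_blocks(sizeof(a), a, b);
            printf("a[1] = %#lx\n", a[1]);
            return 0;
    }
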
index 2abee6626aace2cff322f2c22a3bab79786d5473..916e5a1429171bd39835da54b02fa444b1941905 100644 (file)
@@ -227,6 +227,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
        CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk),
        CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk),
        CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk),
+       CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk),
+       CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk),
        CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk),
        CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk),
        CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk),
index dff4ddc5ef81312590cd3a2cdb1ad4b40e3741ab..139e42da25f061baa0128c7615723da54e068592 100644 (file)
@@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
        .parts                  = davinci_nand_partitions,
        .nr_parts               = ARRAY_SIZE(davinci_nand_partitions),
        .ecc_mode               = NAND_ECC_HW_SYNDROME,
+       .ecc_bits               = 4,
        .bbt_options            = NAND_BBT_USE_FLASH,
 };
 
index a33686a6fbb226f9b880c2268a87beeb6b6f98e9..fa4bfaf952d886abcc94fd20bbb46285bd4cada6 100644 (file)
@@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
        .parts          = davinci_evm_nandflash_partition,
        .nr_parts       = ARRAY_SIZE(davinci_evm_nandflash_partition),
        .ecc_mode       = NAND_ECC_HW,
+       .ecc_bits       = 1,
        .bbt_options    = NAND_BBT_USE_FLASH,
        .timing         = &davinci_evm_nandflash_timing,
 };
index fbb8e5ab1dc19bbd56e3508a5505929bb6c71406..0c005e876cac6fbd226c1700cfe47818b80dc6ee 100644 (file)
@@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
        .parts                  = davinci_nand_partitions,
        .nr_parts               = ARRAY_SIZE(davinci_nand_partitions),
        .ecc_mode               = NAND_ECC_HW,
+       .ecc_bits               = 1,
        .options                = 0,
 };
 
index 2bc112adf565495aed9505bfc23a401e53341d52..808233b60e3d0047e257227d50d49955c816228d 100644 (file)
@@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
        .parts          = davinci_ntosd2_nandflash_partition,
        .nr_parts       = ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
        .ecc_mode       = NAND_ECC_HW,
+       .ecc_bits       = 1,
        .bbt_options    = NAND_BBT_USE_FLASH,
 };
 
index f6eeb87e4e955e425903475b733328656ffcaeda..827d15009a86c980a9577ad8951c2a94e0cbd17d 100644 (file)
@@ -122,11 +122,7 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data tusb_data = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
        .mode           = MUSB_OTG,
-#else
-       .mode           = MUSB_HOST,
-#endif
        .set_power      = tusb_set_power,
        .min_power      = 25,   /* x2 = 50 mA drawn from VBUS as peripheral */
        .power          = 100,  /* Max 100 mA VBUS for host mode */
index d2ea68ea678af901715aa609b4c5f41175641ddf..7735105561d87dd218c436b357ade5211e5a6d2e 100644 (file)
@@ -85,7 +85,7 @@ static struct omap_board_mux board_mux[] __initdata = {
 
 static struct omap_musb_board_data musb_board_data = {
        .interface_type         = MUSB_INTERFACE_ULPI,
-       .mode                   = MUSB_PERIPHERAL,
+       .mode                   = MUSB_OTG,
        .power                  = 0,
 };
 
index 8c4de2708cf28e6bf5f0011392c5fb15bbcb60fe..bc897231bd1098714ca602e7763442671e9f2fe5 100644 (file)
@@ -38,11 +38,8 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
        .mode           = MUSB_OTG,
-#else
-       .mode           = MUSB_HOST,
-#endif
+
        /* .clock is set dynamically */
        .config         = &musb_config,
 
index db5c2cab8fda4251bb636aa5567afafe5cfdb406..cd2c88e7a8f7557bfe299a7a6b364c395a969428 100644 (file)
@@ -809,15 +809,18 @@ config KUSER_HELPERS
          the CPU type fitted to the system.  This permits binaries to be
          run on ARMv4 through to ARMv7 without modification.
 
+         See Documentation/arm/kernel_user_helpers.txt for details.
+
          However, the fixed address nature of these helpers can be used
          by ROP (return orientated programming) authors when creating
          exploits.
 
          If all of the binaries and libraries which run on your platform
          are built specifically for your platform, and make no use of
-         these helpers, then you can turn this option off.  However,
-         when such an binary or library is run, it will receive a SIGILL
-         signal, which will terminate the program.
+         these helpers, then you can turn this option off to hinder
+         such exploits. However, in that case, if a binary or library
+         relying on those helpers is run, it will receive a SIGILL signal,
+         which will terminate the program.
 
          Say N here only if you are absolutely certain that you do not
          need these helpers; otherwise, the safe option is to say Y.
index d70e0aba0c9d9b4a0d10a78c627e159b1bf90d9b..447da6ffadd5333e10d2ce05bf7ca334cf25bc1a 100644 (file)
@@ -290,7 +290,7 @@ static void l2x0_disable(void)
        raw_spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
        writel_relaxed(0, l2x0_base + L2X0_CTRL);
-       dsb();
+       dsb(st);
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
@@ -417,9 +417,9 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
                outer_cache.disable = l2x0_disable;
        }
 
-       printk(KERN_INFO "%s cache controller enabled\n", type);
-       printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
-                       ways, cache_id, aux, l2x0_size);
+       pr_info("%s cache controller enabled\n", type);
+       pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
+               ways, cache_id, aux, l2x0_size >> 10);
 }
 
 #ifdef CONFIG_OF
@@ -929,7 +929,9 @@ static const struct of_device_id l2x0_ids[] __initconst = {
          .data = (void *)&aurora_no_outer_data},
        { .compatible = "marvell,aurora-outer-cache",
          .data = (void *)&aurora_with_outer_data},
-       { .compatible = "bcm,bcm11351-a2-pl310-cache",
+       { .compatible = "brcm,bcm11351-a2-pl310-cache",
+         .data = (void *)&bcm_l2x0_data},
+       { .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
          .data = (void *)&bcm_l2x0_data},
        {}
 };
index 515b00064da8f66db5400c3990905f7ad89e2113..b5c467a65c271a8c8538defc2880b0689067d046 100644 (file)
@@ -282,7 +282,7 @@ ENTRY(v7_coherent_user_range)
        add     r12, r12, r2
        cmp     r12, r1
        blo     1b
-       dsb
+       dsb     ishst
        icache_line_size r2, r3
        sub     r3, r2, #1
        bic     r12, r0, r3
@@ -294,7 +294,7 @@ ENTRY(v7_coherent_user_range)
        mov     r0, #0
        ALT_SMP(mcr     p15, 0, r0, c7, c1, 6)  @ invalidate BTB Inner Shareable
        ALT_UP(mcr      p15, 0, r0, c7, c5, 6)  @ invalidate BTB
-       dsb
+       dsb     ishst
        isb
        mov     pc, lr
 
index 4a0544492f10e4cd63d490f61d3cff3e54adbd19..84e6f772e204597032cc0fcd4101f36ce588728f 100644 (file)
@@ -162,10 +162,7 @@ static void flush_context(unsigned int cpu)
        }
 
        /* Queue a TLB invalidate and flush the I-cache if necessary. */
-       if (!tlb_ops_need_broadcast())
-               cpumask_set_cpu(cpu, &tlb_flush_pending);
-       else
-               cpumask_setall(&tlb_flush_pending);
+       cpumask_setall(&tlb_flush_pending);
 
        if (icache_is_vivt_asid_tagged())
                __flush_icache_all();
@@ -245,8 +242,6 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
                local_flush_bp_all();
                local_flush_tlb_all();
-               if (erratum_a15_798181())
-                       dummy_flush_tlb_a15_erratum();
        }
 
        atomic64_set(&per_cpu(active_asids, cpu), asid);
index 7f9b1798c6cf12f08e55496b27082722e16a30f3..0c17f69a8286e6f9d4646028b9f4b615ad018e08 100644 (file)
@@ -455,7 +455,6 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
        unsigned end = start + size;
 
        apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
-       dsb();
        flush_tlb_kernel_range(start, end);
 }
 
index 3d1e4a205b0b9325f0b8bc7bc231e735f06871e7..66781bf34077cb540a67f845eeeb919c2decfc4e 100644 (file)
  * of type casting from pmd_t * to pte_t *.
  */
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd = NULL;
-
-       pgd = pgd_offset(mm, addr);
-       if (pgd_present(*pgd)) {
-               pud = pud_offset(pgd, addr);
-               if (pud_present(*pud))
-                       pmd = pmd_offset(pud, addr);
-       }
-
-       return (pte_t *)pmd;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
 {
@@ -68,33 +52,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
        return 0;
 }
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
-                       unsigned long addr, unsigned long sz)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pte_t *pte = NULL;
-
-       pgd = pgd_offset(mm, addr);
-       pud = pud_alloc(mm, pgd, addr);
-       if (pud)
-               pte = (pte_t *)pmd_alloc(mm, pud, addr);
-
-       return pte;
-}
-
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-               pmd_t *pmd, int write)
-{
-       struct page *page;
-
-       page = pte_page(*(pte_t *)pmd);
-       if (page)
-               page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
-       return page;
-}
-
 int pmd_huge(pmd_t pmd)
 {
        return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
index 15225d829d7173b6169076ad063a388ceefe1248..2958e74fc42c8f45444fca0a48f5f38856f32a73 100644 (file)
@@ -231,7 +231,7 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 }
 #endif
 
-void __init setup_dma_zone(struct machine_desc *mdesc)
+void __init setup_dma_zone(const struct machine_desc *mdesc)
 {
 #ifdef CONFIG_ZONE_DMA
        if (mdesc->dma_zone_size) {
@@ -335,7 +335,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
        return phys;
 }
 
-void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
+void __init arm_memblock_init(struct meminfo *mi,
+       const struct machine_desc *mdesc)
 {
        int i;
 
index 53cdbd39ec8e23c1facab667f56fa9441aa870c6..b1d17eeb59b895cd429e762d082d6c5c56c3ff57 100644 (file)
@@ -1186,7 +1186,7 @@ void __init arm_mm_memblock_reserve(void)
  * called function.  This means you can't use any function or debugging
  * method which may touch any device, otherwise the kernel _will_ crash.
  */
-static void __init devicemaps_init(struct machine_desc *mdesc)
+static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
        struct map_desc map;
        unsigned long addr;
@@ -1319,7 +1319,7 @@ static void __init map_lowmem(void)
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
  */
-void __init paging_init(struct machine_desc *mdesc)
+void __init paging_init(const struct machine_desc *mdesc)
 {
        void *zero_page;
 
index 1fa50100ab6af1f57067af8ff06c4a05d1fcb3ff..34d4ab217babb39b563e493cb4d4643963d33a64 100644 (file)
@@ -299,7 +299,7 @@ void __init sanity_check_meminfo(void)
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
  */
-void __init paging_init(struct machine_desc *mdesc)
+void __init paging_init(const struct machine_desc *mdesc)
 {
        early_trap_init((void *)CONFIG_VECTORS_BASE);
        mpu_setup();
index d5146b98c8d12bd59037017244bc0bf5ab9c4099..db79b62c92fb1c26ef777b54c4c8f49fe94047a6 100644 (file)
@@ -514,6 +514,32 @@ ENTRY(cpu_feroceon_set_pte_ext)
 #endif
        mov     pc, lr
 
+/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
+.globl cpu_feroceon_suspend_size
+.equ   cpu_feroceon_suspend_size, 4 * 3
+#ifdef CONFIG_ARM_CPU_SUSPEND
+ENTRY(cpu_feroceon_do_suspend)
+       stmfd   sp!, {r4 - r6, lr}
+       mrc     p15, 0, r4, c13, c0, 0  @ PID
+       mrc     p15, 0, r5, c3, c0, 0   @ Domain ID
+       mrc     p15, 0, r6, c1, c0, 0   @ Control register
+       stmia   r0, {r4 - r6}
+       ldmfd   sp!, {r4 - r6, pc}
+ENDPROC(cpu_feroceon_do_suspend)
+
+ENTRY(cpu_feroceon_do_resume)
+       mov     ip, #0
+       mcr     p15, 0, ip, c8, c7, 0   @ invalidate I+D TLBs
+       mcr     p15, 0, ip, c7, c7, 0   @ invalidate I+D caches
+       ldmia   r0, {r4 - r6}
+       mcr     p15, 0, r4, c13, c0, 0  @ PID
+       mcr     p15, 0, r5, c3, c0, 0   @ Domain ID
+       mcr     p15, 0, r1, c2, c0, 0   @ TTB address
+       mov     r0, r6                  @ control register
+       b       cpu_resume_mmu
+ENDPROC(cpu_feroceon_do_resume)
+#endif
+
        .type   __feroceon_setup, #function
 __feroceon_setup:
        mov     r0, #0
index 73398bcf9bd8ea8d7293803828cb837306742fb7..c63d9bdee51e65cab7bbad13632207702db41273 100644 (file)
@@ -83,7 +83,7 @@ ENTRY(cpu_v7_dcache_clean_area)
        add     r0, r0, r2
        subs    r1, r1, r2
        bhi     2b
-       dsb
+       dsb     ishst
        mov     pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)
 
@@ -330,7 +330,19 @@ __v7_setup:
 1:
 #endif
 
-3:     mov     r10, #0
+       /* Cortex-A15 Errata */
+3:     ldr     r10, =0x00000c0f                @ Cortex-A15 primary part number
+       teq     r0, r10
+       bne     4f
+
+#ifdef CONFIG_ARM_ERRATA_773022
+       cmp     r6, #0x4                        @ only present up to r0p4
+       mrcle   p15, 0, r10, c1, c0, 1          @ read aux control register
+       orrle   r10, r10, #1 << 1               @ disable loop buffer
+       mcrle   p15, 0, r10, c1, c0, 1          @ write aux control register
+#endif
+
+4:     mov     r10, #0
        mcr     p15, 0, r10, c7, c5, 0          @ I+BTB cache invalidate
        dsb
 #ifdef CONFIG_MMU
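
The new erratum 773022 block only fires when the MIDR part number identifies a Cortex-A15 (0xc0f) and the combined variant/revision is r0p4 or earlier, in which case bit 1 of the auxiliary control register is set to disable the loop buffer. A small C sketch of that applicability check, assuming the usual MIDR layout (part number in bits [15:4], variant in [23:20], revision in [3:0]); the sample MIDR values are made up:

    #include <stdio.h>
    #include <stdint.h>

    static int erratum_773022_applies(uint32_t midr)
    {
            uint32_t part     = (midr >> 4) & 0xfff;
            uint32_t variant  = (midr >> 20) & 0xf;
            uint32_t revision = midr & 0xf;
            uint32_t var_rev  = (variant << 4) | revision;  /* rXpY -> 0xXY */

            return part == 0xc0f && var_rev <= 0x04;        /* Cortex-A15 <= r0p4 */
    }

    int main(void)
    {
            uint32_t midr_r0p3 = 0x410fc0f3;  /* illustrative A15 r0p3 */
            uint32_t midr_r2p1 = 0x412fc0f1;  /* illustrative A15 r2p1 */

            printf("%d %d\n", erratum_773022_applies(midr_r0p3),
                              erratum_773022_applies(midr_r2p1)); /* prints "1 0" */
            return 0;
    }
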
index ea94765acf9a3650f5cb850f6990893f3ccb459b..355308767bae69bf29044d4c85ef30ec3a03b56b 100644 (file)
@@ -35,7 +35,7 @@
 ENTRY(v7wbi_flush_user_tlb_range)
        vma_vm_mm r3, r2                        @ get vma->vm_mm
        mmid    r3, r3                          @ get vm_mm->context.id
-       dsb
+       dsb     ish
        mov     r0, r0, lsr #PAGE_SHIFT         @ align address
        mov     r1, r1, lsr #PAGE_SHIFT
        asid    r3, r3                          @ mask ASID
@@ -56,7 +56,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
-       dsb
+       dsb     ish
        mov     pc, lr
 ENDPROC(v7wbi_flush_user_tlb_range)
 
@@ -69,7 +69,7 @@ ENDPROC(v7wbi_flush_user_tlb_range)
  *     - end   - end address (exclusive, may not be aligned)
  */
 ENTRY(v7wbi_flush_kern_tlb_range)
-       dsb
+       dsb     ish
        mov     r0, r0, lsr #PAGE_SHIFT         @ align address
        mov     r1, r1, lsr #PAGE_SHIFT
        mov     r0, r0, lsl #PAGE_SHIFT
@@ -84,7 +84,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
-       dsb
+       dsb     ish
        isb
        mov     pc, lr
 ENDPROC(v7wbi_flush_kern_tlb_range)
index 3e5c4619caa5ef26cc9fdea0940856c09f639390..50a3ea0037db10d2032e2ce020688b6fa74614b0 100644 (file)
@@ -55,12 +55,13 @@ void __init s3c_init_cpu(unsigned long idcode,
 
        printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode);
 
-       if (cpu->map_io == NULL || cpu->init == NULL) {
+       if (cpu->init == NULL) {
                printk(KERN_ERR "CPU %s support not enabled\n", cpu->name);
                panic("Unsupported Samsung CPU");
        }
 
-       cpu->map_io();
+       if (cpu->map_io)
+               cpu->map_io();
 }
 
 /* s3c24xx_init_clocks
index 8d10dc8a1e17b34776a366f4b71fcca68c21bf8a..3e5d3115a2a6847ee41fc3d6d23ffde8fa0a7917 100644 (file)
 ENTRY(vfp_support_entry)
        DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
 
+       ldr     r3, [sp, #S_PSR]        @ Neither lazy restore nor FP exceptions
+       and     r3, r3, #MODE_MASK      @ are supported in kernel mode
+       teq     r3, #USR_MODE
+       bne     vfp_kmode_exception     @ Returns through lr
+
        VFPFMRX r1, FPEXC               @ Is the VFP enabled?
        DBGSTR1 "fpexc %08x", r1
        tst     r1, #FPEXC_EN
index 5dfbb0b8e7f4484ddeb97974080a51bfaec0dc3e..52b8f40b1c73d48d206d497a6fadc381b5ddd888 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/user.h>
+#include <linux/export.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
@@ -648,6 +649,72 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
        return NOTIFY_OK;
 }
 
+void vfp_kmode_exception(void)
+{
+       /*
+        * If we reach this point, a floating point exception has been raised
+        * while running in kernel mode. If the NEON/VFP unit was enabled at the
+        * time, it means a VFP instruction has been issued that requires
+        * software assistance to complete, something which is not currently
+        * supported in kernel mode.
+        * If the NEON/VFP unit was disabled, and the location pointed to below
+        * is properly preceded by a call to kernel_neon_begin(), something has
+        * caused the task to be scheduled out and back in again. In this case,
+        * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
+        * be helpful in localizing the problem.
+        */
+       if (fmrx(FPEXC) & FPEXC_EN)
+               pr_crit("BUG: unsupported FP instruction in kernel mode\n");
+       else
+               pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
+}
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin(void)
+{
+       struct thread_info *thread = current_thread_info();
+       unsigned int cpu;
+       u32 fpexc;
+
+       /*
+        * Kernel mode NEON is only allowed outside of interrupt context
+        * with preemption disabled. This will make sure that the kernel
+        * mode NEON register contents never need to be preserved.
+        */
+       BUG_ON(in_interrupt());
+       cpu = get_cpu();
+
+       fpexc = fmrx(FPEXC) | FPEXC_EN;
+       fmxr(FPEXC, fpexc);
+
+       /*
+        * Save the userland NEON/VFP state. Under UP,
+        * the owner could be a task other than 'current'
+        */
+       if (vfp_state_in_hw(cpu, thread))
+               vfp_save_state(&thread->vfpstate, fpexc);
+#ifndef CONFIG_SMP
+       else if (vfp_current_hw_state[cpu] != NULL)
+               vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+#endif
+       vfp_current_hw_state[cpu] = NULL;
+}
+EXPORT_SYMBOL(kernel_neon_begin);
+
+void kernel_neon_end(void)
+{
+       /* Disable the NEON/VFP unit. */
+       fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+       put_cpu();
+}
+EXPORT_SYMBOL(kernel_neon_end);
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
+
 /*
  * VFP support code initialisation.
  */
@@ -731,4 +798,4 @@ static int __init vfp_init(void)
        return 0;
 }
 
-late_initcall(vfp_init);
+core_initcall(vfp_init);
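
The hunks above add the kernel-mode NEON entry points: kernel_neon_begin() saves any live user NEON/VFP state and disables preemption, kernel_neon_end() switches the unit back off, and neither interrupt context nor sleeping is allowed in between. A hedged caller sketch using the xor_block_neon_inner template introduced earlier in this merge; the in-tree glue (asm/xor.h is also touched here) presumably pairs them in a similar way, but this fragment is only an illustration:

    #include <asm/neon.h>
    #include <linux/raid/xor.h>

    extern struct xor_block_template const xor_block_neon_inner;

    /* XOR two equally sized buffers with the NEON reference routines. */
    static void xor_two(unsigned long bytes, unsigned long *p1, unsigned long *p2)
    {
            kernel_neon_begin();    /* not from IRQ context; preemption now off */
            xor_block_neon_inner.do_2(bytes, p1, p2);
            kernel_neon_end();      /* NEON/VFP unit disabled again */
    }
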
index c9770ba5c7df5c3b68c909c32db7fa2fb7be39f1..8a6295c86209cd982076a8f79662bd20c2c0f02b 100644 (file)
@@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused)
        per_cpu(xen_vcpu, cpu) = vcpup;
 
        enable_percpu_irq(xen_events_irq, 0);
+       put_cpu();
 }
 
 static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
index c92de4163eba519802dfaa4b63450259bb6d0395..b25763bc0ec4977a4eca6139ef7d84b3a15eeea0 100644 (file)
 #define        TPIDR_EL1       18      /* Thread ID, Privileged */
 #define        AMAIR_EL1       19      /* Aux Memory Attribute Indirection Register */
 #define        CNTKCTL_EL1     20      /* Timer Control Register (EL1) */
+#define        PAR_EL1         21      /* Physical Address Register */
 /* 32bit specific registers. Keep them at the end of the range */
-#define        DACR32_EL2      21      /* Domain Access Control Register */
-#define        IFSR32_EL2      22      /* Instruction Fault Status Register */
-#define        FPEXC32_EL2     23      /* Floating-Point Exception Control Register */
-#define        DBGVCR32_EL2    24      /* Debug Vector Catch Register */
-#define        TEECR32_EL1     25      /* ThumbEE Configuration Register */
-#define        TEEHBR32_EL1    26      /* ThumbEE Handler Base Register */
-#define        NR_SYS_REGS     27
+#define        DACR32_EL2      22      /* Domain Access Control Register */
+#define        IFSR32_EL2      23      /* Instruction Fault Status Register */
+#define        FPEXC32_EL2     24      /* Floating-Point Exception Control Register */
+#define        DBGVCR32_EL2    25      /* Debug Vector Catch Register */
+#define        TEECR32_EL1     26      /* ThumbEE Configuration Register */
+#define        TEEHBR32_EL1    27      /* ThumbEE Handler Base Register */
+#define        NR_SYS_REGS     28
 
 /* 32bit mapping */
 #define c0_MPIDR       (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -69,6 +70,8 @@
 #define c5_AIFSR       (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
 #define c6_DFAR                (FAR_EL1 * 2)   /* Data Fault Address Register */
 #define c6_IFAR                (c6_DFAR + 1)   /* Instruction Fault Address Register */
+#define c7_PAR         (PAR_EL1 * 2)   /* Physical Address Register */
+#define c7_PAR_high    (c7_PAR + 1)    /* PAR top 32 bits */
 #define c10_PRRR       (MAIR_EL1 * 2)  /* Primary Region Remap Register */
 #define c10_NMRR       (c10_PRRR + 1)  /* Normal Memory Remap Register */
 #define c12_VBAR       (VBAR_EL1 * 2)  /* Vector Base Address Register */
index 644d7395686493e371d01266c98597a00a104da4..0859a4ddd1e7d0e8b1792416b19a8f9908457af7 100644 (file)
@@ -129,7 +129,7 @@ struct kvm_vcpu_arch {
        struct kvm_mmu_memory_cache mmu_page_cache;
 
        /* Target CPU and feature flags */
-       u32 target;
+       int target;
        DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
 
        /* Detect first run of a vcpu */
index 9ba33c40cdf8f841e974f68e599f0f97e87138ff..12e6ccb88691c65e6a20d761275babb1af369182 100644 (file)
@@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-       int mapping = (*event_map)[config];
+       int mapping;
+
+       if (config >= PERF_COUNT_HW_MAX)
+               return -EINVAL;
+
+       mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
@@ -317,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events,
        struct hw_perf_event fake_event = event->hw;
        struct pmu *leader_pmu = event->group_leader->pmu;
 
+       if (is_software_event(event))
+               return 1;
+
        if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
                return 1;
 
index ff985e3d8b72db7861b1559cd42a59a59d957fad..1ac0bbbdddb27976ada4376fe4d28538fee7ccc4 100644 (file)
@@ -214,6 +214,7 @@ __kvm_hyp_code_start:
        mrs     x21,    tpidr_el1
        mrs     x22,    amair_el1
        mrs     x23,    cntkctl_el1
+       mrs     x24,    par_el1
 
        stp     x4, x5, [x3]
        stp     x6, x7, [x3, #16]
@@ -225,6 +226,7 @@ __kvm_hyp_code_start:
        stp     x18, x19, [x3, #112]
        stp     x20, x21, [x3, #128]
        stp     x22, x23, [x3, #144]
+       str     x24, [x3, #160]
 .endm
 
 .macro restore_sysregs
@@ -243,6 +245,7 @@ __kvm_hyp_code_start:
        ldp     x18, x19, [x3, #112]
        ldp     x20, x21, [x3, #128]
        ldp     x22, x23, [x3, #144]
+       ldr     x24, [x3, #160]
 
        msr     vmpidr_el2,     x4
        msr     csselr_el1,     x5
@@ -264,6 +267,7 @@ __kvm_hyp_code_start:
        msr     tpidr_el1,      x21
        msr     amair_el1,      x22
        msr     cntkctl_el1,    x23
+       msr     par_el1,        x24
 .endm
 
 .macro skip_32bit_state tmp, target
@@ -600,6 +604,8 @@ END(__kvm_vcpu_run)
 
 // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 ENTRY(__kvm_tlb_flush_vmid_ipa)
+       dsb     ishst
+
        kern_hyp_va     x0
        ldr     x2, [x0, #KVM_VTTBR]
        msr     vttbr_el2, x2
@@ -621,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
 ENDPROC(__kvm_tlb_flush_vmid_ipa)
 
 ENTRY(__kvm_flush_vm_context)
+       dsb     ishst
        tlbi    alle1is
        ic      ialluis
        dsb     sy
@@ -753,6 +760,10 @@ el1_trap:
         */
        tbnz    x1, #7, 1f      // S1PTW is set
 
+       /* Preserve PAR_EL1 */
+       mrs     x3, par_el1
+       push    x3, xzr
+
        /*
         * Permission fault, HPFAR_EL2 is invalid.
         * Resolve the IPA the hard way using the guest VA.
@@ -766,6 +777,8 @@ el1_trap:
 
        /* Read result */
        mrs     x3, par_el1
+       pop     x0, xzr                 // Restore PAR_EL1 from the stack
+       msr     par_el1, x0
        tbnz    x3, #0, 3f              // Bail out if we failed the translation
        ubfx    x3, x3, #12, #36        // Extract IPA
        lsl     x3, x3, #4              // and present it like HPFAR
index 94923609753b2ae91715080fff2cd544281c8621..02e9d09e1d804b4e9344427037dd5a2b88d378ba 100644 (file)
@@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        /* FAR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
          NULL, reset_unknown, FAR_EL1 },
+       /* PAR_EL1 */
+       { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
+         NULL, reset_unknown, PAR_EL1 },
 
        /* PMINTENSET_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
index e773659ccf9f8f607db709109e39b0cacb6f7989..46048d24328c759b0bf4189c612929015f139f69 100644 (file)
@@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.next_pc_inc;
                return 1;
                break;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+       case lwc2_op: /* This is bbit0 on Octeon */
+               if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
+                       *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc + 8;
+               return 1;
+       case ldc2_op: /* This is bbit032 on Octeon */
+               if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
+                       *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc + 8;
+               return 1;
+       case swc2_op: /* This is bbit1 on Octeon */
+               if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+                       *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc + 8;
+               return 1;
+       case sdc2_op: /* This is bbit132 on Octeon */
+               if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
+                       *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       *contpc = regs->cp0_epc + 8;
+               return 1;
+#endif
        case cop0_op:
        case cop1_op:
        case cop2_op:
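
The Octeon bbit0/bbit1 cases added above compute the branch target like any other MIPS branch: when taken, epc + 4 plus the sign-extended 16-bit immediate shifted left by two; when not taken, epc + 8 to step over the delay slot. A worked example of that arithmetic with made-up values:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t branch_target(uint64_t epc, int16_t simmediate, int taken)
    {
            /* taken: delay-slot PC plus the shifted, sign-extended offset */
            return taken ? epc + 4 + ((int64_t)simmediate << 2)
                         : epc + 8;     /* fall through past the delay slot */
    }

    int main(void)
    {
            uint64_t epc = 0xffffffff80001000ULL;

            printf("taken:     %#llx\n",
                   (unsigned long long)branch_target(epc, -4, 1)); /* ...0ff4 */
            printf("not taken: %#llx\n",
                   (unsigned long long)branch_target(epc, -4, 0)); /* ...1008 */
            return 0;
    }
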
index 653668d140f994e543ad52e46d0c8402d5fe9259..4a8cb8d7cbd5d2b0febd4333931b459e75f1ea1d 100644 (file)
@@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params)
         */
        if (boot_params->sentinel) {
                /* fields in boot_params are left uninitialized, clear them */
-               memset(&boot_params->olpc_ofw_header, 0,
+               memset(&boot_params->ext_ramdisk_image, 0,
                       (char *)&boot_params->efi_info -
-                       (char *)&boot_params->olpc_ofw_header);
+                       (char *)&boot_params->ext_ramdisk_image);
                memset(&boot_params->kbd_status, 0,
                       (char *)&boot_params->hdr -
                       (char *)&boot_params->kbd_status);
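
The fix narrows the cleared range so it starts at ext_ramdisk_image instead of olpc_ofw_header, leaving the fields in between untouched. The underlying idiom, wiping everything between two struct members, looks like this in isolation; the struct here is a stand-in, not the real boot_params layout:

    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    struct params {
            int keep_a;
            int clear_from;      /* first field to wipe           */
            int also_cleared;
            int clear_upto;      /* first field that must survive */
            int keep_b;
    };

    int main(void)
    {
            struct params p = { 1, 2, 3, 4, 5 };

            /* zero [clear_from, clear_upto), same shape as sanitize_boot_params() */
            memset(&p.clear_from, 0,
                   offsetof(struct params, clear_upto) -
                   offsetof(struct params, clear_from));

            printf("%d %d %d %d %d\n", p.keep_a, p.clear_from,
                   p.also_cleared, p.clear_upto, p.keep_b); /* prints "1 0 0 4 5" */
            return 0;
    }
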
index 50e5c58ced23b2ec8537569a71ae4ac41566281f..4c019179a57dd97d6b48ae064ef1faea0dc2e7f7 100644 (file)
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
 
 extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
 extern int apply_microcode_amd(int cpu);
-extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size);
+extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
 
 #ifdef CONFIG_MICROCODE_AMD_EARLY
 #ifdef CONFIG_X86_32
index f654ecefea5b6d5348df41195a529a4dce303261..08a089043ccfbb669c889ac034091a55aaa92b75 100644 (file)
@@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 
 static const int amd_erratum_383[];
 static const int amd_erratum_400[];
-static bool cpu_has_amd_erratum(const int *erratum);
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
 
 static void init_amd(struct cpuinfo_x86 *c)
 {
@@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c)
                value &= ~(1ULL << 24);
                wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
 
-               if (cpu_has_amd_erratum(amd_erratum_383))
+               if (cpu_has_amd_erratum(c, amd_erratum_383))
                        set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
        }
 
-       if (cpu_has_amd_erratum(amd_erratum_400))
+       if (cpu_has_amd_erratum(c, amd_erratum_400))
                set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
 
        rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
@@ -878,23 +878,13 @@ static const int amd_erratum_400[] =
 static const int amd_erratum_383[] =
        AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
 
-static bool cpu_has_amd_erratum(const int *erratum)
+
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 {
-       struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
        int osvw_id = *erratum++;
        u32 range;
        u32 ms;
 
-       /*
-        * If called early enough that current_cpu_data hasn't been initialized
-        * yet, fall back to boot_cpu_data.
-        */
-       if (cpu->x86 == 0)
-               cpu = &boot_cpu_data;
-
-       if (cpu->x86_vendor != X86_VENDOR_AMD)
-               return false;
-
        if (osvw_id >= 0 && osvw_id < 65536 &&
            cpu_has(cpu, X86_FEATURE_OSVW)) {
                u64 osvw_len;
index 7a0adb7ee43397aa9a9fcbf733c3b1c9308f2b71..7123b5df479d872def8ff437fcd407c5c4d5ca50 100644 (file)
@@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
        return 0;
 }
 
-static unsigned int verify_patch_size(int cpu, u32 patch_size,
+static unsigned int verify_patch_size(u8 family, u32 patch_size,
                                      unsigned int size)
 {
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
        u32 max_size;
 
 #define F1XH_MPB_MAX_SIZE 2048
@@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
 #define F15H_MPB_MAX_SIZE 4096
 #define F16H_MPB_MAX_SIZE 3458
 
-       switch (c->x86) {
+       switch (family) {
        case 0x14:
                max_size = F14H_MPB_MAX_SIZE;
                break;
@@ -277,9 +276,8 @@ static void cleanup(void)
  * driver cannot continue functioning normally. In such cases, we tear
  * down everything we've used up so far and exit.
  */
-static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
+static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
 {
-       struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_header_amd *mc_hdr;
        struct ucode_patch *patch;
        unsigned int patch_size, crnt_size, ret;
@@ -299,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
 
        /* check if patch is for the current family */
        proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
-       if (proc_fam != c->x86)
+       if (proc_fam != family)
                return crnt_size;
 
        if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
@@ -308,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
                return crnt_size;
        }
 
-       ret = verify_patch_size(cpu, patch_size, leftover);
+       ret = verify_patch_size(family, patch_size, leftover);
        if (!ret) {
                pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
                return crnt_size;
@@ -339,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
        return crnt_size;
 }
 
-static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size)
+static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
+                                            size_t size)
 {
        enum ucode_state ret = UCODE_ERROR;
        unsigned int leftover;
@@ -362,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
        }
 
        while (leftover) {
-               crnt_size = verify_and_add_patch(cpu, fw, leftover);
+               crnt_size = verify_and_add_patch(family, fw, leftover);
                if (crnt_size < 0)
                        return ret;
 
@@ -373,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
        return UCODE_OK;
 }
 
-enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size)
+enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
 {
        enum ucode_state ret;
 
        /* free old equiv table */
        free_equiv_cpu_table();
 
-       ret = __load_microcode_amd(cpu, data, size);
+       ret = __load_microcode_amd(family, data, size);
 
        if (ret != UCODE_OK)
                cleanup();
 
 #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
        /* save BSP's matching patch for early load */
-       if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
-               struct ucode_patch *p = find_patch(cpu);
+       if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
+               struct ucode_patch *p = find_patch(smp_processor_id());
                if (p) {
                        memset(amd_bsp_mpb, 0, MPB_MAX_SIZE);
                        memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data),
@@ -441,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
                goto fw_release;
        }
 
-       ret = load_microcode_amd(cpu, fw->data, fw->size);
+       ret = load_microcode_amd(c->x86, fw->data, fw->size);
 
  fw_release:
        release_firmware(fw);
index 1d14ffee57495a9793d8f9f5f01073958da6ee3e..6073104ccaa36bca776290155e42a30bdd444a8d 100644 (file)
@@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
        uci->cpu_sig.sig = cpuid_eax(0x00000001);
 }
 #else
-static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
-                                                struct ucode_cpu_info *uci)
+void load_ucode_amd_ap(void)
 {
+       unsigned int cpu = smp_processor_id();
+       struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        u32 rev, eax;
 
        rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
        eax = cpuid_eax(0x00000001);
 
-       uci->cpu_sig.sig = eax;
        uci->cpu_sig.rev = rev;
-       c->microcode = rev;
-       c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
-}
-
-void load_ucode_amd_ap(void)
-{
-       unsigned int cpu = smp_processor_id();
-
-       collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu);
+       uci->cpu_sig.sig = eax;
 
        if (cpu && !ucode_loaded) {
                void *ucode;
@@ -265,8 +257,10 @@ void load_ucode_amd_ap(void)
                        return;
 
                ucode = (void *)(initrd_start + ucode_offset);
-               if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK)
+               eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+               if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
                        return;
+
                ucode_loaded = true;
        }
 
@@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void)
 {
        enum ucode_state ret;
        void *ucode;
+       u32 eax;
+
 #ifdef CONFIG_X86_32
        unsigned int bsp = boot_cpu_data.cpu_index;
        struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
@@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void)
                return 0;
 
        ucode = (void *)(initrd_start + ucode_offset);
-       ret = load_microcode_amd(0, ucode, ucode_size);
+       eax   = cpuid_eax(0x00000001);
+       eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+
+       ret = load_microcode_amd(eax, ucode, ucode_size);
        if (ret != UCODE_OK)
                return -EINVAL;
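
Passing the family instead of a CPU number lets the early loader run before cpu_data() is populated; the family is rebuilt from CPUID function 1 as base family plus extended family, exactly as in the two hunks above. A worked example of that computation with an illustrative EAX value:

    #include <stdio.h>
    #include <stdint.h>

    static uint8_t x86_family(uint32_t eax)
    {
            /* base family in bits 11:8, extended family in bits 27:20 */
            return ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
    }

    int main(void)
    {
            uint32_t eax = 0x00600f12;  /* made-up CPUID(1).EAX for a family 15h part */

            printf("family 0x%x\n", x86_family(eax));   /* prints "family 0x15" */
            return 0;
    }
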
 
index 48f8375e4c6b07edfbcefd819a8210f6e0839dfe..30277e27431acde9a9320e0b1be4470bddb40e3a 100644 (file)
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
                                *begin = new_begin;
                }
        } else {
-               *begin = mmap_legacy_base();
+               *begin = current->mm->mmap_legacy_base;
                *end = TASK_SIZE;
        }
 }
index f63778cb2363981ad8f98d0e068e4d789c2136b0..25e7e1372bb26e961b580c753407edf28a320aa3 100644 (file)
@@ -98,7 +98,7 @@ static unsigned long mmap_base(void)
  * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
  * does, but not when emulating X86_32
  */
-unsigned long mmap_legacy_base(void)
+static unsigned long mmap_legacy_base(void)
 {
        if (mmap_is_ia32())
                return TASK_UNMAPPED_BASE;
@@ -112,11 +112,13 @@ unsigned long mmap_legacy_base(void)
  */
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
+       mm->mmap_legacy_base = mmap_legacy_base();
+       mm->mmap_base = mmap_base();
+
        if (mmap_is_legacy()) {
-               mm->mmap_base = mmap_legacy_base();
+               mm->mmap_base = mm->mmap_legacy_base;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
-               mm->mmap_base = mmap_base();
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
 }
index 056d11faef21e96e5adf56a455a2827d2f97fbf1..8f3eea6b80c527bd65fbbe80c6c8c8b7513805c6 100644 (file)
@@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
        e820_add_region(start, end - start, type);
 }
 
+void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+{
+       struct e820entry *entry;
+       unsigned int i;
+
+       for (i = 0, entry = list; i < map_size; i++, entry++) {
+               if (entry->type == E820_UNUSABLE)
+                       entry->type = E820_RAM;
+       }
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
@@ -353,6 +364,17 @@ char * __init xen_memory_setup(void)
        }
        BUG_ON(rc);
 
+       /*
+        * Xen won't allow a 1:1 mapping to be created to UNUSABLE
+        * regions, so if we're using the machine memory map, leave the
+        * region as RAM as it is in the pseudo-physical map.
+        *
+        * UNUSABLE regions in domUs are not handled and will need
+        * a patch in the future.
+        */
+       if (xen_initial_domain())
+               xen_ignore_unusable(map, memmap.nr_entries);
+
        /* Make sure the Xen-supplied memory map is well-ordered. */
        sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
 
index ca92754eb846b6d7f8293a4f6f75dedae7bf13a9..b81c88e51daa3d412a147f2088c1f51be1649d26 100644 (file)
@@ -694,8 +694,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
        int rc;
-       rc = native_cpu_up(cpu, tidle);
-       WARN_ON (xen_smp_intr_init(cpu));
+       /*
+        * xen_smp_intr_init() needs to run before native_cpu_up()
+        * so that IPI vectors are set up on the booting CPU before
+        * it is marked online in native_cpu_up().
+        */
+       rc = xen_smp_intr_init(cpu);
+       WARN_ON(rc);
+       if (!rc)
+               rc = native_cpu_up(cpu, tidle);
        return rc;
 }
 
index e1284b8dc6eef9b800f997ecbf6bc5e60214eacc..3270d3c8ba4ed239d25b0507882fa50df3abb5e0 100644 (file)
@@ -908,9 +908,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
                device->cap._DDC = 1;
        }
 
-       if (acpi_video_init_brightness(device))
-               return;
-
        if (acpi_video_backlight_support()) {
                struct backlight_properties props;
                struct pci_dev *pdev;
@@ -920,6 +917,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
                static int count = 0;
                char *name;
 
+               result = acpi_video_init_brightness(device);
+               if (result)
+                       return;
                name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
                if (!name)
                        return;
@@ -979,11 +979,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
                if (result)
                        printk(KERN_ERR PREFIX "Create sysfs link\n");
 
-       } else {
-               /* Remove the brightness object. */
-               kfree(device->brightness->levels);
-               kfree(device->brightness);
-               device->brightness = NULL;
        }
 }
 
index 1c41722bb7e2d39016dd25f023dc37d6860b7584..20fd337a57314a2928c9208bf706ffabed0c4dd5 100644 (file)
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
 
        /* Disable sending Early R_OK.
         * With "cached read" HDD testing and multiple ports busy on a SATA
-        * host controller, 3726 PMP will very rarely drop a deferred
+        * host controller, 3x26 PMP will very rarely drop a deferred
         * R_OK that was intended for the host. Symptom will be all
         * 5 drives under test will timeout, get reset, and recover.
         */
-       if (vendor == 0x1095 && devid == 0x3726) {
+       if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
                u32 reg;
 
                err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
                if (err_mask) {
                        rc = -EIO;
-                       reason = "failed to read Sil3726 Private Register";
+                       reason = "failed to read Sil3x26 Private Register";
                        goto fail;
                }
                reg &= ~0x1;
                err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
                if (err_mask) {
                        rc = -EIO;
-                       reason = "failed to write Sil3726 Private Register";
+                       reason = "failed to write Sil3x26 Private Register";
                        goto fail;
                }
        }
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
        u16 devid = sata_pmp_gscr_devid(gscr);
        struct ata_link *link;
 
-       if (vendor == 0x1095 && devid == 0x3726) {
-               /* sil3726 quirks */
+       if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+               /* sil3x26 quirks */
                ata_for_each_link(link, ap, EDGE) {
                        /* link reports offline after LPM */
                        link->flags |= ATA_LFLAG_NO_LPM;
index 19720a0a4a65ff5c6198ab4e37df7e756ce85717..851bd3f43ac63fc0f3631193f58aa3d94c0af39a 100644 (file)
@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 {
        struct sata_fsl_host_priv *host_priv = host->private_data;
        void __iomem *hcr_base = host_priv->hcr_base;
+       unsigned long flags;
 
        if (count > ICC_MAX_INT_COUNT_THRESHOLD)
                count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
                        (count > ICC_MIN_INT_COUNT_THRESHOLD))
                ticks = ICC_SAFE_INT_TICKS;
 
-       spin_lock(&host->lock);
+       spin_lock_irqsave(&host->lock, flags);
        iowrite32((count << 24 | ticks), hcr_base + ICC);
 
        intr_coalescing_count = count;
        intr_coalescing_ticks = ticks;
-       spin_unlock(&host->lock);
+       spin_unlock_irqrestore(&host->lock, flags);
 
        DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
                        intr_coalescing_count, intr_coalescing_ticks);
index d047d92a456fbab39c56046d74feb24c9163668f..e9a4f46d962e817ab21aa44e155d399f07e76132 100644 (file)
@@ -86,11 +86,11 @@ struct ecx_plat_data {
 
 #define SGPIO_SIGNALS                  3
 #define ECX_ACTIVITY_BITS              0x300000
-#define ECX_ACTIVITY_SHIFT             2
+#define ECX_ACTIVITY_SHIFT             0
 #define ECX_LOCATE_BITS                        0x80000
 #define ECX_LOCATE_SHIFT               1
 #define ECX_FAULT_BITS                 0x400000
-#define ECX_FAULT_SHIFT                        0
+#define ECX_FAULT_SHIFT                        2
 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
                                u32 shift)
 {
index 19e36603b23b64e2c500cb425d3bf0246b2fdd8f..3bc8414533c9bfa235024bef3458bbcf3e119605 100644 (file)
@@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
                                  &status))
                goto log_fail;
 
-       while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+       while ((status == SDVO_CMD_STATUS_PENDING ||
+               status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
                udelay(15);
                if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
                                          SDVO_I2C_CMD_STATUS,
index dc53a527126b0569800ff2df3a8a36ebbf904855..9e6578330801638caeb91e7f92e8e0139660eb6f 100644 (file)
@@ -85,9 +85,17 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
 {
+       struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+
+       mutex_lock(&obj->base.dev->struct_mutex);
+
        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);
+
+       i915_gem_object_unpin_pages(obj);
+
+       mutex_unlock(&obj->base.dev->struct_mutex);
 }
 
 static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
index 6f514297c4837882a64f9480b9f944c3913f7b0c..53cddd98540616084a7bcc05b718bb048879c730 100644 (file)
                                        will not assert AGPBUSY# and will only
                                        be delivered when out of C3. */
 #define   INSTPM_FORCE_ORDERING                                (1<<7) /* GEN6+ */
+#define   INSTPM_TLB_INVALIDATE        (1<<9)
+#define   INSTPM_SYNC_FLUSH    (1<<5)
 #define ACTHD          0x020c8
 #define FW_BLC         0x020d8
 #define FW_BLC2                0x020dc
index e38b457866535925acaf054b549f5bb07ce180f7..be79f477a38f9e48de386332e4062f09484a3453 100644 (file)
@@ -10042,6 +10042,8 @@ struct intel_display_error_state {
 
        u32 power_well_driver;
 
+       int num_transcoders;
+
        struct intel_cursor_error_state {
                u32 control;
                u32 position;
@@ -10050,16 +10052,7 @@ struct intel_display_error_state {
        } cursor[I915_MAX_PIPES];
 
        struct intel_pipe_error_state {
-               enum transcoder cpu_transcoder;
-               u32 conf;
                u32 source;
-
-               u32 htotal;
-               u32 hblank;
-               u32 hsync;
-               u32 vtotal;
-               u32 vblank;
-               u32 vsync;
        } pipe[I915_MAX_PIPES];
 
        struct intel_plane_error_state {
@@ -10071,6 +10064,19 @@ struct intel_display_error_state {
                u32 surface;
                u32 tile_offset;
        } plane[I915_MAX_PIPES];
+
+       struct intel_transcoder_error_state {
+               enum transcoder cpu_transcoder;
+
+               u32 conf;
+
+               u32 htotal;
+               u32 hblank;
+               u32 hsync;
+               u32 vtotal;
+               u32 vblank;
+               u32 vsync;
+       } transcoder[4];
 };
 
 struct intel_display_error_state *
@@ -10078,9 +10084,17 @@ intel_display_capture_error_state(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_display_error_state *error;
-       enum transcoder cpu_transcoder;
+       int transcoders[] = {
+               TRANSCODER_A,
+               TRANSCODER_B,
+               TRANSCODER_C,
+               TRANSCODER_EDP,
+       };
        int i;
 
+       if (INTEL_INFO(dev)->num_pipes == 0)
+               return NULL;
+
        error = kmalloc(sizeof(*error), GFP_ATOMIC);
        if (error == NULL)
                return NULL;
@@ -10089,9 +10103,6 @@ intel_display_capture_error_state(struct drm_device *dev)
                error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
 
        for_each_pipe(i) {
-               cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
-               error->pipe[i].cpu_transcoder = cpu_transcoder;
-
                if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
                        error->cursor[i].control = I915_READ(CURCNTR(i));
                        error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10115,14 +10126,25 @@ intel_display_capture_error_state(struct drm_device *dev)
                        error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
                }
 
-               error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
                error->pipe[i].source = I915_READ(PIPESRC(i));
-               error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
-               error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
-               error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
-               error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
-               error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
-               error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+       }
+
+       error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+       if (HAS_DDI(dev_priv->dev))
+               error->num_transcoders++; /* Account for eDP. */
+
+       for (i = 0; i < error->num_transcoders; i++) {
+               enum transcoder cpu_transcoder = transcoders[i];
+
+               error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+               error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+               error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+               error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+               error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+               error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+               error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+               error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
        }
 
        /* In the code above we read the registers without checking if the power
@@ -10144,22 +10166,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 {
        int i;
 
+       if (!error)
+               return;
+
        err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
        if (HAS_POWER_WELL(dev))
                err_printf(m, "PWR_WELL_CTL2: %08x\n",
                           error->power_well_driver);
        for_each_pipe(i) {
                err_printf(m, "Pipe [%d]:\n", i);
-               err_printf(m, "  CPU transcoder: %c\n",
-                          transcoder_name(error->pipe[i].cpu_transcoder));
-               err_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
                err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
-               err_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
-               err_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
-               err_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
-               err_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
-               err_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
-               err_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
 
                err_printf(m, "Plane [%d]:\n", i);
                err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
@@ -10180,5 +10196,17 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                err_printf(m, "  POS: %08x\n", error->cursor[i].position);
                err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
        }
+
+       for (i = 0; i < error->num_transcoders; i++) {
+               err_printf(m, "  CPU transcoder: %c\n",
+                          transcoder_name(error->transcoder[i].cpu_transcoder));
+               err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
+               err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
+               err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
+               err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
+               err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
+               err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
+               err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
+       }
 }
 #endif
index 664118d8c1d6426353ed97bb61b1113369a7678a..079ef0129e7416aabcf46e3c96ee409b65702808 100644 (file)
@@ -968,6 +968,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 
        I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
        POSTING_READ(mmio);
+
+       /* Flush the TLB for this page */
+       if (INTEL_INFO(dev)->gen >= 6) {
+               u32 reg = RING_INSTPM(ring->mmio_base);
+               I915_WRITE(reg,
+                          _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+                                             INSTPM_SYNC_FLUSH));
+               if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+                            1000))
+                       DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+                                 ring->name);
+       }
 }
 
 static int
index d8291724dbd47d53ff6edc48a12ad669e5c68d8c..7a4e0891c5f872e0dbda7dc5164b348aa59058c7 100644 (file)
@@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
        u32 splitoff;
        u32 s, e;
 
+       BUG_ON(!type);
+
        list_for_each_entry(this, &mm->free, fl_entry) {
                e = this->offset + this->length;
                s = this->offset;
@@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
        struct nouveau_mm_node *prev, *this, *next;
        u32 mask = align - 1;
 
+       BUG_ON(!type);
+
        list_for_each_entry_reverse(this, &mm->free, fl_entry) {
                u32 e = this->offset + this->length;
                u32 s = this->offset;
index d5502267c30f71162ad901fdbedfa6a375cb54eb..9d2cd200625084608b9a6ecb8e866a9dd7cecf2f 100644 (file)
@@ -20,8 +20,8 @@ nouveau_mc(void *obj)
        return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
 }
 
-#define nouveau_mc_create(p,e,o,d)                                             \
-       nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,m,d)                                           \
+       nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
 #define nouveau_mc_destroy(p) ({                                               \
        struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc));        \
 })
@@ -33,7 +33,8 @@ nouveau_mc(void *obj)
 })
 
 int  nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
-                       struct nouveau_oclass *, int, void **);
+                       struct nouveau_oclass *, const struct nouveau_mc_intr *,
+                       int, void **);
 void _nouveau_mc_dtor(struct nouveau_object *);
 int  _nouveau_mc_init(struct nouveau_object *);
 int  _nouveau_mc_fini(struct nouveau_object *, bool);
index 19e3a9a63a02a79ae582e65fae75c617890afc5a..ab7ef0ac9e34c121e863c8110d243557773c681f 100644 (file)
@@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
                return ret;
 
        switch (pfb914 & 0x00000003) {
-       case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break;
-       case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break;
-       case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break;
+       case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
+       case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
+       case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
        case 0x00000003: break;
        }
 
-       pfb->ram->size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
-       pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-       pfb->ram->tags  =  nv_rd32(pfb, 0x100320);
+       ram->size  =  nv_rd32(pfb, 0x10020c) & 0xff000000;
+       ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+       ram->tags  =  nv_rd32(pfb, 0x100320);
        return 0;
 }
 
index 7192aa6e5577b6b2e1b460dc8b75a2b145eaff6e..63a6aab860282272caac175ff77ab89eb0fc91db 100644 (file)
@@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
-       pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-       pfb->ram->type = NV_MEM_TYPE_STOLEN;
+       ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+       ram->type = NV_MEM_TYPE_STOLEN;
        return 0;
 }
 
index bcca883018f4cb030dd20642da03379c9064b06e..cce65cc565145cb74fdfd513967cb5723c1afc8d 100644 (file)
@@ -30,8 +30,9 @@ struct nvc0_ltcg_priv {
        struct nouveau_ltcg base;
        u32 part_nr;
        u32 subp_nr;
-       struct nouveau_mm tags;
        u32 num_tags;
+       u32 tag_base;
+       struct nouveau_mm tags;
        struct nouveau_mm_node *tag_ram;
 };
 
@@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
        u32 tag_size, tag_margin, tag_align;
        int ret;
 
-       nv_wr32(priv, 0x17e8d8, priv->part_nr);
-       if (nv_device(pfb)->card_type >= NV_E0)
-               nv_wr32(priv, 0x17e000, priv->part_nr);
-
        /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
        priv->num_tags = (pfb->ram->size >> 17) / 4;
        if (priv->num_tags > (1 << 17))
@@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
        tag_size += tag_align;
        tag_size  = (tag_size + 0xfff) >> 12; /* round up */
 
-       ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
+       ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1,
                              &priv->tag_ram);
        if (ret) {
                priv->num_tags = 0;
@@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
                tag_base += tag_align - 1;
                ret = do_div(tag_base, tag_align);
 
-               nv_wr32(priv, 0x17e8d4, tag_base);
+               priv->tag_base = tag_base;
        }
        ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
 
@@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        }
        priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
 
-       nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
-
        ret = nvc0_ltcg_init_tag_ram(pfb, priv);
        if (ret)
                return ret;
@@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object)
        nouveau_ltcg_destroy(ltcg);
 }
 
+static int
+nvc0_ltcg_init(struct nouveau_object *object)
+{
+       struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+       struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+       int ret;
+
+       ret = nouveau_ltcg_init(ltcg);
+       if (ret)
+               return ret;
+
+       nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+       nv_wr32(priv, 0x17e8d8, priv->part_nr);
+       if (nv_device(ltcg)->card_type >= NV_E0)
+               nv_wr32(priv, 0x17e000, priv->part_nr);
+       nv_wr32(priv, 0x17e8d4, priv->tag_base);
+       return 0;
+}
+
 struct nouveau_oclass
 nvc0_ltcg_oclass = {
        .handle = NV_SUBDEV(LTCG, 0xc0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvc0_ltcg_ctor,
                .dtor = nvc0_ltcg_dtor,
-               .init = _nouveau_ltcg_init,
+               .init = nvc0_ltcg_init,
                .fini = _nouveau_ltcg_fini,
        },
 };
index 1c0330b8c9a43919861a42781f6691bec3cbf51f..ec9cd6f10f910aac9f56c4acbc4d9e09d393c85a 100644 (file)
@@ -80,7 +80,9 @@ _nouveau_mc_dtor(struct nouveau_object *object)
 
 int
 nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
-                  struct nouveau_oclass *oclass, int length, void **pobject)
+                  struct nouveau_oclass *oclass,
+                  const struct nouveau_mc_intr *intr_map,
+                  int length, void **pobject)
 {
        struct nouveau_device *device = nv_device(parent);
        struct nouveau_mc *pmc;
@@ -92,6 +94,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
        if (ret)
                return ret;
 
+       pmc->intr_map = intr_map;
+
        ret = request_irq(device->pdev->irq, nouveau_mc_intr,
                          IRQF_SHARED, "nouveau", pmc);
        if (ret < 0)
index 8c769715227bd65a6b814b0585e047b169ad1bb8..64aa4edb0d9d958d60daf543fcd7d98209da10fb 100644 (file)
@@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv04_mc_priv *priv;
        int ret;
 
-       ret = nouveau_mc_create(parent, engine, oclass, &priv);
+       ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
-       priv->base.intr_map = nv04_mc_intr;
        return 0;
 }
 
index 51919371810fdf25c924f6e765ede4c0c682e829..d9891782bf28ea69a906a50714110914af9cb8ba 100644 (file)
@@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv44_mc_priv *priv;
        int ret;
 
-       ret = nouveau_mc_create(parent, engine, oclass, &priv);
+       ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
-       priv->base.intr_map = nv04_mc_intr;
        return 0;
 }
 
index f25fc5fc7dd11a773e6743a3df5d3ae5e1235532..2b1afe225db84b6a715e3891a96e649f65a7c380 100644 (file)
@@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv50_mc_priv *priv;
        int ret;
 
-       ret = nouveau_mc_create(parent, engine, oclass, &priv);
+       ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
-       priv->base.intr_map = nv50_mc_intr;
        return 0;
 }
 
index e82fd21b504154e8b28ed3dcca7fb7c1647a4394..0d57b4d3e001a9f2d141f745bed505ae7670badd 100644 (file)
@@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nv98_mc_priv *priv;
        int ret;
 
-       ret = nouveau_mc_create(parent, engine, oclass, &priv);
+       ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
-       priv->base.intr_map = nv98_mc_intr;
        return 0;
 }
 
index c5da3babbc621edccdb1bed8a55552885ec8d98f..104175c5a2ddf0a08ca82a01246b551605397246 100644 (file)
@@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
        struct nvc0_mc_priv *priv;
        int ret;
 
-       ret = nouveau_mc_create(parent, engine, oclass, &priv);
+       ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;
 
-       priv->base.intr_map = nvc0_mc_intr;
        return 0;
 }
 
index 0782bd2f1e04c4076c715c3bac9af271fa76348a..6a13ffb53bdb642e989cfdc75a7bc5f8341d08d3 100644 (file)
@@ -606,6 +606,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
        regp->ramdac_a34 = 0x1;
 }
 
+static int
+nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+       struct nv04_display *disp = nv04_display(crtc->dev);
+       struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       int ret;
+
+       ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+       if (ret == 0) {
+               if (disp->image[nv_crtc->index])
+                       nouveau_bo_unpin(disp->image[nv_crtc->index]);
+               nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+       }
+
+       return ret;
+}
+
 /**
  * Sets up registers for the given mode/adjusted_mode pair.
  *
@@ -622,10 +640,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
        struct drm_device *dev = crtc->dev;
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        struct nouveau_drm *drm = nouveau_drm(dev);
+       int ret;
 
        NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
        drm_mode_debug_printmodeline(adjusted_mode);
 
+       ret = nv_crtc_swap_fbs(crtc, old_fb);
+       if (ret)
+               return ret;
+
        /* unlock must come after turning off FP_TG_CONTROL in output_prepare */
        nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
 
@@ -722,6 +745,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
 
 static void nv_crtc_destroy(struct drm_crtc *crtc)
 {
+       struct nv04_display *disp = nv04_display(crtc->dev);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 
        if (!nv_crtc)
@@ -729,6 +753,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
 
        drm_crtc_cleanup(crtc);
 
+       if (disp->image[nv_crtc->index])
+               nouveau_bo_unpin(disp->image[nv_crtc->index]);
+       nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+
        nouveau_bo_unmap(nv_crtc->cursor.nvbo);
        nouveau_bo_unpin(nv_crtc->cursor.nvbo);
        nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
@@ -753,6 +781,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
        nouveau_hw_load_state_palette(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
 }
 
+static void
+nv_crtc_disable(struct drm_crtc *crtc)
+{
+       struct nv04_display *disp = nv04_display(crtc->dev);
+       struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+       if (disp->image[nv_crtc->index])
+               nouveau_bo_unpin(disp->image[nv_crtc->index]);
+       nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+}
+
 static void
 nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
                  uint32_t size)
@@ -791,7 +829,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
        struct drm_framebuffer *drm_fb;
        struct nouveau_framebuffer *fb;
        int arb_burst, arb_lwm;
-       int ret;
 
        NV_DEBUG(drm, "index %d\n", nv_crtc->index);
 
@@ -801,10 +838,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
                return 0;
        }
 
-
        /* If atomic, we want to switch to the fb we were passed, so
-        * now we update pointers to do that.  (We don't pin; just
-        * assume we're already pinned and update the base address.)
+        * now we update pointers to do that.
         */
        if (atomic) {
                drm_fb = passed_fb;
@@ -812,17 +847,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
        } else {
                drm_fb = crtc->fb;
                fb = nouveau_framebuffer(crtc->fb);
-               /* If not atomic, we can go ahead and pin, and unpin the
-                * old fb we were passed.
-                */
-               ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
-               if (ret)
-                       return ret;
-
-               if (passed_fb) {
-                       struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
-                       nouveau_bo_unpin(ofb->nvbo);
-               }
        }
 
        nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -877,6 +901,9 @@ static int
 nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
                        struct drm_framebuffer *old_fb)
 {
+       int ret = nv_crtc_swap_fbs(crtc, old_fb);
+       if (ret)
+               return ret;
        return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
 }
 
@@ -1027,6 +1054,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
        .mode_set_base = nv04_crtc_mode_set_base,
        .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
        .load_lut = nv_crtc_gamma_load,
+       .disable = nv_crtc_disable,
 };
 
 int
index a0a031dad13f366a9b69510afaa55200337fba3b..9928187f0a7d0bebdf3c80f8aec00809b294546e 100644 (file)
@@ -81,6 +81,7 @@ struct nv04_display {
        uint32_t saved_vga_font[4][16384];
        uint32_t dac_users[4];
        struct nouveau_object *core;
+       struct nouveau_bo *image[2];
 };
 
 static inline struct nv04_display *
index 907d20ef6d4d119f81c06fe1801edf3867f01673..a03e75deacafc23e05ff2c9982bd9e44e2380d76 100644 (file)
@@ -577,6 +577,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                ret = nv50_display_flip_next(crtc, fb, chan, 0);
                if (ret)
                        goto fail_unreserve;
+       } else {
+               struct nv04_display *dispnv04 = nv04_display(dev);
+               nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
        }
 
        ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
index 3af5bcd0b203f904440fe33445d8da4098cf0bb2..625f80d53dc2b9c4c7e886ac5d282355193a643f 100644 (file)
@@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
        if (clk < pll->vco1.max_freq)
                pll->vco2.max_freq = 0;
 
-       pclk->pll_calc(pclk, pll, clk, &coef);
+       ret = pclk->pll_calc(pclk, pll, clk, &coef);
        if (ret == 0)
                return -ERANGE;
 
index 274b8e1b889fd0fbbe1dde2a71492e975f00a711..9f19259667dfa71e254052735be72ce0f14e75d9 100644 (file)
@@ -2163,7 +2163,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
                WREG32(reg, tmp_);                              \
        } while (0)
 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
 #define WREG32_PLL_P(reg, val, mask)                           \
        do {                                                    \
                uint32_t tmp_ = RREG32_PLL(reg);                \
index f1c15754e73ca6d933d6ea1e0877b839cecab4b6..b79f4f5cdd626108c8790394cc6e57ddd9b27bce 100644 (file)
@@ -356,6 +356,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                return -EINVAL;
        }
 
+       if (bo->tbo.sync_obj) {
+               r = radeon_fence_wait(bo->tbo.sync_obj, false);
+               if (r) {
+                       DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+                       return r;
+               }
+       }
+
        r = radeon_bo_kmap(bo, &ptr);
        if (r) {
                DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
index bcc68ec204adeb7582a536cd3125ab28d00bce6d..f5e92cfcc140984bd63e1a892fa88277bb7530c3 100644 (file)
@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
                                                 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
                radeon_program_register_sequence(rdev,
                                                 rv730_golden_registers,
-                                                (const u32)ARRAY_SIZE(rv770_golden_registers));
+                                                (const u32)ARRAY_SIZE(rv730_golden_registers));
                radeon_program_register_sequence(rdev,
                                                 rv730_mgcg_init,
-                                                (const u32)ARRAY_SIZE(rv770_mgcg_init));
+                                                (const u32)ARRAY_SIZE(rv730_mgcg_init));
                break;
        case CHIP_RV710:
                radeon_program_register_sequence(rdev,
@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
                                                 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
                radeon_program_register_sequence(rdev,
                                                 rv710_golden_registers,
-                                                (const u32)ARRAY_SIZE(rv770_golden_registers));
+                                                (const u32)ARRAY_SIZE(rv710_golden_registers));
                radeon_program_register_sequence(rdev,
                                                 rv710_mgcg_init,
-                                                (const u32)ARRAY_SIZE(rv770_mgcg_init));
+                                                (const u32)ARRAY_SIZE(rv710_mgcg_init));
                break;
        case CHIP_RV740:
                radeon_program_register_sequence(rdev,
                                                 rv740_golden_registers,
-                                                (const u32)ARRAY_SIZE(rv770_golden_registers));
+                                                (const u32)ARRAY_SIZE(rv740_golden_registers));
                radeon_program_register_sequence(rdev,
                                                 rv740_mgcg_init,
-                                                (const u32)ARRAY_SIZE(rv770_mgcg_init));
+                                                (const u32)ARRAY_SIZE(rv740_mgcg_init));
                break;
        default:
                break;
index 5f4749e60b0428ae6fb3ffb1be7b4de00913b699..c1cd5698b8aea471802f471626e0b3865b2ca058 100644 (file)
@@ -232,7 +232,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev,
 
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
-               ret = adjd_s311_read_data(indio_dev, chan->address, val);
+               ret = adjd_s311_read_data(indio_dev,
+                       ADJD_S311_DATA_REG(chan->address), val);
                if (ret < 0)
                        return ret;
                return IIO_VAL_INT;
index dc112a7137fe9280fca348908ed99b77f36f9417..4296155090b2b181f5840e21d97402ae0351d739 100644 (file)
@@ -959,23 +959,21 @@ out:
        return r;
 }
 
-static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
+static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
 {
-       struct entry *e = hash_lookup(mq, oblock);
+       struct mq_policy *mq = to_mq_policy(p);
+       struct entry *e;
+
+       mutex_lock(&mq->lock);
+
+       e = hash_lookup(mq, oblock);
 
        BUG_ON(!e || !e->in_cache);
 
        del(mq, e);
        e->in_cache = false;
        push(mq, e);
-}
 
-static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
-{
-       struct mq_policy *mq = to_mq_policy(p);
-
-       mutex_lock(&mq->lock);
-       remove_mapping(mq, oblock);
        mutex_unlock(&mq->lock);
 }
 
index ce9b387b5a1962949582354f7b0dcfecb621a08d..00b88cbfde25c41bdad32491eb7ab24edfb8c00d 100644 (file)
@@ -1333,6 +1333,8 @@ enum {
        BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
        BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
        BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+       BNX2X_SP_RTNL_TX_STOP,
+       BNX2X_SP_RTNL_TX_RESUME,
 };
 
 struct bnx2x_prev_path_list {
index f9122f2d6b657d0e674c7b036635b60e97586609..fcf2761d8828804d3edf7ca8e2ad245576d23685 100644 (file)
 #include "bnx2x_dcb.h"
 
 /* forward declarations of dcbx related functions */
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
 static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
 static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
 static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
                                          u32 *set_configuration_ets_pg,
                                          u32 *pri_pg_tbl);
@@ -425,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
                bnx2x_pfc_clear(bp);
 }
 
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
 {
        struct bnx2x_func_state_params func_params = {NULL};
+       int rc;
 
        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_TX_STOP;
 
+       __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+       __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
        DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
-       return bnx2x_func_state_change(bp, &func_params);
+
+       rc = bnx2x_func_state_change(bp, &func_params);
+       if (rc) {
+               BNX2X_ERR("Unable to hold traffic for HW configuration\n");
+               bnx2x_panic();
+       }
+
+       return rc;
 }
 
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
 {
        struct bnx2x_func_state_params func_params = {NULL};
        struct bnx2x_func_tx_start_params *tx_params =
                &func_params.params.tx_start;
+       int rc;
 
        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_TX_START;
 
+       __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+       __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
        bnx2x_dcbx_fw_struct(bp, tx_params);
 
        DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
-       return bnx2x_func_state_change(bp, &func_params);
+
+       rc = bnx2x_func_state_change(bp, &func_params);
+       if (rc) {
+               BNX2X_ERR("Unable to resume traffic after HW configuration\n");
+               bnx2x_panic();
+       }
+
+       return rc;
 }
 
 static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
@@ -744,7 +764,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                        if (IS_MF(bp))
                                bnx2x_link_sync_notify(bp);
 
-                       bnx2x_dcbx_stop_hw_tx(bp);
+                       set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
+
+                       schedule_delayed_work(&bp->sp_rtnl_task, 0);
 
                        return;
                }
@@ -757,7 +779,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
                /* ets may affect cmng configuration: reinit it in hw */
                bnx2x_set_local_cmng(bp);
 
-               bnx2x_dcbx_resume_hw_tx(bp);
+               set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state);
+
+               schedule_delayed_work(&bp->sp_rtnl_task, 0);
 
                return;
        case BNX2X_DCBX_STATE_TX_RELEASED:
@@ -2367,21 +2391,24 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
                case DCB_FEATCFG_ATTR_PG:
                        if (bp->dcbx_local_feat.ets.enabled)
                                *flags |= DCB_FEATCFG_ENABLE;
-                       if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
+                       if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR |
+                                             DCBX_REMOTE_MIB_ERROR))
                                *flags |= DCB_FEATCFG_ERROR;
                        break;
                case DCB_FEATCFG_ATTR_PFC:
                        if (bp->dcbx_local_feat.pfc.enabled)
                                *flags |= DCB_FEATCFG_ENABLE;
                        if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
-                           DCBX_LOCAL_PFC_MISMATCH))
+                                             DCBX_LOCAL_PFC_MISMATCH |
+                                             DCBX_REMOTE_MIB_ERROR))
                                *flags |= DCB_FEATCFG_ERROR;
                        break;
                case DCB_FEATCFG_ATTR_APP:
                        if (bp->dcbx_local_feat.app.enabled)
                                *flags |= DCB_FEATCFG_ENABLE;
                        if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
-                           DCBX_LOCAL_APP_MISMATCH))
+                                             DCBX_LOCAL_APP_MISMATCH |
+                                             DCBX_REMOTE_MIB_ERROR))
                                *flags |= DCB_FEATCFG_ERROR;
                        break;
                default:
index 125bd1b6586ffc1f96b5fc946a4ee5a4613ce5a4..804b8f64463e80a1fcb45f51bda976b4d8544062 100644 (file)
@@ -199,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
 int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
 #endif /* BCM_DCBNL */
 
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+
 #endif /* BNX2X_DCB_H */
index 955d6cfd9cb7c48179b587a7bf5239a572b7e9e8..8bdc8b9730074159f64411b818114c48727a08a5 100644 (file)
@@ -2261,6 +2261,23 @@ static void bnx2x_set_requested_fc(struct bnx2x *bp)
                bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
 }
 
+static void bnx2x_init_dropless_fc(struct bnx2x *bp)
+{
+       u32 pause_enabled = 0;
+
+       if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
+               if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+                       pause_enabled = 1;
+
+               REG_WR(bp, BAR_USTRORM_INTMEM +
+                          USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
+                      pause_enabled);
+       }
+
+       DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
+          pause_enabled ? "enabled" : "disabled");
+}
+
 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
 {
        int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
@@ -2294,6 +2311,8 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
 
                bnx2x_release_phy_lock(bp);
 
+               bnx2x_init_dropless_fc(bp);
+
                bnx2x_calc_fc_adv(bp);
 
                if (bp->link_vars.link_up) {
@@ -2315,6 +2334,8 @@ void bnx2x_link_set(struct bnx2x *bp)
                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);
 
+               bnx2x_init_dropless_fc(bp);
+
                bnx2x_calc_fc_adv(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not set link\n");
@@ -2556,20 +2577,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 
        bnx2x_link_update(&bp->link_params, &bp->link_vars);
 
-       if (bp->link_vars.link_up) {
+       bnx2x_init_dropless_fc(bp);
 
-               /* dropless flow control */
-               if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
-                       int port = BP_PORT(bp);
-                       u32 pause_enabled = 0;
-
-                       if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
-                               pause_enabled = 1;
-
-                       REG_WR(bp, BAR_USTRORM_INTMEM +
-                              USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
-                              pause_enabled);
-               }
+       if (bp->link_vars.link_up) {
 
                if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
                        struct host_port_stats *pstats;
@@ -9645,6 +9655,12 @@ sp_rtnl_not_reset:
                               &bp->sp_rtnl_state))
                bnx2x_pf_set_vfs_vlan(bp);
 
+       if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state))
+               bnx2x_dcbx_stop_hw_tx(bp);
+
+       if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))
+               bnx2x_dcbx_resume_hw_tx(bp);
+
        /* work which needs rtnl lock not-taken (as it takes the lock itself and
         * can be called from other contexts as well)
         */
@@ -11147,6 +11163,9 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
        int tmp;
        u32 cfg;
 
+       if (IS_VF(bp))
+               return 0;
+
        if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
                /* Take function: tmp = func */
                tmp = BP_ABS_FUNC(bp);
index 44104fb27947fba144575a33a0be3cf1343fe39d..ad83f4b4877761f73ed2ff790a9fef26b6bbd9b1 100644 (file)
@@ -1747,11 +1747,8 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
 
 void bnx2x_iov_init_dmae(struct bnx2x *bp)
 {
-       DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
-       if (!IS_SRIOV(bp))
-               return;
-
-       REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+       if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
+               REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
 }
 
 static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
@@ -3084,8 +3081,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
        pci_disable_sriov(bp->pdev);
 }
 
-static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
-                              struct bnx2x_virtf *vf)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+                            struct bnx2x_virtf **vf,
+                            struct pf_vf_bulletin_content **bulletin)
 {
        if (bp->state != BNX2X_STATE_OPEN) {
                BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3103,12 +3101,22 @@ static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
                return -EINVAL;
        }
 
-       if (!vf) {
+       /* init members */
+       *vf = BP_VF(bp, vfidx);
+       *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+       if (!*vf) {
                BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
                          vfidx);
                return -EINVAL;
        }
 
+       if (!*bulletin) {
+               BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
+                         vfidx);
+               return -EINVAL;
+       }
+
        return 0;
 }
 
@@ -3116,17 +3124,19 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
                        struct ifla_vf_info *ivi)
 {
        struct bnx2x *bp = netdev_priv(dev);
-       struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
-       struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
-       struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
-       struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+       struct bnx2x_virtf *vf = NULL;
+       struct pf_vf_bulletin_content *bulletin = NULL;
+       struct bnx2x_vlan_mac_obj *mac_obj;
+       struct bnx2x_vlan_mac_obj *vlan_obj;
        int rc;
 
-       /* sanity */
-       rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+       /* sanity and init */
+       rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
        if (rc)
                return rc;
-       if (!mac_obj || !vlan_obj || !bulletin) {
+       mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+       vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
+       if (!mac_obj || !vlan_obj) {
                BNX2X_ERR("VF partially initialized\n");
                return -EINVAL;
        }
@@ -3183,11 +3193,11 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
 {
        struct bnx2x *bp = netdev_priv(dev);
        int rc, q_logical_state;
-       struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
-       struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+       struct bnx2x_virtf *vf = NULL;
+       struct pf_vf_bulletin_content *bulletin = NULL;
 
-       /* sanity */
-       rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+       /* sanity and init */
+       rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
        if (rc)
                return rc;
        if (!is_valid_ether_addr(mac)) {
@@ -3249,11 +3259,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
 {
        struct bnx2x *bp = netdev_priv(dev);
        int rc, q_logical_state;
-       struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
-       struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+       struct bnx2x_virtf *vf = NULL;
+       struct pf_vf_bulletin_content *bulletin = NULL;
 
-       /* sanity */
-       rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+       /* sanity and init */
+       rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
        if (rc)
                return rc;
 
index 181edb522450ada0c9ba72e9a736d312594cc183..4559c35eea13b5bf5e989066cf062c94ab38710a 100644 (file)
@@ -2563,8 +2563,8 @@ static int be_close(struct net_device *netdev)
        /* Wait for all pending tx completions to arrive so that
         * all tx skbs are freed.
         */
-       be_tx_compl_clean(adapter);
        netif_tx_disable(netdev);
+       be_tx_compl_clean(adapter);
 
        be_rx_qs_destroy(adapter);
 
index b5eb4195fc9927a8b88493c7b1f783926928dc8c..85e5c97191dd0b1a6e75e5c548c9f96cbe19be04 100644 (file)
@@ -7088,7 +7088,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        RTL_W8(Cfg9346, Cfg9346_Unlock);
        RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
-       RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+       RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
        if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
                tp->features |= RTL_FEATURE_WOL;
        if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
index 2a469b27a5061641a07a8502ecf63736953070a2..30d744235d276bc0bf10c56d7ea821692a5de811 100644 (file)
@@ -675,7 +675,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
                BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
                BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
                             EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
-               rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF;
+               rep_index = spec->type - EFX_FILTER_UC_DEF;
                ins_index = rep_index;
 
                spin_lock_bh(&state->lock);
index 51f2bc37610188b44de5b8546062d16142571505..2dcc60fb37f1dee50beea82b878e98248c1688a0 100644 (file)
@@ -210,8 +210,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
                        pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
                        pci_write_config_byte(pcidev,0x5a,0xc0);
                        WriteLPCReg(0x28, 0x70 );
-                       if (via_ircc_open(pcidev, &info, 0x3076) == 0)
-                               rc=0;
+                       rc = via_ircc_open(pcidev, &info, 0x3076);
                } else
                        rc = -ENODEV; //IR not turn on   
        } else { //Not VT1211
@@ -249,8 +248,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
                        info.irq=FirIRQ;
                        info.dma=FirDRQ1;
                        info.dma2=FirDRQ0;
-                       if (via_ircc_open(pcidev, &info, 0x3096) == 0)
-                               rc=0;
+                       rc = via_ircc_open(pcidev, &info, 0x3096);
                } else
                        rc = -ENODEV; //IR not turn on !!!!!
        }//Not VT1211
index b51db2abfe442cd95fdbcb97e17aec702ec0d2c5..ea53abb209889ed1032323bfcebd154e68fc1d1d 100644 (file)
@@ -68,6 +68,8 @@ static const struct proto_ops macvtap_socket_ops;
 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
                      NETIF_F_TSO6 | NETIF_F_UFO)
 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+
 /*
  * RCU usage:
  * The macvtap_queue and the macvlan_dev are loosely coupled, the
@@ -278,7 +280,8 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvtap_queue *q = macvtap_get_queue(dev, skb);
-       netdev_features_t features;
+       netdev_features_t features = TAP_FEATURES;
+
        if (!q)
                goto drop;
 
@@ -287,9 +290,11 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
 
        skb->dev = dev;
        /* Apply the forward feature mask so that we perform segmentation
-        * according to users wishes.
+        * according to users wishes.  This only works if VNET_HDR is
+        * enabled.
         */
-       features = netif_skb_features(skb) & vlan->tap_features;
+       if (q->flags & IFF_VNET_HDR)
+               features |= vlan->tap_features;
        if (netif_needs_gso(skb, features)) {
                struct sk_buff *segs = __skb_gso_segment(skb, features, false);
 
@@ -1064,8 +1069,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
        /* tap_features are the same as features on tun/tap and
         * reflect user expectations.
         */
-       vlan->tap_features = vlan->dev->features &
-                           (feature_mask | ~TUN_OFFLOADS);
+       vlan->tap_features = feature_mask;
        vlan->set_features = features;
        netdev_update_features(vlan->dev);
 
@@ -1161,10 +1165,6 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
                            TUN_F_TSO_ECN | TUN_F_UFO))
                        return -EINVAL;
 
-               /* TODO: only accept frames with the features that
-                        got enabled for forwarded frames */
-               if (!(q->flags & IFF_VNET_HDR))
-                       return  -EINVAL;
                rtnl_lock();
                ret = set_offload(q, arg);
                rtnl_unlock();
index 8e7af8354342c9ce6440e3131aad536fbe385c87..138de837977f1e5762ecb8ae6fecac59ade8c181 100644 (file)
@@ -23,7 +23,7 @@
 #define RTL821x_INER_INIT      0x6400
 #define RTL821x_INSR           0x13
 
-#define        RTL8211E_INER_LINK_STAT 0x10
+#define        RTL8211E_INER_LINK_STATUS       0x400
 
 MODULE_DESCRIPTION("Realtek PHY driver");
 MODULE_AUTHOR("Johnson Leung");
@@ -57,7 +57,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
 
        if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
                err = phy_write(phydev, RTL821x_INER,
-                               RTL8211E_INER_LINK_STAT);
+                               RTL8211E_INER_LINK_STATUS);
        else
                err = phy_write(phydev, RTL821x_INER, 0);
 
index cba1d46e672e4cde205fbbd36fb79275cd1ecc53..86292e6aaf4955c4412ead6579f74e6848bcd089 100644 (file)
@@ -2816,13 +2816,16 @@ exit:
 static int hso_get_config_data(struct usb_interface *interface)
 {
        struct usb_device *usbdev = interface_to_usbdev(interface);
-       u8 config_data[17];
+       u8 *config_data = kmalloc(17, GFP_KERNEL);
        u32 if_num = interface->altsetting->desc.bInterfaceNumber;
        s32 result;
 
+       if (!config_data)
+               return -ENOMEM;
        if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                            0x86, 0xC0, 0, 0, config_data, 17,
                            USB_CTRL_SET_TIMEOUT) != 0x11) {
+               kfree(config_data);
                return -EIO;
        }
 
@@ -2873,6 +2876,7 @@ static int hso_get_config_data(struct usb_interface *interface)
        if (config_data[16] & 0x1)
                result |= HSO_INFO_CRC_BUG;
 
+       kfree(config_data);
        return result;
 }
 
@@ -2886,6 +2890,11 @@ static int hso_probe(struct usb_interface *interface,
        struct hso_shared_int *shared_int;
        struct hso_device *tmp_dev = NULL;
 
+       if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
+               dev_err(&interface->dev, "Not our interface\n");
+               return -ENODEV;
+       }
+
        if_num = interface->altsetting->desc.bInterfaceNumber;
 
        /* Get the interface/port specification from either driver_info or from
@@ -2895,10 +2904,6 @@ static int hso_probe(struct usb_interface *interface,
        else
                port_spec = hso_get_config_data(interface);
 
-       if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
-               dev_err(&interface->dev, "Not our interface\n");
-               return -ENODEV;
-       }
        /* Check if we need to switch to alt interfaces prior to port
         * configuration */
        if (interface->num_altsetting > 1)
index ac074731335a5ed1b7ef393dfd15ae6c87299d03..e5090309824e53c04d1961c0fd6993e80c5aa61e 100644 (file)
@@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
 
        data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
 
-       memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
+       memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
        data->flags = 1; /* has quality information */
-       memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
+       memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
               sizeof(struct iw_quality) * data->length);
 
        kfree(addr);
index 822f1a00efbb7c5f40ed3edf7ca14f11103d49cc..319387263e1234daec62105051b00aa94994a107 100644 (file)
@@ -1068,7 +1068,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
 
-       if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+       if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+               return;
+
+       if (ctx->vif)
                ieee80211_chswitch_done(ctx->vif, is_success);
 }
 
index a70c7b9d9bad897345fb1e1e89d5c421e0d8a3da..ff8cc75c189d4d842abf8611fb8c5e7c7a63bf38 100644 (file)
@@ -97,8 +97,6 @@
 
 #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS         (0x00000800)
 
-#define APMG_RTC_INT_STT_RFKILL                (0x10000000)
-
 /* Device system time */
 #define DEVICE_SYSTEM_TIME_REG 0xA0206C
 
index ad9bbca992133cc096ff0b90c5048ae248b635c9..7fd6fbfbc1b387696a7ad05f16e9bd49bf48b350 100644 (file)
@@ -138,6 +138,20 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
        schedule_work(&mvm->roc_done_wk);
 }
 
+static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
+                                       struct ieee80211_vif *vif,
+                                       const char *errmsg)
+{
+       if (vif->type != NL80211_IFTYPE_STATION)
+               return false;
+       if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
+               return false;
+       if (errmsg)
+               IWL_ERR(mvm, "%s\n", errmsg);
+       ieee80211_connection_loss(vif);
+       return true;
+}
+
 /*
  * Handles a FW notification for an event that is known to the driver.
  *
@@ -163,8 +177,13 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
         * P2P Device discoveribility, while there are other higher priority
         * events in the system).
         */
-       WARN_ONCE(!le32_to_cpu(notif->status),
-                 "Failed to schedule time event\n");
+       if (WARN_ONCE(!le32_to_cpu(notif->status),
+                     "Failed to schedule time event\n")) {
+               if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
+                       iwl_mvm_te_clear_data(mvm, te_data);
+                       return;
+               }
+       }
 
        if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
                IWL_DEBUG_TE(mvm,
@@ -180,14 +199,8 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
                 * By now, we should have finished association
                 * and know the dtim period.
                 */
-               if (te_data->vif->type == NL80211_IFTYPE_STATION &&
-                   (!te_data->vif->bss_conf.assoc ||
-                    !te_data->vif->bss_conf.dtim_period)) {
-                       IWL_ERR(mvm,
-                               "No assocation and the time event is over already...\n");
-                       ieee80211_connection_loss(te_data->vif);
-               }
-
+               iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+                       "No assocation and the time event is over already...");
                iwl_mvm_te_clear_data(mvm, te_data);
        } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
                te_data->running = true;
index f600e68a410a1abe454170bd65d29b5937df41a9..fd848cd1583ebeb3372792911849a0eec25d9667 100644 (file)
@@ -888,14 +888,6 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 
                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
                if (hw_rfkill) {
-                       /*
-                        * Clear the interrupt in APMG if the NIC is going down.
-                        * Note that when the NIC exits RFkill (else branch), we
-                        * can't access prph and the NIC will be reset in
-                        * start_hw anyway.
-                        */
-                       iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
-                                      APMG_RTC_INT_STT_RFKILL);
                        set_bit(STATUS_RFKILL, &trans_pcie->status);
                        if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
                                               &trans_pcie->status))
index 96cfcdd390794060ee6c72c3951984f33da5ff38..390e2f058aff12c48e5f349e2c00730848e30b72 100644 (file)
@@ -1502,16 +1502,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        spin_lock_init(&trans_pcie->reg_lock);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
-       /* W/A - seems to solve weird behavior. We need to remove this if we
-        * don't want to stay in L1 all the time. This wastes a lot of power */
-       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
-                              PCIE_LINK_STATE_CLKPM);
-
        if (pci_enable_device(pdev)) {
                err = -ENODEV;
                goto out_no_pci;
        }
 
+       /* W/A - seems to solve weird behavior. We need to remove this if we
+        * don't want to stay in L1 all the time. This wastes a lot of power */
+       pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+                              PCIE_LINK_STATE_CLKPM);
+
        pci_set_master(pdev);
 
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
index 4941f201d6c8dc5a4b62446a9153a4384a3cb4f4..b8ba1f925e75521a2886b42a8a3d95dac69c0aa9 100644 (file)
@@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
                goto exit;
 
        err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
-           USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
+           USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
        if (err < 0)
                goto exit;
 
+       memcpy(&ret, buf, sizeof(ret));
+
        if (ret & 0x80) {
                err = -EIO;
                goto exit;
index 6bb7cf2de556b559d1f54f9d1c7c3ff297138a3a..b10ba00cc3e6d00f476fc99e1f35daa26f5ae2e6 100644 (file)
@@ -392,6 +392,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
        mem = (unsigned long)
                dt_alloc(size + 4, __alignof__(struct device_node));
 
+       memset((void *)mem, 0, size);
+
        ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
 
        pr_debug("  unflattening %lx...\n", mem);
index c47fd1e5450ba6f26ba8e70f2ad207efc1959a82..94716c779800ea099bfd580c627a93345af7d1db 100644 (file)
@@ -278,6 +278,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
 {
        struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
        struct sunxi_pinctrl_group *g = &pctl->groups[group];
+       unsigned long flags;
        u32 val, mask;
        u16 strength;
        u8 dlevel;
@@ -295,22 +296,35 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
                 *   3: 40mA
                 */
                dlevel = strength / 10 - 1;
+
+               spin_lock_irqsave(&pctl->lock, flags);
+
                val = readl(pctl->membase + sunxi_dlevel_reg(g->pin));
                mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin);
                writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin),
                        pctl->membase + sunxi_dlevel_reg(g->pin));
+
+               spin_unlock_irqrestore(&pctl->lock, flags);
                break;
        case PIN_CONFIG_BIAS_PULL_UP:
+               spin_lock_irqsave(&pctl->lock, flags);
+
                val = readl(pctl->membase + sunxi_pull_reg(g->pin));
                mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
                writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin),
                        pctl->membase + sunxi_pull_reg(g->pin));
+
+               spin_unlock_irqrestore(&pctl->lock, flags);
                break;
        case PIN_CONFIG_BIAS_PULL_DOWN:
+               spin_lock_irqsave(&pctl->lock, flags);
+
                val = readl(pctl->membase + sunxi_pull_reg(g->pin));
                mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
                writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin),
                        pctl->membase + sunxi_pull_reg(g->pin));
+
+               spin_unlock_irqrestore(&pctl->lock, flags);
                break;
        default:
                break;
@@ -360,11 +374,17 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
                                 u8 config)
 {
        struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+       unsigned long flags;
+       u32 val, mask;
+
+       spin_lock_irqsave(&pctl->lock, flags);
 
-       u32 val = readl(pctl->membase + sunxi_mux_reg(pin));
-       u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
+       val = readl(pctl->membase + sunxi_mux_reg(pin));
+       mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
        writel((val & ~mask) | config << sunxi_mux_offset(pin),
                pctl->membase + sunxi_mux_reg(pin));
+
+       spin_unlock_irqrestore(&pctl->lock, flags);
 }
 
 static int sunxi_pmx_enable(struct pinctrl_dev *pctldev,
@@ -464,8 +484,21 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
        struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
        u32 reg = sunxi_data_reg(offset);
        u8 index = sunxi_data_offset(offset);
+       unsigned long flags;
+       u32 regval;
+
+       spin_lock_irqsave(&pctl->lock, flags);
+
+       regval = readl(pctl->membase + reg);
 
-       writel((value & DATA_PINS_MASK) << index, pctl->membase + reg);
+       if (value)
+               regval |= BIT(index);
+       else
+               regval &= ~(BIT(index));
+
+       writel(regval, pctl->membase + reg);
+
+       spin_unlock_irqrestore(&pctl->lock, flags);
 }
 
 static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
@@ -526,6 +559,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
        struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
        u32 reg = sunxi_irq_cfg_reg(d->hwirq);
        u8 index = sunxi_irq_cfg_offset(d->hwirq);
+       unsigned long flags;
+       u32 regval;
        u8 mode;
 
        switch (type) {
@@ -548,7 +583,13 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
                return -EINVAL;
        }
 
-       writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg);
+       spin_lock_irqsave(&pctl->lock, flags);
+
+       regval = readl(pctl->membase + reg);
+       regval &= ~IRQ_CFG_IRQ_MASK;
+       writel(regval | (mode << index), pctl->membase + reg);
+
+       spin_unlock_irqrestore(&pctl->lock, flags);
 
        return 0;
 }
@@ -560,14 +601,19 @@ static void sunxi_pinctrl_irq_mask_ack(struct irq_data *d)
        u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq);
        u32 status_reg = sunxi_irq_status_reg(d->hwirq);
        u8 status_idx = sunxi_irq_status_offset(d->hwirq);
+       unsigned long flags;
        u32 val;
 
+       spin_lock_irqsave(&pctl->lock, flags);
+
        /* Mask the IRQ */
        val = readl(pctl->membase + ctrl_reg);
        writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg);
 
        /* Clear the IRQ */
        writel(1 << status_idx, pctl->membase + status_reg);
+
+       spin_unlock_irqrestore(&pctl->lock, flags);
 }
 
 static void sunxi_pinctrl_irq_mask(struct irq_data *d)
@@ -575,11 +621,16 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
        struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
        u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
        u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+       unsigned long flags;
        u32 val;
 
+       spin_lock_irqsave(&pctl->lock, flags);
+
        /* Mask the IRQ */
        val = readl(pctl->membase + reg);
        writel(val & ~(1 << idx), pctl->membase + reg);
+
+       spin_unlock_irqrestore(&pctl->lock, flags);
 }
 
 static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
@@ -588,6 +639,7 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
        struct sunxi_desc_function *func;
        u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
        u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+       unsigned long flags;
        u32 val;
 
        func = sunxi_pinctrl_desc_find_function_by_pin(pctl,
@@ -597,9 +649,13 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
        /* Change muxing to INT mode */
        sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval);
 
+       spin_lock_irqsave(&pctl->lock, flags);
+
        /* Unmask the IRQ */
        val = readl(pctl->membase + reg);
        writel(val | (1 << idx), pctl->membase + reg);
+
+       spin_unlock_irqrestore(&pctl->lock, flags);
 }
 
 static struct irq_chip sunxi_pinctrl_irq_chip = {
@@ -752,6 +808,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev)
                return -ENOMEM;
        platform_set_drvdata(pdev, pctl);
 
+       spin_lock_init(&pctl->lock);
+
        pctl->membase = of_iomap(node, 0);
        if (!pctl->membase)
                return -ENOMEM;
index d68047d8f6992e709c250d39dab4cf1497102eea..01c494f8a14f0119493d783624b86831549ccdb0 100644 (file)
@@ -14,6 +14,7 @@
 #define __PINCTRL_SUNXI_H
 
 #include <linux/kernel.h>
+#include <linux/spinlock.h>
 
 #define PA_BASE        0
 #define PB_BASE        32
@@ -407,6 +408,7 @@ struct sunxi_pinctrl {
        unsigned                        ngroups;
        int                             irq;
        int                             irq_array[SUNXI_IRQ_NUMBER];
+       spinlock_t                      lock;
        struct pinctrl_dev              *pctl_dev;
 };
 
index 0f9f8596b300a0d06d525547159bf4d038ba1505..f9119525f5570c2d041d563c0eefbc9a01e32c20 100644 (file)
@@ -330,7 +330,7 @@ static int __init olpc_ec_init_module(void)
        return platform_driver_register(&olpc_ec_plat_driver);
 }
 
-module_init(olpc_ec_init_module);
+arch_initcall(olpc_ec_init_module);
 
 MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
 MODULE_LICENSE("GPL");
index 97bb05edcb5a806d85e9930022a77530735b2336..d6970f47ae72f639d648c924a27bea8b97cbdfb6 100644 (file)
@@ -53,7 +53,6 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
 #define HPWMI_ALS_QUERY 0x3
 #define HPWMI_HARDWARE_QUERY 0x4
 #define HPWMI_WIRELESS_QUERY 0x5
-#define HPWMI_BIOS_QUERY 0x9
 #define HPWMI_HOTKEY_QUERY 0xc
 #define HPWMI_WIRELESS2_QUERY 0x1b
 #define HPWMI_POSTCODEERROR_QUERY 0x2a
@@ -293,19 +292,6 @@ static int hp_wmi_tablet_state(void)
        return (state & 0x4) ? 1 : 0;
 }
 
-static int hp_wmi_enable_hotkeys(void)
-{
-       int ret;
-       int query = 0x6e;
-
-       ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
-                                  0);
-
-       if (ret)
-               return -EINVAL;
-       return 0;
-}
-
 static int hp_wmi_set_block(void *data, bool blocked)
 {
        enum hp_wmi_radio r = (enum hp_wmi_radio) data;
@@ -1009,8 +995,6 @@ static int __init hp_wmi_init(void)
                err = hp_wmi_input_setup();
                if (err)
                        return err;
-
-               hp_wmi_enable_hotkeys();
        }
 
        if (bios_capable) {
index 2ac045f27f10112aa467b867a9b97a9a1790c189..3a1b6bf326a814d8453a1cdb2e3d699834f7e87a 100644 (file)
@@ -2440,7 +2440,10 @@ static ssize_t sony_nc_gfx_switch_status_show(struct device *dev,
        if (pos < 0)
                return pos;
 
-       return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina");
+       return snprintf(buffer, PAGE_SIZE, "%s\n",
+                                       pos == SPEED ? "speed" :
+                                       pos == STAMINA ? "stamina" :
+                                       pos == AUTO ? "auto" : "unknown");
 }
 
 static int sony_nc_gfx_switch_setup(struct platform_device *pd,
@@ -4320,7 +4323,8 @@ static int sony_pic_add(struct acpi_device *device)
                goto err_free_resources;
        }
 
-       if (sonypi_compat_init())
+       result = sonypi_compat_init();
+       if (result)
                goto err_remove_input;
 
        /* request io port */
index 1d4c8fe72752899acb408119bc435b959277d731..c82fe65c41286ae90af5d9d12452df32202e5504 100644 (file)
@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
 
        if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
                zfcp_erp_action_dismiss(&port->erp_action);
-       else
-               shost_for_each_device(sdev, port->adapter->scsi_host)
+       else {
+               spin_lock(port->adapter->scsi_host->host_lock);
+               __shost_for_each_device(sdev, port->adapter->scsi_host)
                        if (sdev_to_zfcp(sdev)->port == port)
                                zfcp_erp_action_dismiss_lun(sdev);
+               spin_unlock(port->adapter->scsi_host->host_lock);
+       }
 }
 
 static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
 {
        struct scsi_device *sdev;
 
-       shost_for_each_device(sdev, port->adapter->scsi_host)
+       spin_lock(port->adapter->scsi_host->host_lock);
+       __shost_for_each_device(sdev, port->adapter->scsi_host)
                if (sdev_to_zfcp(sdev)->port == port)
                        _zfcp_erp_lun_reopen(sdev, clear, id, 0);
+       spin_unlock(port->adapter->scsi_host->host_lock);
 }
 
 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -1434,8 +1439,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
                atomic_set_mask(common_mask, &port->status);
        read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
-       shost_for_each_device(sdev, adapter->scsi_host)
+       spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+       __shost_for_each_device(sdev, adapter->scsi_host)
                atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+       spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
 }
 
 /**
@@ -1469,11 +1476,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
        }
        read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
-       shost_for_each_device(sdev, adapter->scsi_host) {
+       spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+       __shost_for_each_device(sdev, adapter->scsi_host) {
                atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
                if (clear_counter)
                        atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
        }
+       spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
 }
 
 /**
@@ -1487,16 +1496,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
 {
        struct scsi_device *sdev;
        u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+       unsigned long flags;
 
        atomic_set_mask(mask, &port->status);
 
        if (!common_mask)
                return;
 
-       shost_for_each_device(sdev, port->adapter->scsi_host)
+       spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+       __shost_for_each_device(sdev, port->adapter->scsi_host)
                if (sdev_to_zfcp(sdev)->port == port)
                        atomic_set_mask(common_mask,
                                        &sdev_to_zfcp(sdev)->status);
+       spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
 }
 
 /**
@@ -1511,6 +1523,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
        struct scsi_device *sdev;
        u32 common_mask = mask & ZFCP_COMMON_FLAGS;
        u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+       unsigned long flags;
 
        atomic_clear_mask(mask, &port->status);
 
@@ -1520,13 +1533,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
        if (clear_counter)
                atomic_set(&port->erp_counter, 0);
 
-       shost_for_each_device(sdev, port->adapter->scsi_host)
+       spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+       __shost_for_each_device(sdev, port->adapter->scsi_host)
                if (sdev_to_zfcp(sdev)->port == port) {
                        atomic_clear_mask(common_mask,
                                          &sdev_to_zfcp(sdev)->status);
                        if (clear_counter)
                                atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
                }
+       spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
 }
 
 /**
index 665e3cfaaf85d5b14859465163a3867073f2c5f5..de0598eaacd226ed2824ff94412069ed855a6420 100644 (file)
@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 
 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
 {
-       spin_lock_irq(&qdio->req_q_lock);
        if (atomic_read(&qdio->req_q_free) ||
            !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
                return 1;
-       spin_unlock_irq(&qdio->req_q_lock);
        return 0;
 }
 
@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
 {
        long ret;
 
-       spin_unlock_irq(&qdio->req_q_lock);
-       ret = wait_event_interruptible_timeout(qdio->req_q_wq,
-                              zfcp_qdio_sbal_check(qdio), 5 * HZ);
+       ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
+                      zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
 
        if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
                return -EIO;
@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
                zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
        }
 
-       spin_lock_irq(&qdio->req_q_lock);
        return -EIO;
 }
 
index 3f01bbf0609f6fb854911da5d9c0261271b184be..890639274bcfe83a1895a27b39a00d7d6ebf1ca0 100644 (file)
@@ -27,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev,              \
 static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,                                   \
                     zfcp_sysfs_##_feat##_##_name##_show, NULL);
 
+#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value)                 \
+static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev,        \
+                                                  struct device_attribute *at,\
+                                                  char *buf)                  \
+{                                                                             \
+       return sprintf(buf, _format, _value);                                  \
+}                                                                             \
+static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,                                   \
+                    zfcp_sysfs_##_feat##_##_name##_show, NULL);
+
 #define ZFCP_DEFINE_A_ATTR(_name, _format, _value)                          \
 static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev,        \
                                                 struct device_attribute *at,\
@@ -75,6 +85,8 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
                 (zfcp_unit_sdev_status(unit) &
                  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
+ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);
 
 static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
                                           struct device_attribute *attr,
@@ -347,6 +359,8 @@ static struct attribute *zfcp_unit_attrs[] = {
        &dev_attr_unit_in_recovery.attr,
        &dev_attr_unit_status.attr,
        &dev_attr_unit_access_denied.attr,
+       &dev_attr_unit_access_shared.attr,
+       &dev_attr_unit_access_readonly.attr,
        NULL
 };
 static struct attribute_group zfcp_unit_attr_group = {
index 48b2918e0d654b7e5978d0da1063d4e25b5e16a0..92ff027746f2a8bfbe6a8d0f3434d807c9e56708 100644 (file)
@@ -1353,7 +1353,6 @@ config SCSI_LPFC
        tristate "Emulex LightPulse Fibre Channel Support"
        depends on PCI && SCSI
        select SCSI_FC_ATTRS
-       select GENERIC_CSUM
        select CRC_T10DIF
        help
           This lpfc driver supports the Emulex LightPulse
index e25eba5713c153a9fd2e829561eb481b26a8dc47..b3b5125faa728988b1d1efe31e4116df70622cf2 100644 (file)
@@ -482,7 +482,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
                ret = comedi_device_postconfig(dev);
        if (ret < 0) {
                comedi_device_detach(dev);
-               module_put(dev->driver->module);
+               module_put(driv->module);
        }
        /* On success, the driver module count has been incremented. */
        return ret;
index 08613e24189415641bc033d61af814e77b47143b..0f1d193fef02972163794fc611d8ee2f292cdc3a 100644 (file)
@@ -304,6 +304,11 @@ static int __init ohci_pci_init(void)
        pr_info("%s: " DRIVER_DESC "\n", hcd_name);
 
        ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
+
+       /* Entries for the PCI suspend/resume callbacks are special */
+       ohci_pci_hc_driver.pci_suspend = ohci_suspend;
+       ohci_pci_hc_driver.pci_resume = ohci_resume;
+
        return pci_register_driver(&ohci_pci_driver);
 }
 module_init(ohci_pci_init);
index ca266280895dc2f150484f1dbb8873150588175b..e1859b8ef5679dc5c39a367c7e86979a2cfde5ec 100644 (file)
@@ -15,7 +15,7 @@
  * 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include "otg_fsm.h"
+#include "phy-fsm-usb.h"
 #include <linux/usb/otg.h>
 #include <linux/ioctl.h>
 
index c520b3548e7cf59cd0058eda330cd2d7ac792197..7f4596606e18b41565ef338598298b6f36c64269 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/usb/gadget.h>
 #include <linux/usb/otg.h>
 
-#include "phy-otg-fsm.h"
+#include "phy-fsm-usb.h"
 
 /* Change USB protocol when there is a protocol change */
 static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
index a58ac435a9a4a03f39274f8b5a41ea659d44eee7..5e8be462aed56da7060792b0abb8d4426e838d06 100644 (file)
@@ -348,7 +348,7 @@ static void init_evtchn_cpu_bindings(void)
 
        for_each_possible_cpu(i)
                memset(per_cpu(cpu_evtchn_mask, i),
-                      (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+                      (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
 }
 
 static inline void clear_evtchn(int port)
@@ -1493,8 +1493,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
 /* Rebind an evtchn so that it gets delivered to a specific cpu */
 static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 {
+       struct shared_info *s = HYPERVISOR_shared_info;
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);
+       int masked;
 
        if (!VALID_EVTCHN(evtchn))
                return -1;
@@ -1510,6 +1512,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;
 
+       /*
+        * Mask the event while changing the VCPU binding to prevent
+        * it being delivered on an unexpected VCPU.
+        */
+       masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+
        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
@@ -1518,6 +1526,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);
 
+       if (!masked)
+               unmask_evtchn(evtchn);
+
        return 0;
 }
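[Editor's note] The comment added to rebind_irq_to_cpu() above spells out the race being closed: while EVTCHNOP_bind_vcpu moves the channel, an event could fire and be delivered on the old VCPU, so the channel is masked across the hypercall and unmasked afterwards only if it was not already masked. A hedged, self-contained sketch of that mask-around-update pattern (generic bitmap helpers stand in for the shared-info page and for unmask_evtchn(), which additionally re-checks pending events):

#include <linux/bitops.h>

/* Sketch only: the bitmap stands in for the Xen shared-info evtchn_mask,
 * and do_rebind() for the EVTCHNOP_bind_vcpu hypercall. */
static void rebind_masked(unsigned long *mask_bitmap, int evtchn,
			  void (*do_rebind)(int evtchn))
{
	/* Remember whether someone else already had the channel masked. */
	int was_masked = test_and_set_bit(evtchn, mask_bitmap);

	do_rebind(evtchn);

	/* Only undo the masking we added ourselves. */
	if (!was_masked)
		clear_bit(evtchn, mask_bitmap);
}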
 
index 5e376bb934196d2b8a0e104473c637439e5e2244..8defc6b3f9a21b6e4971f9576a0fc202f7ba2e85 100644 (file)
@@ -40,7 +40,7 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
        int block, off;
 
        inode = iget_locked(sb, ino);
-       if (IS_ERR(inode))
+       if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;
index 94bbc04dba77053bb47d3d8b793a3a8218f0a0d2..c5eae7251490e9b48553000f8e582cb6c0b9420f 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 int bio_uncopy_user(struct bio *bio)
 {
        struct bio_map_data *bmd = bio->bi_private;
-       int ret = 0;
+       struct bio_vec *bvec;
+       int ret = 0, i;
 
-       if (!bio_flagged(bio, BIO_NULL_MAPPED))
-               ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
-                                    bmd->nr_sgvecs, bio_data_dir(bio) == READ,
-                                    0, bmd->is_our_pages);
+       if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+               /*
+                * if we're in a workqueue, the request is orphaned, so
+                * don't copy into a random user address space, just free.
+                */
+               if (current->mm)
+                       ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+                                            bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+                                            0, bmd->is_our_pages);
+               else if (bmd->is_our_pages)
+                       bio_for_each_segment_all(bvec, bio, i)
+                               __free_page(bvec->bv_page);
+       }
        bio_free_map_data(bmd);
        bio_put(bio);
        return ret;
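[Editor's note] The new comment in bio_uncopy_user() carries the whole fix: when a bounced request completes from a workqueue or another kernel thread, current->mm is NULL, the submitting task's address space may already be gone, and copying back would land in whatever mm happens to be current. A hedged sketch of the resulting decision (same symbols as the patch, wrapped in an illustrative helper):

/* Illustrative helper; __bio_copy_iov() and struct bio_map_data are the
 * fs/bio.c internals touched above, the function name here is made up. */
static int uncopy_or_free(struct bio *bio, struct bio_map_data *bmd)
{
	struct bio_vec *bvec;
	int ret = 0, i;

	if (current->mm)
		/* Still in the issuing task: safe to copy bounce data back. */
		ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
				     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
				     0, bmd->is_our_pages);
	else if (bmd->is_our_pages)
		/* Orphaned request in kernel context: just drop our pages. */
		bio_for_each_segment_all(bvec, bio, i)
			__free_page(bvec->bv_page);

	return ret;
}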
index 87bdb5329c3c90b99d67f4b5bab3ae0b347d2acf..83cfb834db0364b1ffed30d58815f826aaaf55dd 100644 (file)
@@ -2724,6 +2724,17 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
        return memcpy(buffer, temp, sz);
 }
 
+char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+       char *end = buffer + buflen;
+       /* these dentries are never renamed, so d_lock is not needed */
+       if (prepend(&end, &buflen, " (deleted)", 11) ||
+           prepend_name(&end, &buflen, &dentry->d_name) ||
+           prepend(&end, &buflen, "/", 1))  
+               end = ERR_PTR(-ENAMETOOLONG);
+       return end;  
+}
+
 /*
  * Write full pathname from the root of the filesystem into the buffer.
  */
index f3913eb2c47482739681f51551cd6a325e6fc40d..d15ccf20f1b37a9734c338941106fdc0982e0ca4 100644 (file)
@@ -57,7 +57,7 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
        struct inode *inode;
 
        inode = iget_locked(super, ino);
-       if (IS_ERR(inode))
+       if (!inode)
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;
index 9435384562a271cacd5219651269b36d5d2923c4..544a809819c3ee5c16ddaa42a8ce0ce26d2194f4 100644 (file)
@@ -1838,14 +1838,14 @@ int __init gfs2_glock_init(void)
 
        glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
                                          WQ_HIGHPRI | WQ_FREEZABLE, 0);
-       if (IS_ERR(glock_workqueue))
-               return PTR_ERR(glock_workqueue);
+       if (!glock_workqueue)
+               return -ENOMEM;
        gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
                                                WQ_MEM_RECLAIM | WQ_FREEZABLE,
                                                0);
-       if (IS_ERR(gfs2_delete_workqueue)) {
+       if (!gfs2_delete_workqueue) {
                destroy_workqueue(glock_workqueue);
-               return PTR_ERR(gfs2_delete_workqueue);
+               return -ENOMEM;
        }
 
        register_shrinker(&glock_shrinker);
index 5f2e5224c51c9ae79e34a1eb0f405a7b304cd0be..e2e0a90396e7823da9aa47c44a727d5e75751cc6 100644 (file)
@@ -47,7 +47,8 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
  * None of the buffers should be dirty, locked, or pinned.
  */
 
-static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
+static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+                            unsigned int nr_revokes)
 {
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct list_head *head = &gl->gl_ail_list;
@@ -57,7 +58,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 
        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
-       list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) {
+       list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
+               if (nr_revokes == 0)
+                       break;
                bh = bd->bd_bh;
                if (bh->b_state & b_state) {
                        if (fsync)
@@ -65,6 +68,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
                        gfs2_ail_error(gl, bh);
                }
                gfs2_trans_add_revoke(sdp, bd);
+               nr_revokes--;
        }
        GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
        spin_unlock(&sdp->sd_ail_lock);
@@ -91,7 +95,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
        WARN_ON_ONCE(current->journal_info);
        current->journal_info = &tr;
 
-       __gfs2_ail_flush(gl, 0);
+       __gfs2_ail_flush(gl, 0, tr.tr_revokes);
 
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL);
@@ -101,15 +105,19 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 {
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int revokes = atomic_read(&gl->gl_ail_count);
+       unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
        int ret;
 
        if (!revokes)
                return;
 
-       ret = gfs2_trans_begin(sdp, 0, revokes);
+       while (revokes > max_revokes)
+               max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
+
+       ret = gfs2_trans_begin(sdp, 0, max_revokes);
        if (ret)
                return;
-       __gfs2_ail_flush(gl, fsync);
+       __gfs2_ail_flush(gl, fsync, max_revokes);
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL);
 }
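[Editor's note] The reworked gfs2_ail_flush() no longer opens a transaction sized for every AIL entry; max_revokes is instead what the log can actually describe: the first log-descriptor block carries (sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64) revokes, each further block adds (sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64) more, the total is grown block by block until it covers the outstanding count, and __gfs2_ail_flush() stops after issuing that many. A worked example with an assumed 4096-byte block and purely illustrative header sizes of 64 and 32 bytes (the real values come from the on-disk format, not this commit):

	first descriptor block : (4096 - 64) / 8 = 504 revokes
	each continuation block: (4096 - 32) / 8 = 508 revokes
	gl_ail_count = 1500    : 504 -> 1012 -> 1520 >= 1500, so the
	                         transaction reserves 1520 revokes and
	                         __gfs2_ail_flush(gl, fsync, 1520) is called.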
index bbb2715171cd0c983770cf86b4b57ed69e04c98d..64915eeae5a7112f59256185a00a1cc9f3b2a193 100644 (file)
@@ -594,7 +594,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
                }
                gfs2_glock_dq_uninit(ghs);
                if (IS_ERR(d))
-                       return PTR_RET(d);
+                       return PTR_ERR(d);
                return error;
        } else if (error != -ENOENT) {
                goto fail_gunlock;
@@ -1750,6 +1750,10 @@ static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name,
        struct gfs2_holder gh;
        int ret;
 
+       /* For selinux during lookup */
+       if (gfs2_glock_is_locked_by_me(ip->i_gl))
+               return generic_getxattr(dentry, name, data, size);
+
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
        ret = gfs2_glock_nq(&gh);
        if (ret == 0) {
index e04d0e09ee7b59d5959d69e5df872a5871e9ae64..7b0f5043cf24c253612451787588d638da5483ce 100644 (file)
@@ -155,7 +155,7 @@ static int __init init_gfs2_fs(void)
                goto fail_wq;
 
        gfs2_control_wq = alloc_workqueue("gfs2_control",
-                              WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0);
+                                         WQ_UNBOUND | WQ_FREEZABLE, 0);
        if (!gfs2_control_wq)
                goto fail_recovery;
 
index 34423978b17083d73b038d360d72b47dc08c6693..d19b30ababf10b115175f3b9814e10130c7c52ed 100644 (file)
@@ -926,14 +926,8 @@ static int get_hstate_idx(int page_size_log)
        return h - hstates;
 }
 
-static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen)
-{
-       return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
-                               dentry->d_name.name);
-}
-
 static struct dentry_operations anon_ops = {
-       .d_dname = hugetlb_dname
+       .d_dname = simple_dname
 };
 
 /*
index 7b1ca9ba0b0a70213915f6d7bce688184ab7a56f..a45ba4f267fe6e834909f122dd166cdd608dcce5 100644 (file)
@@ -1429,7 +1429,7 @@ struct vfsmount *collect_mounts(struct path *path)
                         CL_COPY_ALL | CL_PRIVATE);
        namespace_unlock();
        if (IS_ERR(tree))
-               return NULL;
+               return ERR_CAST(tree);
        return &tree->mnt;
 }
 
index dc9a913784ab94127fba6e4503d12a0708069f41..2d8be51f90dc9257bf74cad77b719d17f781c739 100644 (file)
@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
 
        if (err == -EOPNOTSUPP) {
                set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
-               bio_put(bio);
-               /* to be detected by submit_seg_bio() */
+               /* to be detected by nilfs_segbuf_submit_bio() */
        }
 
        if (!uptodate)
@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
        bio->bi_private = segbuf;
        bio_get(bio);
        submit_bio(mode, bio);
+       segbuf->sb_nbio++;
        if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
                bio_put(bio);
                err = -EOPNOTSUPP;
                goto failed;
        }
-       segbuf->sb_nbio++;
        bio_put(bio);
 
        wi->bio = NULL;
index 75f2890abbd8ddcc8a674b1c42397cc1456fd33e..0ff80f9b930f7226124e670c2814d7f4618d4532 100644 (file)
@@ -228,8 +228,6 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
        if (!p)
                return -ENOENT;
 
-       if (!dir_emit_dots(file, ctx))
-               goto out;
        if (!dir_emit_dots(file, ctx))
                goto out;
        files = get_files_struct(p);
index 94441a407337bb02fb77e89fe93dfbbed169f827..737e15615b0490c40d002a315217033f5463d5b3 100644 (file)
@@ -271,7 +271,7 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *file,
                de = next;
        } while (de);
        spin_unlock(&proc_subdir_lock);
-       return 0;
+       return 1;
 }
 
 int proc_readdir(struct file *file, struct dir_context *ctx)
index 229e366598daecd4e905e8f51f13efaf0a44e773..e0a790da726d0f710a7585b0a8b6663e9902a0ac 100644 (file)
@@ -205,7 +205,9 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr
 static int proc_root_readdir(struct file *file, struct dir_context *ctx)
 {
        if (ctx->pos < FIRST_PROCESS_ENTRY) {
-               proc_readdir(file, ctx);
+               int error = proc_readdir(file, ctx);
+               if (unlikely(error <= 0))
+                       return error;
                ctx->pos = FIRST_PROCESS_ENTRY;
        }
 
index b90337c9d468ead13e94f4c6ed110bdf75723755..4a12532da8c4ab9f7783394043e3be76c99db741 100644 (file)
@@ -336,6 +336,7 @@ extern int d_validate(struct dentry *, struct dentry *);
  * helper function for dentry_operations.d_dname() members
  */
 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern char *simple_dname(struct dentry *, char *, int);
 
 extern char *__d_path(const struct path *, const struct path *, char *, int);
 extern char *d_absolute_path(const struct path *, char *, int);
index b99cd23f347489ea0a3c2a14d58193c6e1117331..79640e015a86c6eb4a8bab2c0d2ee78eaf241558 100644 (file)
@@ -5,45 +5,13 @@
 
 #include <linux/bitmap.h>
 #include <linux/if.h>
+#include <linux/ip.h>
 #include <linux/netdevice.h>
 #include <linux/rcupdate.h>
 #include <linux/timer.h>
 #include <linux/sysctl.h>
 #include <linux/rtnetlink.h>
 
-enum
-{
-       IPV4_DEVCONF_FORWARDING=1,
-       IPV4_DEVCONF_MC_FORWARDING,
-       IPV4_DEVCONF_PROXY_ARP,
-       IPV4_DEVCONF_ACCEPT_REDIRECTS,
-       IPV4_DEVCONF_SECURE_REDIRECTS,
-       IPV4_DEVCONF_SEND_REDIRECTS,
-       IPV4_DEVCONF_SHARED_MEDIA,
-       IPV4_DEVCONF_RP_FILTER,
-       IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
-       IPV4_DEVCONF_BOOTP_RELAY,
-       IPV4_DEVCONF_LOG_MARTIANS,
-       IPV4_DEVCONF_TAG,
-       IPV4_DEVCONF_ARPFILTER,
-       IPV4_DEVCONF_MEDIUM_ID,
-       IPV4_DEVCONF_NOXFRM,
-       IPV4_DEVCONF_NOPOLICY,
-       IPV4_DEVCONF_FORCE_IGMP_VERSION,
-       IPV4_DEVCONF_ARP_ANNOUNCE,
-       IPV4_DEVCONF_ARP_IGNORE,
-       IPV4_DEVCONF_PROMOTE_SECONDARIES,
-       IPV4_DEVCONF_ARP_ACCEPT,
-       IPV4_DEVCONF_ARP_NOTIFY,
-       IPV4_DEVCONF_ACCEPT_LOCAL,
-       IPV4_DEVCONF_SRC_VMARK,
-       IPV4_DEVCONF_PROXY_ARP_PVLAN,
-       IPV4_DEVCONF_ROUTE_LOCALNET,
-       __IPV4_DEVCONF_MAX
-};
-
-#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
-
 struct ipv4_devconf {
        void    *sysctl;
        int     data[IPV4_DEVCONF_MAX];
index 850e95bc766c8504d4fbc2c592c1327ef5994431..b8b7dc755752d8cfe6337ca92062b196cd19f073 100644 (file)
@@ -101,6 +101,7 @@ struct inet6_skb_parm {
 #define IP6SKB_FORWARDED       2
 #define IP6SKB_REROUTED                4
 #define IP6SKB_ROUTERALERT     8
+#define IP6SKB_FRAGMENTED      16
 };
 
 #define IP6CB(skb)     ((struct inet6_skb_parm*)((skb)->cb))
index fb425aa16c0149fdf35b9fe6a1be3a56577d3ff7..faf4b7c1ad12702db919bd03a448741b7e579789 100644 (file)
@@ -332,6 +332,7 @@ struct mm_struct {
                                unsigned long pgoff, unsigned long flags);
 #endif
        unsigned long mmap_base;                /* base of mmap area */
+       unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
        unsigned long task_size;                /* size of task vm space */
        unsigned long highest_vm_end;           /* highest vma end address */
        pgd_t * pgd;
index 8dfaa2ce2e956e0037c469daed8586cb9a94bc68..0f424698064f5ee2c0a0da036189843c2e8cead4 100644 (file)
@@ -114,6 +114,11 @@ extern const struct raid6_recov_calls raid6_recov_intx1;
 extern const struct raid6_recov_calls raid6_recov_ssse3;
 extern const struct raid6_recov_calls raid6_recov_avx2;
 
+extern const struct raid6_calls raid6_neonx1;
+extern const struct raid6_calls raid6_neonx2;
+extern const struct raid6_calls raid6_neonx4;
+extern const struct raid6_calls raid6_neonx8;
+
 /* Algorithm list */
 extern const struct raid6_calls * const raid6_algos[];
 extern const struct raid6_recov_calls *const raid6_recov_algos[];
index e9995eb5985cd50b28aaae49f528487acf3f018c..078066daffd486d426027e9c97c3a40b8202e950 100644 (file)
@@ -314,7 +314,6 @@ struct nsproxy;
 struct user_namespace;
 
 #ifdef CONFIG_MMU
-extern unsigned long mmap_legacy_base(void);
 extern void arch_pick_mmap_layout(struct mm_struct *mm);
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
index f487a4750b7f36e97acef1542cb6076d76ce7a59..a67fc1635592fd34717ef559049924b1add707ca 100644 (file)
@@ -811,6 +811,63 @@ do {                                                                       \
        __ret;                                                          \
 })
 
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition,     \
+                                                   lock, ret)          \
+do {                                                                   \
+       DEFINE_WAIT(__wait);                                            \
+                                                                       \
+       for (;;) {                                                      \
+               prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);      \
+               if (condition)                                          \
+                       break;                                          \
+               if (signal_pending(current)) {                          \
+                       ret = -ERESTARTSYS;                             \
+                       break;                                          \
+               }                                                       \
+               spin_unlock_irq(&lock);                                 \
+               ret = schedule_timeout(ret);                            \
+               spin_lock_irq(&lock);                                   \
+               if (!ret)                                               \
+                       break;                                          \
+       }                                                               \
+       finish_wait(&wq, &__wait);                                      \
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
+ *             The condition is checked under the lock. This is expected
+ *             to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ *       and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies otherwise
+ * if the condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
+                                                 timeout)              \
+({                                                                     \
+       int __ret = timeout;                                            \
+                                                                       \
+       if (!(condition))                                               \
+               __wait_event_interruptible_lock_irq_timeout(            \
+                                       wq, condition, lock, __ret);    \
+       __ret;                                                          \
+})
+
 
 /*
  * These are the old interfaces to sleep waiting for an event.
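[Editor's note] The kernel-doc above defines the calling convention of the new wait_event_interruptible_lock_irq_timeout() helper; the zfcp_qdio_sbal_get() hunk earlier in this commit is its first user. A minimal usage sketch under assumed names (my_dev, its lock, waitqueue and free_slots counter are illustrative, not from the commit):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_dev {
	spinlock_t lock;
	wait_queue_head_t wq;
	int free_slots;
};

/* The macro must be entered with dev->lock held, as its documentation says;
 * it drops the lock around schedule_timeout() and reacquires it. */
static int my_dev_get_slot(struct my_dev *dev)
{
	long ret;

	spin_lock_irq(&dev->lock);
	ret = wait_event_interruptible_lock_irq_timeout(dev->wq,
							dev->free_slots > 0,
							dev->lock, 5 * HZ);
	/* dev->lock is held again here, however the wait ended. */
	if (ret > 0)
		dev->free_slots--;	/* condition became true within 5s */
	spin_unlock_irq(&dev->lock);

	if (ret == 0)
		return -ETIMEDOUT;	/* timed out with condition still false */
	if (ret < 0)
		return ret;		/* -ERESTARTSYS: interrupted by a signal */
	return 0;
}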
index 260f83f16bcfb3e79f2572604fc373c1830e2310..f667248202b6be9aacd3dc51f93eb43c9936a217 100644 (file)
@@ -135,6 +135,8 @@ extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
 extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
                               __be32 mtu);
 extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+                                  u32 mark);
 extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
 
 struct netlink_callback;
index 6cf06bfd841bc5e7c77df6cd32456309ff460d46..2fee45bdec0ab5dbaf50503c00851cf798363c7c 100644 (file)
@@ -133,4 +133,38 @@ struct ip_beet_phdr {
        __u8 reserved;
 };
 
+/* index values for the variables in ipv4_devconf */
+enum
+{
+       IPV4_DEVCONF_FORWARDING=1,
+       IPV4_DEVCONF_MC_FORWARDING,
+       IPV4_DEVCONF_PROXY_ARP,
+       IPV4_DEVCONF_ACCEPT_REDIRECTS,
+       IPV4_DEVCONF_SECURE_REDIRECTS,
+       IPV4_DEVCONF_SEND_REDIRECTS,
+       IPV4_DEVCONF_SHARED_MEDIA,
+       IPV4_DEVCONF_RP_FILTER,
+       IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
+       IPV4_DEVCONF_BOOTP_RELAY,
+       IPV4_DEVCONF_LOG_MARTIANS,
+       IPV4_DEVCONF_TAG,
+       IPV4_DEVCONF_ARPFILTER,
+       IPV4_DEVCONF_MEDIUM_ID,
+       IPV4_DEVCONF_NOXFRM,
+       IPV4_DEVCONF_NOPOLICY,
+       IPV4_DEVCONF_FORCE_IGMP_VERSION,
+       IPV4_DEVCONF_ARP_ANNOUNCE,
+       IPV4_DEVCONF_ARP_IGNORE,
+       IPV4_DEVCONF_PROMOTE_SECONDARIES,
+       IPV4_DEVCONF_ARP_ACCEPT,
+       IPV4_DEVCONF_ARP_NOTIFY,
+       IPV4_DEVCONF_ACCEPT_LOCAL,
+       IPV4_DEVCONF_SRC_VMARK,
+       IPV4_DEVCONF_PROXY_ARP_PVLAN,
+       IPV4_DEVCONF_ROUTE_LOCALNET,
+       __IPV4_DEVCONF_MAX
+};
+
+#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
+
 #endif /* _UAPI_LINUX_IP_H */
index 247084be059030162838199c953ebac6409f7e01..fed81b576f29ad8003c48ed293bb9cd42bd95922 100644 (file)
@@ -955,7 +955,7 @@ config MEMCG_SWAP_ENABLED
          Memory Resource Controller Swap Extension comes with its price in
          a bigger memory consumption. General purpose distribution kernels
          which want to enable the feature but keep it disabled by default
-         and let the user enable it by swapaccount boot command line
+         and let the user enable it by swapaccount=1 boot command line
          parameter should have this option unselected.
          For those who want to have the feature enabled by default should
          select this option (if, for some reason, they need to disable it
index 010a0083c0ae4cfa222e2d51f2e951893bac28c4..ea1966db34f2842d859045e2b2d8c7f20ebd196e 100644 (file)
@@ -475,13 +475,17 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 
        /*
         * Cpusets with tasks - existing or newly being attached - can't
-        * have empty cpus_allowed or mems_allowed.
+        * be changed to have empty cpus_allowed or mems_allowed.
         */
        ret = -ENOSPC;
-       if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
-           (cpumask_empty(trial->cpus_allowed) &&
-            nodes_empty(trial->mems_allowed)))
-               goto out;
+       if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
+               if (!cpumask_empty(cur->cpus_allowed) &&
+                   cpumask_empty(trial->cpus_allowed))
+                       goto out;
+               if (!nodes_empty(cur->mems_allowed) &&
+                   nodes_empty(trial->mems_allowed))
+                       goto out;
+       }
 
        ret = 0;
 out:
index a326f27d7f09e0942ae2df9eb010d8caaabdb725..0b479a6a22bb8fe30e2b9d6c76c2ddb1d5646ae1 100644 (file)
@@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
        BUG_ON(bits > 32);
        WARN_ON(!irqs_disabled());
        read_sched_clock = read;
-       sched_clock_mask = (1 << bits) - 1;
+       sched_clock_mask = (1ULL << bits) - 1;
        cd.rate = rate;
 
        /* calculate the mult/shift to convert counter ticks to ns. */
index e77edc97e036b4e8216ae8b54fe0e6cafaa2fe71..e8a1516cc0a36d3c247d2bd4a18ef6d69f17b42c 100644 (file)
@@ -182,7 +182,8 @@ static bool can_stop_full_tick(void)
                 * Don't allow the user to think they can get
                 * full NO_HZ with this machine.
                 */
-               WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
+               WARN_ONCE(have_nohz_full_mask,
+                         "NO_HZ FULL will not work with unstable sched clock");
                return false;
        }
 #endif
@@ -343,8 +344,6 @@ static int tick_nohz_init_all(void)
 
 void __init tick_nohz_init(void)
 {
-       int cpu;
-
        if (!have_nohz_full_mask) {
                if (tick_nohz_init_all() < 0)
                        return;
index dec68bd4e9d8e996404faa065b67354f1defa8eb..d550920e040c4515c7c865bb6ba2ab884f7ef7bd 100644 (file)
@@ -363,8 +363,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
 
 /**
  * wake_up_atomic_t - Wake up a waiter on a atomic_t
- * @word: The word being waited on, a kernel virtual address
- * @bit: The bit of the word being waited on
+ * @p: The atomic_t being waited on, a kernel virtual address
  *
  * Wake up anyone waiting for the atomic_t to go to zero.
  *
index fd94058bd7f9496fe69699b85177e1c18e4af2e7..28321d8f75eff530ca6832f67f0418cf0abad936 100644 (file)
@@ -437,7 +437,7 @@ int lz4_compress(const unsigned char *src, size_t src_len,
 exit:
        return ret;
 }
-EXPORT_SYMBOL_GPL(lz4_compress);
+EXPORT_SYMBOL(lz4_compress);
 
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
 MODULE_DESCRIPTION("LZ4 compressor");
index d3414eae73a1dfcbbe520e116385ae7c44cb7b44..411be80ddb46942242fb11013844219f8a5bf39c 100644 (file)
@@ -299,7 +299,7 @@ exit_0:
        return ret;
 }
 #ifndef STATIC
-EXPORT_SYMBOL_GPL(lz4_decompress);
+EXPORT_SYMBOL(lz4_decompress);
 #endif
 
 int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
@@ -319,8 +319,8 @@ exit_0:
        return ret;
 }
 #ifndef STATIC
-EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize);
+EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
 
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
 MODULE_DESCRIPTION("LZ4 Decompressor");
 #endif
index eb1a74f5e36828ca3617e721daa06ae43ed658c6..f344f76b6559620bf7ae3bdaaeb6ab25265a344e 100644 (file)
@@ -533,7 +533,7 @@ int lz4hc_compress(const unsigned char *src, size_t src_len,
 exit:
        return ret;
 }
-EXPORT_SYMBOL_GPL(lz4hc_compress);
+EXPORT_SYMBOL(lz4hc_compress);
 
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
 MODULE_DESCRIPTION("LZ4HC compressor");
index 162becacf97c3650eb39e54b21a0139a9679a764..0a7e494b2bcdab875d7aebdef5ca7346972053b9 100644 (file)
@@ -2,3 +2,4 @@ mktables
 altivec*.c
 int*.c
 tables.c
+neon?.c
index 9f7c184725d7329d25fe35f1a0c3ae701243b753..b4625787c7eeb462d2ba220a5bf497fb522cf32c 100644 (file)
@@ -5,6 +5,7 @@ raid6_pq-y      += algos.o recov.o tables.o int1.o int2.o int4.o \
 
 raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o
 raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o
+raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o
 
 hostprogs-y    += mktables
 
@@ -16,6 +17,21 @@ ifeq ($(CONFIG_ALTIVEC),y)
 altivec_flags := -maltivec -mabi=altivec
 endif
 
+# The GCC option -ffreestanding is required in order to compile code containing
+# ARM/NEON intrinsics in a non C99-compliant environment (such as the kernel)
+ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
+NEON_FLAGS := -ffreestanding
+ifeq ($(ARCH),arm)
+NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon
+endif
+ifeq ($(ARCH),arm64)
+CFLAGS_REMOVE_neon1.o += -mgeneral-regs-only
+CFLAGS_REMOVE_neon2.o += -mgeneral-regs-only
+CFLAGS_REMOVE_neon4.o += -mgeneral-regs-only
+CFLAGS_REMOVE_neon8.o += -mgeneral-regs-only
+endif
+endif
+
 targets += int1.c
 $(obj)/int1.c:   UNROLL := 1
 $(obj)/int1.c:   $(src)/int.uc $(src)/unroll.awk FORCE
@@ -70,6 +86,30 @@ $(obj)/altivec8.c:   UNROLL := 8
 $(obj)/altivec8.c:   $(src)/altivec.uc $(src)/unroll.awk FORCE
        $(call if_changed,unroll)
 
+CFLAGS_neon1.o += $(NEON_FLAGS)
+targets += neon1.c
+$(obj)/neon1.c:   UNROLL := 1
+$(obj)/neon1.c:   $(src)/neon.uc $(src)/unroll.awk FORCE
+       $(call if_changed,unroll)
+
+CFLAGS_neon2.o += $(NEON_FLAGS)
+targets += neon2.c
+$(obj)/neon2.c:   UNROLL := 2
+$(obj)/neon2.c:   $(src)/neon.uc $(src)/unroll.awk FORCE
+       $(call if_changed,unroll)
+
+CFLAGS_neon4.o += $(NEON_FLAGS)
+targets += neon4.c
+$(obj)/neon4.c:   UNROLL := 4
+$(obj)/neon4.c:   $(src)/neon.uc $(src)/unroll.awk FORCE
+       $(call if_changed,unroll)
+
+CFLAGS_neon8.o += $(NEON_FLAGS)
+targets += neon8.c
+$(obj)/neon8.c:   UNROLL := 8
+$(obj)/neon8.c:   $(src)/neon.uc $(src)/unroll.awk FORCE
+       $(call if_changed,unroll)
+
 quiet_cmd_mktable = TABLE   $@
       cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
 
index 6d7316fe9f30207d83bfe6f086ca98984040207d..74e6f5629dbc793464f402087818df6fea11ee1a 100644 (file)
@@ -70,6 +70,12 @@ const struct raid6_calls * const raid6_algos[] = {
        &raid6_intx2,
        &raid6_intx4,
        &raid6_intx8,
+#ifdef CONFIG_KERNEL_MODE_NEON
+       &raid6_neonx1,
+       &raid6_neonx2,
+       &raid6_neonx4,
+       &raid6_neonx8,
+#endif
        NULL
 };
 
diff --git a/lib/raid6/neon.c b/lib/raid6/neon.c
new file mode 100644 (file)
index 0000000..36ad470
--- /dev/null
+++ b/lib/raid6/neon.c
@@ -0,0 +1,58 @@
+/*
+ * linux/lib/raid6/neon.c - RAID6 syndrome calculation using ARM NEON intrinsics
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/raid/pq.h>
+
+#ifdef __KERNEL__
+#include <asm/neon.h>
+#else
+#define kernel_neon_begin()
+#define kernel_neon_end()
+#define cpu_has_neon()         (1)
+#endif
+
+/*
+ * There are 2 reasons these wrappers are kept in a separate compilation unit
+ * from the actual implementations in neonN.c (generated from neon.uc by
+ * unroll.awk):
+ * - the actual implementations use NEON intrinsics, and the GCC support header
+ *   (arm_neon.h) is not fully compatible (type wise) with the kernel;
+ * - the neonN.c files are compiled with -mfpu=neon and optimization enabled,
+ *   and we have to make sure that we never use *any* NEON/VFP instructions
+ *   outside a kernel_neon_begin()/kernel_neon_end() pair.
+ */
+
+#define RAID6_NEON_WRAPPER(_n)                                         \
+       static void raid6_neon ## _n ## _gen_syndrome(int disks,        \
+                                       size_t bytes, void **ptrs)      \
+       {                                                               \
+               void raid6_neon ## _n  ## _gen_syndrome_real(int,       \
+                                               unsigned long, void**); \
+               kernel_neon_begin();                                    \
+               raid6_neon ## _n ## _gen_syndrome_real(disks,           \
+                                       (unsigned long)bytes, ptrs);    \
+               kernel_neon_end();                                      \
+       }                                                               \
+       struct raid6_calls const raid6_neonx ## _n = {                  \
+               raid6_neon ## _n ## _gen_syndrome,                      \
+               raid6_have_neon,                                        \
+               "neonx" #_n,                                            \
+               0                                                       \
+       }
+
+static int raid6_have_neon(void)
+{
+       return cpu_has_neon();
+}
+
+RAID6_NEON_WRAPPER(1);
+RAID6_NEON_WRAPPER(2);
+RAID6_NEON_WRAPPER(4);
+RAID6_NEON_WRAPPER(8);
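
Note: for reference, RAID6_NEON_WRAPPER(1) above expands to roughly the following (reformatted here, comments added); the point of the wrapper is that the NEON routine generated from neon.uc only ever runs between kernel_neon_begin() and kernel_neon_end():

static void raid6_neon1_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	/* generated from neon.uc by unroll.awk, built with -mfpu=neon */
	void raid6_neon1_gen_syndrome_real(int, unsigned long, void **);

	kernel_neon_begin();	/* preserve the task's NEON/VFP context */
	raid6_neon1_gen_syndrome_real(disks, (unsigned long)bytes, ptrs);
	kernel_neon_end();	/* give the NEON unit back */
}
struct raid6_calls const raid6_neonx1 = {
	raid6_neon1_gen_syndrome,
	raid6_have_neon,	/* valid only when the CPU has NEON */
	"neonx1",		/* name reported by the raid6 benchmark */
	0			/* prefer flag */
};
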
diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc
new file mode 100644 (file)
index 0000000..1b9ed79
--- /dev/null
+++ b/lib/raid6/neon.uc
@@ -0,0 +1,80 @@
+/* -----------------------------------------------------------------------
+ *
+ *   neon.uc - RAID-6 syndrome calculation using ARM NEON instructions
+ *
+ *   Copyright (C) 2012 Rob Herring
+ *
+ *   Based on altivec.uc:
+ *     Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 53 Temple Place Ste 330,
+ *   Boston MA 02111-1307, USA; either version 2 of the License, or
+ *   (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * neon$#.c
+ *
+ * $#-way unrolled NEON intrinsics math RAID-6 instruction set
+ *
+ * This file is postprocessed using unroll.awk
+ */
+
+#include <arm_neon.h>
+
+typedef uint8x16_t unative_t;
+
+#define NBYTES(x) ((unative_t){x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
+#define NSIZE  sizeof(unative_t)
+
+/*
+ * The SHLBYTE() operation shifts each byte left by 1, *not*
+ * rolling over into the next byte
+ */
+static inline unative_t SHLBYTE(unative_t v)
+{
+       return vshlq_n_u8(v, 1);
+}
+
+/*
+ * The MASK() operation returns 0xFF in any byte for which the high
+ * bit is 1, 0x00 for any byte for which the high bit is 0.
+ */
+static inline unative_t MASK(unative_t v)
+{
+       const uint8x16_t temp = NBYTES(0);
+       return (unative_t)vcltq_s8((int8x16_t)v, (int8x16_t)temp);
+}
+
+void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
+{
+       uint8_t **dptr = (uint8_t **)ptrs;
+       uint8_t *p, *q;
+       int d, z, z0;
+
+       register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+       const unative_t x1d = NBYTES(0x1d);
+
+       z0 = disks - 3;         /* Highest data disk */
+       p = dptr[z0+1];         /* XOR parity */
+       q = dptr[z0+2];         /* RS syndrome */
+
+       for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+               wq$$ = wp$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
+               for ( z = z0-1 ; z >= 0 ; z-- ) {
+                       wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]);
+                       wp$$ = veorq_u8(wp$$, wd$$);
+                       w2$$ = MASK(wq$$);
+                       w1$$ = SHLBYTE(wq$$);
+
+                       w2$$ = vandq_u8(w2$$, x1d);
+                       w1$$ = veorq_u8(w1$$, w2$$);
+                       wq$$ = veorq_u8(w1$$, wd$$);
+               }
+               vst1q_u8(&p[d+NSIZE*$$], wp$$);
+               vst1q_u8(&q[d+NSIZE*$$], wq$$);
+       }
+}
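
Note: the vector math above is the usual RAID-6 Q-syndrome recurrence evaluated with Horner's rule; per byte lane it is equivalent to the scalar sketch below (helper names are mine, not part of the patch). SHLBYTE(), MASK() and the AND with x1d together implement a GF(2^8) multiply-by-2 with the RAID-6 polynomial 0x11d, 16 bytes at a time.

#include <stddef.h>
#include <stdint.h>

/* GF(2^8) multiply-by-2: shift left, fold 0x1d back in if the high bit was set */
static inline uint8_t gf2_mul2(uint8_t q)
{
	uint8_t fold = (q & 0x80) ? 0x1d : 0x00;	/* MASK() & x1d */
	return (uint8_t)(q << 1) ^ fold;		/* SHLBYTE() ^ fold */
}

/* scalar equivalent of one byte lane of the loop above */
void gen_syndrome_byte(int disks, size_t bytes, uint8_t **dptr)
{
	int z0 = disks - 3;		/* highest data disk */
	uint8_t *p = dptr[z0 + 1];	/* XOR parity */
	uint8_t *q = dptr[z0 + 2];	/* RS syndrome */

	for (size_t d = 0; d < bytes; d++) {
		uint8_t wp = dptr[z0][d], wq = wp;

		for (int z = z0 - 1; z >= 0; z--) {
			uint8_t wd = dptr[z][d];

			wp ^= wd;			/* P: plain XOR */
			wq = gf2_mul2(wq) ^ wd;		/* Q: Horner step */
		}
		p[d] = wp;
		q[d] = wq;
	}
}
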
index 087332dbf8aa164630148626e33c523dfd52dbf7..28afa1a06e033f264d2a9228d654e7fc50b6fea4 100644 (file)
@@ -22,11 +22,23 @@ ifeq ($(ARCH),x86_64)
         IS_X86 = yes
 endif
 
+ifeq ($(ARCH),arm)
+        CFLAGS += -I../../../arch/arm/include -mfpu=neon
+        HAS_NEON = yes
+endif
+ifeq ($(ARCH),arm64)
+        CFLAGS += -I../../../arch/arm64/include
+        HAS_NEON = yes
+endif
+
 ifeq ($(IS_X86),yes)
         OBJS   += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o
         CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" |   \
                     gcc -c -x assembler - >&/dev/null &&       \
                     rm ./-.o && echo -DCONFIG_AS_AVX2=1)
+else ifeq ($(HAS_NEON),yes)
+        OBJS   += neon.o neon1.o neon2.o neon4.o neon8.o
+        CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
 else
         HAS_ALTIVEC := $(shell echo -e '\#include <altivec.h>\nvector int a;' |\
                          gcc -c -x c - >&/dev/null && \
@@ -55,6 +67,18 @@ raid6.a: $(OBJS)
 raid6test: test.c raid6.a
        $(CC) $(CFLAGS) -o raid6test $^
 
+neon1.c: neon.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=1 < neon.uc > $@
+
+neon2.c: neon.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=2 < neon.uc > $@
+
+neon4.c: neon.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=4 < neon.uc > $@
+
+neon8.c: neon.uc ../unroll.awk
+       $(AWK) ../unroll.awk -vN=8 < neon.uc > $@
+
 altivec1.c: altivec.uc ../unroll.awk
        $(AWK) ../unroll.awk -vN=1 < altivec.uc > $@
 
@@ -89,7 +113,7 @@ tables.c: mktables
        ./mktables > tables.c
 
 clean:
-       rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c tables.c raid6test
+       rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c neon*.c tables.c raid6test
 
 spotless: clean
        rm -f *~
index c5792a5d87cede8cf2c5474156c1596f51f5ba61..0878ff7c26a95bcf40e23e6e257ee8b9e1704dac 100644 (file)
@@ -6969,7 +6969,6 @@ struct cgroup_subsys mem_cgroup_subsys = {
 #ifdef CONFIG_MEMCG_SWAP
 static int __init enable_swap_account(char *s)
 {
-       /* consider enabled if no parameter or 1 is given */
        if (!strcmp(s, "1"))
                really_do_swap_account = 1;
        else if (!strcmp(s, "0"))
index 8335dbd3fc358ed1fe9d6521785ecee1dc031313..e43dc555069dbe609cd1afae76e56fc4b38e4951 100644 (file)
@@ -2909,14 +2909,8 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
 /* common code */
 
-static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
-{
-       return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
-                               dentry->d_name.name);
-}
-
 static struct dentry_operations anon_ops = {
-       .d_dname = shmem_dname
+       .d_dname = simple_dname
 };
 
 /**
index 688a0419756bfc6ce2a9f632f6e84a6ed42debcf..857e1b8349ee417e96fd4e6afb840c541245d04c 100644 (file)
@@ -432,12 +432,16 @@ find_router:
 
        switch (packet_type) {
        case BATADV_UNICAST:
-               batadv_unicast_prepare_skb(skb, orig_node);
+               if (!batadv_unicast_prepare_skb(skb, orig_node))
+                       goto out;
+
                header_len = sizeof(struct batadv_unicast_packet);
                break;
        case BATADV_UNICAST_4ADDR:
-               batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
-                                                packet_subtype);
+               if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
+                                                     packet_subtype))
+                       goto out;
+
                header_len = sizeof(struct batadv_unicast_4addr_packet);
                break;
        default:
index 60aca9109a508d5a75c5b34befbdca8c7364fe52..ffd5874f25920a94c74f5d97ebf4a0e2aa77f48d 100644 (file)
@@ -161,7 +161,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
        if (!pv)
                return;
 
-       for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+       for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
                f = __br_fdb_get(br, br->dev->dev_addr, vid);
                if (f && f->is_local && !f->dst)
                        fdb_delete(br, f);
@@ -730,7 +730,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                /* VID was specified, so use it. */
                err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
        } else {
-               if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+               if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
                        err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
                        goto out;
                }
@@ -739,7 +739,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                 * specify a VLAN.  To be nice, add/update entry for every
                 * vlan on this port.
                 */
-               for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+               for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
                        err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
                        if (err)
                                goto out;
@@ -817,7 +817,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 
                err = __br_fdb_delete(p, addr, vid);
        } else {
-               if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+               if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
                        err = __br_fdb_delete(p, addr, 0);
                        goto out;
                }
@@ -827,7 +827,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
                 * vlan on this port.
                 */
                err = -ENOENT;
-               for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+               for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
                        err &= __br_fdb_delete(p, addr, vid);
                }
        }
index 1fc30abd3a523912376ce01fcae932f3b6b8c746..b9259efa636ef8fe2b79ec25de49a16efa9034db 100644 (file)
@@ -132,7 +132,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                else
                        pv = br_get_vlan_info(br);
 
-               if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
+               if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
                        goto done;
 
                af = nla_nest_start(skb, IFLA_AF_SPEC);
@@ -140,7 +140,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                        goto nla_put_failure;
 
                pvid = br_get_pvid(pv);
-               for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+               for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
                        vinfo.vid = vid;
                        vinfo.flags = 0;
                        if (vid == pvid)
index bd58b45f5f901fd4c6a3ad00abe068baeba8d898..9a9ffe7e4019741d75456e3b9afdba21c44785b3 100644 (file)
@@ -108,7 +108,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
 
        clear_bit(vid, v->vlan_bitmap);
        v->num_vlans--;
-       if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+       if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
                if (v->port_idx)
                        rcu_assign_pointer(v->parent.port->vlan_info, NULL);
                else
@@ -122,7 +122,7 @@ static void __vlan_flush(struct net_port_vlans *v)
 {
        smp_wmb();
        v->pvid = 0;
-       bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
+       bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
        if (v->port_idx)
                rcu_assign_pointer(v->parent.port->vlan_info, NULL);
        else
index 5423223e93c25074f87c92dc60b338fddcb66f3f..b2f6c74861af6d8e1209c65823bef34f806609c2 100644 (file)
@@ -1120,6 +1120,13 @@ new_segment:
                                if (!skb)
                                        goto wait_for_memory;
 
+                               /*
+                                * All packets are restored as if they have
+                                * already been sent.
+                                */
+                               if (tp->repair)
+                                       TCP_SKB_CB(skb)->when = tcp_time_stamp;
+
                                /*
                                 * Check whether we can use HW checksum.
                                 */
index da4241c8c7dafe0004a53ed85dfc270cd3be16ba..498ea99194af69eeaab39f05134e4eb646e22c8a 100644 (file)
@@ -1126,12 +1126,10 @@ retry:
        if (ifp->flags & IFA_F_OPTIMISTIC)
                addr_flags |= IFA_F_OPTIMISTIC;
 
-       ift = !max_addresses ||
-             ipv6_count_addresses(idev) < max_addresses ?
-               ipv6_add_addr(idev, &addr, NULL, tmp_plen,
-                             ipv6_addr_scope(&addr), addr_flags,
-                             tmp_valid_lft, tmp_prefered_lft) : NULL;
-       if (IS_ERR_OR_NULL(ift)) {
+       ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
+                           ipv6_addr_scope(&addr), addr_flags,
+                           tmp_valid_lft, tmp_prefered_lft);
+       if (IS_ERR(ift)) {
                in6_ifa_put(ifp);
                in6_dev_put(idev);
                pr_info("%s: retry temporary address regeneration\n", __func__);
index 79aa9652ed86d9cac754366c075800fbbe4aca1b..04d31c2fbef1ede543bbc4942873722f66a9b524 100644 (file)
@@ -1369,8 +1369,10 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
        if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
                return;
 
-       if (!ndopts.nd_opts_rh)
+       if (!ndopts.nd_opts_rh) {
+               ip6_redirect_no_header(skb, dev_net(skb->dev), 0, 0);
                return;
+       }
 
        hdr = (u8 *)ndopts.nd_opts_rh;
        hdr += 8;
index 790d9f4b8b0b21c1d4dd4577ee6a472bd96fd729..1aeb473b2cc695d8d2b0a3696972ec9228455d14 100644 (file)
@@ -490,6 +490,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
        ipv6_hdr(head)->payload_len = htons(payload_len);
        ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
        IP6CB(head)->nhoff = nhoff;
+       IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
 
        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -524,6 +525,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
        struct net *net = dev_net(skb_dst(skb)->dev);
        int evicted;
 
+       if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
+               goto fail_hdr;
+
        IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
 
        /* Jumbo payload inhibits frag. header */
@@ -544,6 +548,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
                                 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
 
                IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
+               IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
                return 1;
        }
 
index b70f8979003b5e23c4b1e78e44713ba3d3520de9..8d9a93ed9c5926924b809b5bb68b267c07f3ca61 100644 (file)
@@ -1178,6 +1178,27 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
 }
 EXPORT_SYMBOL_GPL(ip6_redirect);
 
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+                           u32 mark)
+{
+       const struct ipv6hdr *iph = ipv6_hdr(skb);
+       const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
+       struct dst_entry *dst;
+       struct flowi6 fl6;
+
+       memset(&fl6, 0, sizeof(fl6));
+       fl6.flowi6_oif = oif;
+       fl6.flowi6_mark = mark;
+       fl6.flowi6_flags = 0;
+       fl6.daddr = msg->dest;
+       fl6.saddr = iph->daddr;
+
+       dst = ip6_route_output(net, NULL, &fl6);
+       if (!dst->error)
+               rt6_do_redirect(dst, NULL, skb);
+       dst_release(dst);
+}
+
 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
 {
        ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
index f85f8a2ad6cf002fa438bef15597496897d00483..512718adb0d59df5120e047c69c973556d1c6fb6 100644 (file)
@@ -789,10 +789,6 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
        struct net *net = sock_net(skb->sk);
        int chains_to_skip = cb->args[0];
        int fams_to_skip = cb->args[1];
-       bool need_locking = chains_to_skip || fams_to_skip;
-
-       if (need_locking)
-               genl_lock();
 
        for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
                n = 0;
@@ -814,9 +810,6 @@ errout:
        cb->args[0] = i;
        cb->args[1] = n;
 
-       if (need_locking)
-               genl_unlock();
-
        return skb->len;
 }
 
index 4b66c752eae5d99b2bfe5fa7832a1301d20519ef..75c8bbf598c86b54b6160d88d1d1177ce04426b5 100644 (file)
@@ -3259,9 +3259,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 
                if (po->tp_version == TPACKET_V3) {
                        lv = sizeof(struct tpacket_stats_v3);
+                       st.stats3.tp_packets += st.stats3.tp_drops;
                        data = &st.stats3;
                } else {
                        lv = sizeof(struct tpacket_stats);
+                       st.stats1.tp_packets += st.stats1.tp_drops;
                        data = &st.stats1;
                }
 
index 3fcba69817e579244e836b2e2d39a1aab14cc210..5f6e982cdcf4f04fef43872756c44b3c14c4705d 100644 (file)
@@ -2622,8 +2622,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
 
        hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_NEW_KEY);
-       if (IS_ERR(hdr))
-               return PTR_ERR(hdr);
+       if (!hdr)
+               return -ENOBUFS;
 
        cookie.msg = msg;
        cookie.idx = key_idx;
@@ -6507,6 +6507,9 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
                                           NL80211_CMD_TESTMODE);
                struct nlattr *tmdata;
 
+               if (!hdr)
+                       break;
+
                if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
                        genlmsg_cancel(skb, hdr);
                        break;
@@ -6951,9 +6954,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
 
        hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_REMAIN_ON_CHANNEL);
-
-       if (IS_ERR(hdr)) {
-               err = PTR_ERR(hdr);
+       if (!hdr) {
+               err = -ENOBUFS;
                goto free_msg;
        }
 
@@ -7251,9 +7253,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
 
                hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                                     NL80211_CMD_FRAME);
-
-               if (IS_ERR(hdr)) {
-                       err = PTR_ERR(hdr);
+               if (!hdr) {
+                       err = -ENOBUFS;
                        goto free_msg;
                }
        }
@@ -8132,9 +8133,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
 
        hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_PROBE_CLIENT);
-
-       if (IS_ERR(hdr)) {
-               err = PTR_ERR(hdr);
+       if (!hdr) {
+               err = -ENOBUFS;
                goto free_msg;
        }
 
index 81c8a10d743c04fb76981498b42588f9b821f33f..20e86a95dc4e0ed358485f04208c670297ee6517 100644 (file)
@@ -976,21 +976,19 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
                        struct net_device *dev, u16 reason, bool wextev)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
-       int err;
+       int err = 0;
 
        ASSERT_WDEV_LOCK(wdev);
 
        kfree(wdev->connect_keys);
        wdev->connect_keys = NULL;
 
-       if (wdev->conn) {
+       if (wdev->conn)
                err = cfg80211_sme_disconnect(wdev, reason);
-       } else if (!rdev->ops->disconnect) {
+       else if (!rdev->ops->disconnect)
                cfg80211_mlme_down(rdev, dev);
-               err = 0;
-       } else {
+       else if (wdev->current_bss)
                err = rdev_disconnect(rdev, dev, reason);
-       }
 
        return err;
 }