git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branches 'iommu/fixes', 'arm/exynos', 'arm/renesas', 'arm/smmu', 'arm/mediatek...
author Joerg Roedel <jroedel@suse.de>
Fri, 10 Feb 2017 14:13:10 +0000 (15:13 +0100)
committer Joerg Roedel <jroedel@suse.de>
Fri, 10 Feb 2017 14:13:10 +0000 (15:13 +0100)
209 files changed:
MAINTAINERS
Makefile
arch/arm64/crypto/aes-modes.S
arch/powerpc/Kconfig
arch/powerpc/include/asm/cpu_has_feature.h
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/module.h
arch/powerpc/include/asm/stackprotector.h [deleted file]
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/mm/pgtable-radix.c
arch/sparc/include/asm/mmu_context_64.h
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/sstate.c
arch/sparc/kernel/traps_64.c
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore.c
arch/x86/include/asm/microcode.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/hpet.c
arch/x86/kvm/x86.c
arch/x86/platform/efi/efi_64.c
arch/xtensa/kernel/setup.c
crypto/algapi.c
drivers/acpi/arm64/iort.c
drivers/ata/libata-core.c
drivers/ata/sata_mv.c
drivers/base/firmware_class.c
drivers/base/memory.c
drivers/bcma/bcma_private.h
drivers/bcma/driver_chipcommon.c
drivers/bcma/driver_mips.c
drivers/dma/cppi41.c
drivers/dma/pl330.c
drivers/firmware/efi/libstub/fdt.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/nouveau/dispnv04/hw.c
drivers/gpu/drm/nouveau/nouveau_fence.h
drivers/gpu/drm/nouveau/nouveau_led.h
drivers/gpu/drm/nouveau/nouveau_usif.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv84_fence.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-ids.h
drivers/hid/hid-lg.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_wac.c
drivers/hv/ring_buffer.c
drivers/iio/adc/palmas_gpadc.c
drivers/iio/health/afe4403.c
drivers/iio/health/afe4404.c
drivers/iio/health/max30100.c
drivers/iio/humidity/dht11.c
drivers/input/rmi4/rmi_driver.c
drivers/input/touchscreen/wm97xx-core.c
drivers/iommu/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/dmar.c
drivers/iommu/exynos-iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/iommu-sysfs.c
drivers/iommu/iommu.c
drivers/iommu/iova.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/msm_iommu.c
drivers/iommu/msm_iommu.h
drivers/iommu/mtk_iommu.c
drivers/iommu/mtk_iommu.h
drivers/iommu/of_iommu.c
drivers/mmc/host/sdhci.c
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cavium/thunder/thunder_xcv.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/mellanox/mlx4/catas.c
drivers/net/ethernet/mellanox/mlx4/intf.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/hyperv/netvsc.c
drivers/net/phy/micrel.c
drivers/net/wireless/intel/iwlwifi/iwl-8000.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/pci/pcie/aspm.c
drivers/pinctrl/berlin/berlin-bg4ct.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-merrifield.c
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/regulator/axp20x-regulator.c
drivers/regulator/fixed.c
drivers/regulator/twl6030-regulator.c
drivers/rtc/Kconfig
drivers/rtc/rtc-jz4740.c
drivers/scsi/virtio_scsi.c
drivers/staging/greybus/timesync_platform.c
drivers/usb/core/quirks.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/qcserial.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vhost/vhost.c
drivers/virtio/virtio_ring.c
fs/cifs/readdir.c
fs/dax.c
fs/fscache/cookie.c
fs/fscache/netfs.c
fs/fscache/object.c
fs/iomap.c
fs/nfsd/nfs4layouts.c
fs/nfsd/nfs4state.c
fs/nfsd/state.h
fs/nfsd/vfs.c
include/asm-generic/export.h
include/drm/drmP.h
include/drm/drm_connector.h
include/linux/can/core.h
include/linux/cpuhotplug.h
include/linux/export.h
include/linux/fscache-cache.h
include/linux/hyperv.h
include/linux/intel-iommu.h
include/linux/iommu.h
include/linux/irq.h
include/linux/log2.h
include/linux/memory_hotplug.h
include/linux/module.h
include/linux/netdevice.h
include/linux/of_iommu.h
include/linux/percpu-refcount.h
include/net/ipv6.h
include/uapi/linux/ethtool.h
init/Kconfig
kernel/cgroup.c
kernel/events/core.c
kernel/irq/irqdomain.c
kernel/module.c
kernel/trace/trace_hwlat.c
kernel/trace/trace_kprobe.c
mm/filemap.c
mm/kasan/report.c
mm/memory_hotplug.c
mm/shmem.c
mm/zswap.c
net/can/af_can.c
net/can/af_can.h
net/can/bcm.c
net/can/gw.c
net/can/raw.c
net/ipv4/tcp_output.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/sched/cls_flower.c
net/sched/cls_matchall.c
net/sunrpc/auth_gss/gss_rpc_xdr.c
scripts/Makefile.build
scripts/genksyms/genksyms.c
scripts/kallsyms.c
scripts/mod/modpost.c
tools/objtool/arch/x86/decode.c

index 5f10c28b2e158fbcd05f86832062c66121370d73..187b9615e31a85d49fcd10ca96f4f20e19272652 100644 (file)
@@ -10195,7 +10195,6 @@ F:      drivers/media/tuners/qt1010*
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
 M:     QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
 L:     linux-wireless@vger.kernel.org
-L:     ath9k-devel@lists.ath9k.org
 W:     http://wireless.kernel.org/en/users/Drivers/ath9k
 S:     Supported
 F:     drivers/net/wireless/ath/ath9k/
@@ -13066,7 +13065,7 @@ F:      drivers/input/serio/userio.c
 F:     include/uapi/linux/userio.h
 
 VIRTIO CONSOLE DRIVER
-M:     Amit Shah <amit.shah@redhat.com>
+M:     Amit Shah <amit@kernel.org>
 L:     virtualization@lists.linux-foundation.org
 S:     Maintained
 F:     drivers/char/virtio_console.c
index 96b27a888285c5258dc57f666b2876fcece1a1aa..8e223e081c9d3b28f1b19c9472c1970bc0c19234 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -797,7 +797,7 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
        KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
        KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
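
The probe fix above matters because support for 'asm goto' can depend on the flags the compiler is invoked with; passing $(KBUILD_CFLAGS) makes gcc-goto.sh test the compiler the same way the build will actually run it. For illustration, a minimal standalone sketch of the construct being probed (x86 assumed for the jmp; not the kernel's actual probe source):

	#include <stdio.h>

	/* `asm goto` lets inline assembly transfer control to a C label;
	 * if the compiler accepts this, the kernel defines CC_HAVE_ASM_GOTO
	 * and static keys can patch branches directly. */
	static int asm_goto_works(void)
	{
		asm goto("jmp %l[supported]" : : : : supported);
		return 0;
	supported:
		return 1;
	}

	int main(void)
	{
		printf("asm goto taken: %d\n", asm_goto_works());
		return 0;
	}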
index c53dbeae79f2f5fce8353b169e96ff6c79294aa5..838dad5c209fae0f3a660e79d1f5fef8eb1f0c68 100644 (file)
@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
        cbz             w6, .Lcbcencloop
 
        ld1             {v0.16b}, [x5]                  /* get iv */
-       enc_prepare     w3, x2, x5
+       enc_prepare     w3, x2, x6
 
 .Lcbcencloop:
        ld1             {v1.16b}, [x1], #16             /* get next pt block */
        eor             v0.16b, v0.16b, v1.16b          /* ..and xor with iv */
-       encrypt_block   v0, w3, x2, x5, w6
+       encrypt_block   v0, w3, x2, x6, w7
        st1             {v0.16b}, [x0], #16
        subs            w4, w4, #1
        bne             .Lcbcencloop
+       st1             {v0.16b}, [x5]                  /* return iv */
        ret
 AES_ENDPROC(aes_cbc_encrypt)
 
@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
        cbz             w6, .LcbcdecloopNx
 
        ld1             {v7.16b}, [x5]                  /* get iv */
-       dec_prepare     w3, x2, x5
+       dec_prepare     w3, x2, x6
 
 .LcbcdecloopNx:
 #if INTERLEAVE >= 2
@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
 .Lcbcdecloop:
        ld1             {v1.16b}, [x1], #16             /* get next ct block */
        mov             v0.16b, v1.16b                  /* ...and copy to v0 */
-       decrypt_block   v0, w3, x2, x5, w6
+       decrypt_block   v0, w3, x2, x6, w7
        eor             v0.16b, v0.16b, v7.16b          /* xor with iv => pt */
        mov             v7.16b, v1.16b                  /* ct is next iv */
        st1             {v0.16b}, [x0], #16
@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
        bne             .Lcbcdecloop
 .Lcbcdecout:
        FRAME_POP
+       st1             {v7.16b}, [x5]                  /* return iv */
        ret
 AES_ENDPROC(aes_cbc_decrypt)
 
@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
 
 AES_ENTRY(aes_ctr_encrypt)
        FRAME_PUSH
-       cbnz            w6, .Lctrfirst          /* 1st time around? */
-       umov            x5, v4.d[1]             /* keep swabbed ctr in reg */
-       rev             x5, x5
-#if INTERLEAVE >= 2
-       cmn             w5, w4                  /* 32 bit overflow? */
-       bcs             .Lctrinc
-       add             x5, x5, #1              /* increment BE ctr */
-       b               .LctrincNx
-#else
-       b               .Lctrinc
-#endif
-.Lctrfirst:
+       cbz             w6, .Lctrnotfirst       /* 1st time around? */
        enc_prepare     w3, x2, x6
        ld1             {v4.16b}, [x5]
-       umov            x5, v4.d[1]             /* keep swabbed ctr in reg */
-       rev             x5, x5
+
+.Lctrnotfirst:
+       umov            x8, v4.d[1]             /* keep swabbed ctr in reg */
+       rev             x8, x8
 #if INTERLEAVE >= 2
-       cmn             w5, w4                  /* 32 bit overflow? */
+       cmn             w8, w4                  /* 32 bit overflow? */
        bcs             .Lctrloop
 .LctrloopNx:
        subs            w4, w4, #INTERLEAVE
@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
 #if INTERLEAVE == 2
        mov             v0.8b, v4.8b
        mov             v1.8b, v4.8b
-       rev             x7, x5
-       add             x5, x5, #1
+       rev             x7, x8
+       add             x8, x8, #1
        ins             v0.d[1], x7
-       rev             x7, x5
-       add             x5, x5, #1
+       rev             x7, x8
+       add             x8, x8, #1
        ins             v1.d[1], x7
        ld1             {v2.16b-v3.16b}, [x1], #32      /* get 2 input blocks */
        do_encrypt_block2x
@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
        st1             {v0.16b-v1.16b}, [x0], #32
 #else
        ldr             q8, =0x30000000200000001        /* addends 1,2,3[,0] */
-       dup             v7.4s, w5
+       dup             v7.4s, w8
        mov             v0.16b, v4.16b
        add             v7.4s, v7.4s, v8.4s
        mov             v1.16b, v4.16b
@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
        eor             v2.16b, v7.16b, v2.16b
        eor             v3.16b, v5.16b, v3.16b
        st1             {v0.16b-v3.16b}, [x0], #64
-       add             x5, x5, #INTERLEAVE
+       add             x8, x8, #INTERLEAVE
 #endif
-       cbz             w4, .LctroutNx
-.LctrincNx:
-       rev             x7, x5
+       rev             x7, x8
        ins             v4.d[1], x7
+       cbz             w4, .Lctrout
        b               .LctrloopNx
-.LctroutNx:
-       sub             x5, x5, #1
-       rev             x7, x5
-       ins             v4.d[1], x7
-       b               .Lctrout
 .Lctr1x:
        adds            w4, w4, #INTERLEAVE
        beq             .Lctrout
@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
 .Lctrloop:
        mov             v0.16b, v4.16b
        encrypt_block   v0, w3, x2, x6, w7
+
+       adds            x8, x8, #1              /* increment BE ctr */
+       rev             x7, x8
+       ins             v4.d[1], x7
+       bcs             .Lctrcarry              /* overflow? */
+
+.Lctrcarrydone:
        subs            w4, w4, #1
        bmi             .Lctrhalfblock          /* blocks < 0 means 1/2 block */
        ld1             {v3.16b}, [x1], #16
        eor             v3.16b, v0.16b, v3.16b
        st1             {v3.16b}, [x0], #16
-       beq             .Lctrout
-.Lctrinc:
-       adds            x5, x5, #1              /* increment BE ctr */
-       rev             x7, x5
-       ins             v4.d[1], x7
-       bcc             .Lctrloop               /* no overflow? */
-       umov            x7, v4.d[0]             /* load upper word of ctr  */
-       rev             x7, x7                  /* ... to handle the carry */
-       add             x7, x7, #1
-       rev             x7, x7
-       ins             v4.d[0], x7
-       b               .Lctrloop
+       bne             .Lctrloop
+
+.Lctrout:
+       st1             {v4.16b}, [x5]          /* return next CTR value */
+       FRAME_POP
+       ret
+
 .Lctrhalfblock:
        ld1             {v3.8b}, [x1]
        eor             v3.8b, v0.8b, v3.8b
        st1             {v3.8b}, [x0]
-.Lctrout:
        FRAME_POP
        ret
+
+.Lctrcarry:
+       umov            x7, v4.d[0]             /* load upper word of ctr  */
+       rev             x7, x7                  /* ... to handle the carry */
+       add             x7, x7, #1
+       rev             x7, x7
+       ins             v4.d[0], x7
+       b               .Lctrcarrydone
 AES_ENDPROC(aes_ctr_encrypt)
        .ltorg
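
The CTR rewrite above keeps the byte-swapped low counter word in x8 and moves 64-bit wraparound handling out of line to .Lctrcarry, which bumps the upper counter word. Below is a C model of the counter arithmetic the assembly implements (a sketch under that reading; the real code keeps the 128-bit big-endian counter in v4):

	#include <stdint.h>
	#include <stdio.h>

	/* ctr[0] = high 64 bits, ctr[1] = low 64 bits, host byte order. */
	static void ctr_increment(uint64_t ctr[2])
	{
		if (++ctr[1] == 0)	/* low word wrapped around... */
			++ctr[0];	/* ...carry into the high word */
	}

	int main(void)
	{
		uint64_t ctr[2] = { 0, UINT64_MAX };

		ctr_increment(ctr);	/* exercises the carry path */
		printf("%016llx%016llx\n",
		       (unsigned long long)ctr[0],
		       (unsigned long long)ctr[1]);
		return 0;
	}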
 
index a8ee573fe610bd5e2d8191b4dffb05e134a6d3c2..281f4f1fcd1f68ab2fbc613afa3f2597bd090550 100644 (file)
@@ -164,7 +164,6 @@ config PPC
        select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
        select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_KERNEL_GZIP
-       select HAVE_CC_STACKPROTECTOR
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
        bool "Build a relocatable kernel"
        depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
        select NONSTATIC_KERNEL
+       select MODULE_REL_CRCS if MODVERSIONS
        help
          This builds a kernel image that is capable of running at the
          location the kernel is loaded at. For ppc32, there is no
index b312b152461b0539a22c5939ba7728a38f5d8d32..6e834caa37206a476792823463e81ac4c9617f9c 100644 (file)
@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
        int i;
 
+#ifndef __clang__ /* clang can't cope with this */
        BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
        if (!static_key_initialized) {
index a34c764ca8dd83435faf75307e30e5149e55de4b..233a7e8cc8e32d6cf0ac904b3f02b2f340e3883b 100644 (file)
@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
        int i;
 
+#ifndef __clang__ /* clang can't cope with this */
        BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
        if (!static_key_initialized) {
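
Both the cpu_has_feature() and mmu_has_feature() hunks guard the same construct: BUILD_BUG_ON(!__builtin_constant_p(feature)) makes the build fail unless callers pass a compile-time constant, which GCC can prove here but clang (as of this merge) cannot, so the check is compiled out for clang. A simplified sketch of the idiom, with a hand-rolled stand-in for the kernel's BUILD_BUG_ON:

	/* The negative-array-size trick: sizeof(char[-1]) is a compile-time
	 * error, so a nonzero constant `cond` breaks the build. */
	#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

	static inline int has_feature(unsigned long feature)
	{
	#ifndef __clang__
		/* Fails the build if `feature` is not a constant expression. */
		MY_BUILD_BUG_ON(!__builtin_constant_p(feature));
	#endif
		return !!(feature & 1UL);	/* placeholder predicate */
	}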
index cc12c61ef315fc6ca5d43233bd54889cd19a01a0..53885512b8d31b12acec28dc3ba6688e57fb9615 100644 (file)
@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif
 
-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
deleted file mode 100644 (file)
index 6720190..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function.  The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC.  This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-       unsigned long canary;
-
-       /* Try to get a semi random initial value. */
-       get_random_bytes(&canary, sizeof(canary));
-       canary ^= mftb();
-       canary ^= LINUX_VERSION_CODE;
-
-       current->stack_canary = canary;
-       __stack_chk_guard = current->stack_canary;
-}
-
-#endif /* _ASM_STACKPROTECTOR_H */
index 23f8082d7bfad95f4c9fbb8201e3e58c3104928f..f4c2b52e58b36eb44bcb6be2a2428bc0458f7660 100644 (file)
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
index 0601e6a7297c64ea4b2129011d32ae42a662ac07..195a9fc8f81c8fc41fca8b05239948d4f28d54af 100644 (file)
@@ -91,9 +91,6 @@ int main(void)
        DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-       DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
        DEFINE(KSP, offsetof(struct thread_struct, ksp));
        DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
index d88573bdd0907c6682cf03395bc296155cea9124..b94887165a101557c97fd6a53139ae7df67dbbd9 100644 (file)
@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
        struct eeh_pe *pe = (struct eeh_pe *)data;
-       bool *clear_sw_state = flag;
+       bool clear_sw_state = *(bool *)flag;
        int i, rc = 1;
 
        for (i = 0; rc && i < 3; i++)
index 5742dbdbee4677924ebf0019b891e43879410131..3841d749a430069f4d4f2705c4199c08609b3757 100644 (file)
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
        mtspr   SPRN_SPEFSCR,r0         /* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-       lwz     r0,TSK_STACK_CANARY(r2)
-       lis     r4,__stack_chk_guard@ha
-       stw     r0,__stack_chk_guard@l(r4)
-#endif
+
        lwz     r0,_CCR(r1)
        mtcrf   0xFF,r0
        /* r3-r12 are destroyed -- Cort */
index bb1807184bad5da5f9b65ce69087184e67534c03..0b0f89685b679745895251b011d0db522d4558e1 100644 (file)
@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
        for (end = (void *)vers + size; vers < end; vers++)
                if (vers->name[0] == '.') {
                        memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
-                       /* The TOC symbol has no CRC computed. To avoid CRC
-                        * check failing, we must force it to the expected
-                        * value (see CRC check in module.c).
-                        */
-                       if (!strcmp(vers->name, "TOC."))
-                               vers->crc = -(unsigned long)reloc_start;
-#endif
                }
 }
 
index 04885cec24df1413f90121115cf6569e4aa444e6..5dd056df0baaec576431adb4cbe8ab370d8f44c1 100644 (file)
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
-
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...) printk(KERN_INFO x)
index ec47a939cbdd6dd81c6c05ed6707f28e12d9f0ea..ac83eb04a8b871293c53e7bd6ff4d439b89704a9 100644 (file)
@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
 
        cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
 
+       if (!PHANDLE_VALID(cpu_pkg))
+               return;
+
        prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
        prom.cpu = be32_to_cpu(rval);
 
index cfa53ccc8bafc908e80532a4a64e44ad358dffc7..34f1a0dbc898ee4a28a6adcd41c0ce5d3fe61198 100644 (file)
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                if (!pmdp)
                        return -ENOMEM;
                if (map_page_size == PMD_SIZE) {
-                       ptep = (pte_t *)pudp;
+                       ptep = pmdp_ptep(pmdp);
                        goto set_the_pte;
                }
                ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                }
                pmdp = pmd_offset(pudp, ea);
                if (map_page_size == PMD_SIZE) {
-                       ptep = (pte_t *)pudp;
+                       ptep = pmdp_ptep(pmdp);
                        goto set_the_pte;
                }
                if (!pmd_present(*pmdp)) {
index b84be675e507857e27766b6339e438270aea0ebe..d0317993e9476fd1178a693638d9ede23860171b 100644 (file)
@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
        __tsb_context_switch(__pa(mm->pgd),
-                            &mm->context.tsb_block[0],
+                            &mm->context.tsb_block[MM_TSB_BASE],
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-                            (mm->context.tsb_block[1].tsb ?
-                             &mm->context.tsb_block[1] :
+                            (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
+                             &mm->context.tsb_block[MM_TSB_HUGE] :
                              NULL)
 #else
                             NULL
 #endif
-                            , __pa(&mm->context.tsb_descr[0]));
+                            , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
 }
 
 void tsb_grow(struct mm_struct *mm,
index 3bebf395252cc63ee3b39996f8a0d0431e7faf37..4d0248aa0928695597161d93f325a49311b43e2c 100644 (file)
@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
        unsigned long order = get_order(size);
        unsigned long p;
 
-       p = __get_free_pages(GFP_KERNEL, order);
+       p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate queue.\n");
                prom_halt();
index c59af546f522999342361a5babcb64a1dfab3ccd..3caed40235898698751ff0cf3d6ca7d98dc83da6 100644 (file)
@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =
        "Linux powering off";
 static const char rebooting_msg[32] __attribute__((aligned(32))) =
        "Linux rebooting";
-static const char panicing_msg[32] __attribute__((aligned(32))) =
-       "Linux panicing";
+static const char panicking_msg[32] __attribute__((aligned(32))) =
+       "Linux panicking";
 
 static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
 {
@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {
 
 static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
 {
-       do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
+       do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);
 
        return NOTIFY_DONE;
 }
index 4bc10e44d1ca32a0acdf69b8492e128e7ef0e600..dfc97a47c9a08a330f31040fe120030ebe8cc098 100644 (file)
@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
        atomic_inc(&sun4v_resum_oflow_cnt);
 }
 
+/* Given a set of registers, get the virtual address that was being accessed
+ * by the faulting instruction at tpc.
+ */
+static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
+{
+       unsigned int insn;
+
+       if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
+               return compute_effective_address(regs, insn,
+                                                (insn >> 25) & 0x1f);
+       }
+       return 0;
+}
+
+/* Attempt to handle non-resumable errors generated from userspace.
+ * Returns true if the signal was handled, false otherwise.
+ */
+bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
+                                 struct sun4v_error_entry *ent) {
+
+       unsigned int attrs = ent->err_attrs;
+
+       if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
+               unsigned long addr = ent->err_raddr;
+               siginfo_t info;
+
+               if (addr == ~(u64)0) {
+                       /* This seems highly unlikely to ever occur */
+                       pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
+               } else {
+                       unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
+                                                             PAGE_SIZE);
+
+                       /* Break the unfortunate news. */
+                       pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
+                                addr);
+                       pr_emerg("SUN4V NON-RECOVERABLE ERROR:   Claiming %lu ages.\n",
+                                page_cnt);
+
+                       while (page_cnt-- > 0) {
+                               if (pfn_valid(addr >> PAGE_SHIFT))
+                                       get_page(pfn_to_page(addr >> PAGE_SHIFT));
+                               addr += PAGE_SIZE;
+                       }
+               }
+               info.si_signo = SIGKILL;
+               info.si_errno = 0;
+               info.si_trapno = 0;
+               force_sig_info(info.si_signo, &info, current);
+
+               return true;
+       }
+       if (attrs & SUN4V_ERR_ATTRS_PIO) {
+               siginfo_t info;
+
+               info.si_signo = SIGBUS;
+               info.si_code = BUS_ADRERR;
+               info.si_addr = (void __user *)sun4v_get_vaddr(regs);
+               force_sig_info(info.si_signo, &info, current);
+
+               return true;
+       }
+
+       /* Default to doing nothing */
+       return false;
+}
+
 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
  * Log the event, clear the first word of the entry, and die.
  */
@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
 
        put_cpu();
 
+       if (!(regs->tstate & TSTATE_PRIV) &&
+           sun4v_nonresum_error_user_handled(regs, &local_copy)) {
+               /* DON'T PANIC: This userspace error was handled. */
+               return;
+       }
+
 #ifdef CONFIG_PCI
        /* Check for the special PCI poke sequence. */
        if (pci_poke_in_progress && pci_poke_cpu == cpu) {
index 17c3564d087a48bc24e41417fe6f128b5d7b9f0d..22ef4f72cf320adf510545ce08fd342bc89839d2 100644 (file)
@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
 
 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-       return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+       unsigned int pkgid = topology_logical_package_id(cpu);
+
+       /*
+        * The unsigned check also catches the '-1' return value for non
+        * existent mappings in the topology map.
+        */
+       return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
 }
 
 static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
 
        /* must be done before validate_group */
        pmu = cpu_to_rapl_pmu(event->cpu);
+       if (!pmu)
+               return -EINVAL;
        event->cpu = pmu->cpu;
        event->pmu_private = pmu;
        event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
        struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
        int target;
 
+       if (!pmu) {
+               pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+               if (!pmu)
+                       return -ENOMEM;
+
+               raw_spin_lock_init(&pmu->lock);
+               INIT_LIST_HEAD(&pmu->active_list);
+               pmu->pmu = &rapl_pmus->pmu;
+               pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+               rapl_hrtimer_init(pmu);
+
+               rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+       }
+
        /*
         * Check if there is an online cpu in the package which collects rapl
         * events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
        return 0;
 }
 
-static int rapl_cpu_prepare(unsigned int cpu)
-{
-       struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
-       if (pmu)
-               return 0;
-
-       pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-       if (!pmu)
-               return -ENOMEM;
-
-       raw_spin_lock_init(&pmu->lock);
-       INIT_LIST_HEAD(&pmu->active_list);
-       pmu->pmu = &rapl_pmus->pmu;
-       pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-       pmu->cpu = -1;
-       rapl_hrtimer_init(pmu);
-       rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
-       return 0;
-}
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
        u64 msr_rapl_power_unit_bits;
@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
        /*
         * Install callbacks. Core will call them for each online cpu.
         */
-
-       ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
-                               rapl_cpu_prepare, NULL);
-       if (ret)
-               goto out;
-
        ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
                                "perf/x86/rapl:online",
                                rapl_cpu_online, rapl_cpu_offline);
        if (ret)
-               goto out1;
+               goto out;
 
        ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
        if (ret)
-               goto out2;
+               goto out1;
 
        rapl_advertise();
        return 0;
 
-out2:
-       cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out1:
-       cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
        pr_warn("Initialization failed (%d), disabled\n", ret);
        cleanup_rapl_pmus();
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
 static void __exit intel_rapl_exit(void)
 {
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
-       cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
        perf_pmu_unregister(&rapl_pmus->pmu);
        cleanup_rapl_pmus();
 }
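
The cpu_to_rapl_pmu() fix above relies on unsigned wraparound: topology_logical_package_id() returns -1 for a CPU with no package mapping, and storing that in an unsigned int yields UINT_MAX, so the single `pkgid < rapl_pmus->maxpkg` comparison rejects both the error value and genuinely out-of-range ids. A standalone sketch of the idiom (hypothetical lookup() helper, not the kernel function):

	#include <stdio.h>

	struct pmu { int cpu; };

	/* One unsigned comparison catches out-of-range ids and the -1
	 * "no mapping" sentinel, which wraps to UINT_MAX. */
	static struct pmu *lookup(struct pmu **pmus, unsigned int maxpkg, int id)
	{
		unsigned int pkgid = (unsigned int)id;

		return pkgid < maxpkg ? pmus[pkgid] : NULL;
	}

	int main(void)
	{
		struct pmu p = { .cpu = 0 };
		struct pmu *pmus[1] = { &p };

		printf("valid: %p, unmapped: %p\n",
		       (void *)lookup(pmus, 1, 0),
		       (void *)lookup(pmus, 1, -1));
		return 0;
	}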
index 8c4ccdc3a3f3607ee0af4f4006029df3000e0839..1ab45976474d52597700b291e24b0b0a235dff03 100644 (file)
@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
 
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-       return pmu->boxes[topology_logical_package_id(cpu)];
+       unsigned int pkgid = topology_logical_package_id(cpu);
+
+       /*
+        * The unsigned check also catches the '-1' return value for non
+        * existent mappings in the topology map.
+        */
+       return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 }
 
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
        pmu->registered = false;
 }
 
-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
-       struct intel_uncore_pmu *pmu = type->pmus;
-       struct intel_uncore_box *box;
-       int i, pkg;
-
-       if (pmu) {
-               pkg = topology_physical_package_id(cpu);
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[pkg];
-                       if (box)
-                               uncore_box_exit(box);
-               }
-       }
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
-       struct intel_uncore_type **types;
-
-       for (types = uncore_msr_uncores; *types; types++)
-               __uncore_exit_boxes(*types++, smp_processor_id());
-}
-
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
        int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
        }
 }
 
-static int uncore_cpu_dying(unsigned int cpu)
-{
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
-       struct intel_uncore_pmu *pmu;
-       struct intel_uncore_box *box;
-       int i, pkg;
-
-       pkg = topology_logical_package_id(cpu);
-       for (; *types; types++) {
-               type = *types;
-               pmu = type->pmus;
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[pkg];
-                       if (box && atomic_dec_return(&box->refcnt) == 0)
-                               uncore_box_exit(box);
-               }
-       }
-       return 0;
-}
-
-static int first_init;
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
-       struct intel_uncore_pmu *pmu;
-       struct intel_uncore_box *box;
-       int i, pkg, ncpus = 1;
-
-       if (first_init) {
-               /*
-                * On init we get the number of online cpus in the package
-                * and set refcount for all of them.
-                */
-               ncpus = cpumask_weight(topology_core_cpumask(cpu));
-       }
-
-       pkg = topology_logical_package_id(cpu);
-       for (; *types; types++) {
-               type = *types;
-               pmu = type->pmus;
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[pkg];
-                       if (!box)
-                               continue;
-                       /* The first cpu on a package activates the box */
-                       if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
-                               uncore_box_init(box);
-               }
-       }
-
-       return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
-       struct intel_uncore_pmu *pmu;
-       struct intel_uncore_box *box;
-       int i, pkg;
-
-       pkg = topology_logical_package_id(cpu);
-       for (; *types; types++) {
-               type = *types;
-               pmu = type->pmus;
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       if (pmu->boxes[pkg])
-                               continue;
-                       /* First cpu of a package allocates the box */
-                       box = uncore_alloc_box(type, cpu_to_node(cpu));
-                       if (!box)
-                               return -ENOMEM;
-                       box->pmu = pmu;
-                       box->pkgid = pkg;
-                       pmu->boxes[pkg] = box;
-               }
-       }
-       return 0;
-}
-
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
                                   int new_cpu)
 {
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 
 static int uncore_event_cpu_offline(unsigned int cpu)
 {
-       int target;
+       struct intel_uncore_type *type, **types = uncore_msr_uncores;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, pkg, target;
 
        /* Check if exiting cpu is used for collecting uncore events */
        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-               return 0;
-
+               goto unref;
        /* Find a new cpu to collect uncore events */
        target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 
        uncore_change_context(uncore_msr_uncores, cpu, target);
        uncore_change_context(uncore_pci_uncores, cpu, target);
+
+unref:
+       /* Clear the references */
+       pkg = topology_logical_package_id(cpu);
+       for (; *types; types++) {
+               type = *types;
+               pmu = type->pmus;
+               for (i = 0; i < type->num_boxes; i++, pmu++) {
+                       box = pmu->boxes[pkg];
+                       if (box && atomic_dec_return(&box->refcnt) == 0)
+                               uncore_box_exit(box);
+               }
+       }
        return 0;
 }
 
+static int allocate_boxes(struct intel_uncore_type **types,
+                        unsigned int pkg, unsigned int cpu)
+{
+       struct intel_uncore_box *box, *tmp;
+       struct intel_uncore_type *type;
+       struct intel_uncore_pmu *pmu;
+       LIST_HEAD(allocated);
+       int i;
+
+       /* Try to allocate all required boxes */
+       for (; *types; types++) {
+               type = *types;
+               pmu = type->pmus;
+               for (i = 0; i < type->num_boxes; i++, pmu++) {
+                       if (pmu->boxes[pkg])
+                               continue;
+                       box = uncore_alloc_box(type, cpu_to_node(cpu));
+                       if (!box)
+                               goto cleanup;
+                       box->pmu = pmu;
+                       box->pkgid = pkg;
+                       list_add(&box->active_list, &allocated);
+               }
+       }
+       /* Install them in the pmus */
+       list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+               list_del_init(&box->active_list);
+               box->pmu->boxes[pkg] = box;
+       }
+       return 0;
+
+cleanup:
+       list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+               list_del_init(&box->active_list);
+               kfree(box);
+       }
+       return -ENOMEM;
+}
+
 static int uncore_event_cpu_online(unsigned int cpu)
 {
-       int target;
+       struct intel_uncore_type *type, **types = uncore_msr_uncores;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, ret, pkg, target;
+
+       pkg = topology_logical_package_id(cpu);
+       ret = allocate_boxes(types, pkg, cpu);
+       if (ret)
+               return ret;
+
+       for (; *types; types++) {
+               type = *types;
+               pmu = type->pmus;
+               for (i = 0; i < type->num_boxes; i++, pmu++) {
+                       box = pmu->boxes[pkg];
+                       if (box && atomic_inc_return(&box->refcnt) == 1)
+                               uncore_box_init(box);
+               }
+       }
 
        /*
         * Check if there is an online cpu in the package
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
        if (cret && pret)
                return -ENODEV;
 
-       /*
-        * Install callbacks. Core will call them for each online cpu.
-        *
-        * The first online cpu of each package allocates and takes
-        * the refcounts for all other online cpus in that package.
-        * If msrs are not enabled no allocation is required and
-        * uncore_cpu_prepare() is not called for each online cpu.
-        */
-       if (!cret) {
-              ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
-                                      "perf/x86/intel/uncore:prepare",
-                                      uncore_cpu_prepare, NULL);
-               if (ret)
-                       goto err;
-       } else {
-               cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
-                                         "perf/x86/intel/uncore:prepare",
-                                         uncore_cpu_prepare, NULL);
-       }
-       first_init = 1;
-       cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
-                         "perf/x86/uncore:starting",
-                         uncore_cpu_starting, uncore_cpu_dying);
-       first_init = 0;
-       cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
-                         "perf/x86/uncore:online",
-                         uncore_event_cpu_online, uncore_event_cpu_offline);
+       /* Install hotplug callbacks to setup the targets for each package */
+       ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+                               "perf/x86/intel/uncore:online",
+                               uncore_event_cpu_online,
+                               uncore_event_cpu_offline);
+       if (ret)
+               goto err;
        return 0;
 
 err:
-       /* Undo box->init_box() */
-       on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
        uncore_types_exit(uncore_msr_uncores);
        uncore_pci_exit();
        return ret;
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-       cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
-       cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
-       cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
        uncore_types_exit(uncore_msr_uncores);
        uncore_pci_exit();
 }
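
allocate_boxes() above is an all-or-nothing allocator: it stages each new box on a private list, publishes into pmu->boxes[] only after every allocation has succeeded, and unwinds the staged boxes on failure so no half-initialized state ever becomes visible. A generic sketch of the same pattern in plain C (fixed-size array instead of the kernel list API):

	#include <stdlib.h>

	struct box { int id; };

	/* Fill out[0..n-1] atomically: either all n boxes are published or
	 * none are, mirroring allocate_boxes() above.  Assumes n <= 16. */
	static int allocate_all(struct box *out[], int n)
	{
		struct box *staged[16] = { NULL };
		int i;

		for (i = 0; i < n; i++) {
			staged[i] = malloc(sizeof(*staged[i]));
			if (!staged[i])
				goto cleanup;
			staged[i]->id = i;
		}
		for (i = 0; i < n; i++)		/* commit phase */
			out[i] = staged[i];
		return 0;

	cleanup:
		while (i-- > 0)			/* roll back staged work */
			free(staged[i]);
		return -1;			/* -ENOMEM in the kernel */
	}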
index 38711df3bcb56b6939f2f84af16b920f0409049b..2266f864b7477a3ff88fee26f07df62eb5073ab4 100644 (file)
@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
+extern bool initrd_gone;
 #else
 static inline int __init microcode_init(void)                  { return 0; };
 static inline void __init load_ucode_bsp(void)                 { }
index 1e35dd06b090ee91189cb5a52fdf026f2ca5e74b..52f352b063fdf372bddd62d5887296fa43661931 100644 (file)
@@ -2117,6 +2117,7 @@ static inline void __init check_timer(void)
                        if (idx != -1 && irq_trigger(idx))
                                unmask_ioapic_irq(irq_get_chip_data(0));
                }
+               irq_domain_deactivate_irq(irq_data);
                irq_domain_activate_irq(irq_data);
                if (timer_irq_works()) {
                        if (disable_timer_pin_1 > 0)
@@ -2138,6 +2139,7 @@ static inline void __init check_timer(void)
                 * legacy devices should be connected to IO APIC #0
                 */
                replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+               irq_domain_deactivate_irq(irq_data);
                irq_domain_activate_irq(irq_data);
                legacy_pic->unmask(0);
                if (timer_irq_works()) {
index 00ef43233e034b0cde9b2adc88b8003ddc42d00b..537c6647d84ca3e7cca771feb6c98bda94ef8c70 100644 (file)
@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
 
 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
 
-static void __restart_timer(struct timer_list *t, unsigned long interval)
+static void __start_timer(struct timer_list *t, unsigned long interval)
 {
        unsigned long when = jiffies + interval;
        unsigned long flags;
 
        local_irq_save(flags);
 
-       if (timer_pending(t)) {
-               if (time_before(when, t->expires))
-                       mod_timer(t, when);
-       } else {
-               t->expires = round_jiffies(when);
-               add_timer_on(t, smp_processor_id());
-       }
+       if (!timer_pending(t) || time_before(when, t->expires))
+               mod_timer(t, round_jiffies(when));
 
        local_irq_restore(flags);
 }
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)
 
 done:
        __this_cpu_write(mce_next_interval, iv);
-       __restart_timer(t, iv);
+       __start_timer(t, iv);
 }
 
 /*
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
        struct timer_list *t = this_cpu_ptr(&mce_timer);
        unsigned long iv = __this_cpu_read(mce_next_interval);
 
-       __restart_timer(t, interval);
+       __start_timer(t, interval);
 
        if (interval < iv)
                __this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
        }
 }
 
-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(struct timer_list *t)
 {
        unsigned long iv = check_interval * HZ;
 
        if (mca_cfg.ignore_ce || !iv)
                return;
 
-       per_cpu(mce_next_interval, cpu) = iv;
-
-       t->expires = round_jiffies(jiffies + iv);
-       add_timer_on(t, cpu);
+       this_cpu_write(mce_next_interval, iv);
+       __start_timer(t, iv);
 }
 
 static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
        unsigned int cpu = smp_processor_id();
 
        setup_pinned_timer(t, mce_timer_fn, cpu);
-       mce_start_timer(cpu, t);
+       mce_start_timer(t);
 }
 
 /* Handle unconfigured int18 (should never happen) */
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)
 
 static int mce_cpu_online(unsigned int cpu)
 {
-       struct timer_list *t = &per_cpu(mce_timer, cpu);
+       struct timer_list *t = this_cpu_ptr(&mce_timer);
        int ret;
 
        mce_device_create(cpu);
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
                return ret;
        }
        mce_reenable_cpu();
-       mce_start_timer(cpu, t);
+       mce_start_timer(t);
        return 0;
 }
 
 static int mce_cpu_pre_down(unsigned int cpu)
 {
-       struct timer_list *t = &per_cpu(mce_timer, cpu);
+       struct timer_list *t = this_cpu_ptr(&mce_timer);
 
        mce_disable_cpu();
        del_timer_sync(t);
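
The mce timer consolidation works because mod_timer() also activates an inactive timer, so the old __restart_timer() split between mod_timer() on a pending timer and add_timer_on() on an idle one collapses into a single call; the time_before() guard means a pending expiry is only ever pulled earlier, never pushed out. A hedged sketch of the resulting idiom (kernel timer API assumed, not standalone code):

	static void start_or_shorten(struct timer_list *t, unsigned long delay)
	{
		unsigned long when = jiffies + delay;

		/* mod_timer() arms an idle timer and reprograms a pending
		 * one; skip it when the pending expiry is already sooner. */
		if (!timer_pending(t) || time_before(when, t->expires))
			mod_timer(t, round_jiffies(when));
	}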
index 6a31e2691f3aa0ac68620459c371ca42912c4475..079e81733a58950486a7012b70e06f45e71af688 100644 (file)
@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
 reget:
                if (!get_builtin_microcode(&cp, family)) {
 #ifdef CONFIG_BLK_DEV_INITRD
-                       cp = find_cpio_data(ucode_path, (void *)initrd_start,
-                                           initrd_end - initrd_start, NULL);
+                       if (!initrd_gone)
+                               cp = find_cpio_data(ucode_path, (void *)initrd_start,
+                                                   initrd_end - initrd_start, NULL);
 #endif
                        if (!(cp.data && cp.size)) {
                                /*
index 2af69d27da629a5c802498e692300f9980862a2a..73102d932760b871896a956a88ff178362cc5a94 100644 (file)
@@ -46,6 +46,8 @@
 static struct microcode_ops    *microcode_ops;
 static bool dis_ucode_ldr = true;
 
+bool initrd_gone;
+
 LIST_HEAD(microcode_cache);
 
 /*
@@ -190,21 +192,24 @@ void load_ucode_ap(void)
 static int __init save_microcode_in_initrd(void)
 {
        struct cpuinfo_x86 *c = &boot_cpu_data;
+       int ret = -EINVAL;
 
        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                if (c->x86 >= 6)
-                       return save_microcode_in_initrd_intel();
+                       ret = save_microcode_in_initrd_intel();
                break;
        case X86_VENDOR_AMD:
                if (c->x86 >= 0x10)
-                       return save_microcode_in_initrd_amd(c->x86);
+                       ret = save_microcode_in_initrd_amd(c->x86);
                break;
        default:
                break;
        }
 
-       return -EINVAL;
+       initrd_gone = true;
+
+       return ret;
 }
 
 struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
         * has the virtual address of the beginning of the initrd. It also
         * possibly relocates the ramdisk. In either case, initrd_start contains
         * the updated address so use that instead.
+        *
+        * initrd_gone is for the hotplug case where we've thrown out initrd
+        * already.
         */
-       if (!use_pa && initrd_start)
-               start = initrd_start;
+       if (!use_pa) {
+               if (initrd_gone)
+                       return (struct cpio_data){ NULL, 0, "" };
+               if (initrd_start)
+                       start = initrd_start;
+       }
 
        return find_cpio_data(path, (void *)start, size, NULL);
 #else /* !CONFIG_BLK_DEV_INITRD */
index 3f329b74e040c23b6b85dfd12a85f80d630c63ac..8325d8a09ab0768dd08156b8e4c5b755b78c10f9 100644 (file)
@@ -41,7 +41,7 @@
 
 static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
 
-/* Current microcode patch used in early patching */
+/* Current microcode patch used in early patching on the APs. */
 struct microcode_intel *intel_ucode_patch;
 
 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
@@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
        struct ucode_cpu_info uci;
        struct cpio_data cp;
 
-       /*
-        * AP loading didn't find any microcode patch, no need to save anything.
-        */
-       if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
-               return 0;
-
        if (!load_builtin_intel_microcode(&cp))
                cp = find_microcode_in_initrd(ucode_path, false);
 
@@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
        return 0;
 }
 
-
 /*
  * @res_patch, output: a pointer to the patch we found.
  */
index e4e97a5355ce852ac49937fd180f62b614c1286c..de7234401275b56760f27573eea5669e2bda4f68 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/fpu/regset.h>
 #include <asm/fpu/signal.h>
 #include <asm/fpu/types.h>
+#include <asm/fpu/xstate.h>
 #include <asm/traps.h>
 
 #include <linux/hardirq.h>
@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
         * it will #GP. Make sure it is replaced after the memset().
         */
        if (static_cpu_has(X86_FEATURE_XSAVES))
-               state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
+               state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
+                                              xfeatures_mask;
 
        if (static_cpu_has(X86_FEATURE_FXSR))
                fpstate_init_fxstate(&state->fxsave);
index 85e87b46c318026ed28d87056c516aec3e5fb9ed..dc6ba5bda9fc83630c773a80c4adea6871db0a59 100644 (file)
@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
        } else {
                struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
 
+               irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
                irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
                disable_irq(hdev->irq);
                irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
index d153be8929a68440ae5e5894497cbb7fa1ab9913..e52c9088660fac47d6da377b39412378ff0157b0 100644 (file)
@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
        memcpy(dest, xsave, XSAVE_HDR_OFFSET);
 
        /* Set XSTATE_BV */
+       xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
        *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
 
        /*
index 319148bd4b05091d24576a7535b10aad7bec0c2d..2f25a363068cf9723e8b418e8c1942a6d3ca4029 100644 (file)
@@ -268,6 +268,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
        efi_scratch.use_pgd = true;
 
+       /*
+        * Certain firmware versions are way too sentimental and still believe
+        * they are exclusive and unquestionable owners of the first physical page,
+        * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
+        * (but then write-access it later during SetVirtualAddressMap()).
+        *
+        * Create a 1:1 mapping for this page, to avoid triple faults during early
+        * boot with such firmware. We are free to hand this page to the BIOS,
+        * as trim_bios_range() will reserve the first page and isolate it away
+        * from memory allocators anyway.
+        */
+       if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
+               pr_err("Failed to create 1:1 mapping for the first page!\n");
+               return 1;
+       }
+
        /*
         * When making calls to the firmware everything needs to be 1:1
         * mapped and addressable with 32-bit pointers. Map the kernel
index 848e8568fb3c4a90c2eb89783c0420f8b5526cd6..8fd4be610607c2683b16a3e0da4249f4aea732e4 100644 (file)
@@ -419,7 +419,7 @@ subsys_initcall(topology_init);
 
 void cpu_reset(void)
 {
-#if XCHAL_HAVE_PTP_MMU
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
        local_irq_disable();
        /*
         * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
index df939b54b09f731eac02657957f3f573c51a1ec5..1fad2a6b3bbbf0d1d4ee07f585bdc4d501467b5d 100644 (file)
@@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
        struct crypto_larval *larval;
        int err;
 
+       alg->cra_flags &= ~CRYPTO_ALG_DEAD;
        err = crypto_check_alg(alg);
        if (err)
                return err;
index e0d2e6e6e40caf03b7c1bf4712664071e433becf..3752521c62aba1fc71700646f53ca241f305fda4 100644 (file)
@@ -536,7 +536,7 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
                if (!iort_fwnode)
                        return NULL;
 
-               ops = iommu_get_instance(iort_fwnode);
+               ops = iommu_ops_from_fwnode(iort_fwnode);
                if (!ops)
                        return NULL;
 
index 9cd0a2d4181699d94f73f2af82490e59f23373e2..c2d3785ec2279f42013cdc4816beb60785279d95 100644 (file)
@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 
                if (qc->err_mask & ~AC_ERR_OTHER)
                        qc->err_mask &= ~AC_ERR_OTHER;
+       } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
+               qc->result_tf.command |= ATA_SENSE;
        }
 
        /* finish up */
@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "ST380013AS",         "3.20",         ATA_HORKAGE_MAX_SEC_1024 },
 
        /*
-        * Device times out with higher max sects.
+        * These devices time out with higher max sects.
         * https://bugzilla.kernel.org/show_bug.cgi?id=121671
         */
-       { "LITEON CX1-JB256-HP", NULL,          ATA_HORKAGE_MAX_SEC_1024 },
+       { "LITEON CX1-JB*-HP",  NULL,           ATA_HORKAGE_MAX_SEC_1024 },
 
        /* Devices we expect to fail diagnostics */
 
index 823e938c9a7877a1cadefde9127d447832630061..2f32782cea6d9c584797d1f7d9dc8e99eac0b796 100644 (file)
@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
        host->iomap = NULL;
        hpriv->base = devm_ioremap(&pdev->dev, res->start,
                                   resource_size(res));
+       if (!hpriv->base)
+               return -ENOMEM;
+
        hpriv->base -= SATAHC0_REG_BASE;
 
        hpriv->clk = clk_get(&pdev->dev, NULL);
index 4497d263209fb861e08e1650ddf654a98f2102e3..ac350c518e0c9479c05c4d9ff9f6ae918f26b96c 100644 (file)
@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
        struct firmware_buf *buf = fw_priv->buf;
 
        __fw_load_abort(buf);
-
-       /* avoid user action after loading abort */
-       fw_priv->buf = NULL;
 }
 
 static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
 
        mutex_lock(&fw_lock);
        fw_buf = fw_priv->buf;
-       if (!fw_buf)
+       if (fw_state_is_aborted(&fw_buf->fw_st))
                goto out;
 
        switch (loading) {
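The old code signalled an abort by NULLing fw_priv->buf, invalidating a pointer that other paths may already hold; the fix instead records the abort in the buffer's own state and tests it under fw_lock via fw_state_is_aborted(). A self-contained sketch of that pattern; the struct and helpers are illustrative stand-ins, not the driver's real types:

    #include <linux/errno.h>
    #include <linux/mutex.h>

    struct fw_buf_example {
            struct mutex lock;
            bool aborted;           /* stands in for the fw_st aborted state */
    };

    static void example_load_abort(struct fw_buf_example *buf)
    {
            mutex_lock(&buf->lock);
            buf->aborted = true;    /* record state; the object stays valid */
            mutex_unlock(&buf->lock);
    }

    static int example_loading_store(struct fw_buf_example *buf)
    {
            int ret = 0;

            mutex_lock(&buf->lock);
            if (buf->aborted)       /* mirrors fw_state_is_aborted() */
                    ret = -ENODEV;  /* user action after an abort is rejected */
            /* otherwise act on the still-valid buffer here */
            mutex_unlock(&buf->lock);
            return ret;
    }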
index dacb6a8418aa927e8d75a86470b35b414bf48598..fa26ffd25fa61bae95bd441699a54ee8e16818d2 100644 (file)
@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
 {
        struct memory_block *mem = to_memory_block(dev);
        unsigned long start_pfn, end_pfn;
+       unsigned long valid_start, valid_end, valid_pages;
        unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-       struct page *first_page;
        struct zone *zone;
        int zone_shift = 0;
 
        start_pfn = section_nr_to_pfn(mem->start_section_nr);
        end_pfn = start_pfn + nr_pages;
-       first_page = pfn_to_page(start_pfn);
 
        /* A block that contains more than one zone cannot be offlined. */
-       if (!test_pages_in_a_zone(start_pfn, end_pfn))
+       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                return sprintf(buf, "none\n");
 
-       zone = page_zone(first_page);
+       zone = page_zone(pfn_to_page(valid_start));
+       valid_pages = valid_end - valid_start;
 
        /* MMOP_ONLINE_KEEP */
        sprintf(buf, "%s", zone->name);
 
        /* MMOP_ONLINE_KERNEL */
-       zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
+       zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
        }
 
        /* MMOP_ONLINE_MOVABLE */
-       zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
+       zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
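Because a memory block can contain holes, the nominal [start_pfn, end_pfn) span may overstate what is actually present; test_pages_in_a_zone() now reports the populated sub-range, and both zone_can_shift() checks operate on it. A sketch of the corrected flow; the wrapper name is illustrative, the helpers are the real ones:

    #include <linux/memory_hotplug.h>
    #include <linux/mm.h>

    /* Derive the zone from the populated part of the block, not from its
     * nominal first pfn, and report how many pages that sub-range holds. */
    static struct zone *block_zone_example(unsigned long start_pfn,
                                           unsigned long end_pfn,
                                           unsigned long *valid_pages)
    {
            unsigned long valid_start, valid_end;

            if (!test_pages_in_a_zone(start_pfn, end_pfn,
                                      &valid_start, &valid_end))
                    return NULL;    /* spans several zones: cannot offline */

            *valid_pages = valid_end - valid_start; /* <= end_pfn - start_pfn */
            return page_zone(pfn_to_page(valid_start));
    }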
index f642c4264c277bc05d98dc99eb15ac8091886ba5..168fa175d65a08319f5e92ad10f8f5ff4baea54a 100644 (file)
@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
 
 /* driver_chipcommon_b.c */
 int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
index b4f6520e74f05b8b7a0c1ec710042c0f38a02d7d..62f5bfa5065d919ee3acd9e596373923070e574a 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-
 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
                                         u32 mask, u32 value)
 {
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
        if (cc->capabilities & BCMA_CC_CAP_PMU)
                bcma_pmu_early_init(cc);
 
-       if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
-               bcma_chipco_serial_init(cc);
-
        if (bus->hosttype == BCMA_HOSTTYPE_SOC)
                bcma_core_chipcommon_flash_detect(cc);
 
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
        return res;
 }
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 {
-#if IS_BUILTIN(CONFIG_BCM47XX)
        unsigned int irq;
        u32 baud_base;
        u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
                ports[i].baud_base = baud_base;
                ports[i].reg_shift = 0;
        }
-#endif /* CONFIG_BCM47XX */
 }
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
index 96f17132820080843e9216523bc1328b2f8f7939..89af807cf29ce49e38f60e9e1c3e177ceb0e261a 100644 (file)
@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
 
 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
 {
+       struct bcma_bus *bus = mcore->core->bus;
+
        if (mcore->early_setup_done)
                return;
 
+       bcma_chipco_serial_init(&bus->drv_cc);
        bcma_core_mips_nvram_init(mcore);
 
        mcore->early_setup_done = true;
index d5ba43a87a682b6e718d5e2ad7c804498bad61de..200828c60db9ffce12b968956271954a88ac54d0 100644 (file)
@@ -153,6 +153,8 @@ struct cppi41_dd {
 
        /* context for suspend/resume */
        unsigned int dma_tdfdq;
+
+       bool is_suspended;
 };
 
 #define FIST_COMPLETION_QUEUE  93
@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
        BUG_ON(desc_num >= ALLOC_DECS_NUM);
        c = cdd->chan_busy[desc_num];
        cdd->chan_busy[desc_num] = NULL;
+
+       /* Usecount for chan_busy[], paired with push_desc_queue() */
+       pm_runtime_put(cdd->ddev.dev);
+
        return c;
 }
 
@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
 
                while (val) {
                        u32 desc, len;
-                       int error;
 
-                       error = pm_runtime_get(cdd->ddev.dev);
-                       if (error < 0)
-                               dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
-                                       __func__, error);
+                       /*
+                        * This should never trigger; see the comments in
+                        * push_desc_queue().
+                        */
+                       WARN_ON(cdd->is_suspended);
 
                        q_num = __fls(val);
                        val &= ~(1 << q_num);
@@ -343,9 +349,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
                        c->residue = pd_trans_len(c->desc->pd6) - len;
                        dma_cookie_complete(&c->txd);
                        dmaengine_desc_get_callback_invoke(&c->txd, NULL);
-
-                       pm_runtime_mark_last_busy(cdd->ddev.dev);
-                       pm_runtime_put_autosuspend(cdd->ddev.dev);
                }
        }
        return IRQ_HANDLED;
@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c)
         */
        __iowmb();
 
+       /*
+        * DMA transfers can take at least 200ms to complete with USB mass
+        * storage connected. To prevent autosuspend timeouts, we must take
+        * a pm_runtime reference whenever a descriptor is added to
+        * chan_busy[]. The reference is dropped in desc_to_chan() or
+        * cppi41_stop_chan(), depending on the outcome of the transfer.
+        */
+       pm_runtime_get(cdd->ddev.dev);
+
        desc_phys = lower_32_bits(c->desc_phys);
        desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
        WARN_ON(cdd->chan_busy[desc_num]);
@@ -457,20 +469,26 @@ static void push_desc_queue(struct cppi41_channel *c)
        cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
 }
 
-static void pending_desc(struct cppi41_channel *c)
+/*
+ * Caller must hold cdd->lock to prevent push_desc_queue()
+ * from being called out of order; both cppi41_dma_issue_pending()
+ * and cppi41_runtime_resume() call this function.
+ */
+static void cppi41_run_queue(struct cppi41_dd *cdd)
 {
-       struct cppi41_dd *cdd = c->cdd;
-       unsigned long flags;
+       struct cppi41_channel *c, *_c;
 
-       spin_lock_irqsave(&cdd->lock, flags);
-       list_add_tail(&c->node, &cdd->pending);
-       spin_unlock_irqrestore(&cdd->lock, flags);
+       list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+               push_desc_queue(c);
+               list_del(&c->node);
+       }
 }
 
 static void cppi41_dma_issue_pending(struct dma_chan *chan)
 {
        struct cppi41_channel *c = to_cpp41_chan(chan);
        struct cppi41_dd *cdd = c->cdd;
+       unsigned long flags;
        int error;
 
        error = pm_runtime_get(cdd->ddev.dev);
@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
                return;
        }
 
-       if (likely(pm_runtime_active(cdd->ddev.dev)))
-               push_desc_queue(c);
-       else
-               pending_desc(c);
+       spin_lock_irqsave(&cdd->lock, flags);
+       list_add_tail(&c->node, &cdd->pending);
+       if (!cdd->is_suspended)
+               cppi41_run_queue(cdd);
+       spin_unlock_irqrestore(&cdd->lock, flags);
 
        pm_runtime_mark_last_busy(cdd->ddev.dev);
        pm_runtime_put_autosuspend(cdd->ddev.dev);
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
        WARN_ON(!cdd->chan_busy[desc_num]);
        cdd->chan_busy[desc_num] = NULL;
 
+       /* Usecount for chan_busy[], paired with push_desc_queue() */
+       pm_runtime_put(cdd->ddev.dev);
+
        return 0;
 }
 
@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
 static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 {
        struct cppi41_dd *cdd = dev_get_drvdata(dev);
+       unsigned long flags;
 
+       spin_lock_irqsave(&cdd->lock, flags);
+       cdd->is_suspended = true;
        WARN_ON(!list_empty(&cdd->pending));
+       spin_unlock_irqrestore(&cdd->lock, flags);
 
        return 0;
 }
@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
 static int __maybe_unused cppi41_runtime_resume(struct device *dev)
 {
        struct cppi41_dd *cdd = dev_get_drvdata(dev);
-       struct cppi41_channel *c, *_c;
        unsigned long flags;
 
        spin_lock_irqsave(&cdd->lock, flags);
-       list_for_each_entry_safe(c, _c, &cdd->pending, node) {
-               push_desc_queue(c);
-               list_del(&c->node);
-       }
+       cdd->is_suspended = false;
+       cppi41_run_queue(cdd);
        spin_unlock_irqrestore(&cdd->lock, flags);
 
        return 0;
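Taken together, the cppi41 changes enforce one invariant: the device holds a runtime-PM reference for every descriptor sitting in chan_busy[], and queue pushes are deferred under cdd->lock while is_suspended is set, with the resume handler flushing whatever accumulated. A condensed, self-contained sketch of that scheme; the types and names are illustrative, while pm_runtime_get()/pm_runtime_put() are the real API:

    #include <linux/list.h>
    #include <linux/pm_runtime.h>
    #include <linux/spinlock.h>

    struct chan_example { struct list_head node; };

    struct dd_example {
            struct device *dev;
            spinlock_t lock;
            struct list_head pending;
            bool is_suspended;
    };

    /* One PM reference per in-flight descriptor. */
    static void push_one(struct dd_example *dd, struct chan_example *c)
    {
            pm_runtime_get(dd->dev);        /* dropped on completion or stop */
            /* ... hand the descriptor to the queue manager ... */
    }

    static void complete_one(struct dd_example *dd)
    {
            /* ... reap the completed descriptor ... */
            pm_runtime_put(dd->dev);        /* pairs with push_one() */
    }

    /* Caller must hold dd->lock. */
    static void run_queue(struct dd_example *dd)
    {
            struct chan_example *c, *tmp;

            list_for_each_entry_safe(c, tmp, &dd->pending, node) {
                    push_one(dd, c);
                    list_del(&c->node);
            }
    }

    static void issue_pending(struct dd_example *dd, struct chan_example *c)
    {
            unsigned long flags;

            spin_lock_irqsave(&dd->lock, flags);
            list_add_tail(&c->node, &dd->pending);
            if (!dd->is_suspended)
                    run_queue(dd);  /* if suspended, resume flushes it */
            spin_unlock_irqrestore(&dd->lock, flags);
    }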
index 48720800c438761ae8eba65118ab78da38fb677e..f37f4978dabbb2e43dab75d95255cafa398b0c73 100644 (file)
@@ -1699,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
 static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 {
        struct pl330_thread *thrd = NULL;
-       unsigned long flags;
        int chans, i;
 
        if (pl330->state == DYING)
@@ -1707,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
 
        chans = pl330->pcfg.num_chan;
 
-       spin_lock_irqsave(&pl330->lock, flags);
-
        for (i = 0; i < chans; i++) {
                thrd = &pl330->channels[i];
                if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1726,8 +1723,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
                thrd = NULL;
        }
 
-       spin_unlock_irqrestore(&pl330->lock, flags);
-
        return thrd;
 }
 
@@ -1745,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
 static void pl330_release_channel(struct pl330_thread *thrd)
 {
        struct pl330_dmac *pl330;
-       unsigned long flags;
 
        if (!thrd || thrd->free)
                return;
@@ -1757,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
 
        pl330 = thrd->dmac;
 
-       spin_lock_irqsave(&pl330->lock, flags);
        _free_event(thrd, thrd->ev);
        thrd->free = true;
-       spin_unlock_irqrestore(&pl330->lock, flags);
 }
 
 /* Initialize the structure for PL330 configuration, that can be used
@@ -2123,20 +2115,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
        struct pl330_dmac *pl330 = pch->dmac;
        unsigned long flags;
 
-       spin_lock_irqsave(&pch->lock, flags);
+       spin_lock_irqsave(&pl330->lock, flags);
 
        dma_cookie_init(chan);
        pch->cyclic = false;
 
        pch->thread = pl330_request_channel(pl330);
        if (!pch->thread) {
-               spin_unlock_irqrestore(&pch->lock, flags);
+               spin_unlock_irqrestore(&pl330->lock, flags);
                return -ENOMEM;
        }
 
        tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
 
-       spin_unlock_irqrestore(&pch->lock, flags);
+       spin_unlock_irqrestore(&pl330->lock, flags);
 
        return 1;
 }
@@ -2239,12 +2231,13 @@ static int pl330_pause(struct dma_chan *chan)
 static void pl330_free_chan_resources(struct dma_chan *chan)
 {
        struct dma_pl330_chan *pch = to_pchan(chan);
+       struct pl330_dmac *pl330 = pch->dmac;
        unsigned long flags;
 
        tasklet_kill(&pch->task);
 
        pm_runtime_get_sync(pch->dmac->ddma.dev);
-       spin_lock_irqsave(&pch->lock, flags);
+       spin_lock_irqsave(&pl330->lock, flags);
 
        pl330_release_channel(pch->thread);
        pch->thread = NULL;
@@ -2252,7 +2245,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
        if (pch->cyclic)
                list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
 
-       spin_unlock_irqrestore(&pch->lock, flags);
+       spin_unlock_irqrestore(&pl330->lock, flags);
        pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
        pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
 }
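The pl330 locking change is a scope widening: pl330_request_channel() and pl330_release_channel() walk the controller-wide channels[] pool, so the per-channel pch->lock could never serialize two channels racing for the same thread; the lock moves out to the callers and becomes pl330->lock. A schematic sketch of the shape; struct names and the pool size are illustrative:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct thread_example { bool free; };

    struct ctrl_example {
            spinlock_t lock;                /* guards the shared thread pool */
            struct thread_example channels[8];
    };

    /* Must run under ctrl->lock: scanning and claiming a slot has to be
     * one atomic step, and the lock now lives in the caller. */
    static struct thread_example *request_thread(struct ctrl_example *ctrl)
    {
            int i;

            for (i = 0; i < 8; i++) {
                    if (ctrl->channels[i].free) {
                            ctrl->channels[i].free = false;
                            return &ctrl->channels[i];
                    }
            }
            return NULL;
    }

    static struct thread_example *alloc_chan(struct ctrl_example *ctrl)
    {
            struct thread_example *thrd;
            unsigned long flags;

            spin_lock_irqsave(&ctrl->lock, flags); /* controller-wide lock */
            thrd = request_thread(ctrl);
            spin_unlock_irqrestore(&ctrl->lock, flags);
            return thrd;
    }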
index 921dfa047202952c9064cd39971e68e0e3c28b49..260c4b4b492ec38735715859522068da40c21381 100644 (file)
@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
 struct exit_boot_struct {
        efi_memory_desc_t *runtime_map;
        int *runtime_entry_count;
+       void *new_fdt_addr;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
        efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
                        p->runtime_map, p->runtime_entry_count);
 
-       return EFI_SUCCESS;
+       return update_fdt_memmap(p->new_fdt_addr, map);
 }
 
 /*
@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 
        priv.runtime_map = runtime_map;
        priv.runtime_entry_count = &runtime_entry_count;
+       priv.new_fdt_addr = (void *)*new_fdt_addr;
        status = efi_exit_boot_services(sys_table, handle, &map, &priv,
                                        exit_boot_func);
 
        if (status == EFI_SUCCESS) {
                efi_set_virtual_address_map_t *svam;
 
-               status = update_fdt_memmap((void *)*new_fdt_addr, &map);
-               if (status != EFI_SUCCESS) {
-                       /*
-                        * The kernel won't get far without the memory map, but
-                        * may still be able to print something meaningful so
-                        * return success here.
-                        */
-                       return EFI_SUCCESS;
-               }
-
                /* Install the new virtual address map */
                svam = sys_table->runtime->set_virtual_address_map;
                status = svam(runtime_entry_count * desc_size, desc_size,
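The restructuring moves update_fdt_memmap() inside exit_boot_func(), i.e. into the ExitBootServices() sequence itself, so the FDT is patched with exactly the memory map the firmware handed over and a late map change can no longer slip in between; the destination address rides along in the priv struct. A schematic sketch of that callback-context pattern; everything below is an illustrative stand-in for the EFI stub types:

    struct exit_boot_ctx_example {
            void *new_fdt_addr;     /* FDT to patch with the final map */
    };

    /* Stand-in for update_fdt_memmap(): write the map into the FDT. */
    static int patch_fdt_example(void *fdt, void *final_map)
    {
            return 0;
    }

    /* Runs with the final memory map, while boot services still exist. */
    static int exit_boot_cb_example(void *priv, void *final_map)
    {
            struct exit_boot_ctx_example *ctx = priv;

            return patch_fdt_example(ctx->new_fdt_addr, final_map);
    }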
index e2b0b1646f995fd94d12e17a8cb1258bec34061f..0635829b18cf3aed41239079e4208336b00cda0f 100644 (file)
@@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
        }
        WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
+       if (adev->mode_info.num_crtc)
+               amdgpu_display_set_vga_render_state(adev, false);
+
        gmc_v6_0_mc_stop(adev, &save);
 
        if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
        }
        gmc_v6_0_mc_resume(adev, &save);
-       amdgpu_display_set_vga_render_state(adev, false);
 }
 
 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
index 50f5cf7b69d1dc55fd427efa61e7e8e5eeff80d0..fdfb1ec17e660efa0b1f2c1f7273fa8d6fd8567a 100644 (file)
@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
        }
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               struct drm_pending_vblank_event *event = crtc_state->event;
                /*
-                * TEST_ONLY and PAGE_FLIP_EVENT are mutually
-                * exclusive, if they weren't, this code should be
-                * called on success for TEST_ONLY too.
+                * Free the allocated event. drm_atomic_helper_setup_commit
+                * can allocate an event too, so only free it if it's ours
+                * to prevent a double free in drm_atomic_state_clear.
                 */
-               if (crtc_state->event)
-                       drm_event_cancel_free(dev, &crtc_state->event->base);
+               if (event && (event->base.fence || event->base.file_priv)) {
+                       drm_event_cancel_free(dev, &event->base);
+                       crtc_state->event = NULL;
+               }
        }
 
        if (!fence_state)
index 34f757bcabae8d88382f18392467e67f4e0f6100..4594477dee00bc0ffb7847da556985848df717e7 100644 (file)
@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 
                funcs = plane->helper_private;
 
-               if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-                       continue;
-
                if (funcs->prepare_fb) {
                        ret = funcs->prepare_fb(plane, plane_state);
                        if (ret)
@@ -1685,9 +1682,6 @@ fail:
                if (j >= i)
                        continue;
 
-               if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-                       continue;
-
                funcs = plane->helper_private;
 
                if (funcs->cleanup_fb)
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
        for_each_plane_in_state(old_state, plane, plane_state, i) {
                const struct drm_plane_helper_funcs *funcs;
 
-               if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
-                       continue;
-
                funcs = plane->helper_private;
 
                if (funcs->cleanup_fb)
index 5a452628939272969c54239498b9a1ca86cd4a67..7a7019ac93884eeeba046ad62b3c81037796beb7 100644 (file)
@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
 
        INIT_LIST_HEAD(&connector->probed_modes);
        INIT_LIST_HEAD(&connector->modes);
+       mutex_init(&connector->mutex);
        connector->edid_blob_ptr = NULL;
        connector->status = connector_status_unknown;
 
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
                connector->funcs->atomic_destroy_state(connector,
                                                       connector->state);
 
+       mutex_destroy(&connector->mutex);
+
        memset(connector, 0, sizeof(*connector));
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
  */
 int drm_connector_register(struct drm_connector *connector)
 {
-       int ret;
+       int ret = 0;
 
-       if (connector->registered)
+       if (!connector->dev->registered)
                return 0;
 
+       mutex_lock(&connector->mutex);
+       if (connector->registered)
+               goto unlock;
+
        ret = drm_sysfs_connector_add(connector);
        if (ret)
-               return ret;
+               goto unlock;
 
        ret = drm_debugfs_connector_add(connector);
        if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
        drm_mode_object_register(connector->dev, &connector->base);
 
        connector->registered = true;
-       return 0;
+       goto unlock;
 
 err_debugfs:
        drm_debugfs_connector_remove(connector);
 err_sysfs:
        drm_sysfs_connector_remove(connector);
+unlock:
+       mutex_unlock(&connector->mutex);
        return ret;
 }
 EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
  */
 void drm_connector_unregister(struct drm_connector *connector)
 {
-       if (!connector->registered)
+       mutex_lock(&connector->mutex);
+       if (!connector->registered) {
+               mutex_unlock(&connector->mutex);
                return;
+       }
 
        if (connector->funcs->early_unregister)
                connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
        drm_debugfs_connector_remove(connector);
 
        connector->registered = false;
+       mutex_unlock(&connector->mutex);
 }
 EXPORT_SYMBOL(drm_connector_unregister);
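Two guards land in concert here: drm_connector_register() now defers until dev->registered is set (see the drm_drv.c hunk below, which flips it in drm_dev_register()), and connector->registered is checked and updated under the new connector->mutex so concurrent register/unregister calls are idempotent. A compact sketch of that guard; the types are illustrative:

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct conn_example {
            struct mutex lock;
            bool dev_registered;    /* stands in for connector->dev->registered */
            bool registered;
    };

    static int conn_register(struct conn_example *c)
    {
            int ret = 0;

            if (!c->dev_registered) /* too early: registered in bulk later */
                    return 0;

            mutex_lock(&c->lock);
            if (c->registered)      /* second caller: nothing to do */
                    goto unlock;

            /* ... sysfs/debugfs registration with error unwinding ... */
            c->registered = true;
    unlock:
            mutex_unlock(&c->lock);
            return ret;
    }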
 
index a525751b4559e9f3cc99850d5aac6e5c3eb6f16c..6594b4088f11bc8e5aa6a6c308ba3fdf633921a3 100644 (file)
@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
        if (ret)
                goto err_minors;
 
+       dev->registered = true;
+
        if (dev->driver->load) {
                ret = dev->driver->load(dev, flags);
                if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
 
        drm_lastclose(dev);
 
+       dev->registered = false;
+
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                drm_modeset_unregister_all(dev);
 
index 69bc3b0c43905eccf19ad142cf32c7064a09feb0..8493e19b563a134ba588e9df39c3fb5785a25a69 100644 (file)
@@ -1012,6 +1012,8 @@ struct intel_fbc {
        struct work_struct underrun_work;
 
        struct intel_fbc_state_cache {
+               struct i915_vma *vma;
+
                struct {
                        unsigned int mode_flags;
                        uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@ struct intel_fbc {
                } plane;
 
                struct {
-                       u64 ilk_ggtt_offset;
                        uint32_t pixel_format;
                        unsigned int stride;
-                       int fence_reg;
-                       unsigned int tiling_mode;
                } fb;
        } state_cache;
 
        struct intel_fbc_reg_params {
+               struct i915_vma *vma;
+
                struct {
                        enum pipe pipe;
                        enum plane plane;
@@ -1041,10 +1042,8 @@ struct intel_fbc {
                } crtc;
 
                struct {
-                       u64 ggtt_offset;
                        uint32_t pixel_format;
                        unsigned int stride;
-                       int fence_reg;
                } fb;
 
                int cfb_size;
@@ -3168,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
        return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
 }
 
-static inline unsigned long
-i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
-                           const struct i915_ggtt_view *view)
-{
-       return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
-}
-
 /* i915_gem_fence_reg.c */
 int __must_check i915_vma_get_fence(struct i915_vma *vma);
 int __must_check i915_vma_put_fence(struct i915_vma *vma);
index dbe9fb41ae535449f996ab36e51bd626be6651fc..8d3e515f27bade27acda9544a30d975628ac8ab3 100644 (file)
@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
 
        __drm_atomic_helper_plane_duplicate_state(plane, state);
 
+       intel_state->vma = NULL;
+
        return state;
 }
 
@@ -100,6 +102,24 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
                          struct drm_plane_state *state)
 {
+       struct i915_vma *vma;
+
+       vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
+
+       /*
+        * FIXME: Normally intel_cleanup_plane_fb handles destruction of the vma.
+        * We currently don't clear all planes during driver unload, so we have
+        * to be able to unpin the vma here for now.
+        *
+        * Normally this can only happen during unload when kmscon is disabled
+        * and userspace doesn't attempt to set a framebuffer at all.
+        */
+       if (vma) {
+               mutex_lock(&plane->dev->struct_mutex);
+               intel_unpin_fb_vma(vma);
+               mutex_unlock(&plane->dev->struct_mutex);
+       }
+
        drm_atomic_helper_plane_destroy_state(plane, state);
 }
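fetch_and_zero() is what makes the vma single-owner: the state's pointer is read and cleared in one expression, so whichever teardown path runs first unpins, and every later caller sees NULL. A sketch of the idiom; the macro body matches the usual i915 definition, the rest is illustrative, and in the driver the callers are serialized by struct_mutex rather than by the macro itself:

    #define fetch_and_zero(ptr) ({                  \
            typeof(*ptr) __T = *(ptr);              \
            *(ptr) = (typeof(*ptr))0;               \
            __T;                                    \
    })

    struct vma_example { int refs; };

    struct plane_state_example {
            struct vma_example *vma;        /* owned reference, or NULL */
    };

    static void unpin_vma_example(struct vma_example *vma) { vma->refs--; }

    static void teardown(struct plane_state_example *state)
    {
            struct vma_example *vma = fetch_and_zero(&state->vma);

            if (vma)        /* only the first teardown path gets non-NULL */
                    unpin_vma_example(vma);
    }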
 
index f0b9aa7a0483d1928aecec439d342fa2c85b78d9..f1e4a21d46643b53a3cf492671b872ed03215105 100644 (file)
@@ -2235,27 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
                        i915_vma_pin_fence(vma);
        }
 
+       i915_vma_get(vma);
 err:
        intel_runtime_pm_put(dev_priv);
        return vma;
 }
 
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_vma(struct i915_vma *vma)
 {
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct i915_ggtt_view view;
-       struct i915_vma *vma;
-
-       WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
-
-       intel_fill_fb_ggtt_view(&view, fb, rotation);
-       vma = i915_gem_object_to_ggtt(obj, &view);
+       lockdep_assert_held(&vma->vm->dev->struct_mutex);
 
        if (WARN_ON_ONCE(!vma))
                return;
 
        i915_vma_unpin_fence(vma);
        i915_gem_object_unpin_from_display_plane(vma);
+       i915_vma_put(vma);
 }
 
 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2750,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
-       struct intel_crtc *i;
        struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
@@ -2775,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
-               i = to_intel_crtc(c);
+               struct intel_plane_state *state;
 
                if (c == &intel_crtc->base)
                        continue;
 
-               if (!i->active)
+               if (!to_intel_crtc(c)->active)
                        continue;
 
-               fb = c->primary->fb;
-               if (!fb)
+               state = to_intel_plane_state(c->primary->state);
+               if (!state->vma)
                        continue;
 
-               obj = intel_fb_obj(fb);
-               if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
+               if (intel_plane_ggtt_offset(state) == plane_config->base) {
+                       fb = c->primary->fb;
                        drm_framebuffer_reference(fb);
                        goto valid_fb;
                }
@@ -2809,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        return;
 
 valid_fb:
+       mutex_lock(&dev->struct_mutex);
+       intel_state->vma =
+               intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+       mutex_unlock(&dev->struct_mutex);
+       if (IS_ERR(intel_state->vma)) {
+               DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
+                         intel_crtc->pipe, PTR_ERR(intel_state->vma));
+
+               intel_state->vma = NULL;
+               drm_framebuffer_unreference(fb);
+               return;
+       }
+
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
@@ -3104,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DSPSURF(plane),
-                          intel_fb_gtt_offset(fb, rotation) +
+                          intel_plane_ggtt_offset(plane_state) +
                           intel_crtc->dspaddr_offset);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
                I915_WRITE(DSPLINOFF(plane), linear_offset);
        } else {
                I915_WRITE(DSPADDR(plane),
-                          intel_fb_gtt_offset(fb, rotation) +
+                          intel_plane_ggtt_offset(plane_state) +
                           intel_crtc->dspaddr_offset);
        }
        POSTING_READ(reg);
@@ -3207,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        I915_WRITE(DSPSURF(plane),
-                  intel_fb_gtt_offset(fb, rotation) +
+                  intel_plane_ggtt_offset(plane_state) +
                   intel_crtc->dspaddr_offset);
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3230,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
        }
 }
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
-                       unsigned int rotation)
-{
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct i915_ggtt_view view;
-       struct i915_vma *vma;
-
-       intel_fill_fb_ggtt_view(&view, fb, rotation);
-
-       vma = i915_gem_object_to_ggtt(obj, &view);
-       if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
-                view.type))
-               return -1;
-
-       return i915_ggtt_offset(vma);
-}
-
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 {
        struct drm_device *dev = intel_crtc->base.dev;
@@ -3441,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
        }
 
        I915_WRITE(PLANE_SURF(pipe, 0),
-                  intel_fb_gtt_offset(fb, rotation) + surf_addr);
+                  intel_plane_ggtt_offset(plane_state) + surf_addr);
 
        POSTING_READ(PLANE_SURF(pipe, 0));
 }
@@ -11536,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
                flush_work(&work->mmio_work);
 
        mutex_lock(&dev->struct_mutex);
-       intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
+       intel_unpin_fb_vma(work->old_vma);
        i915_gem_object_put(work->pending_flip_obj);
        mutex_unlock(&dev->struct_mutex);
 
@@ -12246,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                goto cleanup_pending;
        }
 
-       work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
-       work->gtt_offset += intel_crtc->dspaddr_offset;
+       work->old_vma = to_intel_plane_state(primary->state)->vma;
+       to_intel_plane_state(primary->state)->vma = vma;
+
+       work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
        work->rotation = crtc->primary->state->rotation;
 
        /*
@@ -12301,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 cleanup_request:
        i915_add_request_no_flush(request);
 cleanup_unpin:
-       intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
+       to_intel_plane_state(primary->state)->vma = work->old_vma;
+       intel_unpin_fb_vma(vma);
 cleanup_pending:
        atomic_dec(&intel_crtc->unpin_work_count);
 unlock:
@@ -14794,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                        DRM_DEBUG_KMS("failed to pin object\n");
                        return PTR_ERR(vma);
                }
+
+               to_intel_plane_state(new_state)->vma = vma;
        }
 
        return 0;
@@ -14812,19 +14807,12 @@ void
 intel_cleanup_plane_fb(struct drm_plane *plane,
                       struct drm_plane_state *old_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-       struct intel_plane_state *old_intel_state;
-       struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
-       struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
-
-       old_intel_state = to_intel_plane_state(old_state);
-
-       if (!obj && !old_obj)
-               return;
+       struct i915_vma *vma;
 
-       if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
-           !INTEL_INFO(dev_priv)->cursor_needs_physical))
-               intel_unpin_fb_obj(old_state->fb, old_state->rotation);
+       /* Should only be called after a successful intel_prepare_plane_fb()! */
+       vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
+       if (vma)
+               intel_unpin_fb_vma(vma);
 }
 
 int
@@ -15166,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
        if (!obj)
                addr = 0;
        else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
-               addr = i915_gem_object_ggtt_offset(obj, NULL);
+               addr = intel_plane_ggtt_offset(state);
        else
                addr = obj->phys_handle->busaddr;
 
@@ -17066,41 +17054,12 @@ void intel_display_resume(struct drm_device *dev)
 void intel_modeset_gem_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_crtc *c;
-       struct drm_i915_gem_object *obj;
 
        intel_init_gt_powersave(dev_priv);
 
        intel_modeset_init_hw(dev);
 
        intel_setup_overlay(dev_priv);
-
-       /*
-        * Make sure any fbs we allocated at startup are properly
-        * pinned & fenced.  When we do the allocation it's too early
-        * for this.
-        */
-       for_each_crtc(dev, c) {
-               struct i915_vma *vma;
-
-               obj = intel_fb_obj(c->primary->fb);
-               if (obj == NULL)
-                       continue;
-
-               mutex_lock(&dev->struct_mutex);
-               vma = intel_pin_and_fence_fb_obj(c->primary->fb,
-                                                c->primary->state->rotation);
-               mutex_unlock(&dev->struct_mutex);
-               if (IS_ERR(vma)) {
-                       DRM_ERROR("failed to pin boot fb on pipe %d\n",
-                                 to_intel_crtc(c)->pipe);
-                       drm_framebuffer_unreference(c->primary->fb);
-                       c->primary->fb = NULL;
-                       c->primary->crtc = c->primary->state->crtc = NULL;
-                       update_state_fb(c->primary);
-                       c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
-               }
-       }
 }
 
 int intel_connector_register(struct drm_connector *connector)
index cd72ae171eeb673de11f1ca6dcc6f1ab06fde81c..03a2112004f91e1d5ac011cabc255a36867e0e71 100644 (file)
@@ -377,6 +377,7 @@ struct intel_atomic_state {
 struct intel_plane_state {
        struct drm_plane_state base;
        struct drm_rect clip;
+       struct i915_vma *vma;
 
        struct {
                u32 offset;
@@ -1046,6 +1047,7 @@ struct intel_flip_work {
        struct work_struct mmio_work;
 
        struct drm_crtc *crtc;
+       struct i915_vma *old_vma;
        struct drm_framebuffer *old_fb;
        struct drm_i915_gem_object *pending_flip_obj;
        struct drm_pending_vblank_event *event;
@@ -1273,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct drm_modeset_acquire_ctx *ctx);
 struct i915_vma *
 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
+void intel_unpin_fb_vma(struct i915_vma *vma);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
                           struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1362,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
+{
+       return i915_ggtt_offset(state->vma);
+}
 
 u32 skl_plane_ctl_format(uint32_t pixel_format);
 u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
index 62f215b12eb5274b8251d3f46d2a4fdbfc590e96..f3a1d6a5cabe9fcf76f5812dc526781b678f7e41 100644 (file)
@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
        if (IS_I945GM(dev_priv))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-       fbc_ctl |= params->fb.fence_reg;
+       fbc_ctl |= params->vma->fence->id;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
 }
 
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
        else
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
-               dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
+       if (params->vma->fence) {
+               dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
                I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
        } else {
                I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
                break;
        }
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+       if (params->vma->fence) {
                dpfc_ctl |= DPFC_CTL_FENCE_EN;
                if (IS_GEN5(dev_priv))
-                       dpfc_ctl |= params->fb.fence_reg;
+                       dpfc_ctl |= params->vma->fence->id;
                if (IS_GEN6(dev_priv)) {
                        I915_WRITE(SNB_DPFC_CTL_SA,
-                                  SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+                                  SNB_CPU_FENCE_ENABLE |
+                                  params->vma->fence->id);
                        I915_WRITE(DPFC_CPU_FENCE_OFFSET,
                                   params->crtc.fence_y_offset);
                }
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
        }
 
        I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
-       I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
+       I915_WRITE(ILK_FBC_RT_BASE,
+                  i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
                break;
        }
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+       if (params->vma->fence) {
                dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
                I915_WRITE(SNB_DPFC_CTL_SA,
-                          SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+                          SNB_CPU_FENCE_ENABLE |
+                          params->vma->fence->id);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
        } else {
                I915_WRITE(SNB_DPFC_CTL_SA, 0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
        return effective_w <= max_w && effective_h <= max_h;
 }
 
-/* XXX replace me when we have VMA tracking for intel_plane_state */
-static int get_fence_id(struct drm_framebuffer *fb)
-{
-       struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
-
-       return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
-}
-
 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                                         struct intel_crtc_state *crtc_state,
                                         struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;
        struct drm_framebuffer *fb = plane_state->base.fb;
-       struct drm_i915_gem_object *obj;
+
+       cache->vma = NULL;
 
        cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        if (!cache->plane.visible)
                return;
 
-       obj = intel_fb_obj(fb);
-
-       /* FIXME: We lack the proper locking here, so only run this on the
-        * platforms that need. */
-       if (IS_GEN(dev_priv, 5, 6))
-               cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
        cache->fb.pixel_format = fb->pixel_format;
        cache->fb.stride = fb->pitches[0];
-       cache->fb.fence_reg = get_fence_id(fb);
-       cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
+
+       cache->vma = plane_state->vma;
 }
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
                return false;
        }
 
-       if (!cache->plane.visible) {
+       if (!cache->vma) {
                fbc->no_fbc_reason = "primary plane not visible";
                return false;
        }
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
         * so have no fence associated with it) due to aperture constraints
         * at the time of pinning.
         */
-       if (cache->fb.tiling_mode != I915_TILING_X ||
-           cache->fb.fence_reg == I915_FENCE_REG_NONE) {
+       if (!cache->vma->fence) {
                fbc->no_fbc_reason = "framebuffer not tiled or fenced";
                return false;
        }
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
         * zero. */
        memset(params, 0, sizeof(*params));
 
+       params->vma = cache->vma;
+
        params->crtc.pipe = crtc->pipe;
        params->crtc.plane = crtc->plane;
        params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
 
        params->fb.pixel_format = cache->fb.pixel_format;
        params->fb.stride = cache->fb.stride;
-       params->fb.fence_reg = cache->fb.fence_reg;
 
        params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
-
-       params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
 }
 
 static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
index 8cf2d80f22540a35dc4245a842d8473ce7d852d9..f4a8c4fc57c4e654a1af91903275189c841b35bd 100644 (file)
@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
        drm_fb_helper_release_fbi(helper);
 out_unpin:
-       intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+       intel_unpin_fb_vma(vma);
 out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 
        if (ifbdev->fb) {
                mutex_lock(&ifbdev->helper.dev->struct_mutex);
-               intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+               intel_unpin_fb_vma(ifbdev->vma);
                mutex_unlock(&ifbdev->helper.dev->struct_mutex);
 
                drm_framebuffer_remove(&ifbdev->fb->base);
index 8f131a08d440cf02cbcd9ffe255b94e3f4eb6077..242a73e66d82862bea3ba881474efe327cd02d6e 100644 (file)
@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
 
        I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
        I915_WRITE(PLANE_SURF(pipe, plane),
-                  intel_fb_gtt_offset(fb, rotation) + surf_addr);
+                  intel_plane_ggtt_offset(plane_state) + surf_addr);
        POSTING_READ(PLANE_SURF(pipe, plane));
 }
 
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
        I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
        I915_WRITE(SPCNTR(pipe, plane), sprctl);
        I915_WRITE(SPSURF(pipe, plane),
-                  intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
        POSTING_READ(SPSURF(pipe, plane));
 }
 
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
                I915_WRITE(SPRSCALE(pipe), sprscale);
        I915_WRITE(SPRCTL(pipe), sprctl);
        I915_WRITE(SPRSURF(pipe),
-                  intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
        POSTING_READ(SPRSURF(pipe));
 }
 
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
        I915_WRITE(DVSSCALE(pipe), dvsscale);
        I915_WRITE(DVSCNTR(pipe), dvscntr);
        I915_WRITE(DVSSURF(pipe),
-                  intel_fb_gtt_offset(fb, rotation) + dvssurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
        POSTING_READ(DVSSURF(pipe));
 }
 
index 74856a8b8f35943b08a59f8aed5a546e98058d3c..e64f52464ecf55b83a17f25b434cb9e1474c10e1 100644 (file)
@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
                uint32_t mpllP;
 
                pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+               mpllP = (mpllP >> 8) & 0xf;
                if (!mpllP)
                        mpllP = 4;
 
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
                uint32_t clock;
 
                pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
-               return clock;
+               return clock / 1000;
        }
 
        ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
index ccdce1b4eec4b8bf183235ebae2eb5395a306430..d5e58a38f160182354b8c9a126bdcbb5d459f40a 100644 (file)
@@ -99,6 +99,7 @@ struct nv84_fence_priv {
        struct nouveau_bo *bo;
        struct nouveau_bo *bo_gart;
        u32 *suspend;
+       struct mutex mutex;
 };
 
 int  nv84_fence_context_new(struct nouveau_channel *);
index 187ecdb8200273baa77c41a42fbc65c3bcb93db6..21a5775028cc612e9a6c81e280777329f18233fd 100644 (file)
@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev)
 }
 
 /* nouveau_led.c */
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
 int  nouveau_led_init(struct drm_device *dev);
 void nouveau_led_suspend(struct drm_device *dev);
 void nouveau_led_resume(struct drm_device *dev);
index 08f9c6fa0f7f210d3e3fd5a0fbe8f11ff40b1972..1fba3862274474f0001deaec9fefaf6b0fd324b4 100644 (file)
@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
        if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
                /* block access to objects not created via this interface */
                owner = argv->v0.owner;
-               if (argv->v0.object == 0ULL)
+               if (argv->v0.object == 0ULL &&
+                   argv->v0.type != NVIF_IOCTL_V0_DEL)
                        argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
                else
                        argv->v0.owner = NVDRM_OBJECT_USIF;
index 2c2c645076614b4f9c187d24a5e9d2667ea27778..32097fd615fd1e3a5954019c02dec36bd2e8db8c 100644 (file)
@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
                }
        }
 
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               if (crtc->state->event)
+                       drm_crtc_vblank_get(crtc);
+       }
+
        /* Update plane(s). */
        for_each_plane_in_state(state, plane, plane_state, i) {
                struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
                        drm_crtc_send_vblank_event(crtc, crtc->state->event);
                        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                        crtc->state->event = NULL;
+                       drm_crtc_vblank_put(crtc);
                }
        }
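The added calls balance vblank references around the flip: drm_crtc_vblank_get() before the hardware is programmed keeps the vblank machinery alive so the completion event can actually be delivered, and the matching drm_crtc_vblank_put() drops the reference once the event is sent. A schematic of the pairing using the real DRM calls; DRM core headers and the surrounding commit-tail machinery, including event_lock handling, are assumed and omitted:

    static void commit_tail_sketch(struct drm_atomic_state *state)
    {
            struct drm_crtc *crtc;
            struct drm_crtc_state *crtc_state;
            int i;

            for_each_crtc_in_state(state, crtc, crtc_state, i)
                    if (crtc->state->event)
                            drm_crtc_vblank_get(crtc);      /* arm vblank */

            /* ... program planes, wait for the flip to complete ... */

            for_each_crtc_in_state(state, crtc, crtc_state, i) {
                    if (crtc->state->event) {
                            drm_crtc_send_vblank_event(crtc, crtc->state->event);
                            crtc->state->event = NULL;
                            drm_crtc_vblank_put(crtc);      /* pairs with _get */
                    }
            }
    }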
 
index 52b87ae83e7b4d0df54e003d58783eddd8deb6f5..f0b322bec7df22de23bbae372333fb484ac6d9ed 100644 (file)
@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
        struct nv84_fence_chan *fctx = chan->fence;
 
        nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+       mutex_lock(&priv->mutex);
        nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
        nouveau_bo_vma_del(priv->bo, &fctx->vma);
+       mutex_unlock(&priv->mutex);
        nouveau_fence_context_del(&fctx->base);
        chan->fence = NULL;
        nouveau_fence_context_free(&fctx->base);
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
        fctx->base.sync32 = nv84_fence_sync32;
        fctx->base.sequence = nv84_fence_read(chan);
 
+       mutex_lock(&priv->mutex);
        ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
        if (ret == 0) {
                ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
                                        &fctx->vma_gart);
        }
+       mutex_unlock(&priv->mutex);
 
        if (ret)
                nv84_fence_context_del(chan);
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm)
        priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
        priv->base.uevent = true;
 
+       mutex_init(&priv->mutex);
+
        /* Use VRAM if there is any; otherwise fall back to system memory */
        domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
                         /*
index 6f0436df021953337ba29f0ff9c02c507214b07b..f8f2f16c22a2a2502bf283c63a5d1fc124d0ff89 100644 (file)
@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
                        );
                }
                for (i = 0; i < size; i++)
-                       nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
+                       nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
                for (; i < 0x60; i++)
                        nvkm_wr32(device, 0x61c440 + soff, (i << 8));
                nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
index 567466f93cd5d9645020e3b89c282808d88397e4..0db8efbf1c2e2e9cd84b689098bdba9d666843bc 100644 (file)
@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
        case 0x94:
        case 0x96:
        case 0x98:
-       case 0xaa:
-       case 0xac:
                return true;
        default:
                break;
index e0c143b865f39cb36074b5e524638530cadeede9..30bd4a6a9d466e11755bf95cee6929473ae65adf 100644 (file)
  *   2.46.0 - Add PFP_SYNC_ME support on evergreen
  *   2.47.0 - Add UVD_NO_OP register support
  *   2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
+ *   2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       48
+#define KMS_DRIVER_MINOR       49
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index 0bcffd8a7bd3ceac0de37cd0b44344011963be77..96683f5b2b1b722db08de97550aa8cdf99444a1c 100644 (file)
@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 
        man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
-       args->vram_size = rdev->mc.real_vram_size;
-       args->vram_visible = (u64)man->size << PAGE_SHIFT;
+       args->vram_size = (u64)man->size << PAGE_SHIFT;
+       args->vram_visible = rdev->mc.visible_vram_size;
        args->vram_visible -= rdev->vram_pin_size;
        args->gart_size = rdev->mc.gtt_size;
        args->gart_size -= rdev->gart_pin_size;
index f31a778b085148fea4a52599c6b89be203889781..b22d0f83f8e38a9ee0d0eb7381e95d6b90442b61 100644 (file)
@@ -168,7 +168,7 @@ struct cp2112_device {
        atomic_t xfer_avail;
        struct gpio_chip gc;
        u8 *in_out_buffer;
-       spinlock_t lock;
+       struct mutex lock;
 
        struct gpio_desc *desc[8];
        bool gpio_poll;
@@ -186,10 +186,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
        u8 *buf = dev->in_out_buffer;
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&dev->lock, flags);
+       mutex_lock(&dev->lock);
 
        ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
                                 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -213,8 +212,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
        ret = 0;
 
 exit:
-       spin_unlock_irqrestore(&dev->lock, flags);
-       return ret <= 0 ? ret : -EIO;
+       mutex_unlock(&dev->lock);
+       return ret <= 0 ? ret : -EIO;
 }
 
 static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -222,10 +221,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
        u8 *buf = dev->in_out_buffer;
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&dev->lock, flags);
+       mutex_lock(&dev->lock);
 
        buf[0] = CP2112_GPIO_SET;
        buf[1] = value ? 0xff : 0;
@@ -237,7 +235,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
        if (ret < 0)
                hid_err(hdev, "error setting GPIO values: %d\n", ret);
 
-       spin_unlock_irqrestore(&dev->lock, flags);
+       mutex_unlock(&dev->lock);
 }
 
 static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -245,10 +243,9 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
        u8 *buf = dev->in_out_buffer;
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&dev->lock, flags);
+       mutex_lock(&dev->lock);
 
        ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
                                 CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
@@ -262,7 +259,7 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
        ret = buf[1];
 
 exit:
-       spin_unlock_irqrestore(&dev->lock, flags);
+       mutex_unlock(&dev->lock);
 
        return ret;
 }
@@ -284,10 +281,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
        struct cp2112_device *dev = gpiochip_get_data(chip);
        struct hid_device *hdev = dev->hdev;
        u8 *buf = dev->in_out_buffer;
-       unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&dev->lock, flags);
+       mutex_lock(&dev->lock);
 
        ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
                                 CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -308,7 +304,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
                goto fail;
        }
 
-       spin_unlock_irqrestore(&dev->lock, flags);
+       mutex_unlock(&dev->lock);
 
        /*
         * Set gpio value when output direction is already set,
@@ -319,7 +315,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
        return 0;
 
 fail:
-       spin_unlock_irqrestore(&dev->lock, flags);
+       mutex_unlock(&dev->lock);
        return ret < 0 ? ret : -EIO;
 }
 
@@ -1235,7 +1231,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (!dev->in_out_buffer)
                return -ENOMEM;
 
-       spin_lock_init(&dev->lock);
+       mutex_init(&dev->lock);
 
        ret = hid_parse(hdev);
        if (ret) {
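The driver-wide switch from spinlock to mutex exists because hid_hw_raw_request() performs USB I/O and may sleep, which is not allowed while a spinlock is held; every GPIO callback therefore takes a sleeping lock instead. A minimal sketch of the rule; the struct is illustrative and hid_hw_raw_request() is the real sleeping call being protected:

    #include <linux/mutex.h>

    struct cp2112_like {
            struct mutex lock;      /* was spinlock_t: the callee can sleep */
    };

    static int gpio_op_sketch(struct cp2112_like *dev)
    {
            int ret;

            mutex_lock(&dev->lock);         /* sleeping lock: safe here */
            ret = 0; /* hid_hw_raw_request(...) may block on the USB bus */
            mutex_unlock(&dev->lock);
            return ret;
    }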
index f46f2c5117fae76a1c87105363e5c8db4c8673a3..350accfee8e85b2e545c0171f6582f4e62655b50 100644 (file)
@@ -76,6 +76,9 @@
 #define USB_VENDOR_ID_ALPS_JP          0x044E
 #define HID_DEVICE_ID_ALPS_U1_DUAL     0x120B
 
+#define USB_VENDOR_ID_AMI              0x046b
+#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE      0xff10
+
 #define USB_VENDOR_ID_ANTON            0x1130
 #define USB_DEVICE_ID_ANTON_TOUCH_PAD  0x3101
 
index c5c5fbe9d60577f44085d86a7fb5cf60efb6acd3..52026dc94d5c4b0306ce585be293cbe2cb1910d9 100644 (file)
@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
                .driver_data = LG_NOGET | LG_FF4 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
-               .driver_data = LG_FF2 },
+               .driver_data = LG_NOGET | LG_FF2 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
                .driver_data = LG_FF3 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
index e9d6cc7cdfc5c8019422d45914dc0363448bcb12..30a2977e26454f10fd72b855ec0b634aa868dcae 100644 (file)
@@ -57,6 +57,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
index 0884dc9554fdf632e684aa3689292368d5fb7e3b..672145b0d8f584d8fce50a74e799b36319448cb4 100644 (file)
@@ -166,19 +166,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
                wacom->id[0] = STYLUS_DEVICE_ID;
        }
 
-       pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
-       if (features->pressure_max > 255)
-               pressure = (pressure << 1) | ((data[4] >> 6) & 1);
-       pressure += (features->pressure_max + 1) / 2;
-
-       input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
-       input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
-       input_report_abs(input, ABS_PRESSURE, pressure);
-
-       input_report_key(input, BTN_TOUCH, data[4] & 0x08);
-       input_report_key(input, BTN_STYLUS, data[4] & 0x10);
-       /* Only allow the stylus2 button to be reported for the pen tool. */
-       input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+       if (prox) {
+               pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
+               if (features->pressure_max > 255)
+                       pressure = (pressure << 1) | ((data[4] >> 6) & 1);
+               pressure += (features->pressure_max + 1) / 2;
+
+               input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+               input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+               input_report_abs(input, ABS_PRESSURE, pressure);
+
+               input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+               input_report_key(input, BTN_STYLUS, data[4] & 0x10);
+               /* Only allow the stylus2 button to be reported for the pen tool. */
+               input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+       }
 
        if (!prox)
                wacom->id[0] = 0;
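
The wacom_pl_irq() rework above wraps all position, pressure and button reporting in `if (prox)`: packets that arrive while the pen is out of proximity carry stale or undefined coordinate bytes, and forwarding them caused spurious events. The general input-driver pattern this follows, as a sketch with hypothetical x/y values:

        if (prox) {                     /* tool present: packet data is valid */
                input_report_abs(input, ABS_X, x);
                input_report_abs(input, ABS_Y, y);
        }
        /* proximity state itself is still reported either way */
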
index cd49cb17eb7fb385ddbf507ef14b2ea2be090159..308dbda700ebdaeb02f222aa46dc7bb79c24c0da 100644 (file)
@@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
                return ret;
        }
 
+       init_cached_read_index(channel);
        next_read_location = hv_get_next_read_location(inring_info);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
                                                    sizeof(desc),
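
The single added hv line re-primes the channel's cached read index at the start of each hv_ringbuffer_read() call; a stale cached index from an earlier read could otherwise desynchronize from the ring's real read position and make the copy start at the wrong offset. Sketch of the intended ordering (helper names as in the hunk; init_cached_read_index() is assumed to resync the cache with the ring's live index):

        init_cached_read_index(channel);        /* drop any stale cached offset */
        next_read_location = hv_get_next_read_location(inring_info);
        /* ... then copy the descriptor and payload from that location ... */
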
index 2bbf0c521bebb5c44840a8ce5296024910d8d73d..7d61b566e148dddd21ce2986cba233567802f93c 100644 (file)
@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
 
 static int palmas_gpadc_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct palmas_gpadc *adc = iio_priv(indio_dev);
        int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
        int ret;
@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
 
 static int palmas_gpadc_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct palmas_gpadc *adc = iio_priv(indio_dev);
        int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
        int ret;
index 9a081465c42f4225d26747a734c5daceea68c842..6bb23a49e81eb8cf304a2f81f50c5e36a91e2d48 100644 (file)
@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
 
 static int __maybe_unused afe4403_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
        struct afe4403_data *afe = iio_priv(indio_dev);
        int ret;
 
@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
 
 static int __maybe_unused afe4403_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
        struct afe4403_data *afe = iio_priv(indio_dev);
        int ret;
 
index 45266404f7e3b5bd7c7790a0b05ff298fc24d5a9..964f5231a831c437c277e4bcfe292720104043f7 100644 (file)
@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
 
 static int __maybe_unused afe4404_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct afe4404_data *afe = iio_priv(indio_dev);
        int ret;
 
@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
 
 static int __maybe_unused afe4404_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct afe4404_data *afe = iio_priv(indio_dev);
        int ret;
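
The palmas_gpadc, afe4403 and afe4404 hunks above all fix the same class of bug: in a dev_pm_ops callback, `dev` is the bus device (platform, SPI or I2C respectively), not the struct device embedded inside the iio_dev, so dev_to_iio_dev(), a container_of() on the wrong object, returned a bogus pointer. The iio_dev has to come back out of the driver data set at probe time. A sketch with a hypothetical driver:

        static int __maybe_unused foo_suspend(struct device *dev)
        {
                /* 'dev' is the bus device; probe stored the iio_dev as drvdata */
                struct iio_dev *indio_dev = dev_get_drvdata(dev);
                struct foo_data *st = iio_priv(indio_dev);

                return foo_enter_low_power(st); /* hypothetical helper */
        }

spi_get_drvdata(to_spi_device(dev)) and i2c_get_clientdata(to_i2c_client(dev)), as used above, are the bus-specific spellings of the same lookup.
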
 
index 90ab8a2d2846f8a8591ee6b1615dce2c984020ef..183c14329d6e350f6325b0e77a95dcb56de892dd 100644 (file)
@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
 
        mutex_lock(&data->lock);
 
-       while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
+       while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
                ret = max30100_read_measurement(data);
                if (ret)
                        break;
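
The max30100 change is a pure C operator-precedence fix: `>` binds tighter than `=`, so `cnt = max30100_fifo_count(data) > 0` assigned the boolean result of the comparison (0 or 1) to cnt instead of the FIFO depth, and the loop's bookkeeping never saw the real fill level. Reduced to its essence (fifo_count() and drain_one_sample() are hypothetical stand-ins):

        int cnt = 0;

        /* buggy: parsed as cnt = (fifo_count() > 0), so cnt is 0 or 1 */
        while (cnt || (cnt = fifo_count() > 0)) {
                drain_one_sample();
                cnt--;
        }

        /* fixed: the parenthesized assignment keeps the real element count */
        while (cnt || (cnt = fifo_count()) > 0) {
                drain_one_sample();
                cnt--;
        }
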
index 9c47bc98f3acdea4cb4b56b23a72e582c34a413b..2a22ad92033306d02eec60f288c00a65d5186c37 100644 (file)
@@ -71,7 +71,8 @@
  * a) select an implementation using busy loop polling on those systems
  * b) use the checksum to do some probabilistic decoding
  */
-#define DHT11_START_TRANSMISSION       18  /* ms */
+#define DHT11_START_TRANSMISSION_MIN   18000  /* us */
+#define DHT11_START_TRANSMISSION_MAX   20000  /* us */
 #define DHT11_MIN_TIMERES      34000  /* ns */
 #define DHT11_THRESHOLD                49000  /* ns */
 #define DHT11_AMBIG_LOW                23000  /* ns */
@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
                ret = gpio_direction_output(dht11->gpio, 0);
                if (ret)
                        goto err;
-               msleep(DHT11_START_TRANSMISSION);
+               usleep_range(DHT11_START_TRANSMISSION_MIN,
+                            DHT11_START_TRANSMISSION_MAX);
                ret = gpio_direction_input(dht11->gpio);
                if (ret)
                        goto err;
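
Background on the dht11 change: msleep() is jiffies-based and may oversleep by a full scheduler tick or more, pushing the sensor's start pulse well past its nominal length, while usleep_range() is hrtimer-based and bounds the delay explicitly; it is the recommended API for precise millisecond-scale sleeps. The resulting start sequence:

        ret = gpio_direction_output(dht11->gpio, 0);    /* pull the bus low */
        if (ret)
                goto err;
        usleep_range(DHT11_START_TRANSMISSION_MIN,      /* 18000 us */
                     DHT11_START_TRANSMISSION_MAX);     /* 20000 us */
        ret = gpio_direction_input(dht11->gpio);        /* release; sensor answers */
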
index 11447ab1055cd4beadf7eca752bdf9494d76cef1..bf5c36e229bacd63dd7e77d028aa65fb4555ce89 100644 (file)
@@ -901,7 +901,7 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
        data->enabled = true;
        if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
                retval = disable_irq_wake(irq);
-               if (!retval)
+               if (retval)
                        dev_warn(&rmi_dev->dev,
                                 "Failed to disable irq for wake: %d\n",
                                 retval);
@@ -936,7 +936,7 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
        disable_irq(irq);
        if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
                retval = enable_irq_wake(irq);
-               if (!retval)
+               if (retval)
                        dev_warn(&rmi_dev->dev,
                                 "Failed to enable irq for wake: %d\n",
                                 retval);
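
Both rmi_driver hunks flip an inverted error check: enable_irq_wake() and disable_irq_wake() return 0 on success and non-zero on failure, so warning on `!retval` logged "Failed ..." on every successful call and stayed silent on actual errors. The kernel-wide convention:

        retval = enable_irq_wake(irq);
        if (retval)     /* non-zero means the wake source could not be armed */
                dev_warn(&rmi_dev->dev,
                         "Failed to enable irq for wake: %d\n", retval);
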
index 83cf11312fd971e0cacc16bd70eb474dd9c13b52..c9d1c91e1887094f2ef740d9eee3af8d16ee6f82 100644 (file)
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
        }
        platform_set_drvdata(wm->battery_dev, wm);
        wm->battery_dev->dev.parent = dev;
-       wm->battery_dev->dev.platform_data = pdata->batt_pdata;
+       wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
        ret = platform_device_add(wm->battery_dev);
        if (ret < 0)
                goto batt_reg_err;
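
The wm97xx fix handles boards that bind the driver without platform data: pdata may legitimately be NULL, and the old code dereferenced it while wiring up the optional battery child device. Defensive form, with dev_get_platdata() shown as the usual way such a pointer is obtained earlier in probe (a sketch, not the exact surrounding code):

        struct wm97xx_pdata *pdata = dev_get_platdata(dev);     /* may be NULL */

        wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
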
index 8ee54d71c7eb3ad1e2a43f14068e75939e0dd077..37e204f3d9becccd31706ea4fa1241552868a975 100644 (file)
@@ -352,9 +352,6 @@ config MTK_IOMMU_V1
        select IOMMU_API
        select MEMORY
        select MTK_SMI
-       select COMMON_CLK_MT2701_MMSYS
-       select COMMON_CLK_MT2701_IMGSYS
-       select COMMON_CLK_MT2701_VDECSYS
        help
          Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is
         the Multimedia Memory Management Unit. This option enables remapping of
index d109e41204e84229fa515a82d678aaf4974847f1..1b5b8c5361c506f2b835642eff2e3e57fbdf1370 100644 (file)
@@ -112,7 +112,7 @@ static struct timer_list queue_timer;
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
  */
-static const struct iommu_ops amd_iommu_ops;
+const struct iommu_ops amd_iommu_ops;
 
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
 int amd_iommu_max_glx_val = -1;
@@ -445,6 +445,7 @@ static void init_iommu_group(struct device *dev)
 static int iommu_init_device(struct device *dev)
 {
        struct iommu_dev_data *dev_data;
+       struct amd_iommu *iommu;
        int devid;
 
        if (dev->archdata.iommu)
@@ -454,6 +455,8 @@ static int iommu_init_device(struct device *dev)
        if (devid < 0)
                return devid;
 
+       iommu = amd_iommu_rlookup_table[devid];
+
        dev_data = find_dev_data(devid);
        if (!dev_data)
                return -ENOMEM;
@@ -469,8 +472,7 @@ static int iommu_init_device(struct device *dev)
 
        dev->archdata.iommu = dev_data;
 
-       iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
-                         dev);
+       iommu_device_link(&iommu->iommu, dev);
 
        return 0;
 }
@@ -495,13 +497,16 @@ static void iommu_ignore_device(struct device *dev)
 
 static void iommu_uninit_device(struct device *dev)
 {
-       int devid;
        struct iommu_dev_data *dev_data;
+       struct amd_iommu *iommu;
+       int devid;
 
        devid = get_device_id(dev);
        if (devid < 0)
                return;
 
+       iommu = amd_iommu_rlookup_table[devid];
+
        dev_data = search_dev_data(devid);
        if (!dev_data)
                return;
@@ -509,8 +514,7 @@ static void iommu_uninit_device(struct device *dev)
        if (dev_data->domain)
                detach_device(dev);
 
-       iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
-                           dev);
+       iommu_device_unlink(&iommu->iommu, dev);
 
        iommu_group_remove_device(dev);
 
@@ -3233,7 +3237,7 @@ static void amd_iommu_apply_resv_region(struct device *dev,
        WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
 }
 
-static const struct iommu_ops amd_iommu_ops = {
+const struct iommu_ops amd_iommu_ops = {
        .capable = amd_iommu_capable,
        .domain_alloc = amd_iommu_domain_alloc,
        .domain_free  = amd_iommu_domain_free,
index 6799cf9713f77f460f990e6bc0f38b31422c0745..04cdac7ab3e34bbcc3a4cba19fbb57d44129549b 100644 (file)
@@ -94,6 +94,8 @@
  * out of it.
  */
 
+extern const struct iommu_ops amd_iommu_ops;
+
 /*
  * structure describing one IOMMU in the ACPI table. Typically followed by one
  * or more ivhd_entrys.
@@ -1635,9 +1637,10 @@ static int iommu_init_pci(struct amd_iommu *iommu)
        amd_iommu_erratum_746_workaround(iommu);
        amd_iommu_ats_write_check_workaround(iommu);
 
-       iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
-                                              amd_iommu_groups, "ivhd%d",
-                                              iommu->index);
+       iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
+                              amd_iommu_groups, "ivhd%d", iommu->index);
+       iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
+       iommu_device_register(&iommu->iommu);
 
        return pci_enable_device(iommu->dev);
 }
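
This iommu_init_pci() hunk shows the conversion pattern repeated by every driver in this merge: instead of iommu_device_create() allocating a standalone sysfs device, the driver embeds a struct iommu_device in its per-instance data (see the amd_iommu_types.h hunk below) and registers it in three steps. The generic shape, with placeholder names:

        /* probe */
        ret = iommu_device_sysfs_add(&inst->iommu, dev, groups, "fmt%d", id);
        if (ret)
                return ret;
        iommu_device_set_ops(&inst->iommu, &driver_iommu_ops);
        ret = iommu_device_register(&inst->iommu);

        /* teardown, reversing the above */
        iommu_device_unregister(&inst->iommu);
        iommu_device_sysfs_remove(&inst->iommu);

iommu_device_link()/iommu_device_unlink() then take the embedded handle as well, as the amd_iommu.c hunks above already show.
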
@@ -2230,7 +2233,7 @@ static int __init early_amd_iommu_init(void)
         */
        ret = check_ivrs_checksum(ivrs_base);
        if (ret)
-               return ret;
+               goto out;
 
        amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
        DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
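
The early_amd_iommu_init() change converts a bare `return ret` into `goto out` so a checksum failure unwinds through the function's shared cleanup label (the label body lies outside this hunk and presumably unmaps ivrs_base); returning directly skipped that cleanup and leaked the mapping. The canonical shape:

        ret = check_ivrs_checksum(ivrs_base);
        if (ret)
                goto out;       /* unwind via the common exit path */
        /* ... */
out:
        /* shared cleanup (presumably unmapping ivrs_base), then: */
        return ret;
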
index 0d91785ebdc34accca7c4e9ed45da300ddbb68a7..af00f381a7b1a34e060039db569b274d04a532d1 100644 (file)
@@ -535,8 +535,8 @@ struct amd_iommu {
        /* if one, we need to send a completion wait command */
        bool need_sync;
 
-       /* IOMMU sysfs device */
-       struct device *iommu_dev;
+       /* Handle for IOMMU core code */
+       struct iommu_device iommu;
 
        /*
         * We can't rely on the BIOS to restore all values on reinit, so we
index 6cdd501a7df9a7a7f508c102b3ea4e0e24452327..5806a6acc94ecd7543c2435558a0907ec0934ff2 100644 (file)
@@ -616,6 +616,9 @@ struct arm_smmu_device {
        unsigned int                    sid_bits;
 
        struct arm_smmu_strtab_cfg      strtab_cfg;
+
+       /* IOMMU core code handle */
+       struct iommu_device             iommu;
 };
 
 /* SMMU private data for each master */
@@ -1042,13 +1045,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
                }
        }
 
-       /* Nuke the existing Config, as we're going to rewrite it */
-       val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
-
-       if (ste->valid)
-               val |= STRTAB_STE_0_V;
-       else
-               val &= ~STRTAB_STE_0_V;
+       /* Nuke the existing STE_0 value, as we're going to rewrite it */
+       val = ste->valid ? STRTAB_STE_0_V : 0;
 
        if (ste->bypass) {
                val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
@@ -1081,7 +1079,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
                val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
                        << STRTAB_STE_0_S1CTXPTR_SHIFT) |
                        STRTAB_STE_0_CFG_S1_TRANS;
-
        }
 
        if (ste->s2_cfg) {
@@ -1791,8 +1788,10 @@ static int arm_smmu_add_device(struct device *dev)
        }
 
        group = iommu_group_get_for_dev(dev);
-       if (!IS_ERR(group))
+       if (!IS_ERR(group)) {
                iommu_group_put(group);
+               iommu_device_link(&smmu->iommu, dev);
+       }
 
        return PTR_ERR_OR_ZERO(group);
 }
@@ -1801,14 +1800,17 @@ static void arm_smmu_remove_device(struct device *dev)
 {
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        struct arm_smmu_master_data *master;
+       struct arm_smmu_device *smmu;
 
        if (!fwspec || fwspec->ops != &arm_smmu_ops)
                return;
 
        master = fwspec->iommu_priv;
+       smmu = master->smmu;
        if (master && master->ste.valid)
                arm_smmu_detach_dev(dev);
        iommu_group_remove_device(dev);
+       iommu_device_unlink(&smmu->iommu, dev);
        kfree(master);
        iommu_fwspec_free(dev);
 }
@@ -2004,17 +2006,9 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
        u32 size, l1size;
        struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 
-       /*
-        * If we can resolve everything with a single L2 table, then we
-        * just need a single L1 descriptor. Otherwise, calculate the L1
-        * size, capped to the SIDSIZE.
-        */
-       if (smmu->sid_bits < STRTAB_SPLIT) {
-               size = 0;
-       } else {
-               size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
-               size = min(size, smmu->sid_bits - STRTAB_SPLIT);
-       }
+       /* Calculate the L1 size, capped to the SIDSIZE. */
+       size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
+       size = min(size, smmu->sid_bits - STRTAB_SPLIT);
        cfg->num_l1_ents = 1 << size;
 
        size += STRTAB_SPLIT;
@@ -2525,6 +2519,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
        smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
        smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
 
+       /*
+        * If the SMMU supports fewer bits than would fill a single L2 stream
+        * table, use a linear table instead.
+        */
+       if (smmu->sid_bits <= STRTAB_SPLIT)
+               smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
+
        /* IDR5 */
        reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
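
These two arm-smmu-v3 hunks belong together: the special case removed from arm_smmu_init_strtab_2lvl() moves to probe time, where the driver simply clears ARM_SMMU_FEAT_2_LVL_STRTAB when a linear table suffices. The arithmetic, assuming STRTAB_SPLIT is the number of stream-ID bits one L2 table resolves (8 in this driver):

        /* sid_bits <= STRTAB_SPLIT means at most 1 << STRTAB_SPLIT STEs,
         * i.e. exactly one L2 table's worth; the L1 level would hold a
         * single descriptor and add nothing but an extra walk. */
        if (smmu->sid_bits <= STRTAB_SPLIT)
                smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;

Note the condition also widens from the old `<` to `<=`: sid_bits equal to STRTAB_SPLIT still fits a single L2 table, so that case now uses a linear table too.
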
 
@@ -2634,6 +2635,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
 {
        int irq, ret;
        struct resource *res;
+       resource_size_t ioaddr;
        struct arm_smmu_device *smmu;
        struct device *dev = &pdev->dev;
        bool bypass;
@@ -2651,6 +2653,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
                dev_err(dev, "MMIO region too small (%pr)\n", res);
                return -EINVAL;
        }
+       ioaddr = res->start;
 
        smmu->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(smmu->base))
@@ -2703,7 +2706,15 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
                return ret;
 
        /* And we're up. Go go go! */
-       iommu_register_instance(dev->fwnode, &arm_smmu_ops);
+       ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
+                                    "smmu3.%pa", &ioaddr);
+       if (ret)
+               return ret;
+
+       iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
+       iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
+
+       ret = iommu_device_register(&smmu->iommu);
 
 #ifdef CONFIG_PCI
        if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
index 54368f557bed855ea7f594735ecdde836ef10236..abf6496843a617070289377ffad3fd1e119b0aa6 100644 (file)
@@ -24,6 +24,7 @@
  *     - v7/v8 long-descriptor format
  *     - Non-secure access to the SMMU
  *     - Context fault reporting
+ *     - Extended Stream ID (16 bit)
  */
 
 #define pr_fmt(fmt) "arm-smmu: " fmt
@@ -87,6 +88,7 @@
 #define sCR0_CLIENTPD                  (1 << 0)
 #define sCR0_GFRE                      (1 << 1)
 #define sCR0_GFIE                      (1 << 2)
+#define sCR0_EXIDENABLE                        (1 << 3)
 #define sCR0_GCFGFRE                   (1 << 4)
 #define sCR0_GCFGFIE                   (1 << 5)
 #define sCR0_USFCFG                    (1 << 10)
 #define ID0_NUMIRPT_MASK               0xff
 #define ID0_NUMSIDB_SHIFT              9
 #define ID0_NUMSIDB_MASK               0xf
+#define ID0_EXIDS                      (1 << 8)
 #define ID0_NUMSMRG_SHIFT              0
 #define ID0_NUMSMRG_MASK               0xff
 
 #define ARM_SMMU_GR0_S2CR(n)           (0xc00 + ((n) << 2))
 #define S2CR_CBNDX_SHIFT               0
 #define S2CR_CBNDX_MASK                        0xff
+#define S2CR_EXIDVALID                 (1 << 10)
 #define S2CR_TYPE_SHIFT                        16
 #define S2CR_TYPE_MASK                 0x3
 enum arm_smmu_s2cr_type {
@@ -260,6 +264,7 @@ enum arm_smmu_s2cr_privcfg {
 
 #define TTBCR2_SEP_SHIFT               15
 #define TTBCR2_SEP_UPSTREAM            (0x7 << TTBCR2_SEP_SHIFT)
+#define TTBCR2_AS                      (1 << 4)
 
 #define TTBRn_ASID_SHIFT               48
 
@@ -354,6 +359,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_FMT_AARCH64_64K  (1 << 9)
 #define ARM_SMMU_FEAT_FMT_AARCH32_L    (1 << 10)
 #define ARM_SMMU_FEAT_FMT_AARCH32_S    (1 << 11)
+#define ARM_SMMU_FEAT_EXIDS            (1 << 12)
        u32                             features;
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -383,6 +389,9 @@ struct arm_smmu_device {
        unsigned int                    *irqs;
 
        u32                             cavium_id_base; /* Specific to Cavium */
+
+       /* IOMMU core code handle */
+       struct iommu_device             iommu;
 };
 
 enum arm_smmu_context_fmt {
@@ -781,6 +790,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
                        reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
                        reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
                        reg2 |= TTBCR2_SEP_UPSTREAM;
+                       if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+                               reg2 |= TTBCR2_AS;
                }
                if (smmu->version > ARM_SMMU_V1)
                        writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
@@ -1051,7 +1062,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
        struct arm_smmu_smr *smr = smmu->smrs + idx;
        u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
 
-       if (smr->valid)
+       if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
                reg |= SMR_VALID;
        writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
 }
@@ -1063,6 +1074,9 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
                  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
                  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
 
+       if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
+           smmu->smrs[idx].valid)
+               reg |= S2CR_EXIDVALID;
        writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
 }
 
@@ -1073,6 +1087,34 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
                arm_smmu_write_smr(smmu, idx);
 }
 
+/*
+ * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
+ * should be called after sCR0 is written.
+ */
+static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
+{
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+       u32 smr;
+
+       if (!smmu->smrs)
+               return;
+
+       /*
+        * SMR.ID bits may not be preserved if the corresponding MASK
+        * bits are set, so check each one separately. We can reject
+        * masters later if they try to claim IDs outside these masks.
+        */
+       smr = smmu->streamid_mask << SMR_ID_SHIFT;
+       writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+       smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+       smmu->streamid_mask = smr >> SMR_ID_SHIFT;
+
+       smr = smmu->streamid_mask << SMR_MASK_SHIFT;
+       writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+       smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+       smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+}
+
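
arm_smmu_test_smr_masks() above is the ID0-probe logic removed from arm_smmu_device_cfg_probe() further down, relocated because with EXIDS the live width of the SMR ID and MASK fields depends on sCR0.EXIDENABLE, so probing is only meaningful after arm_smmu_device_reset() has written sCR0. It relies on the standard write/read-back idiom for discovering implemented register bits, distilled here with the function's own names:

        writel_relaxed(smmu->streamid_mask << SMR_ID_SHIFT,
                       gr0_base + ARM_SMMU_GR0_SMR(0));
        smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
        smmu->streamid_mask = smr >> SMR_ID_SHIFT;  /* unimplemented bits read 0 */
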
 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
 {
        struct arm_smmu_smr *smrs = smmu->smrs;
@@ -1445,6 +1487,8 @@ static int arm_smmu_add_device(struct device *dev)
        if (ret)
                goto out_free;
 
+       iommu_device_link(&smmu->iommu, dev);
+
        return 0;
 
 out_free:
@@ -1457,10 +1501,16 @@ out_free:
 static void arm_smmu_remove_device(struct device *dev)
 {
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+       struct arm_smmu_master_cfg *cfg;
+       struct arm_smmu_device *smmu;
 
        if (!fwspec || fwspec->ops != &arm_smmu_ops)
                return;
 
+       cfg  = fwspec->iommu_priv;
+       smmu = cfg->smmu;
+
+       iommu_device_unlink(&smmu->iommu, dev);
        arm_smmu_master_free_smes(fwspec);
        iommu_group_remove_device(dev);
        kfree(fwspec->iommu_priv);
@@ -1674,6 +1725,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
        if (smmu->features & ARM_SMMU_FEAT_VMID16)
                reg |= sCR0_VMID16EN;
 
+       if (smmu->features & ARM_SMMU_FEAT_EXIDS)
+               reg |= sCR0_EXIDENABLE;
+
        /* Push the button */
        __arm_smmu_tlb_sync(smmu);
        writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -1761,11 +1815,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                           "\t(IDR0.CTTW overridden by FW configuration)\n");
 
        /* Max. number of entries we have for stream matching/indexing */
-       size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+       if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
+               smmu->features |= ARM_SMMU_FEAT_EXIDS;
+               size = 1 << 16;
+       } else {
+               size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+       }
        smmu->streamid_mask = size - 1;
        if (id & ID0_SMS) {
-               u32 smr;
-
                smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
                size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
                if (size == 0) {
@@ -1774,21 +1831,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                        return -ENODEV;
                }
 
-               /*
-                * SMR.ID bits may not be preserved if the corresponding MASK
-                * bits are set, so check each one separately. We can reject
-                * masters later if they try to claim IDs outside these masks.
-                */
-               smr = smmu->streamid_mask << SMR_ID_SHIFT;
-               writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-               smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
-               smmu->streamid_mask = smr >> SMR_ID_SHIFT;
-
-               smr = smmu->streamid_mask << SMR_MASK_SHIFT;
-               writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-               smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
-               smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
-
                /* Zero-initialised to mark as invalid */
                smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
                                          GFP_KERNEL);
@@ -1796,8 +1838,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                        return -ENOMEM;
 
                dev_notice(smmu->dev,
-                          "\tstream matching with %lu register groups, mask 0x%x",
-                          size, smmu->smr_mask_mask);
+                          "\tstream matching with %lu register groups", size);
        }
        /* s2cr->type == 0 means translation, so initialise explicitly */
        smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
@@ -2037,6 +2078,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
 static int arm_smmu_device_probe(struct platform_device *pdev)
 {
        struct resource *res;
+       resource_size_t ioaddr;
        struct arm_smmu_device *smmu;
        struct device *dev = &pdev->dev;
        int num_irqs, i, err;
@@ -2057,6 +2099,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
                return err;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ioaddr = res->start;
        smmu->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(smmu->base))
                return PTR_ERR(smmu->base);
@@ -2117,9 +2160,25 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
                }
        }
 
-       iommu_register_instance(dev->fwnode, &arm_smmu_ops);
+       err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
+                                    "smmu.%pa", &ioaddr);
+       if (err) {
+               dev_err(dev, "Failed to register iommu in sysfs\n");
+               return err;
+       }
+
+       iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
+       iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
+
+       err = iommu_device_register(&smmu->iommu);
+       if (err) {
+               dev_err(dev, "Failed to register iommu\n");
+               return err;
+       }
+
        platform_set_drvdata(pdev, smmu);
        arm_smmu_device_reset(smmu);
+       arm_smmu_test_smr_masks(smmu);
 
        /* Oh, for a proper bus abstraction */
        if (!iommu_present(&platform_bus_type))
index 8ccbd7023194ee592fa91dafb67565d1ad9928aa..d9c0decfc91ae2cef9ccf21c3a99afb455bbb9fa 100644 (file)
@@ -74,6 +74,8 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
 static int alloc_iommu(struct dmar_drhd_unit *drhd);
 static void free_iommu(struct intel_iommu *iommu);
 
+extern const struct iommu_ops intel_iommu_ops;
+
 static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 {
        /*
@@ -1078,14 +1080,17 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
        raw_spin_lock_init(&iommu->register_lock);
 
        if (intel_iommu_enabled) {
-               iommu->iommu_dev = iommu_device_create(NULL, iommu,
-                                                      intel_iommu_groups,
-                                                      "%s", iommu->name);
+               err = iommu_device_sysfs_add(&iommu->iommu, NULL,
+                                            intel_iommu_groups,
+                                            "%s", iommu->name);
+               if (err)
+                       goto err_unmap;
 
-               if (IS_ERR(iommu->iommu_dev)) {
-                       err = PTR_ERR(iommu->iommu_dev);
+               iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
+
+               err = iommu_device_register(&iommu->iommu);
+               if (err)
                        goto err_unmap;
-               }
        }
 
        drhd->iommu = iommu;
@@ -1103,7 +1108,8 @@ error:
 
 static void free_iommu(struct intel_iommu *iommu)
 {
-       iommu_device_destroy(iommu->iommu_dev);
+       iommu_device_sysfs_remove(&iommu->iommu);
+       iommu_device_unregister(&iommu->iommu);
 
        if (iommu->irq) {
                if (iommu->pr_irq) {
index 57ba0d3091ea257a221de36f3143311db3989d63..a7e0821c9967e490258921238e6640723e79375d 100644 (file)
@@ -276,6 +276,8 @@ struct sysmmu_drvdata {
        struct list_head owner_node;    /* node for owner controllers list */
        phys_addr_t pgtable;            /* assigned page table structure */
        unsigned int version;           /* our version */
+
+       struct iommu_device iommu;      /* IOMMU core handle */
 };
 
 static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
@@ -381,13 +383,14 @@ static void show_fault_information(struct sysmmu_drvdata *data,
 {
        sysmmu_pte_t *ent;
 
-       dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
-               finfo->name, fault_addr, &data->pgtable);
+       dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
+               dev_name(data->master), finfo->name, fault_addr);
+       dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
        ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
-       dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
+       dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
        if (lv1ent_page(ent)) {
                ent = page_entry(ent, fault_addr);
-               dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
+               dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
        }
 }
 
@@ -611,6 +614,18 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
        data->sysmmu = dev;
        spin_lock_init(&data->lock);
 
+       ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
+                                    dev_name(data->sysmmu));
+       if (ret)
+               return ret;
+
+       iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
+       iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);
+
+       ret = iommu_device_register(&data->iommu);
+       if (ret)
+               return ret;
+
        platform_set_drvdata(pdev, data);
 
        __sysmmu_get_version(data);
@@ -628,8 +643,6 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
 
        pm_runtime_enable(dev);
 
-       of_iommu_set_ops(dev->of_node, &exynos_iommu_ops);
-
        return 0;
 }
 
@@ -743,6 +756,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
                                DMA_TO_DEVICE);
        /* For mapping page table entries we rely on dma == phys */
        BUG_ON(handle != virt_to_phys(domain->pgtable));
+       if (dma_mapping_error(dma_dev, handle))
+               goto err_lv2ent;
 
        spin_lock_init(&domain->lock);
        spin_lock_init(&domain->pgtablelock);
@@ -754,6 +769,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 
        return &domain->domain;
 
+err_lv2ent:
+       free_pages((unsigned long)domain->lv2entcnt, 1);
 err_counter:
        free_pages((unsigned long)domain->pgtable, 2);
 err_dma_cookie:
@@ -897,6 +914,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
        }
 
        if (lv1ent_fault(sent)) {
+               dma_addr_t handle;
                sysmmu_pte_t *pent;
                bool need_flush_flpd_cache = lv1ent_zero(sent);
 
@@ -908,7 +926,12 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
                update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
                kmemleak_ignore(pent);
                *pgcounter = NUM_LV2ENTRIES;
-               dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);
+               handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
+                                       DMA_TO_DEVICE);
+               if (dma_mapping_error(dma_dev, handle)) {
+                       kmem_cache_free(lv2table_kmem_cache, pent);
+                       return ERR_PTR(-EADDRINUSE);
+               }
 
                /*
                 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
@@ -1231,9 +1254,21 @@ static int exynos_iommu_add_device(struct device *dev)
 
 static void exynos_iommu_remove_device(struct device *dev)
 {
+       struct exynos_iommu_owner *owner = dev->archdata.iommu;
+
        if (!has_sysmmu(dev))
                return;
 
+       if (owner->domain) {
+               struct iommu_group *group = iommu_group_get(dev);
+
+               if (group) {
+                       WARN_ON(owner->domain !=
+                               iommu_group_default_domain(group));
+                       exynos_iommu_detach_device(owner->domain, dev);
+                       iommu_group_put(group);
+               }
+       }
        iommu_group_remove_device(dev);
 }
 
@@ -1242,7 +1277,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
 {
        struct exynos_iommu_owner *owner = dev->archdata.iommu;
        struct platform_device *sysmmu = of_find_device_by_node(spec->np);
-       struct sysmmu_drvdata *data;
+       struct sysmmu_drvdata *data, *entry;
 
        if (!sysmmu)
                return -ENODEV;
@@ -1261,6 +1296,10 @@ static int exynos_iommu_of_xlate(struct device *dev,
                dev->archdata.iommu = owner;
        }
 
+       list_for_each_entry(entry, &owner->controllers, owner_node)
+               if (entry == data)
+                       return 0;
+
        list_add_tail(&data->owner_node, &owner->controllers);
        data->master = dev;
 
index bce59a53c2a6db08fced9d0366516ea58566976b..f5e02f8e737113123991607219ad23a12b2d1c54 100644 (file)
@@ -548,7 +548,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
-static const struct iommu_ops intel_iommu_ops;
+const struct iommu_ops intel_iommu_ops;
 
 static bool translation_pre_enabled(struct intel_iommu *iommu)
 {
@@ -1145,7 +1145,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
                if (!dma_pte_present(pte) || dma_pte_superpage(pte))
                        goto next;
 
-               level_pfn = pfn & level_mask(level - 1);
+               level_pfn = pfn & level_mask(level);
                level_pte = phys_to_virt(dma_pte_addr(pte));
 
                if (level > 2)
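
The one-line dma_pte_free_level() fix corrects the alignment mask: a PTE at `level` covers level_size(level) pages, so the first pfn it maps is pfn & level_mask(level). Masking with level - 1 aligned to the next-smaller granule and left low-order bits set, so the fully-covered test below could misjudge a partially covered directory and free page-table pages still in use. In terms of this file's helpers (LEVEL_STRIDE gives 9 index bits per level):

        level_pfn = pfn & level_mask(level); /* == pfn & ~(level_size(level) - 1) */
        /* this entry spans [level_pfn, level_pfn + level_size(level) - 1] */
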
@@ -3326,13 +3326,14 @@ static int __init init_dmars(void)
        iommu_identity_mapping |= IDENTMAP_GFX;
 #endif
 
+       check_tylersburg_isoch();
+
        if (iommu_identity_mapping) {
                ret = si_domain_init(hw_pass_through);
                if (ret)
                        goto free_iommu;
        }
 
-       check_tylersburg_isoch();
 
        /*
         * If we copied translations from a previous kernel in the kdump
@@ -4868,10 +4869,13 @@ int __init intel_iommu_init(void)
 
        init_iommu_pm_ops();
 
-       for_each_active_iommu(iommu, drhd)
-               iommu->iommu_dev = iommu_device_create(NULL, iommu,
-                                                      intel_iommu_groups,
-                                                      "%s", iommu->name);
+       for_each_active_iommu(iommu, drhd) {
+               iommu_device_sysfs_add(&iommu->iommu, NULL,
+                                      intel_iommu_groups,
+                                      "%s", iommu->name);
+               iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
+               iommu_device_register(&iommu->iommu);
+       }
 
        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
        bus_register_notifier(&pci_bus_type, &device_nb);
@@ -5193,7 +5197,7 @@ static int intel_iommu_add_device(struct device *dev)
        if (!iommu)
                return -ENODEV;
 
-       iommu_device_link(iommu->iommu_dev, dev);
+       iommu_device_link(&iommu->iommu, dev);
 
        group = iommu_group_get_for_dev(dev);
 
@@ -5215,7 +5219,7 @@ static void intel_iommu_remove_device(struct device *dev)
 
        iommu_group_remove_device(dev);
 
-       iommu_device_unlink(iommu->iommu_dev, dev);
+       iommu_device_unlink(&iommu->iommu, dev);
 }
 
 static void intel_iommu_get_resv_regions(struct device *device,
@@ -5386,7 +5390,7 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
 }
 #endif /* CONFIG_INTEL_IOMMU_SVM */
 
-static const struct iommu_ops intel_iommu_ops = {
+const struct iommu_ops intel_iommu_ops = {
        .capable                = intel_iommu_capable,
        .domain_alloc           = intel_iommu_domain_alloc,
        .domain_free            = intel_iommu_domain_free,
index 39b2d9127dbf80f49a8432610da217c9dabb042e..c58351ed61c14309c7a72346bd56f659a7039637 100644 (file)
@@ -50,85 +50,76 @@ static int __init iommu_dev_init(void)
 postcore_initcall(iommu_dev_init);
 
 /*
- * Create an IOMMU device and return a pointer to it.  IOMMU specific
- * attributes can be provided as an attribute group, allowing a unique
- * namespace per IOMMU type.
+ * Init the struct device for the IOMMU. IOMMU specific attributes can
+ * be provided as an attribute group, allowing a unique namespace per
+ * IOMMU type.
  */
-struct device *iommu_device_create(struct device *parent, void *drvdata,
-                                  const struct attribute_group **groups,
-                                  const char *fmt, ...)
+int iommu_device_sysfs_add(struct iommu_device *iommu,
+                          struct device *parent,
+                          const struct attribute_group **groups,
+                          const char *fmt, ...)
 {
-       struct device *dev;
        va_list vargs;
        int ret;
 
-       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-       if (!dev)
-               return ERR_PTR(-ENOMEM);
+       device_initialize(&iommu->dev);
 
-       device_initialize(dev);
-
-       dev->class = &iommu_class;
-       dev->parent = parent;
-       dev->groups = groups;
-       dev_set_drvdata(dev, drvdata);
+       iommu->dev.class = &iommu_class;
+       iommu->dev.parent = parent;
+       iommu->dev.groups = groups;
 
        va_start(vargs, fmt);
-       ret = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
+       ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
        va_end(vargs);
        if (ret)
                goto error;
 
-       ret = device_add(dev);
+       ret = device_add(&iommu->dev);
        if (ret)
                goto error;
 
-       return dev;
+       return 0;
 
 error:
-       put_device(dev);
-       return ERR_PTR(ret);
+       put_device(&iommu->dev);
+       return ret;
 }
 
-void iommu_device_destroy(struct device *dev)
+void iommu_device_sysfs_remove(struct iommu_device *iommu)
 {
-       if (!dev || IS_ERR(dev))
-               return;
-
-       device_unregister(dev);
+       device_unregister(&iommu->dev);
 }
-
 /*
  * IOMMU drivers can indicate a device is managed by a given IOMMU using
  * this interface.  A link to the device will be created in the "devices"
  * directory of the IOMMU device in sysfs and an "iommu" link will be
  * created under the linked device, pointing back at the IOMMU device.
  */
-int iommu_device_link(struct device *dev, struct device *link)
+int iommu_device_link(struct iommu_device *iommu, struct device *link)
 {
        int ret;
 
-       if (!dev || IS_ERR(dev))
+       if (!iommu || IS_ERR(iommu))
                return -ENODEV;
 
-       ret = sysfs_add_link_to_group(&dev->kobj, "devices",
+       ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
                                      &link->kobj, dev_name(link));
        if (ret)
                return ret;
 
-       ret = sysfs_create_link_nowarn(&link->kobj, &dev->kobj, "iommu");
+       ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
        if (ret)
-               sysfs_remove_link_from_group(&dev->kobj, "devices",
+               sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
                                             dev_name(link));
 
        return ret;
 }
 
-void iommu_device_unlink(struct device *dev, struct device *link)
+void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
 {
-       if (!dev || IS_ERR(dev))
+       if (!iommu || IS_ERR(iommu))
                return;
 
        sysfs_remove_link(&link->kobj, "iommu");
-       sysfs_remove_link_from_group(&dev->kobj, "devices", dev_name(link));
+       sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
 }
index c37d701ddaa2bf9683ca14b065163586cedb4c3e..8ea14f41a979fd4e72e3a6093e5fa8d2a0eff24a 100644 (file)
@@ -55,7 +55,7 @@ struct iommu_group {
        struct iommu_domain *domain;
 };
 
-struct iommu_device {
+struct group_device {
        struct list_head list;
        struct device *dev;
        char *name;
@@ -83,6 +83,25 @@ struct iommu_group_attribute iommu_group_attr_##_name =              \
 #define to_iommu_group(_kobj)          \
        container_of(_kobj, struct iommu_group, kobj)
 
+static LIST_HEAD(iommu_device_list);
+static DEFINE_SPINLOCK(iommu_device_lock);
+
+int iommu_device_register(struct iommu_device *iommu)
+{
+       spin_lock(&iommu_device_lock);
+       list_add_tail(&iommu->list, &iommu_device_list);
+       spin_unlock(&iommu_device_lock);
+
+       return 0;
+}
+
+void iommu_device_unregister(struct iommu_device *iommu)
+{
+       spin_lock(&iommu_device_lock);
+       list_del(&iommu->list);
+       spin_unlock(&iommu_device_lock);
+}
+
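
iommu_device_register()/unregister() keep registered instances on a plain spinlock-protected global list; this replaces the ad-hoc iommu_instance list removed further down. Because the fwnode and ops now live directly in struct iommu_device, registration needs no allocation and unregistration is a bare list_del(). The presumed field layout, matching the accessors used throughout this merge:

        struct iommu_device {
                struct list_head list;          /* linkage on iommu_device_list */
                const struct iommu_ops *ops;    /* iommu_device_set_ops()       */
                struct fwnode_handle *fwnode;   /* iommu_device_set_fwnode()    */
                struct device dev;              /* sysfs representation         */
        };
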
 static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type);
 static int __iommu_attach_device(struct iommu_domain *domain,
@@ -218,7 +237,7 @@ iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
 int iommu_get_group_resv_regions(struct iommu_group *group,
                                 struct list_head *head)
 {
-       struct iommu_device *device;
+       struct group_device *device;
        int ret = 0;
 
        mutex_lock(&group->mutex);
@@ -511,7 +530,7 @@ out:
 int iommu_group_add_device(struct iommu_group *group, struct device *dev)
 {
        int ret, i = 0;
-       struct iommu_device *device;
+       struct group_device *device;
 
        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
@@ -520,36 +539,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
        device->dev = dev;
 
        ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
-       if (ret) {
-               kfree(device);
-               return ret;
-       }
+       if (ret)
+               goto err_free_device;
 
        device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
 rename:
        if (!device->name) {
-               sysfs_remove_link(&dev->kobj, "iommu_group");
-               kfree(device);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err_remove_link;
        }
 
        ret = sysfs_create_link_nowarn(group->devices_kobj,
                                       &dev->kobj, device->name);
        if (ret) {
-               kfree(device->name);
                if (ret == -EEXIST && i >= 0) {
                        /*
                         * Account for the slim chance of collision
                         * and append an instance to the name.
                         */
+                       kfree(device->name);
                        device->name = kasprintf(GFP_KERNEL, "%s.%d",
                                                 kobject_name(&dev->kobj), i++);
                        goto rename;
                }
-
-               sysfs_remove_link(&dev->kobj, "iommu_group");
-               kfree(device);
-               return ret;
+               goto err_free_name;
        }
 
        kobject_get(group->devices_kobj);
@@ -561,8 +574,10 @@ rename:
        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
        if (group->domain)
-               __iommu_attach_device(group->domain, dev);
+               ret = __iommu_attach_device(group->domain, dev);
        mutex_unlock(&group->mutex);
+       if (ret)
+               goto err_put_group;
 
        /* Notify any listeners about change to group. */
        blocking_notifier_call_chain(&group->notifier,
@@ -573,6 +588,21 @@ rename:
        pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
 
        return 0;
+
+err_put_group:
+       mutex_lock(&group->mutex);
+       list_del(&device->list);
+       mutex_unlock(&group->mutex);
+       dev->iommu_group = NULL;
+       kobject_put(group->devices_kobj);
+err_free_name:
+       kfree(device->name);
+err_remove_link:
+       sysfs_remove_link(&dev->kobj, "iommu_group");
+err_free_device:
+       kfree(device);
+       pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_group_add_device);
 
@@ -586,7 +616,7 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device);
 void iommu_group_remove_device(struct device *dev)
 {
        struct iommu_group *group = dev->iommu_group;
-       struct iommu_device *tmp_device, *device = NULL;
+       struct group_device *tmp_device, *device = NULL;
 
        pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);
 
@@ -621,7 +651,7 @@ EXPORT_SYMBOL_GPL(iommu_group_remove_device);
 
 static int iommu_group_device_count(struct iommu_group *group)
 {
-       struct iommu_device *entry;
+       struct group_device *entry;
        int ret = 0;
 
        list_for_each_entry(entry, &group->devices, list)
@@ -644,7 +674,7 @@ static int iommu_group_device_count(struct iommu_group *group)
 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
                                      int (*fn)(struct device *, void *))
 {
-       struct iommu_device *device;
+       struct group_device *device;
        int ret = 0;
 
        list_for_each_entry(device, &group->devices, list) {
@@ -1783,43 +1813,18 @@ out:
        return ret;
 }
 
-struct iommu_instance {
-       struct list_head list;
-       struct fwnode_handle *fwnode;
-       const struct iommu_ops *ops;
-};
-static LIST_HEAD(iommu_instance_list);
-static DEFINE_SPINLOCK(iommu_instance_lock);
-
-void iommu_register_instance(struct fwnode_handle *fwnode,
-                            const struct iommu_ops *ops)
-{
-       struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-
-       if (WARN_ON(!iommu))
-               return;
-
-       of_node_get(to_of_node(fwnode));
-       INIT_LIST_HEAD(&iommu->list);
-       iommu->fwnode = fwnode;
-       iommu->ops = ops;
-       spin_lock(&iommu_instance_lock);
-       list_add_tail(&iommu->list, &iommu_instance_list);
-       spin_unlock(&iommu_instance_lock);
-}
-
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
 {
-       struct iommu_instance *instance;
        const struct iommu_ops *ops = NULL;
+       struct iommu_device *iommu;
 
-       spin_lock(&iommu_instance_lock);
-       list_for_each_entry(instance, &iommu_instance_list, list)
-               if (instance->fwnode == fwnode) {
-                       ops = instance->ops;
+       spin_lock(&iommu_device_lock);
+       list_for_each_entry(iommu, &iommu_device_list, list)
+               if (iommu->fwnode == fwnode) {
+                       ops = iommu->ops;
                        break;
                }
-       spin_unlock(&iommu_instance_lock);
+       spin_unlock(&iommu_device_lock);
        return ops;
 }
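
iommu_ops_from_fwnode() is the renamed lookup half of the old instance API: given a firmware node it walks the registry populated above and hands back the matching driver's ops. Callers such as of_iommu and the ACPI IORT code use it roughly like this (sketch):

        const struct iommu_ops *ops;

        ops = iommu_ops_from_fwnode(fwnode);
        if (!ops)
                return NULL;    /* IOMMU not registered (yet); caller may defer */
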
 
index 080beca0197dcec22802dfc7fc0aebbbfec3c023..b7268a14184f220bef244f38bfcc2d13900c443a 100644 (file)
@@ -62,7 +62,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
        else {
                struct rb_node *prev_node = rb_prev(iovad->cached32_node);
                struct iova *curr_iova =
-                       container_of(iovad->cached32_node, struct iova, node);
+                       rb_entry(iovad->cached32_node, struct iova, node);
                *limit_pfn = curr_iova->pfn_lo - 1;
                return prev_node;
        }
@@ -86,11 +86,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
        if (!iovad->cached32_node)
                return;
        curr = iovad->cached32_node;
-       cached_iova = container_of(curr, struct iova, node);
+       cached_iova = rb_entry(curr, struct iova, node);
 
        if (free->pfn_lo >= cached_iova->pfn_lo) {
                struct rb_node *node = rb_next(&free->node);
-               struct iova *iova = container_of(node, struct iova, node);
+               struct iova *iova = rb_entry(node, struct iova, node);
 
                /* only cache if it's below 32bit pfn */
                if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
@@ -125,7 +125,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
        curr = __get_cached_rbnode(iovad, &limit_pfn);
        prev = curr;
        while (curr) {
-               struct iova *curr_iova = container_of(curr, struct iova, node);
+               struct iova *curr_iova = rb_entry(curr, struct iova, node);
 
                if (limit_pfn < curr_iova->pfn_lo)
                        goto move_left;
@@ -171,8 +171,7 @@ move_left:
 
                /* Figure out where to put new node */
                while (*entry) {
-                       struct iova *this = container_of(*entry,
-                                                       struct iova, node);
+                       struct iova *this = rb_entry(*entry, struct iova, node);
                        parent = *entry;
 
                        if (new->pfn_lo < this->pfn_lo)
@@ -201,7 +200,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        /* Figure out where to put new node */
        while (*new) {
-               struct iova *this = container_of(*new, struct iova, node);
+               struct iova *this = rb_entry(*new, struct iova, node);
 
                parent = *new;
 
@@ -311,7 +310,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
        assert_spin_locked(&iovad->iova_rbtree_lock);
 
        while (node) {
-               struct iova *iova = container_of(node, struct iova, node);
+               struct iova *iova = rb_entry(node, struct iova, node);
 
                /* If pfn falls within iova's range, return iova */
                if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
@@ -463,7 +462,7 @@ void put_iova_domain(struct iova_domain *iovad)
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        node = rb_first(&iovad->rbroot);
        while (node) {
-               struct iova *iova = container_of(node, struct iova, node);
+               struct iova *iova = rb_entry(node, struct iova, node);
 
                rb_erase(node, &iovad->rbroot);
                free_iova_mem(iova);
@@ -477,7 +476,7 @@ static int
 __is_range_overlap(struct rb_node *node,
        unsigned long pfn_lo, unsigned long pfn_hi)
 {
-       struct iova *iova = container_of(node, struct iova, node);
+       struct iova *iova = rb_entry(node, struct iova, node);
 
        if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
                return 1;
@@ -541,7 +540,7 @@ reserve_iova(struct iova_domain *iovad,
        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
        for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
-                       iova = container_of(node, struct iova, node);
+                       iova = rb_entry(node, struct iova, node);
                        __adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
                        if ((pfn_lo >= iova->pfn_lo) &&
                                (pfn_hi <= iova->pfn_hi))
@@ -578,7 +577,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 
        spin_lock_irqsave(&from->iova_rbtree_lock, flags);
        for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
-               struct iova *iova = container_of(node, struct iova, node);
+               struct iova *iova = rb_entry(node, struct iova, node);
                struct iova *new_iova;
 
                new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
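
The iova.c hunks are purely cosmetic: rb_entry() is the rbtree-flavoured spelling of container_of(), defined in <linux/rbtree.h> as

        #define rb_entry(ptr, type, member) container_of(ptr, type, member)

so the conversion changes no behaviour; it just states the intent ("this rb_node sits inside a struct iova") in the idiomatic way.
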
index ace331da6459473685016aa8ac53601fe9c8ca84..b7e14ee863f92446997a66fc4b7532f1b8d93355 100644 (file)
@@ -313,6 +313,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
        domain->cfg.ias = 32;
        domain->cfg.oas = 40;
        domain->cfg.tlb = &ipmmu_gather_ops;
+       domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
+       domain->io_domain.geometry.force_aperture = true;
        /*
         * TODO: Add support for coherent walk through CCI with DVM and remove
         * cache handling. For now, delegate it to the io-pgtable code.
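
The ipmmu-vmsa addition publishes the domain's addressable window: with force_aperture set, the core DMA-IOMMU layer confines IOVA allocation to [aperture_start, aperture_end], which matters here because the page tables are configured for 32-bit input addresses (cfg.ias = 32 just above; aperture_start stays 0 from the zeroed allocation):

        domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
        domain->io_domain.geometry.force_aperture = true;       /* enforce it */
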
index b09692bb5b0a209c321401253a853c652ed374e2..d0448353d5011dae8376f43569e169c8051001db 100644 (file)
@@ -371,6 +371,58 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
        return 0;
 }
 
+/* Must be called under msm_iommu_lock */
+static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
+{
+       struct msm_iommu_dev *iommu, *ret = NULL;
+       struct msm_iommu_ctx_dev *master;
+
+       list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
+               master = list_first_entry(&iommu->ctx_list,
+                                         struct msm_iommu_ctx_dev,
+                                         list);
+               if (master->of_node == dev->of_node) {
+                       ret = iommu;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+static int msm_iommu_add_device(struct device *dev)
+{
+       struct msm_iommu_dev *iommu;
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&msm_iommu_lock, flags);
+
+       iommu = find_iommu_for_dev(dev);
+       if (iommu)
+               iommu_device_link(&iommu->iommu, dev);
+       else
+               ret = -ENODEV;
+
+       spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
+       return ret;
+}
+
+static void msm_iommu_remove_device(struct device *dev)
+{
+       struct msm_iommu_dev *iommu;
+       unsigned long flags;
+
+       spin_lock_irqsave(&msm_iommu_lock, flags);
+
+       iommu = find_iommu_for_dev(dev);
+       if (iommu)
+               iommu_device_unlink(&iommu->iommu, dev);
+
+       spin_unlock_irqrestore(&msm_iommu_lock, flags);
+}
+
 static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
        int ret = 0;
@@ -646,6 +698,8 @@ static struct iommu_ops msm_iommu_ops = {
        .unmap = msm_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = msm_iommu_iova_to_phys,
+       .add_device = msm_iommu_add_device,
+       .remove_device = msm_iommu_remove_device,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
 };
@@ -653,6 +707,7 @@ static struct iommu_ops msm_iommu_ops = {
 static int msm_iommu_probe(struct platform_device *pdev)
 {
        struct resource *r;
+       resource_size_t ioaddr;
        struct msm_iommu_dev *iommu;
        int ret, par, val;
 
@@ -696,6 +751,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
                ret = PTR_ERR(iommu->base);
                goto fail;
        }
+       ioaddr = r->start;
 
        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
@@ -737,7 +793,22 @@ static int msm_iommu_probe(struct platform_device *pdev)
        }
 
        list_add(&iommu->dev_node, &qcom_iommu_devices);
-       of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);
+
+       ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
+                                    "msm-smmu.%pa", &ioaddr);
+       if (ret) {
+               pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
+               goto fail;
+       }
+
+       iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
+       iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
+
+       ret = iommu_device_register(&iommu->iommu);
+       if (ret) {
+               pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
+               goto fail;
+       }
 
        pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                iommu->base, iommu->irq, iommu->ncb);
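
The probe rework above follows the iommu_device registration sequence used throughout this merge: embed a struct iommu_device in the driver state, expose it in sysfs, attach the ops and the firmware node, then register with the core. A condensed sketch of that order, with error unwinding omitted and all my_* names illustrative:

    #include <linux/iommu.h>
    #include <linux/platform_device.h>

    struct my_iommu {
            struct iommu_device iommu;      /* handle embedded in driver state */
            /* ... driver-private fields ... */
    };

    static int my_iommu_register(struct my_iommu *m,
                                 struct platform_device *pdev,
                                 const struct iommu_ops *ops,
                                 resource_size_t ioaddr)
    {
            int ret;

            ret = iommu_device_sysfs_add(&m->iommu, &pdev->dev, NULL,
                                         "my-iommu.%pa", &ioaddr);
            if (ret)
                    return ret;

            iommu_device_set_ops(&m->iommu, ops);
            iommu_device_set_fwnode(&m->iommu, &pdev->dev.of_node->fwnode);

            return iommu_device_register(&m->iommu);
    }
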
index 4ca25d50d679687d81aaac8b0913e3e1a18f6768..ae92d2779c420ad46aaee5afe3a13c3041f78b51 100644 (file)
@@ -19,6 +19,7 @@
 #define MSM_IOMMU_H
 
 #include <linux/interrupt.h>
+#include <linux/iommu.h>
 #include <linux/clk.h>
 
 /* Sharability attributes of MSM IOMMU mappings */
@@ -68,6 +69,8 @@ struct msm_iommu_dev {
        struct list_head dom_node;
        struct list_head ctx_list;
        DECLARE_BITMAP(context_map, IOMMU_MAX_CBS);
+
+       struct iommu_device iommu;
 };
 
 /**
index 1479c76ece9ec31793ec4d0f78a5611d3f09a6cb..5d14cd15198db5cb6361d060abf208260c086ebe 100644 (file)
@@ -360,11 +360,15 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
 
 static int mtk_iommu_add_device(struct device *dev)
 {
+       struct mtk_iommu_data *data;
        struct iommu_group *group;
 
        if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
                return -ENODEV; /* Not an iommu client device */
 
+       data = dev->iommu_fwspec->iommu_priv;
+       iommu_device_link(&data->iommu, dev);
+
        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);
@@ -375,9 +379,14 @@ static int mtk_iommu_add_device(struct device *dev)
 
 static void mtk_iommu_remove_device(struct device *dev)
 {
+       struct mtk_iommu_data *data;
+
        if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
                return;
 
+       data = dev->iommu_fwspec->iommu_priv;
+       iommu_device_unlink(&data->iommu, dev);
+
        iommu_group_remove_device(dev);
        iommu_fwspec_free(dev);
 }
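
iommu_device_link() and iommu_device_unlink() maintain the sysfs cross-references between an IOMMU and its client devices: the client gains an "iommu" symlink and the IOMMU gains an entry under its "devices" group. A minimal sketch of the add_device shape used above (my_add_device() is illustrative):

    #include <linux/err.h>
    #include <linux/iommu.h>

    static int my_add_device(struct iommu_device *iommu, struct device *dev)
    {
            struct iommu_group *group;

            iommu_device_link(iommu, dev);          /* sysfs cross-links */

            group = iommu_group_get_for_dev(dev);   /* core group handling */
            if (IS_ERR(group))
                    return PTR_ERR(group);

            iommu_group_put(group);
            return 0;
    }
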
@@ -497,6 +506,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
        struct mtk_iommu_data   *data;
        struct device           *dev = &pdev->dev;
        struct resource         *res;
+       resource_size_t         ioaddr;
        struct component_match  *match = NULL;
        void                    *protect;
        int                     i, larb_nr, ret;
@@ -519,6 +529,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
        data->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->base))
                return PTR_ERR(data->base);
+       ioaddr = res->start;
 
        data->irq = platform_get_irq(pdev, 0);
        if (data->irq < 0)
@@ -567,6 +578,18 @@ static int mtk_iommu_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
+                                    "mtk-iommu.%pa", &ioaddr);
+       if (ret)
+               return ret;
+
+       iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
+       iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
+
+       ret = iommu_device_register(&data->iommu);
+       if (ret)
+               return ret;
+
        if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
 
@@ -577,6 +600,9 @@ static int mtk_iommu_remove(struct platform_device *pdev)
 {
        struct mtk_iommu_data *data = platform_get_drvdata(pdev);
 
+       iommu_device_sysfs_remove(&data->iommu);
+       iommu_device_unregister(&data->iommu);
+
        if (iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, NULL);
 
@@ -655,7 +681,6 @@ static int mtk_iommu_init_fn(struct device_node *np)
                return ret;
        }
 
-       of_iommu_set_ops(np, &mtk_iommu_ops);
        return 0;
 }
 
index 50177f738e4e012251795149241ecaff890dbbd7..2a28eadeea0ec3cf2ad363f7076f8555245520a2 100644 (file)
@@ -47,6 +47,8 @@ struct mtk_iommu_data {
        struct iommu_group              *m4u_group;
        struct mtk_smi_iommu            smi_imu;      /* SMI larb iommu info */
        bool                            enable_4GB;
+
+       struct iommu_device             iommu;
 };
 
 static inline int compare_of(struct device *dev, void *data)
index 0f57ddc4ecc274cbbe2be26618ec9fa90234ea9d..2683e9fc0dcf5cc0886034b7fccafb8861d958f1 100644 (file)
@@ -127,7 +127,7 @@ static const struct iommu_ops
                           "iommu-map-mask", &iommu_spec.np, iommu_spec.args))
                return NULL;
 
-       ops = of_iommu_get_ops(iommu_spec.np);
+       ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode);
        if (!ops || !ops->of_xlate ||
            iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) ||
            ops->of_xlate(&pdev->dev, &iommu_spec))
@@ -157,7 +157,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
                                           "#iommu-cells", idx,
                                           &iommu_spec)) {
                np = iommu_spec.np;
-               ops = of_iommu_get_ops(np);
+               ops = iommu_ops_from_fwnode(&np->fwnode);
 
                if (!ops || !ops->of_xlate ||
                    iommu_fwspec_init(dev, &np->fwnode, ops) ||
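
of_iommu_get_ops() and of_iommu_set_ops() kept a private list keyed by device_node; after this series the lookup goes through the generic fwnode handle recorded at registration time via iommu_device_set_fwnode(), so the same path can serve both DT and, through IORT, ACPI. A one-line sketch of the conversion (lookup_ops() is an illustrative wrapper):

    #include <linux/iommu.h>
    #include <linux/of.h>

    /* Resolve a DT-described IOMMU's ops via its embedded fwnode. */
    static const struct iommu_ops *lookup_ops(struct device_node *np)
    {
            return iommu_ops_from_fwnode(&np->fwnode);
    }
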
index 23909804ffb840d3187f21f67180a634a769425e..0def99590d162ebcfb86a16a6b9d5adf96f19cb6 100644 (file)
@@ -2733,7 +2733,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
                if (intmask & SDHCI_INT_RETUNE)
                        mmc_retune_needed(host->mmc);
 
-               if (intmask & SDHCI_INT_CARD_INT) {
+               if ((intmask & SDHCI_INT_CARD_INT) &&
+                   (host->ier & SDHCI_INT_CARD_INT)) {
                        sdhci_enable_sdio_irq_nolock(host, false);
                        host->thread_isr |= SDHCI_INT_CARD_INT;
                        result = IRQ_WAKE_THREAD;
index c12d2618eebf76397b2e71eaeb6f2fafa938fea6..3872ab96b80a39eecbb1d0b8150a2e8288915e46 100644 (file)
@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
                if (skb == NULL)
                        break;
                np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+               if (pci_dma_mapping_error(np->pci_dev,
+                                         np->rx_info[i].mapping)) {
+                       dev_kfree_skb(skb);
+                       np->rx_info[i].skb = NULL;
+                       break;
+               }
                /* Grrr, we cannot offset to correctly align the IP header. */
                np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
        }
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 {
        struct netdev_private *np = netdev_priv(dev);
        unsigned int entry;
+       unsigned int prev_tx;
        u32 status;
-       int i;
+       int i, j;
 
        /*
         * be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
        }
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
+       prev_tx = np->cur_tx;
        entry = np->cur_tx % TX_RING_SIZE;
        for (i = 0; i < skb_num_frags(skb); i++) {
                int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
                                               skb_frag_size(this_frag),
                                               PCI_DMA_TODEVICE);
                }
+               if (pci_dma_mapping_error(np->pci_dev,
+                                         np->tx_info[entry].mapping)) {
+                       dev->stats.tx_dropped++;
+                       goto err_out;
+               }
 
                np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
                np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
                netif_stop_queue(dev);
 
        return NETDEV_TX_OK;
-}
 
+err_out:
+       entry = prev_tx % TX_RING_SIZE;
+       np->tx_info[entry].skb = NULL;
+       if (i > 0) {
+               pci_unmap_single(np->pci_dev,
+                                np->tx_info[entry].mapping,
+                                skb_first_frag_len(skb),
+                                PCI_DMA_TODEVICE);
+               np->tx_info[entry].mapping = 0;
+               entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+               for (j = 1; j < i; j++) {
+                       pci_unmap_single(np->pci_dev,
+                                        np->tx_info[entry].mapping,
+                                        skb_frag_size(
+                                               &skb_shinfo(skb)->frags[j-1]),
+                                        PCI_DMA_TODEVICE);
+                       entry++;
+               }
+       }
+       dev_kfree_skb_any(skb);
+       np->cur_tx = prev_tx;
+       return NETDEV_TX_OK;
+}
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
                                break;  /* Better luck next round. */
                        np->rx_info[entry].mapping =
                                pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                       if (pci_dma_mapping_error(np->pci_dev,
+                                               np->rx_info[entry].mapping)) {
+                               dev_kfree_skb(skb);
+                               np->rx_info[entry].skb = NULL;
+                               break;
+                       }
                        np->rx_ring[entry].rxaddr =
                                cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
                }
index c0fb80acc2dad4b91d3b3cb8198be50dc9597fbd..baba2db9d9c25988da94cb323e5d1a6832a12b51 100644 (file)
 #define DEFAULT_RX_RING_SIZE   512 /* must be power of 2 */
 #define MIN_RX_RING_SIZE       64
 #define MAX_RX_RING_SIZE       8192
-#define RX_RING_BYTES(bp)      (sizeof(struct macb_dma_desc)   \
+#define RX_RING_BYTES(bp)      (macb_dma_desc_get_size(bp)     \
                                 * (bp)->rx_ring_size)
 
 #define DEFAULT_TX_RING_SIZE   512 /* must be power of 2 */
 #define MIN_TX_RING_SIZE       64
 #define MAX_TX_RING_SIZE       4096
-#define TX_RING_BYTES(bp)      (sizeof(struct macb_dma_desc)   \
+#define TX_RING_BYTES(bp)      (macb_dma_desc_get_size(bp)     \
                                 * (bp)->tx_ring_size)
 
 /* level of occupied TX descriptors under which we wake up TX process */
  */
 #define MACB_HALT_TIMEOUT      1230
 
+/* The DMA buffer descriptor may be a different size
+ * depending on the hardware configuration.
+ */
+static unsigned int macb_dma_desc_get_size(struct macb *bp)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+               return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
+#endif
+       return sizeof(struct macb_dma_desc);
+}
+
+static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       /* The DMA buffer descriptor is 4 words long (instead of 2 words)
+        * for the 64-bit GEM.
+        */
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+               idx <<= 1;
+#endif
+       return idx;
+}
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
+{
+       return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+}
+#endif
+
 /* Ring buffer accessors */
 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 {
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
                                          unsigned int index)
 {
-       return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
+       index = macb_tx_ring_wrap(queue->bp, index);
+       index = macb_adj_dma_desc_idx(queue->bp, index);
+       return &queue->tx_ring[index];
 }
 
 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
        dma_addr_t offset;
 
        offset = macb_tx_ring_wrap(queue->bp, index) *
-                sizeof(struct macb_dma_desc);
+                       macb_dma_desc_get_size(queue->bp);
 
        return queue->tx_ring_dma + offset;
 }
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
 
 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
 {
-       return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
+       index = macb_rx_ring_wrap(bp, index);
+       index = macb_adj_dma_desc_idx(bp, index);
+       return &bp->rx_ring[index];
 }
 
 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
        }
 }
 
-static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
+static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
 {
-       desc->addr = (u32)addr;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       desc->addrh = (u32)(addr >> 32);
+       struct macb_dma_desc_64 *desc_64;
+
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+               desc_64 = macb_64b_desc(bp, desc);
+               desc_64->addrh = upper_32_bits(addr);
+       }
 #endif
+       desc->addr = lower_32_bits(addr);
+}
+
+static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
+{
+       dma_addr_t addr = 0;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       struct macb_dma_desc_64 *desc_64;
+
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+               desc_64 = macb_64b_desc(bp, desc);
+               addr = ((u64)(desc_64->addrh) << 32);
+       }
+#endif
+       addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+       return addr;
 }
 
 static void macb_tx_error_task(struct work_struct *work)
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
 
        /* Set end of TX queue */
        desc = macb_tx_desc(queue, 0);
-       macb_set_addr(desc, 0);
+       macb_set_addr(bp, desc, 0);
        desc->ctrl = MACB_BIT(TX_USED);
 
        /* Make descriptor updates visible to hardware */
        wmb();
 
        /* Reinitialize the TX desc queue */
-       queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+       queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+               queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
        /* Make TX ring reflect state of hardware */
        queue->tx_head = 0;
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
        unsigned int            entry;
        struct sk_buff          *skb;
        dma_addr_t              paddr;
+       struct macb_dma_desc *desc;
 
        while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
                          bp->rx_ring_size) > 0) {
@@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp)
                rmb();
 
                bp->rx_prepared_head++;
+               desc = macb_rx_desc(bp, entry);
 
                if (!bp->rx_skbuff[entry]) {
                        /* allocate sk_buff for this free entry in ring */
@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp)
 
                        if (entry == bp->rx_ring_size - 1)
                                paddr |= MACB_BIT(RX_WRAP);
-                       macb_set_addr(&(bp->rx_ring[entry]), paddr);
-                       bp->rx_ring[entry].ctrl = 0;
+                       macb_set_addr(bp, desc, paddr);
+                       desc->ctrl = 0;
 
                        /* properly align Ethernet header */
                        skb_reserve(skb, NET_IP_ALIGN);
                } else {
-                       bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
-                       bp->rx_ring[entry].ctrl = 0;
+                       desc->addr &= ~MACB_BIT(RX_USED);
+                       desc->ctrl = 0;
                }
        }
 
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
                bool rxused;
 
                entry = macb_rx_ring_wrap(bp, bp->rx_tail);
-               desc = &bp->rx_ring[entry];
+               desc = macb_rx_desc(bp, entry);
 
                /* Make hw descriptor updates visible to CPU */
                rmb();
 
                rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
-               addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               addr |= ((u64)(desc->addrh) << 32);
-#endif
+               addr = macb_get_addr(bp, desc);
                ctrl = desc->ctrl;
 
                if (!rxused)
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 static inline void macb_init_rx_ring(struct macb *bp)
 {
        dma_addr_t addr;
+       struct macb_dma_desc *desc = NULL;
        int i;
 
        addr = bp->rx_buffers_dma;
        for (i = 0; i < bp->rx_ring_size; i++) {
-               bp->rx_ring[i].addr = addr;
-               bp->rx_ring[i].ctrl = 0;
+               desc = macb_rx_desc(bp, i);
+               macb_set_addr(bp, desc, addr);
+               desc->ctrl = 0;
                addr += bp->rx_buffer_size;
        }
-       bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
+       desc->addr |= MACB_BIT(RX_WRAP);
        bp->rx_tail = 0;
 }
 
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
 
        for (tail = bp->rx_tail; budget > 0; tail++) {
                struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
-               u32 addr, ctrl;
+               u32 ctrl;
 
                /* Make hw descriptor updates visible to CPU */
                rmb();
 
-               addr = desc->addr;
                ctrl = desc->ctrl;
 
-               if (!(addr & MACB_BIT(RX_USED)))
+               if (!(desc->addr & MACB_BIT(RX_USED)))
                        break;
 
                if (ctrl & MACB_BIT(RX_SOF)) {
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
        i = tx_head;
        entry = macb_tx_ring_wrap(bp, i);
        ctrl = MACB_BIT(TX_USED);
-       desc = &queue->tx_ring[entry];
+       desc = macb_tx_desc(queue, entry);
        desc->ctrl = ctrl;
 
        if (lso_ctrl) {
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
                i--;
                entry = macb_tx_ring_wrap(bp, i);
                tx_skb = &queue->tx_skb[entry];
-               desc = &queue->tx_ring[entry];
+               desc = macb_tx_desc(queue, entry);
 
                ctrl = (u32)tx_skb->size;
                if (eof) {
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
                        ctrl |= MACB_BF(MSS_MFS, mss_mfs);
 
                /* Set TX buffer descriptor */
-               macb_set_addr(desc, tx_skb->mapping);
+               macb_set_addr(bp, desc, tx_skb->mapping);
                /* desc->addr must be visible to hardware before clearing
                 * 'TX_USED' bit in desc->ctrl.
                 */
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
                if (!skb)
                        continue;
 
-               desc = &bp->rx_ring[i];
-               addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               addr |= ((u64)(desc->addrh) << 32);
-#endif
+               desc = macb_rx_desc(bp, i);
+               addr = macb_get_addr(bp, desc);
+
                dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ out_err:
 static void gem_init_rings(struct macb *bp)
 {
        struct macb_queue *queue;
+       struct macb_dma_desc *desc = NULL;
        unsigned int q;
        int i;
 
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
                for (i = 0; i < bp->tx_ring_size; i++) {
-                       queue->tx_ring[i].addr = 0;
-                       queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+                       desc = macb_tx_desc(queue, i);
+                       macb_set_addr(bp, desc, 0);
+                       desc->ctrl = MACB_BIT(TX_USED);
                }
-               queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+               desc->ctrl |= MACB_BIT(TX_WRAP);
                queue->tx_head = 0;
                queue->tx_tail = 0;
        }
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
 static void macb_init_rings(struct macb *bp)
 {
        int i;
+       struct macb_dma_desc *desc = NULL;
 
        macb_init_rx_ring(bp);
 
        for (i = 0; i < bp->tx_ring_size; i++) {
-               bp->queues[0].tx_ring[i].addr = 0;
-               bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+               desc = macb_tx_desc(&bp->queues[0], i);
+               macb_set_addr(bp, desc, 0);
+               desc->ctrl = MACB_BIT(TX_USED);
        }
        bp->queues[0].tx_head = 0;
        bp->queues[0].tx_tail = 0;
-       bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+       desc->ctrl |= MACB_BIT(TX_WRAP);
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
                        dmacfg &= ~GEM_BIT(TXCOEN);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               dmacfg |= GEM_BIT(ADDR64);
+               if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+                       dmacfg |= GEM_BIT(ADDR64);
 #endif
                netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
                           dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
        macb_configure_dma(bp);
 
        /* Initialize TX and RX buffers */
-       macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
+       macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
+       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+               macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
 #endif
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-               queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+               queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-               queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+               if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+                       queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 
                /* Enable interrupts */
@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev)
                        queue->IMR  = GEM_IMR(hw_q - 1);
                        queue->TBQP = GEM_TBQP(hw_q - 1);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-                       queue->TBQPH = GEM_TBQPH(hw_q -1);
+                       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+                               queue->TBQPH = GEM_TBQPH(hw_q - 1);
 #endif
                } else {
                        /* queue0 uses legacy registers */
@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev)
                        queue->IMR  = MACB_IMR;
                        queue->TBQP = MACB_TBQP;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-                       queue->TBQPH = MACB_TBQPH;
+                       if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+                               queue->TBQPH = MACB_TBQPH;
 #endif
                }
 
@@ -2730,13 +2793,14 @@ static int macb_init(struct platform_device *pdev)
 static int at91ether_start(struct net_device *dev)
 {
        struct macb *lp = netdev_priv(dev);
+       struct macb_dma_desc *desc;
        dma_addr_t addr;
        u32 ctl;
        int i;
 
        lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
                                         (AT91ETHER_MAX_RX_DESCR *
-                                         sizeof(struct macb_dma_desc)),
+                                         macb_dma_desc_get_size(lp)),
                                         &lp->rx_ring_dma, GFP_KERNEL);
        if (!lp->rx_ring)
                return -ENOMEM;
@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev)
        if (!lp->rx_buffers) {
                dma_free_coherent(&lp->pdev->dev,
                                  AT91ETHER_MAX_RX_DESCR *
-                                 sizeof(struct macb_dma_desc),
+                                 macb_dma_desc_get_size(lp),
                                  lp->rx_ring, lp->rx_ring_dma);
                lp->rx_ring = NULL;
                return -ENOMEM;
@@ -2756,13 +2820,14 @@ static int at91ether_start(struct net_device *dev)
 
        addr = lp->rx_buffers_dma;
        for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
-               lp->rx_ring[i].addr = addr;
-               lp->rx_ring[i].ctrl = 0;
+               desc = macb_rx_desc(lp, i);
+               macb_set_addr(lp, desc, addr);
+               desc->ctrl = 0;
                addr += AT91ETHER_MAX_RBUFF_SZ;
        }
 
        /* Set the Wrap bit on the last descriptor */
-       lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+       desc->addr |= MACB_BIT(RX_WRAP);
 
        /* Reset buffer index */
        lp->rx_tail = 0;
@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev)
 
        dma_free_coherent(&lp->pdev->dev,
                          AT91ETHER_MAX_RX_DESCR *
-                         sizeof(struct macb_dma_desc),
+                         macb_dma_desc_get_size(lp),
                          lp->rx_ring, lp->rx_ring_dma);
        lp->rx_ring = NULL;
 
@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void at91ether_rx(struct net_device *dev)
 {
        struct macb *lp = netdev_priv(dev);
+       struct macb_dma_desc *desc;
        unsigned char *p_recv;
        struct sk_buff *skb;
        unsigned int pktlen;
 
-       while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+       desc = macb_rx_desc(lp, lp->rx_tail);
+       while (desc->addr & MACB_BIT(RX_USED)) {
                p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
-               pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+               pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
                skb = netdev_alloc_skb(dev, pktlen + 2);
                if (skb) {
                        skb_reserve(skb, 2);
@@ -2905,17 +2972,19 @@ static void at91ether_rx(struct net_device *dev)
                        lp->stats.rx_dropped++;
                }
 
-               if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+               if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
                        lp->stats.multicast++;
 
                /* reset ownership bit */
-               lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+               desc->addr &= ~MACB_BIT(RX_USED);
 
                /* wrap after last buffer */
                if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
                        lp->rx_tail = 0;
                else
                        lp->rx_tail++;
+
+               desc = macb_rx_desc(lp, lp->rx_tail);
        }
 }
 
@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev)
        device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
+       if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
                dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+               bp->hw_dma_cap = HW_DMA_CAP_64B;
+       } else
+               bp->hw_dma_cap = HW_DMA_CAP_32B;
 #endif
 
        spin_lock_init(&bp->lock);
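
The common thread in the macb hunks above: when the GEM reports 64-bit DMA addressing (the new DAW64 bit in DCFG6), every descriptor carries an extra two-word block for the high address bits, so ring sizing and indexing must use the runtime descriptor size and addresses must be split across the low and high words. A hedged sketch of the split the new helpers perform (set_desc_addr() is illustrative):

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* Write a dma_addr_t into a descriptor's low word and, when the
     * 64-bit extension is present, its high word. */
    static void set_desc_addr(u32 *addr_lo, u32 *addr_hi, bool has_64b,
                              dma_addr_t addr)
    {
            *addr_lo = lower_32_bits(addr);
            if (has_64b)
                    *addr_hi = upper_32_bits(addr);
    }
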
index d67adad67be1c097a339d993a866597b4d202f4d..fc8550a5d47f75df540521c27f07f5c2d03995f9 100644 (file)
 /* Bitfields in DCFG6. */
 #define GEM_PBUF_LSO_OFFSET                    27
 #define GEM_PBUF_LSO_SIZE                      1
+#define GEM_DAW64_OFFSET                       23
+#define GEM_DAW64_SIZE                         1
 
 /* Constants for CLK */
 #define MACB_CLK_DIV8                          0
 struct macb_dma_desc {
        u32     addr;
        u32     ctrl;
+};
+
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       u32     addrh;
-       u32     resvd;
-#endif
+enum macb_hw_dma_cap {
+       HW_DMA_CAP_32B,
+       HW_DMA_CAP_64B,
 };
 
+struct macb_dma_desc_64 {
+       u32 addrh;
+       u32 resvd;
+};
+#endif
+
 /* DMA descriptor bitfields */
 #define MACB_RX_USED_OFFSET                    0
 #define MACB_RX_USED_SIZE                      1
@@ -874,6 +884,10 @@ struct macb {
        unsigned int            jumbo_max_len;
 
        u32                     wol;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       enum macb_hw_dma_cap hw_dma_cap;
+#endif
 };
 
 static inline bool macb_is_gem(struct macb *bp)
index 67befedef7098ddbde763738eb7940116efa283e..578c7f8f11bf23add2ac4d3c2263e371b4509136 100644 (file)
@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
        int speed = 2;
 
        if (!xcv) {
-               dev_err(&xcv->pdev->dev,
-                       "XCV init not done, probe may have failed\n");
+               pr_err("XCV init not done, probe may have failed\n");
                return;
        }
 
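
The one-line change above fixes a NULL pointer dereference: the old dev_err() reached through xcv->pdev inside the very branch that runs only when xcv is NULL, whereas pr_err() needs no device argument. A sketch of the corrected pattern (struct ctx is illustrative):

    #include <linux/device.h>
    #include <linux/printk.h>

    struct ctx {
            struct device *dev;
    };

    static void report_not_ready(struct ctx *c)
    {
            if (!c) {
                    /* dev_err(c->dev, ...) here would dereference the
                     * NULL pointer we just tested for. */
                    pr_err("XCV init not done, probe may have failed\n");
                    return;
            }
    }
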
index 1a7f8ad7b9c6111ea2f8839a5d28c82af1ef13a8..cd49a54c538d5202f1bb0cb632b8fdb306a66989 100644 (file)
@@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
                status = -EPERM;
                goto err;
        }
-done:
+
+       /* Remember currently programmed MAC */
        ether_addr_copy(adapter->dev_mac, addr->sa_data);
+done:
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
        dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
        return 0;
@@ -3618,8 +3620,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
 {
        /* Don't delete MAC on BE3 VFs without FILTMGMT privilege  */
        if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-           check_privilege(adapter, BE_PRIV_FILTMGMT))
+           check_privilege(adapter, BE_PRIV_FILTMGMT)) {
                be_dev_mac_del(adapter, adapter->pmac_id[0]);
+               eth_zero_addr(adapter->dev_mac);
+       }
 
        be_clear_uc_list(adapter);
        be_clear_mc_list(adapter);
@@ -3773,12 +3777,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
        if (status)
                return status;
 
-       /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
-       if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-           check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+       /* Normally this condition is true, as ->dev_mac is zeroed.
+        * But on BE3 VFs the initial MAC is pre-programmed by the PF,
+        * so a subsequent be_dev_mac_add() can fail (after a fresh boot).
+        */
+       if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
+               int old_pmac_id = -1;
+
+               /* Remember old programmed MAC if any - can happen on BE3 VF */
+               if (!is_zero_ether_addr(adapter->dev_mac))
+                       old_pmac_id = adapter->pmac_id[0];
+
                status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
                if (status)
                        return status;
+
+               /* Delete the old programmed MAC as we successfully programmed
+                * a new MAC.
+                */
+               if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
+                       be_dev_mac_del(adapter, old_pmac_id);
+
                ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
        }
 
@@ -4552,6 +4571,10 @@ static int be_mac_setup(struct be_adapter *adapter)
 
                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+               /* Initial MAC for BE3 VFs is already programmed by PF */
+               if (BEx_chip(adapter) && be_virtfn(adapter))
+                       memcpy(adapter->dev_mac, mac, ETH_ALEN);
        }
 
        return 0;
index c1b6716679208a69bc66f017a68e5b6e35c60064..957bfc220978479a5ccee32b58ae26d4236fe939 100644 (file)
@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
                if (!rxb->page)
                        continue;
 
-               dma_unmap_single(rx_queue->dev, rxb->dma,
-                                PAGE_SIZE, DMA_FROM_DEVICE);
+               dma_unmap_page(rx_queue->dev, rxb->dma,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
                __free_page(rxb->page);
 
                rxb->page = NULL;
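
DMA API calls must be symmetric: memory mapped with dma_map_page() has to be released with dma_unmap_page(), not dma_unmap_single(), even though both often reach the same backend; CONFIG_DMA_API_DEBUG flags the mismatch. A minimal sketch of the paired calls (helper names are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    /* Page-granular RX buffers use the _page map/unmap variants. */
    static dma_addr_t map_rx_page(struct device *dev, struct page *page)
    {
            return dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
    }

    static void unmap_rx_page(struct device *dev, dma_addr_t addr)
    {
            dma_unmap_page(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE);
    }
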
index c7e939945259dc876b66cfedd0d85f9d7e90a914..53daa6ca5d83b60f7ad8632694658922921f82f5 100644 (file)
@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
        return -ETIMEDOUT;
 }
 
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
 {
        return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
                (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
index 0e8b7c44931f907ed881d093077e93b92ae0305d..8258d08acd8c2029a8bcb812dd5efd85d8c7b0f2 100644 (file)
@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
                return;
 
        mlx4_stop_catas_poll(dev);
+       if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+           mlx4_is_slave(dev)) {
+               /* In mlx4_remove_one on a VF */
+               u32 slave_read =
+                       swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+               if (mlx4_comm_internal_err(slave_read)) {
+                       mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+                                __func__);
+                       mlx4_enter_error_state(dev->persist);
+               }
+       }
        mutex_lock(&intf_mutex);
 
        list_for_each_entry(intf, &intf_list, list)
index 88ee7d8a59231a47d6b7aca2006f9780dbefa578..086920b615af7180e891893ffd00928c0bd0238f 100644 (file)
@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
 
 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
                    enum mlx4_port_type *type);
index 3797cc7c1288078298ec655921f9fc9f804df97e..caa837e5e2b991fc3666776d2050fe20b1c6c7f6 100644 (file)
@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
        if (cmd->cmdif_rev > CMD_IF_REV) {
                dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
                        CMD_IF_REV, cmd->cmdif_rev);
-               err = -ENOTSUPP;
+               err = -EOPNOTSUPP;
                goto err_free_page;
        }
 
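
The ENOTSUPP to EOPNOTSUPP conversion here recurs throughout the mlx5 hunks that follow. ENOTSUPP (524) is kernel-internal and has no uapi definition, so userspace tools print "Unknown error 524"; EOPNOTSUPP (95) renders as "Operation not supported". A sketch of the rule for syscall-reachable paths (check_cap() is illustrative):

    #include <linux/errno.h>
    #include <linux/types.h>

    /* ENOTSUPP must never escape to userspace; capability checks on
     * syscall-reachable paths should return EOPNOTSUPP instead. */
    static int check_cap(bool supported)
    {
            return supported ? 0 : -EOPNOTSUPP;
    }
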
index 951dbd58594dcd3b32b680f752c1105132d85ba8..d5ecb8f53fd43684f185d590c8dc5553a4f25ab4 100644 (file)
@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
 
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+                                   enum mlx5e_traffic_types tt);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
 
 static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 {
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 
 static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
 {
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 #else
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
index f0b460f47f2992caad4eec7ea0d655296a46e99c..0523ed47f597c715296c5ea843245625bf3dac62 100644 (file)
@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
        int i;
 
        if (!MLX5_CAP_GEN(priv->mdev, ets))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
        for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
        int err;
 
        if (!MLX5_CAP_GEN(priv->mdev, ets))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        err = mlx5e_dbcnl_validate_ets(netdev, ets);
        if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
        struct mlx5_core_dev *mdev = priv->mdev;
        struct ieee_ets ets;
        struct ieee_pfc pfc;
-       int err = -ENOTSUPP;
+       int err = -EOPNOTSUPP;
        int i;
 
        if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
 
+       if (!MLX5_CAP_GEN(priv->mdev, ets)) {
+               netdev_err(netdev, "%s, ets is not supported\n", __func__);
+               return;
+       }
+
        if (priority >= CEE_DCBX_MAX_PRIO) {
                netdev_err(netdev,
                           "%s, priority is out of range\n", __func__);
index 5197817e4b2f8a6b24af61b4fb08c271e69d1a5f..bb67863aa361168a8566349ef356d9a991d411be 100644 (file)
@@ -595,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
        if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation.usec;
        coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -620,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
        int i;
 
        if (!MLX5_CAP_GEN(mdev, cq_moderation))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        mutex_lock(&priv->state_lock);
 
@@ -980,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 
 static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 {
-       struct mlx5_core_dev *mdev = priv->mdev;
        void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-       int i;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+       int tt;
 
        MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-       mlx5e_build_tir_ctx_hash(tirc, priv);
 
-       for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
-               mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+       for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+               memset(tirc, 0, ctxlen);
+               mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+               mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+       }
 }
 
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -996,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+       bool hash_changed = false;
        void *in;
 
        if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1017,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
                mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
        }
 
-       if (key)
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+           hfunc != priv->params.rss_hfunc) {
+               priv->params.rss_hfunc = hfunc;
+               hash_changed = true;
+       }
+
+       if (key) {
                memcpy(priv->params.toeplitz_hash_key, key,
                       sizeof(priv->params.toeplitz_hash_key));
+               hash_changed = hash_changed ||
+                              priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+       }
 
-       if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-               priv->params.rss_hfunc = hfunc;
-
-       mlx5e_modify_tirs_hash(priv, in, inlen);
+       if (hash_changed)
+               mlx5e_modify_tirs_hash(priv, in, inlen);
 
        mutex_unlock(&priv->state_lock);
 
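
The reworked mlx5e_set_rxfh() above rewrites the TIR hash contexts only when hashing actually changed: a new hash function, or a new key while the active function is Toeplitz (the XOR hash ignores the key). The hash_changed bookkeeping reduces to this predicate (rss_needs_update() is illustrative):

    #include <linux/types.h>

    /* Reprogram hardware hashing only when the function changed, or the
     * key changed and the active function actually consumes the key. */
    static bool rss_needs_update(bool hfunc_changed, bool key_changed,
                                 bool using_toeplitz)
    {
            return hfunc_changed || (key_changed && using_toeplitz);
    }
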
@@ -1296,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        u32 mlx5_wol_mode;
 
        if (!wol_supported)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        if (wol->wolopts & ~wol_supported)
                return -EINVAL;
@@ -1426,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 
        if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
            !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        if (!rx_mode_changed)
                return 0;
@@ -1452,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
        bool reset;
 
        if (!MLX5_CAP_GEN(mdev, cqe_compression))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
                netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
index 1fe80de5d68f1f3cf09c6e32530cd32114580051..a0e5a69402b30a349b196eaa72ce1a413b5479b2 100644 (file)
@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
                                               MLX5_FLOW_NAMESPACE_KERNEL);
 
        if (!priv->fs.ns)
-               return -EINVAL;
+               return -EOPNOTSUPP;
 
        err = mlx5e_arfs_create_tables(priv);
        if (err) {
index d088effd7160355849faacead1326f2198d12e8d..f33f72d0237c1bafc702f4066dab31ab22963a47 100644 (file)
@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
        ns = mlx5_get_flow_namespace(priv->mdev,
                                     MLX5_FLOW_NAMESPACE_ETHTOOL);
        if (!ns)
-               return ERR_PTR(-ENOTSUPP);
+               return ERR_PTR(-EOPNOTSUPP);
 
        table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
                                                       flow_table_properties_nic_receive.log_max_ft_size)),
index 2b7dd315020cd9e1a21b28643621122695cd06dd..f14ca3385fdd683b12f434e289cc8e264040c1ed 100644 (file)
@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
 }
 
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+                                   enum mlx5e_traffic_types tt)
 {
+       void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                MLX5_HASH_FIELD_SEL_L4_SPORT |\
+                                MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
        MLX5_SET(tirc, tirc, rx_hash_fn,
                 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
        if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
                MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                memcpy(rss_key, priv->params.toeplitz_hash_key, len);
        }
+
+       switch (tt) {
+       case MLX5E_TT_IPV4_TCP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_TCP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV6_TCP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_TCP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV4_UDP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_UDP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV6_UDP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_UDP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV4_IPSEC_AH:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV6_IPSEC_AH:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV4_IPSEC_ESP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV6_IPSEC_ESP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV4:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP);
+               break;
+
+       case MLX5E_TT_IPV6:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP);
+               break;
+       default:
+               WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+       }
 }
 
 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
                                      enum mlx5e_traffic_types tt)
 {
-       void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
        MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
 
-#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                MLX5_HASH_FIELD_SEL_DST_IP   |\
-                                MLX5_HASH_FIELD_SEL_L4_SPORT |\
-                                MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                MLX5_HASH_FIELD_SEL_DST_IP   |\
-                                MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
        mlx5e_build_tir_ctx_lro(tirc, priv);
 
        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
        MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-       mlx5e_build_tir_ctx_hash(tirc, priv);
-
-       switch (tt) {
-       case MLX5E_TT_IPV4_TCP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_TCP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV6_TCP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_TCP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV4_UDP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_UDP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV6_UDP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_UDP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV4_IPSEC_AH:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV6_IPSEC_AH:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV4_IPSEC_ESP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV6_IPSEC_ESP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV4:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP);
-               break;
-
-       case MLX5E_TT_IPV6:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP);
-               break;
-       default:
-               WARN_ONCE(true,
-                         "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
-       }
+       mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
 }
 
 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 {
        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
            !MLX5_CAP_GEN(mdev, nic_flow_table) ||
            !MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
                               < 3) {
                mlx5_core_warn(mdev,
                               "Not creating net device, some required device capabilities are missing\n");
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
        if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
                mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
index 46bef6a26a8cdbebf268b6275271367c4109a77d..c5282b6aba8baf6a3c8f345c55764110cdfd6ff5 100644 (file)
@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   __be32 *saddr,
                                   int *out_ttl)
 {
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct rtable *rt;
        struct neighbour *n = NULL;
        int ttl;
@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
        return -EOPNOTSUPP;
 #endif
-
-       if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-               pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
-               ip_rt_put(rt);
-               return -EOPNOTSUPP;
-       }
+       /* if the egress device isn't on the same HW e-switch, we use the uplink */
+       if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
+               *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+       else
+               *out_dev = rt->dst.dev;
 
        ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
        *out_n = n;
        *saddr = fl4->saddr;
        *out_ttl = ttl;
-       *out_dev = rt->dst.dev;
 
        return 0;
 }
index f14d9c9ba77394b83aea50564afd3c762613467a..d0c8bf014453ea38736182c03ba7b2d9c5bcd4d7 100644 (file)
@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 
        if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
            !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
                  vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
-               return -ENOMEM;
+               return -EOPNOTSUPP;
        }
 
        flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
        if (!root_ns) {
                esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
-               return -EIO;
+               return -EOPNOTSUPP;
        }
 
        flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
        if (!root_ns) {
                esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
-               return -EIO;
+               return -EOPNOTSUPP;
        }
 
        flow_group_in = mlx5_vzalloc(inlen);
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
        if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
            !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
                esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
 
        if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
index 03293ed1cc22d2716ff5708dc2312b7291cc1899..595f7c7383b399440aedec593ae0fb0c37bb6748 100644 (file)
@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
        return 0;
 
 out_notsupp:
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
+               err = -EOPNOTSUPP;
                goto ns_err;
        }
 
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
        if (!ns) {
                esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
-               return -ENOMEM;
+               return -EOPNOTSUPP;
        }
 
        ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
                esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
                if (err1)
-                       esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+                       esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
        }
        if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
                if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
        int vport;
        int err;
 
+       /* disable PF RoCE so missed packets don't go through RoCE steering */
+       mlx5_dev_list_lock();
+       mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+       mlx5_dev_list_unlock();
+
        err = esw_create_offloads_fdb_table(esw, nvports);
        if (err)
-               return err;
+               goto create_fdb_err;
 
        err = esw_create_offloads_table(esw);
        if (err)
@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
                        goto err_reps;
        }
 
-       /* disable PF RoCE so missed packets don't go through RoCE steering */
-       mlx5_dev_list_lock();
-       mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-       mlx5_dev_list_unlock();
-
        return 0;
 
 err_reps:
@@ -717,6 +718,13 @@ create_fg_err:
 
 create_ft_err:
        esw_destroy_offloads_fdb_table(esw);
+
+create_fdb_err:
+       /* enable back PF RoCE */
+       mlx5_dev_list_lock();
+       mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+       mlx5_dev_list_unlock();
+
        return err;
 }
 
@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
-       /* enable back PF RoCE */
-       mlx5_dev_list_lock();
-       mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-       mlx5_dev_list_unlock();
-
        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
        if (err) {
@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
                        esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
        }
 
+       /* enable back PF RoCE */
+       mlx5_dev_list_lock();
+       mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+       mlx5_dev_list_unlock();
+
        return err;
 }
 
index c4478ecd8056e42de2c359eb7a2abfd9e6400090..b53fc85a2375778ddd02ac07d21d88b56c49e432 100644 (file)
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
                                                flow_table_properties_nic_receive.
                                                flow_modify_en);
        if (!atomic_mod_cap)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        opmod = 1;
 
        return  mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
index 0ac7a2fc916c438bc535b20d45964009747f0b33..6346a8f5883bcc911ef422cf572fd1891ddf73c9 100644 (file)
@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
        struct mlx5_flow_table *ft;
 
        ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
-       if (!ns)
+       if (WARN_ON(!ns))
                return -EINVAL;
        ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
        if (IS_ERR(ft)) {
index d01e9f21d4691ea497aa7ea0666c83e330c078bb..3c315eb8d270f6f94ecaea2c8ee4d78ed1244658 100644 (file)
@@ -807,7 +807,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
                return 0;
        }
 
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 
 
index d2ec9d232a70727df71d0c733f60c78a55415392..fd12e0a377a567c693c7f174d7762dd6071ff925 100644 (file)
@@ -620,7 +620,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
        u32 out[MLX5_ST_SZ_DW(qtct_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
                                    MLX5_REG_QETCR, 0, 1);
@@ -632,7 +632,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
        u32 in[MLX5_ST_SZ_DW(qtct_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        memset(in, 0, sizeof(in));
        return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
index 269e4401c342d1375e70a40ba9905dddf9b65cef..7129c30a2ab477d23be1b8b8d34e7190618e0f9f 100644 (file)
@@ -532,7 +532,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return -EACCES;
        if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        in = mlx5_vzalloc(inlen);
        if (!in)
index be3c91c7f211d94ad7386b77de73676933a46dcd..5484fd726d5af7f5f10708c57d062b9992be655d 100644 (file)
@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
 {
        void __iomem *ioaddr = hw->pcsr;
        u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+       u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
        int ret = 0;
 
+       /* Discard masked bits */
+       intr_status &= ~intr_mask;
+
        /* Not used events (e.g. MMC interrupts) are not handled. */
        if ((intr_status & GMAC_INT_STATUS_MMCTIS))
                x->mmc_tx_irq_n++;
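The dwmac1000 fix works because a set bit in GMAC_INT_MASK disables the corresponding interrupt source on this MAC; clearing those bits from the status word keeps the handler from reacting to causes it never enabled. Reduced to a fragment (register names from the hunk, helper name hypothetical):

/* Return only the interrupt causes that are actually unmasked. */
static u32 gmac_pending_irqs(void __iomem *ioaddr)
{
	u32 status = readl(ioaddr + GMAC_INT_STATUS);
	u32 mask = readl(ioaddr + GMAC_INT_MASK);

	return status & ~mask;	/* a set mask bit disables that source */
}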
index 5a1cc089acb7fd2e79c18876cd7951f6dfb6e747..86e5749226ef4cf65d6070bca1ab0d4be35bf2e0 100644 (file)
@@ -1295,6 +1295,9 @@ void netvsc_channel_cb(void *context)
        ndev = hv_get_drvdata(device);
        buffer = get_per_channel_state(channel);
 
+       /* commit_rd_index() -> hv_signal_on_read() needs this. */
+       init_cached_read_index(channel);
+
        do {
                desc = get_next_pkt_raw(channel);
                if (desc != NULL) {
@@ -1347,6 +1350,9 @@ void netvsc_channel_cb(void *context)
 
                        bufferlen = bytes_recvd;
                }
+
+               init_cached_read_index(channel);
+
        } while (1);
 
        if (bufferlen > NETVSC_PACKET_SIZE)
index e55809c5beb71a6c1a3a0a60420cb3c263f6a33b..6742070ca676f57694a9a6cb11364941deb520a0 100644 (file)
@@ -1012,7 +1012,7 @@ static struct phy_driver ksphy_driver[] = {
        .phy_id         = PHY_ID_KSZ8795,
        .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Micrel KSZ8795",
-       .features       = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+       .features       = PHY_BASIC_FEATURES,
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
index d02ca1491d16cede66389540f8cb92dda5749ff3..8d3e53fac1dabc01ed875b6f8c2863bb908f770c 100644 (file)
@@ -91,7 +91,7 @@
 
 #define IWL8000_FW_PRE "iwlwifi-8000C-"
 #define IWL8000_MODULE_FIRMWARE(api) \
-       IWL8000_FW_PRE "-" __stringify(api) ".ucode"
+       IWL8000_FW_PRE __stringify(api) ".ucode"
 
 #define IWL8265_FW_PRE "iwlwifi-8265-"
 #define IWL8265_MODULE_FIRMWARE(api) \
index 636c8b03e31892bd30e3a3d7a6b1e9b8a8eb02ea..09e9e2e3ed040202f0cb40c1e326584b0fa7465a 100644 (file)
@@ -1164,9 +1164,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                .frame_limit = IWL_FRAME_LIMIT,
        };
 
-       /* Make sure reserved queue is still marked as such (or allocated) */
-       mvm->queue_info[mvm_sta->reserved_queue].status =
-               IWL_MVM_QUEUE_RESERVED;
+       /* Make sure reserved queue is still marked as such (if allocated) */
+       if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
+               mvm->queue_info[mvm_sta->reserved_queue].status =
+                       IWL_MVM_QUEUE_RESERVED;
 
        for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
                struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
index 63a051be832ed44b30978b4d031464b15765df20..bec7d9c46087d3c8fed48d5858f4f116797eceed 100644 (file)
@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
                return;
 
        IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
-       thermal_zone_device_unregister(mvm->tz_device.tzone);
-       mvm->tz_device.tzone = NULL;
+       if (mvm->tz_device.tzone) {
+               thermal_zone_device_unregister(mvm->tz_device.tzone);
+               mvm->tz_device.tzone = NULL;
+       }
 }
 
 static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
                return;
 
        IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
-       thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
-       mvm->cooling_dev.cdev = NULL;
+       if (mvm->cooling_dev.cdev) {
+               thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+               mvm->cooling_dev.cdev = NULL;
+       }
 }
 #endif /* CONFIG_THERMAL */
 
index 17ac1dce32867051298a5489841de8b636835a68..3dd8bcbb3011babd4ad4271d6f6f64733bd9b3f1 100644 (file)
@@ -532,25 +532,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
        link = kzalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                return NULL;
+
        INIT_LIST_HEAD(&link->sibling);
        INIT_LIST_HEAD(&link->children);
        INIT_LIST_HEAD(&link->link);
        link->pdev = pdev;
-       if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
+
+       /*
+        * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
+        * hierarchies.
+        */
+       if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+           pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+               link->root = link;
+       } else {
                struct pcie_link_state *parent;
+
                parent = pdev->bus->parent->self->link_state;
                if (!parent) {
                        kfree(link);
                        return NULL;
                }
+
                link->parent = parent;
+               link->root = link->parent->root;
                list_add(&link->link, &parent->children);
        }
-       /* Setup a pointer to the root port link */
-       if (!link->parent)
-               link->root = link;
-       else
-               link->root = link->parent->root;
 
        list_add(&link->sibling, &link_list);
        pdev->link_state = link;
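The ASPM change treats a PCI/PCI-X-to-PCIe bridge like a Root Port: both start a new PCIe hierarchy, so neither can inherit link state from above (the bus upstream of such a bridge is conventional PCI, and the old code's attempt to chain to a parent link_state had nothing valid to chain to). The root assignment, condensed and stripped of the list bookkeeping:

/* Condensed from the hunk: anchor a new tree or inherit the parent's root. */
if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE)
	link->root = link;			/* starts its own hierarchy */
else
	link->root = link->parent->root;	/* downstream: inherit */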
index 09172043d5890735127b0f79275a98704cdbd2b6..c617ec49e9edeeebb1f33b78fa6b7214fb23207c 100644 (file)
@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
        BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
                        BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
                        BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
-                       BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
        BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
                        BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
                        BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
index c123488266ce74883ed8ba972b43103d136bb66e..d94aef17348b4b88f3952670886b863b952cfc60 100644 (file)
@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
                                  int reg)
 {
        struct byt_community *comm = byt_get_community(vg, offset);
-       u32 reg_offset = 0;
+       u32 reg_offset;
 
        if (!comm)
                return NULL;
 
        offset -= comm->pin_base;
-       if (reg == BYT_INT_STAT_REG)
+       switch (reg) {
+       case BYT_INT_STAT_REG:
                reg_offset = (offset / 32) * 4;
-       else
+               break;
+       case BYT_DEBOUNCE_REG:
+               reg_offset = 0;
+               break;
+       default:
                reg_offset = comm->pad_map[offset] * 16;
+               break;
+       }
 
        return comm->reg_base + reg_offset + reg;
 }
@@ -1243,10 +1250,12 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
                        debounce = readl(db_reg);
                        debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
 
+                       if (arg)
+                               conf |= BYT_DEBOUNCE_EN;
+                       else
+                               conf &= ~BYT_DEBOUNCE_EN;
+
                        switch (arg) {
-                       case 0:
-                               conf &= BYT_DEBOUNCE_EN;
-                               break;
                        case 375:
                                debounce |= BYT_DEBOUNCE_PULSE_375US;
                                break;
@@ -1269,7 +1278,9 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
                                debounce |= BYT_DEBOUNCE_PULSE_24MS;
                                break;
                        default:
-                               ret = -EINVAL;
+                               if (arg)
+                                       ret = -EINVAL;
+                               break;
                        }
 
                        if (!ret)
@@ -1612,7 +1623,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
                        continue;
                }
 
+               raw_spin_lock(&vg->lock);
                pending = readl(reg);
+               raw_spin_unlock(&vg->lock);
                for_each_set_bit(pin, &pending, 32) {
                        virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
                        generic_handle_irq(virq);
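Two things are corrected in the debounce hunk above. First, the old `conf &= BYT_DEBOUNCE_EN` in the arg == 0 case was missing a `~`, so it cleared every bit of conf except the enable bit instead of clearing the enable bit. Second, enabling is now decided once, from whether any period was requested at all:

if (arg)			/* nonzero period requested: enable debounce */
	conf |= BYT_DEBOUNCE_EN;
else				/* zero: disable (old "&= EN" wiped conf) */
	conf &= ~BYT_DEBOUNCE_EN;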
index b21896126f760a5cbae044ab13cba527e9dbfeff..4d4ef42a39b5faaa1969d20a5aeeedffef90074c 100644 (file)
@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
        unsigned int i;
        int ret;
 
+       if (!mrfld_buf_available(mp, pin))
+               return -ENOTSUPP;
+
        for (i = 0; i < nconfigs; i++) {
                switch (pinconf_to_config_param(configs[i])) {
                case PIN_CONFIG_BIAS_DISABLE:
index 0eb51e33cb1be5412ab11d10e7cdb474a2faa061..207a8de4e1ed851cf542aa4af008e8f74102cad3 100644 (file)
@@ -564,8 +564,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
                        val = arg / 10 - 1;
                        break;
                case PIN_CONFIG_BIAS_DISABLE:
-                       val = 0;
-                       break;
+                       continue;
                case PIN_CONFIG_BIAS_PULL_UP:
                        if (arg == 0)
                                return -EINVAL;
index e6a512ebeae2762812212ac5b9264f92bb8252be..a3ade9e4ef478ed90311365a4e86db0cbbc394d5 100644 (file)
@@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = {
                        64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
                        BIT(3)),
        AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
-                AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
+                AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
        AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
                 AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
        AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
index a43b0e8a438d305a959d3d745c7c65bf796ca9f9..988a7472c2ab568c3d1c03d1092c0713073a6d28 100644 (file)
@@ -30,9 +30,6 @@
 #include <linux/of_gpio.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/regulator/machine.h>
-#include <linux/acpi.h>
-#include <linux/property.h>
-#include <linux/gpio/consumer.h>
 
 struct fixed_voltage_data {
        struct regulator_desc desc;
@@ -97,44 +94,6 @@ of_get_fixed_voltage_config(struct device *dev,
        return config;
 }
 
-/**
- * acpi_get_fixed_voltage_config - extract fixed_voltage_config structure info
- * @dev: device requesting for fixed_voltage_config
- * @desc: regulator description
- *
- * Populates fixed_voltage_config structure by extracting data through ACPI
- * interface, returns a pointer to the populated structure of NULL if memory
- * alloc fails.
- */
-static struct fixed_voltage_config *
-acpi_get_fixed_voltage_config(struct device *dev,
-                             const struct regulator_desc *desc)
-{
-       struct fixed_voltage_config *config;
-       const char *supply_name;
-       struct gpio_desc *gpiod;
-       int ret;
-
-       config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
-       if (!config)
-               return ERR_PTR(-ENOMEM);
-
-       ret = device_property_read_string(dev, "supply-name", &supply_name);
-       if (!ret)
-               config->supply_name = supply_name;
-
-       gpiod = gpiod_get(dev, "gpio", GPIOD_ASIS);
-       if (IS_ERR(gpiod))
-               return ERR_PTR(-ENODEV);
-
-       config->gpio = desc_to_gpio(gpiod);
-       config->enable_high = device_property_read_bool(dev,
-                                                       "enable-active-high");
-       gpiod_put(gpiod);
-
-       return config;
-}
-
 static struct regulator_ops fixed_voltage_ops = {
 };
 
@@ -155,11 +114,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
                                                     &drvdata->desc);
                if (IS_ERR(config))
                        return PTR_ERR(config);
-       } else if (ACPI_HANDLE(&pdev->dev)) {
-               config = acpi_get_fixed_voltage_config(&pdev->dev,
-                                                      &drvdata->desc);
-               if (IS_ERR(config))
-                       return PTR_ERR(config);
        } else {
                config = dev_get_platdata(&pdev->dev);
        }
index 4864b9d742c0f7915cc792aaacd692c2a7f305b0..716191046a70782b0007033dda6e1402c0d68ea3 100644 (file)
@@ -452,7 +452,7 @@ static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV,
                        vsel = 62;
                else if ((min_uV > 1800000) && (min_uV <= 1900000))
                        vsel = 61;
-               else if ((min_uV > 1350000) && (min_uV <= 1800000))
+               else if ((min_uV > 1500000) && (min_uV <= 1800000))
                        vsel = 60;
                else if ((min_uV > 1350000) && (min_uV <= 1500000))
                        vsel = 59;
index c93c5a8fba32925584dbc28c60610786328d09ff..5dc673dc948785a79da8d070954323b9357385c1 100644 (file)
@@ -1551,12 +1551,15 @@ config RTC_DRV_MPC5121
          will be called rtc-mpc5121.
 
 config RTC_DRV_JZ4740
-       bool "Ingenic JZ4740 SoC"
+       tristate "Ingenic JZ4740 SoC"
        depends on MACH_INGENIC || COMPILE_TEST
        help
          If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
          controllers.
 
+         This driver can also be built as a module. If so, the module
+         will be called rtc-jz4740.
+
 config RTC_DRV_LPC24XX
        tristate "NXP RTC for LPC178x/18xx/408x/43xx"
        depends on ARCH_LPC18XX || COMPILE_TEST
index 72918c1ba0928d4fc78d921db621c489fafb9701..64989afffa3daada4b062321c527f18bca142bbb 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/reboot.h>
@@ -294,7 +295,7 @@ static void jz4740_rtc_power_off(void)
                             JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
 
        jz4740_rtc_poweroff(dev_for_power_off);
-       machine_halt();
+       kernel_halt();
 }
 
 static const struct of_device_id jz4740_rtc_of_match[] = {
@@ -302,6 +303,7 @@ static const struct of_device_id jz4740_rtc_of_match[] = {
        { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
        {},
 };
+MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
 
 static int jz4740_rtc_probe(struct platform_device *pdev)
 {
@@ -429,6 +431,7 @@ static const struct platform_device_id jz4740_rtc_ids[] = {
        { "jz4780-rtc", ID_JZ4780 },
        {}
 };
+MODULE_DEVICE_TABLE(platform, jz4740_rtc_ids);
 
 static struct platform_driver jz4740_rtc_driver = {
        .probe   = jz4740_rtc_probe,
@@ -440,4 +443,9 @@ static struct platform_driver jz4740_rtc_driver = {
        .id_table = jz4740_rtc_ids,
 };
 
-builtin_platform_driver(jz4740_rtc_driver);
+module_platform_driver(jz4740_rtc_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC");
+MODULE_ALIAS("platform:jz4740-rtc");
index ec91bd07f00a307337283cbea6f72c6e370a0170..c680d76413116c00b80193f5e7db9de2e13441b1 100644 (file)
@@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
 {
        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
        struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+       unsigned long flags;
        int req_size;
+       int ret;
 
        BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
 
@@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
                req_size = sizeof(cmd->req.cmd);
        }
 
-       if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+       ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+       if (ret == -EIO) {
+               cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+               spin_lock_irqsave(&req_vq->vq_lock, flags);
+               virtscsi_complete_cmd(vscsi, cmd);
+               spin_unlock_irqrestore(&req_vq->vq_lock, flags);
+       } else if (ret != 0) {
                return SCSI_MLQUEUE_HOST_BUSY;
+       }
        return 0;
 }
 
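The virtio-scsi hunk splits kick failures by kind: a transient failure (such as a full virtqueue) still returns SCSI_MLQUEUE_HOST_BUSY so the midlayer requeues, but -EIO, which here signals a hot-unplugged target, completes the command once as VIRTIO_SCSI_S_BAD_TARGET, since requeueing a command for a vanished target would retry forever. The dispositions in outline; kick() and complete_bad_target() are hypothetical stand-ins:

ret = kick(cmd);			/* stand-in for virtscsi_kick_cmd() */
if (ret == -EIO) {
	cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
	complete_bad_target(cmd);	/* complete under req_vq->vq_lock */
} else if (ret) {
	return SCSI_MLQUEUE_HOST_BUSY;	/* transient: midlayer will retry */
}
return 0;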
index 113f3d6c4b3a6cdeda3fce3abe729fbc927fd9d9..27f75b17679b8f19a5f6100a769dd7b1b0f77455 100644 (file)
@@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void)
 
 int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
 {
+       if (!arche_platform_change_state_cb)
+               return 0;
+
        return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
                                              pdata);
 }
 
 void gb_timesync_platform_unlock_bus(void)
 {
+       if (!arche_platform_change_state_cb)
+               return;
+
        arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
 }
 
index d2e50a27140c9254be2a80b6c6ae69bc71a93b4a..24f9f98968a5d860f83920287a5b7deb4c98bed6 100644 (file)
@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* CBM - Flash disk */
        { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* WORLDE easy key (easykey.25) MIDI controller  */
+       { USB_DEVICE(0x0218, 0x0401), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* HP 5300/5370C scanner */
        { USB_DEVICE(0x03f0, 0x0701), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
index 5490fc51638ede3c565eff9036ff3beaf884d3a9..fd80c1b9c8234cf4de8371c7ca4e528bc4712fc3 100644 (file)
@@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
                if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
                        return -EINVAL;
                length = le32_to_cpu(d->dwSize);
+               if (len < length)
+                       return -EINVAL;
                type = le32_to_cpu(d->dwPropertyDataType);
                if (type < USB_EXT_PROP_UNICODE ||
                    type > USB_EXT_PROP_UNICODE_MULTI) {
@@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
                        return -EINVAL;
                }
                pnl = le16_to_cpu(d->wPropertyNameLength);
+               if (length < 14 + pnl) {
+                       pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
+                                 length, pnl, type);
+                       return -EINVAL;
+               }
                pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
                if (length != 14 + pnl + pdl) {
                        pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
@@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
                }
        }
        if (flags & (1 << i)) {
+               if (len < 4)
+                       goto error;
                os_descs_count = get_unaligned_le32(data);
                data += 4;
                len -= 4;
@@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
 
        ENTER();
 
-       if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+       if (unlikely(len < 16 ||
+                    get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
                     get_unaligned_le32(data + 4) != len))
                goto error;
        str_count  = get_unaligned_le32(data + 8);
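All four f_fs hunks enforce one rule for these untrusted, length-prefixed OS descriptor blobs: a length field may be trusted only after checking both that the field itself fits in the remaining buffer and that the length it claims fits too. A self-contained sketch of the pattern (plain C, names invented for illustration):

#include <stdint.h>
#include <stddef.h>

/* Unaligned little-endian 32-bit read. */
static uint32_t le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static int parse_desc(const uint8_t *data, size_t len)
{
	uint32_t length;

	if (len < 4)
		return -1;		/* the size field itself must fit */
	length = le32(data);
	if (length < 4 || length > len)
		return -1;		/* the claimed size must fit as well */
	/* only now is data[0..length) safe to index */
	return 0;
}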
index fca288bbc8009580ba96198ce1a2a49330074d20..772f1582124255d749ab470a42394498af8f9741 100644 (file)
@@ -594,11 +594,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                                                | MUSB_PORT_STAT_RESUME;
                                musb->rh_timer = jiffies
                                        + msecs_to_jiffies(USB_RESUME_TIMEOUT);
-                               musb->need_finish_resume = 1;
-
                                musb->xceiv->otg->state = OTG_STATE_A_HOST;
                                musb->is_active = 1;
                                musb_host_resume_root_hub(musb);
+                               schedule_delayed_work(&musb->finish_resume_work,
+                                       msecs_to_jiffies(USB_RESUME_TIMEOUT));
                                break;
                        case OTG_STATE_B_WAIT_ACON:
                                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
@@ -1925,6 +1925,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
 static void musb_irq_work(struct work_struct *data)
 {
        struct musb *musb = container_of(data, struct musb, irq_work.work);
+       int error;
+
+       error = pm_runtime_get_sync(musb->controller);
+       if (error < 0) {
+               dev_err(musb->controller, "Could not enable: %i\n", error);
+
+               return;
+       }
 
        musb_pm_runtime_check_session(musb);
 
@@ -1932,6 +1940,9 @@ static void musb_irq_work(struct work_struct *data)
                musb->xceiv_old_state = musb->xceiv->otg->state;
                sysfs_notify(&musb->controller->kobj, NULL, "mode");
        }
+
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 }
 
 static void musb_recover_from_babble(struct musb *musb)
@@ -2710,11 +2721,6 @@ static int musb_resume(struct device *dev)
        mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
        if ((devctl & mask) != (musb->context.devctl & mask))
                musb->port1_status = 0;
-       if (musb->need_finish_resume) {
-               musb->need_finish_resume = 0;
-               schedule_delayed_work(&musb->finish_resume_work,
-                                     msecs_to_jiffies(USB_RESUME_TIMEOUT));
-       }
 
        /*
         * The USB HUB code expects the device to be in RPM_ACTIVE once it came
@@ -2766,12 +2772,6 @@ static int musb_runtime_resume(struct device *dev)
 
        musb_restore_context(musb);
 
-       if (musb->need_finish_resume) {
-               musb->need_finish_resume = 0;
-               schedule_delayed_work(&musb->finish_resume_work,
-                               msecs_to_jiffies(USB_RESUME_TIMEOUT));
-       }
-
        spin_lock_irqsave(&musb->lock, flags);
        error = musb_run_resume_work(musb);
        if (error)
index ade902ea1221e18543de05a5188c715d86af7398..ce5a18c98c6d1134231a29fa9229cf6804a0ae6e 100644 (file)
@@ -410,7 +410,6 @@ struct musb {
 
        /* is_suspended means USB B_PERIPHERAL suspend */
        unsigned                is_suspended:1;
-       unsigned                need_finish_resume :1;
 
        /* may_wakeup means remote wakeup is enabled */
        unsigned                may_wakeup:1;
index 7ce31a4c7e7fd3d186e8e05b20b9a3ca52700b6c..42cc72e54c051b2115c358bcee8bfc534258d206 100644 (file)
@@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index 46fca6b7584686744a9e79aae0bd78db08192813..1db4b61bdf7bd710d7be6e3ff81b97102d76fbe9 100644 (file)
@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
        { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+       { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
        { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
        { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
        { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
index e3b7af8adfb73ccefa92d4ba3c2c927a044dfa43..09d9be88209e1ce6b1f53dc052a53c5e4c491336 100644 (file)
@@ -27,6 +27,7 @@
 #define ATEN_VENDOR_ID         0x0557
 #define ATEN_VENDOR_ID2                0x0547
 #define ATEN_PRODUCT_ID                0x2008
+#define ATEN_PRODUCT_ID2       0x2118
 
 #define IODATA_VENDOR_ID       0x04bb
 #define IODATA_PRODUCT_ID      0x0a03
index 1bc6089b90083a05e0ef3e4ff71c0ef752e3831a..696458db7e3c45e661a9825d05df0fe25dc0a832 100644 (file)
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
        {USB_DEVICE(0x1410, 0xa021)},   /* Novatel Gobi 3000 Composite */
        {USB_DEVICE(0x413c, 0x8193)},   /* Dell Gobi 3000 QDL */
        {USB_DEVICE(0x413c, 0x8194)},   /* Dell Gobi 3000 Composite */
+       {USB_DEVICE(0x413c, 0x81a6)},   /* Dell DW5570 QDL (MC8805) */
        {USB_DEVICE(0x1199, 0x68a4)},   /* Sierra Wireless QDL */
        {USB_DEVICE(0x1199, 0x68a5)},   /* Sierra Wireless Modem */
        {USB_DEVICE(0x1199, 0x68a8)},   /* Sierra Wireless QDL */
index 128d10282d1632693dc40819ff8b39485ba1e1de..7690e5bf3cf134fc56a0a17d07e57d397d293346 100644 (file)
@@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data,
                mutex_lock(&container->lock);
 
                ret = tce_iommu_create_default_window(container);
-               if (ret)
-                       return ret;
-
-               ret = tce_iommu_create_window(container, create.page_shift,
-                               create.window_size, create.levels,
-                               &create.start_addr);
+               if (!ret)
+                       ret = tce_iommu_create_window(container,
+                                       create.page_shift,
+                                       create.window_size, create.levels,
+                                       &create.start_addr);
 
                mutex_unlock(&container->lock);
 
index d6432603880c1343ea2451eba6df1973e6d61822..8f99fe08de02e7b48725a99d682055c03056b82a 100644 (file)
@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
 
 static void vhost_init_is_le(struct vhost_virtqueue *vq)
 {
-       if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
-               vq->is_le = true;
+       vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
+               || virtio_legacy_is_little_endian();
 }
 #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
 
 static void vhost_reset_is_le(struct vhost_virtqueue *vq)
 {
-       vq->is_le = virtio_legacy_is_little_endian();
+       vhost_init_is_le(vq);
 }
 
 struct vhost_flush_struct {
@@ -1714,10 +1714,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
        int r;
        bool is_le = vq->is_le;
 
-       if (!vq->private_data) {
-               vhost_reset_is_le(vq);
+       if (!vq->private_data)
                return 0;
-       }
 
        vhost_init_is_le(vq);
 
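The vhost fix makes vhost_reset_is_le() reuse vhost_init_is_le(), so both paths derive vq->is_le from a single rule. Assuming no cross-endian legacy override is in effect, the rule is:

/*
 * VIRTIO_F_VERSION_1 negotiated?   legacy host little-endian?   vq->is_le
 *             yes                           either                 true
 *             no                            yes                    true
 *             no                            no                     false
 */
vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
	 || virtio_legacy_is_little_endian();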
index 7e38ed79c3fc0f2c095164d480f75b31630a6694..409aeaa49246a0edd7c6da07ca38b58c3f876109 100644 (file)
@@ -159,13 +159,6 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
        if (xen_domain())
                return true;
 
-       /*
-        * On ARM-based machines, the DMA ops will do the right thing,
-        * so always use them with legacy devices.
-        */
-       if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
-               return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
-
        return false;
 }
 
index 8f6a2a5863b9d9275bfb6afb00fc16b867101275..a27fc8791551cc86ca14d2d65e7870990a393f1e 100644 (file)
@@ -285,6 +285,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file)
                        rc = -ENOMEM;
                        goto error_exit;
                }
+               spin_lock_init(&cifsFile->file_info_lock);
                file->private_data = cifsFile;
                cifsFile->tlink = cifs_get_tlink(tlink);
                tcon = tlink_tcon(tlink);
index 3af2da5e64ce77fa8ae4b3f294c82882d350120f..c45598b912e14c981fdeb002b01c1535218c0ff2 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1031,6 +1031,11 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct blk_dax_ctl dax = { 0 };
                ssize_t map_len;
 
+               if (fatal_signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+
                dax.sector = dax_iomap_sector(iomap, pos);
                dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
                map_len = dax_map_atomic(iomap->bdev, &dax);
index 4304072161aa08c14d24291bf24eb2481c567874..40d61077bead88e39abff93bcdb185941462bfcb 100644 (file)
@@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
                hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
                        if (invalidate)
                                set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+                       clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
                        fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
                }
        } else {
@@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
                wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
                                 TASK_UNINTERRUPTIBLE);
 
+       /* Make sure any pending writes are cancelled. */
+       if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
+               fscache_invalidate_writes(cookie);
+
        /* Reset the cookie state if it wasn't relinquished */
        if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
                atomic_inc(&cookie->n_active);
index 9b28649df3a1fdc6f0f0c23b58b03db94dd69eb2..a8aa00be44442f59d6cf08516ab7403d0c02ab9e 100644 (file)
@@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
        cookie->flags           = 1 << FSCACHE_COOKIE_ENABLED;
 
        spin_lock_init(&cookie->lock);
+       spin_lock_init(&cookie->stores_lock);
        INIT_HLIST_HEAD(&cookie->backing_objects);
 
        /* check the netfs type is not already present */
index 9e792e30f4db47b38c6db644487c440a2e12febb..7a182c87f37805f1a5fa6719f5cc06cf3dd38552 100644 (file)
@@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
 static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
 static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
 static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
 
 #define __STATE_NAME(n) fscache_osm_##n
 #define STATE(n) (&__STATE_NAME(n))
@@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE,     "LCFL", fscache_lookup_failure);
 static WORK_STATE(KILL_OBJECT,         "KILL", fscache_kill_object);
 static WORK_STATE(KILL_DEPENDENTS,     "KDEP", fscache_kill_dependents);
 static WORK_STATE(DROP_OBJECT,         "DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD,         "DEAD", (void*)2UL);
+static WORK_STATE(OBJECT_DEAD,         "DEAD", fscache_object_dead);
 
 static WAIT_STATE(WAIT_FOR_INIT,       "?INI",
                  TRANSIT_TO(INIT_OBJECT,       1 << FSCACHE_OBJECT_EV_NEW_CHILD));
@@ -229,6 +230,10 @@ execute_work_state:
        event = -1;
        if (new_state == NO_TRANSIT) {
                _debug("{OBJ%x} %s notrans", object->debug_id, state->name);
+               if (unlikely(state == STATE(OBJECT_DEAD))) {
+                       _leave(" [dead]");
+                       return;
+               }
                fscache_enqueue_object(object);
                event_mask = object->oob_event_mask;
                goto unmask_events;
@@ -239,7 +244,7 @@ execute_work_state:
        object->state = state = new_state;
 
        if (state->work) {
-               if (unlikely(state->work == ((void *)2UL))) {
+               if (unlikely(state == STATE(OBJECT_DEAD))) {
                        _leave(" [dead]");
                        return;
                }
@@ -645,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob
        fscache_mark_object_dead(object);
        object->oob_event_mask = 0;
 
+       if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
+               /* Reject any new read/write ops and abort any that are pending. */
+               clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+               fscache_cancel_all_ops(object);
+       }
+
        if (list_empty(&object->dependents) &&
            object->n_ops == 0 &&
            object->n_children == 0)
@@ -1077,3 +1088,20 @@ void fscache_object_mark_killed(struct fscache_object *object,
        }
 }
 EXPORT_SYMBOL(fscache_object_mark_killed);
+
+/*
+ * The object is dead.  We can get here if an object gets queued by an event
+ * that would lead to its death (such as EV_KILL) when the dispatcher is
+ * already running (and so can be requeued) but hasn't yet cleared the event
+ * mask.
+ */
+static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
+                                                      int event)
+{
+       if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
+                             &object->flags))
+               return NO_TRANSIT;
+
+       WARN(true, "FS-Cache object redispatched after death");
+       return NO_TRANSIT;
+}
index 354a123f170e534a016f74ca7006458e3b823ef8..a51cb4c07d4d8cd3a09715361c84126cecae2ca2 100644 (file)
@@ -114,6 +114,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 
        BUG_ON(pos + len > iomap->offset + iomap->length);
 
+       if (fatal_signal_pending(current))
+               return -EINTR;
+
        page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
        if (!page)
                return -ENOMEM;
index 596205d939a1f43f1faa292d31680377045a8589..1fc07a9c70e9c6028342e8c97d183dfe914a343b 100644 (file)
@@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
        struct nfs4_layout_stateid *ls;
        struct nfs4_stid *stp;
 
-       stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
+       stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
+                                       nfsd4_free_layout_stateid);
        if (!stp)
                return NULL;
-       stp->sc_free = nfsd4_free_layout_stateid;
+
        get_nfs4_file(fp);
        stp->sc_file = fp;
 
index 4b4beaaa4eaac01233f874c7dfdb8d1a6d7cd3d6..a0dee8ae9f97f16a18e40ba19f8e84a45ad1a02b 100644 (file)
@@ -633,8 +633,8 @@ out:
        return co;
 }
 
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-                                        struct kmem_cache *slab)
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+                                 void (*sc_free)(struct nfs4_stid *))
 {
        struct nfs4_stid *stid;
        int new_id;
@@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
        idr_preload_end();
        if (new_id < 0)
                goto out_free;
+
+       stid->sc_free = sc_free;
        stid->sc_client = cl;
        stid->sc_stateid.si_opaque.so_id = new_id;
        stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
@@ -675,15 +677,12 @@ out_free:
 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
 {
        struct nfs4_stid *stid;
-       struct nfs4_ol_stateid *stp;
 
-       stid = nfs4_alloc_stid(clp, stateid_slab);
+       stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
        if (!stid)
                return NULL;
 
-       stp = openlockstateid(stid);
-       stp->st_stid.sc_free = nfs4_free_ol_stateid;
-       return stp;
+       return openlockstateid(stid);
 }
 
 static void nfs4_free_deleg(struct nfs4_stid *stid)
@@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
                goto out_dec;
        if (delegation_blocked(&current_fh->fh_handle))
                goto out_dec;
-       dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
+       dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
        if (dp == NULL)
                goto out_dec;
 
-       dp->dl_stid.sc_free = nfs4_free_deleg;
        /*
         * delegation seqid's are never incremented.  The 4.1 special
         * meaning of seqid 0 isn't meaningful, really, but let's avoid
@@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
        stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
        get_nfs4_file(fp);
        stp->st_stid.sc_file = fp;
-       stp->st_stid.sc_free = nfs4_free_lock_stateid;
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = open_stp->st_deny_bmap;
        stp->st_openstp = open_stp;
@@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
        lst = find_lock_stateid(lo, fi);
        if (lst == NULL) {
                spin_unlock(&clp->cl_lock);
-               ns = nfs4_alloc_stid(clp, stateid_slab);
+               ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
                if (ns == NULL)
                        return NULL;
 
index c9399366f9dfc73b343d079fbad2dc2127927aae..4516e8b7d776305d94fb89f86256ee3fc54dec27 100644 (file)
@@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
 __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
                     stateid_t *stateid, unsigned char typemask,
                     struct nfs4_stid **s, struct nfsd_net *nn);
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-               struct kmem_cache *slab);
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+                                 void (*sc_free)(struct nfs4_stid *));
 void nfs4_unhash_stid(struct nfs4_stid *s);
 void nfs4_put_stid(struct nfs4_stid *s);
 void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
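The nfsd stateid hunks all implement one refactor: nfs4_alloc_stid() now takes the type-specific destructor, so sc_free is bound before the object is visible anywhere, and any error path can nfs4_put_stid() a half-constructed stateid without calling through an unset pointer. The general shape, with hypothetical names:

struct obj {
	void (*free)(struct obj *);
	/* ... type-specific state ... */
};

/* Bind the destructor at allocation time, before anyone can see o. */
static struct obj *obj_alloc(void (*free_fn)(struct obj *))
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o)
		o->free = free_fn;
	return o;
}

static void obj_put(struct obj *o)
{
	o->free(o);	/* valid on every path, even early failures */
}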
index 26c6fdb4bf67cf1e3e3a843e8e816d7a76eae265..ca13236dbb1f33afb1c6664c030d63a5bfcfacc5 100644 (file)
@@ -332,37 +332,6 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
        }
 }
 
-static __be32
-nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
-               struct iattr *iap)
-{
-       struct inode *inode = d_inode(fhp->fh_dentry);
-       int host_err;
-
-       if (iap->ia_size < inode->i_size) {
-               __be32 err;
-
-               err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
-                               NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
-               if (err)
-                       return err;
-       }
-
-       host_err = get_write_access(inode);
-       if (host_err)
-               goto out_nfserrno;
-
-       host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
-       if (host_err)
-               goto out_put_write_access;
-       return 0;
-
-out_put_write_access:
-       put_write_access(inode);
-out_nfserrno:
-       return nfserrno(host_err);
-}
-
 /*
  * Set various file attributes.  After this call fhp needs an fh_put.
  */
@@ -377,7 +346,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
        __be32          err;
        int             host_err;
        bool            get_write_count;
-       int             size_change = 0;
 
        if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
                accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
@@ -390,11 +358,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
        /* Get inode */
        err = fh_verify(rqstp, fhp, ftype, accmode);
        if (err)
-               goto out;
+               return err;
        if (get_write_count) {
                host_err = fh_want_write(fhp);
                if (host_err)
-                       return nfserrno(host_err);
+                       goto out_host_err;
        }
 
        dentry = fhp->fh_dentry;
@@ -405,50 +373,59 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
                iap->ia_valid &= ~ATTR_MODE;
 
        if (!iap->ia_valid)
-               goto out;
+               return 0;
 
        nfsd_sanitize_attrs(inode, iap);
 
+       if (check_guard && guardtime != inode->i_ctime.tv_sec)
+               return nfserr_notsync;
+
        /*
         * The size case is special, it changes the file in addition to the
-        * attributes.
+        * attributes, and file systems don't expect it to be mixed with
+        * "random" attribute changes.  We thus split out the size change
+        * into a separate call to vfs_truncate, and do the rest as a
+        * separate setattr call.
         */
        if (iap->ia_valid & ATTR_SIZE) {
-               err = nfsd_get_write_access(rqstp, fhp, iap);
-               if (err)
-                       goto out;
-               size_change = 1;
+               struct path path = {
+                       .mnt    = fhp->fh_export->ex_path.mnt,
+                       .dentry = dentry,
+               };
+               bool implicit_mtime = false;
 
                /*
-                * RFC5661, Section 18.30.4:
-                *   Changing the size of a file with SETATTR indirectly
-                *   changes the time_modify and change attributes.
-                *
-                * (and similar for the older RFCs)
+                * vfs_truncate implicitly updates the mtime IFF the file size
+                * actually changes.  Avoid the additional setattr call below if
+                * the only other attribute that the client sends is the mtime.
                 */
-               if (iap->ia_size != i_size_read(inode))
-                       iap->ia_valid |= ATTR_MTIME;
-       }
+               if (iap->ia_size != i_size_read(inode) &&
+                   ((iap->ia_valid & ~(ATTR_SIZE | ATTR_MTIME)) == 0))
+                       implicit_mtime = true;
 
-       iap->ia_valid |= ATTR_CTIME;
+               host_err = vfs_truncate(&path, iap->ia_size);
+               if (host_err)
+                       goto out_host_err;
 
-       if (check_guard && guardtime != inode->i_ctime.tv_sec) {
-               err = nfserr_notsync;
-               goto out_put_write_access;
+               iap->ia_valid &= ~ATTR_SIZE;
+               if (implicit_mtime)
+                       iap->ia_valid &= ~ATTR_MTIME;
+               if (!iap->ia_valid)
+                       goto done;
        }
 
+       iap->ia_valid |= ATTR_CTIME;
+
        fh_lock(fhp);
        host_err = notify_change(dentry, iap, NULL);
        fh_unlock(fhp);
-       err = nfserrno(host_err);
+       if (host_err)
+               goto out_host_err;
 
-out_put_write_access:
-       if (size_change)
-               put_write_access(inode);
-       if (!err)
-               err = nfserrno(commit_metadata(fhp));
-out:
-       return err;
+done:
+       host_err = commit_metadata(fhp);
+out_host_err:
+       return nfserrno(host_err);
 }
 
 #if defined(CONFIG_NFSD_V4)
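In outline, the rewritten nfsd_setattr() peels the size change off into vfs_truncate() on a path built from the export's vfsmount plus the dentry, then sends whatever attributes remain through notify_change(). A simplified view (locking and error handling elided; not a drop-in replacement):

if (iap->ia_valid & ATTR_SIZE) {
	struct path path = {
		.mnt	= fhp->fh_export->ex_path.mnt,
		.dentry	= dentry,
	};

	host_err = vfs_truncate(&path, iap->ia_size);
	iap->ia_valid &= ~ATTR_SIZE;	/* handled; don't repeat it below */
}
if (iap->ia_valid)
	host_err = notify_change(dentry, iap, NULL);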
index 63554e9f6e0c68595943e27d2734ad4fa8271007..719db1968d8177a91028fd5e8bd6068f5d71c491 100644 (file)
@@ -9,18 +9,15 @@
 #ifndef KSYM_ALIGN
 #define KSYM_ALIGN 8
 #endif
-#ifndef KCRC_ALIGN
-#define KCRC_ALIGN 8
-#endif
 #else
 #define __put .long
 #ifndef KSYM_ALIGN
 #define KSYM_ALIGN 4
 #endif
+#endif
 #ifndef KCRC_ALIGN
 #define KCRC_ALIGN 4
 #endif
-#endif
 
 #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
 #define KSYM(name) _##name
@@ -52,7 +49,11 @@ KSYM(__kstrtab_\name):
        .section ___kcrctab\sec+\name,"a"
        .balign KCRC_ALIGN
 KSYM(__kcrctab_\name):
-       __put KSYM(__crc_\name)
+#if defined(CONFIG_MODULE_REL_CRCS)
+       .long KSYM(__crc_\name) - .
+#else
+       .long KSYM(__crc_\name)
+#endif
        .weak KSYM(__crc_\name)
        .previous
 #endif
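This hunk and the __CRC_SYMBOL one below are two halves of CONFIG_MODULE_REL_CRCS: the kcrctab entry stores `__crc_name - .`, a 32-bit place-relative offset, instead of an absolute symbol address that need not fit in a .long on 64-bit kernels. A consumer recovers the CRC by adding the stored offset back to the entry's own address, roughly as follows (helper name hypothetical; the kernel's module loader carries an equivalent):

static u32 read_rel_crc(const s32 *entry)
{
	return *(const u32 *)((const char *)entry + *entry);
}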
index 192016e2b5183c7a22fd13fd21372de5862f44d8..9c4ee144b5f6b799baf92a9722cddcaee3eb2b4f 100644 (file)
@@ -517,6 +517,7 @@ struct drm_device {
        struct drm_minor *control;              /**< Control node */
        struct drm_minor *primary;              /**< Primary node */
        struct drm_minor *render;               /**< Render node */
+       bool registered;
 
        /* currently active master for this device. Protected by master_mutex */
        struct drm_master *master;
index a9b95246e26efcf3d44cd5afc85f7031f0fa77fe..045a97cbeba24f44eb1b1d5582b4145801fcafdd 100644 (file)
@@ -381,6 +381,8 @@ struct drm_connector_funcs {
         * core drm connector interfaces. Everything added from this callback
         * should be unregistered in the early_unregister callback.
         *
+        * This is called while holding drm_connector->mutex.
+        *
         * Returns:
         *
         * 0 on success, or a negative error code on failure.
@@ -395,6 +397,8 @@ struct drm_connector_funcs {
         * late_register(). It is called from drm_connector_unregister(),
         * early in the driver unload sequence to disable userspace access
         * before data structures are torndown.
+        *
+        * This is called while holding drm_connector->mutex.
         */
        void (*early_unregister)(struct drm_connector *connector);
 
@@ -559,7 +563,6 @@ struct drm_cmdline_mode {
  * @interlace_allowed: can this connector handle interlaced modes?
  * @doublescan_allowed: can this connector handle doublescan?
  * @stereo_allowed: can this connector handle stereo modes?
- * @registered: is this connector exposed (registered) with userspace?
  * @modes: modes available on this connector (from fill_modes() + user)
  * @status: one of the drm_connector_status enums (connected, not, or unknown)
  * @probed_modes: list of modes derived directly from the display
@@ -607,6 +610,13 @@ struct drm_connector {
 
        char *name;
 
+       /**
+        * @mutex: Lock for general connector state, but currently only protects
+        * @registered. Most of the connector state is still protected by the
+        * mutex in &drm_mode_config.
+        */
+       struct mutex mutex;
+
        /**
         * @index: Compacted connector index, which matches the position inside
         * the mode_config.list for drivers not supporting hot-add/removing. Can
@@ -620,6 +630,10 @@ struct drm_connector {
        bool interlace_allowed;
        bool doublescan_allowed;
        bool stereo_allowed;
+       /**
+        * @registered: Is this connector exposed (registered) with userspace?
+        * Protected by @mutex.
+        */
        bool registered;
        struct list_head modes; /* list of modes on this connector */
 
index a0875001b13c84ad70a9b2909654e9ffb6824c58..df08a41d5be5f26cfa4cdc74935f5eae7fa51385 100644 (file)
@@ -45,10 +45,9 @@ struct can_proto {
 extern int  can_proto_register(const struct can_proto *cp);
 extern void can_proto_unregister(const struct can_proto *cp);
 
-extern int  can_rx_register(struct net_device *dev, canid_t can_id,
-                           canid_t mask,
-                           void (*func)(struct sk_buff *, void *),
-                           void *data, char *ident);
+int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+                   void (*func)(struct sk_buff *, void *),
+                   void *data, char *ident, struct sock *sk);
 
 extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
                              canid_t mask,
index d936a0021839cca651e19ec43e71b8f21cb69cf0..921acaaa16017979df0722fb9803b204d77c0be0 100644 (file)
@@ -8,9 +8,7 @@ enum cpuhp_state {
        CPUHP_CREATE_THREADS,
        CPUHP_PERF_PREPARE,
        CPUHP_PERF_X86_PREPARE,
-       CPUHP_PERF_X86_UNCORE_PREP,
        CPUHP_PERF_X86_AMD_UNCORE_PREP,
-       CPUHP_PERF_X86_RAPL_PREP,
        CPUHP_PERF_BFIN,
        CPUHP_PERF_POWER,
        CPUHP_PERF_SUPERH,
@@ -86,7 +84,6 @@ enum cpuhp_state {
        CPUHP_AP_IRQ_ARMADA_XP_STARTING,
        CPUHP_AP_IRQ_BCM2836_STARTING,
        CPUHP_AP_ARM_MVEBU_COHERENCY,
-       CPUHP_AP_PERF_X86_UNCORE_STARTING,
        CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
        CPUHP_AP_PERF_X86_STARTING,
        CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
index 2a0f61fbc7310e61f5927c31250e208d217c3e26..1a1dfdb2a5c6d8806d11e2cd58722e304f293054 100644 (file)
@@ -43,12 +43,19 @@ extern struct module __this_module;
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate a checksum for some symbols */
+#if defined(CONFIG_MODULE_REL_CRCS)
 #define __CRC_SYMBOL(sym, sec)                                         \
-       extern __visible void *__crc_##sym __attribute__((weak));       \
-       static const unsigned long __kcrctab_##sym                      \
-       __used                                                          \
-       __attribute__((section("___kcrctab" sec "+" #sym), used))       \
-       = (unsigned long) &__crc_##sym;
+       asm("   .section \"___kcrctab" sec "+" #sym "\", \"a\"  \n"     \
+           "   .weak   " VMLINUX_SYMBOL_STR(__crc_##sym) "     \n"     \
+           "   .long   " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n"     \
+           "   .previous                                       \n");
+#else
+#define __CRC_SYMBOL(sym, sec)                                         \
+       asm("   .section \"___kcrctab" sec "+" #sym "\", \"a\"  \n"     \
+           "   .weak   " VMLINUX_SYMBOL_STR(__crc_##sym) "     \n"     \
+           "   .long   " VMLINUX_SYMBOL_STR(__crc_##sym) "     \n"     \
+           "   .previous                                       \n");
+#endif
 #else
 #define __CRC_SYMBOL(sym, sec)
 #endif
index 13ba552e6c094e82ee8b952b26d9f0bf5eeebc39..4c467ef50159db533ecb567a86eeaf6e1e81e632 100644 (file)
@@ -360,6 +360,7 @@ struct fscache_object {
 #define FSCACHE_OBJECT_IS_AVAILABLE    5       /* T if object has become active */
 #define FSCACHE_OBJECT_RETIRED         6       /* T if object was retired on relinquishment */
 #define FSCACHE_OBJECT_KILLED_BY_CACHE 7       /* T if object was killed by the cache */
+#define FSCACHE_OBJECT_RUN_AFTER_DEAD  8       /* T if object has been dispatched after death */
 
        struct list_head        cache_link;     /* link in cache->object_list */
        struct hlist_node       cookie_link;    /* link in cookie->backing_objects */
index 42fe43fb0c80605f9553c746f04310f43914f683..183efde54269e18c5d4d1eda7dc448717fe85800 100644 (file)
@@ -128,6 +128,7 @@ struct hv_ring_buffer_info {
        u32 ring_data_startoffset;
        u32 priv_write_index;
        u32 priv_read_index;
+       u32 cached_read_index;
 };
 
 /*
@@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
        return write;
 }
 
+static inline u32 hv_get_cached_bytes_to_write(
+       const struct hv_ring_buffer_info *rbi)
+{
+       u32 read_loc, write_loc, dsize, write;
+
+       dsize = rbi->ring_datasize;
+       read_loc = rbi->cached_read_index;
+       write_loc = rbi->ring_buffer->write_index;
+
+       write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+               read_loc - write_loc;
+       return write;
+}
 /*
  * VMBUS version is 32 bit entity broken up into
  * two 16 bit quantities: major_number. minor_number.
@@ -1488,7 +1502,7 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
 
 static inline  void hv_signal_on_read(struct vmbus_channel *channel)
 {
-       u32 cur_write_sz;
+       u32 cur_write_sz, cached_write_sz;
        u32 pending_sz;
        struct hv_ring_buffer_info *rbi = &channel->inbound;
 
@@ -1512,12 +1526,24 @@ static inline  void hv_signal_on_read(struct vmbus_channel *channel)
 
        cur_write_sz = hv_get_bytes_to_write(rbi);
 
-       if (cur_write_sz >= pending_sz)
+       if (cur_write_sz < pending_sz)
+               return;
+
+       cached_write_sz = hv_get_cached_bytes_to_write(rbi);
+       if (cached_write_sz < pending_sz)
                vmbus_setevent(channel);
 
        return;
 }
 
+static inline void
+init_cached_read_index(struct vmbus_channel *channel)
+{
+       struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+       rbi->cached_read_index = rbi->ring_buffer->read_index;
+}
+
 /*
  * An API to support in-place processing of incoming VMBUS packets.
  */
@@ -1569,6 +1595,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
  * This call commits the read index and potentially signals the host.
  * Here is the pattern for using the "in-place" consumption APIs:
  *
+ * init_cached_read_index();
+ *
  * while (get_next_pkt_raw() {
  *     process the packet "in-place";
  *     put_pkt_raw();
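 *
 * Putting the pieces together, the documented loop becomes the sketch
 * below; get_next_pkt_raw(), put_pkt_raw() and commit_rd_index() are the
 * helpers declared elsewhere in this header. hv_signal_on_read() now
 * signals the host only when free space crosses the host's pending
 * threshold relative to the snapshot taken by init_cached_read_index():
 *
 *     struct vmpacket_descriptor *desc;
 *
 *     init_cached_read_index(channel);        // snapshot read_index
 *
 *     while ((desc = get_next_pkt_raw(channel)) != NULL) {
 *             // process the packet "in-place"
 *             put_pkt_raw(channel, desc);
 *     }
 *
 *     commit_rd_index(channel);               // commit and maybe signal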
index d49e26c6cdc7b5e48e41591f7c73e74d200441a8..c573a52ae440e83894709328820bacb79391d15c 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/dma_remapping.h>
 #include <linux/mmu_notifier.h>
 #include <linux/list.h>
+#include <linux/iommu.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 
@@ -153,8 +154,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
 #define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
 #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
-#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
-#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
+#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
+#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
 #define DMA_TLB_READ_DRAIN (((u64)1) << 49)
 #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
 #define DMA_TLB_DID(id)        (((u64)((id) & 0xffff)) << 32)
@@ -164,9 +165,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 
 /* INVALID_DESC */
 #define DMA_CCMD_INVL_GRANU_OFFSET  61
-#define DMA_ID_TLB_GLOBAL_FLUSH        (((u64)1) << 3)
-#define DMA_ID_TLB_DSI_FLUSH   (((u64)2) << 3)
-#define DMA_ID_TLB_PSI_FLUSH   (((u64)3) << 3)
+#define DMA_ID_TLB_GLOBAL_FLUSH        (((u64)1) << 4)
+#define DMA_ID_TLB_DSI_FLUSH   (((u64)2) << 4)
+#define DMA_ID_TLB_PSI_FLUSH   (((u64)3) << 4)
 #define DMA_ID_TLB_READ_DRAIN  (((u64)1) << 7)
 #define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
 #define DMA_ID_TLB_DID(id)     (((u64)((id & 0xffff) << 16)))
@@ -316,8 +317,8 @@ enum {
 #define QI_DEV_EIOTLB_SIZE     (((u64)1) << 11)
 #define QI_DEV_EIOTLB_GLOB(g)  ((u64)g)
 #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
-#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
-#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16)
+#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
+#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
 #define QI_DEV_EIOTLB_MAX_INVS 32
 
 #define QI_PGRP_IDX(idx)       (((u64)(idx)) << 55)
@@ -439,7 +440,7 @@ struct intel_iommu {
        struct irq_domain *ir_domain;
        struct irq_domain *ir_msi_domain;
 #endif
-       struct device   *iommu_dev; /* IOMMU-sysfs device */
+       struct iommu_device iommu;  /* IOMMU core code handle */
        int             node;
        u32             flags;      /* Software defined flags */
 };
index add30c3753328d618c363a6f29f2174cf28d9da0..6a6de187ddc0ff1e0e737f94261211b7ea4408e9 100644 (file)
@@ -219,6 +219,42 @@ struct iommu_ops {
        unsigned long pgsize_bitmap;
 };
 
+/**
+ * struct iommu_device - IOMMU core representation of one IOMMU hardware
+ *                      instance
+ * @list: Used by the iommu-core to keep a list of registered iommus
+ * @ops: iommu-ops for talking to this iommu
+ * @dev: struct device for sysfs handling
+ */
+struct iommu_device {
+       struct list_head list;
+       const struct iommu_ops *ops;
+       struct fwnode_handle *fwnode;
+       struct device dev;
+};
+
+int  iommu_device_register(struct iommu_device *iommu);
+void iommu_device_unregister(struct iommu_device *iommu);
+int  iommu_device_sysfs_add(struct iommu_device *iommu,
+                           struct device *parent,
+                           const struct attribute_group **groups,
+                           const char *fmt, ...) __printf(4, 5);
+void iommu_device_sysfs_remove(struct iommu_device *iommu);
+int  iommu_device_link(struct iommu_device   *iommu, struct device *link);
+void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
+
+static inline void iommu_device_set_ops(struct iommu_device *iommu,
+                                       const struct iommu_ops *ops)
+{
+       iommu->ops = ops;
+}
+
+static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
+                                          struct fwnode_handle *fwnode)
+{
+       iommu->fwnode = fwnode;
+}
+
 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE          1 /* Device added */
 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE          2 /* Pre Device removed */
 #define IOMMU_GROUP_NOTIFY_BIND_DRIVER         3 /* Pre Driver bind */
@@ -286,12 +322,6 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
                                 void *data);
 extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
                                 void *data);
-struct device *iommu_device_create(struct device *parent, void *drvdata,
-                                  const struct attribute_group **groups,
-                                  const char *fmt, ...) __printf(4, 5);
-void iommu_device_destroy(struct device *dev);
-int iommu_device_link(struct device *dev, struct device *link);
-void iommu_device_unlink(struct device *dev, struct device *link);
 
 /* Window handling function prototypes */
 extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
@@ -371,15 +401,14 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
                      const struct iommu_ops *ops);
 void iommu_fwspec_free(struct device *dev);
 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
-void iommu_register_instance(struct fwnode_handle *fwnode,
-                            const struct iommu_ops *ops);
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
 
 #else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};
 struct iommu_group {};
 struct iommu_fwspec {};
+struct iommu_device {};
 
 static inline bool iommu_present(struct bus_type *bus)
 {
@@ -571,15 +600,34 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain,
        return -EINVAL;
 }
 
-static inline struct device *iommu_device_create(struct device *parent,
-                                       void *drvdata,
-                                       const struct attribute_group **groups,
-                                       const char *fmt, ...)
+static inline int  iommu_device_register(struct iommu_device *iommu)
+{
+       return -ENODEV;
+}
+
+static inline void iommu_device_set_ops(struct iommu_device *iommu,
+                                       const struct iommu_ops *ops)
+{
+}
+
+static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
+                                          struct fwnode_handle *fwnode)
 {
-       return ERR_PTR(-ENODEV);
 }
 
-static inline void iommu_device_destroy(struct device *dev)
+static inline void iommu_device_unregister(struct iommu_device *iommu)
+{
+}
+
+static inline int  iommu_device_sysfs_add(struct iommu_device *iommu,
+                                         struct device *parent,
+                                         const struct attribute_group **groups,
+                                         const char *fmt, ...)
+{
+       return -ENODEV;
+}
+
+static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
 {
 }
 
@@ -609,13 +657,8 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
        return -ENODEV;
 }
 
-static inline void iommu_register_instance(struct fwnode_handle *fwnode,
-                                          const struct iommu_ops *ops)
-{
-}
-
 static inline
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
 {
        return NULL;
 }
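
For driver authors, the conversion away from iommu_device_create() follows a fixed recipe: embed a struct iommu_device in the per-instance driver data, then hand it to the core. A hedged sketch of a probe path; my_iommu, my_iommu_ops and the probe function itself are hypothetical names, not part of this commit:

struct my_iommu {
        void __iomem            *base;
        struct iommu_device     iommu;          /* embedded core handle */
};

static int my_iommu_probe(struct platform_device *pdev)
{
        struct my_iommu *m;
        int ret;

        m = devm_kzalloc(&pdev->dev, sizeof(*m), GFP_KERNEL);
        if (!m)
                return -ENOMEM;

        ret = iommu_device_sysfs_add(&m->iommu, &pdev->dev, NULL,
                                     "%s", dev_name(&pdev->dev));
        if (ret)
                return ret;

        iommu_device_set_ops(&m->iommu, &my_iommu_ops);
        iommu_device_set_fwnode(&m->iommu, &pdev->dev.of_node->fwnode);

        return iommu_device_register(&m->iommu);
}
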
index e79875574b393f33ed183fdc8f44277a49c26ba8..39e3254e5769d7da1d0f74cdfdf90bd9b40cfc8a 100644 (file)
@@ -184,6 +184,7 @@ struct irq_data {
  *
  * IRQD_TRIGGER_MASK           - Mask for the trigger type bits
  * IRQD_SETAFFINITY_PENDING    - Affinity setting is pending
+ * IRQD_ACTIVATED              - Interrupt has already been activated
  * IRQD_NO_BALANCING           - Balancing disabled for this IRQ
  * IRQD_PER_CPU                        - Interrupt is per cpu
  * IRQD_AFFINITY_SET           - Interrupt affinity was set
@@ -202,6 +203,7 @@ struct irq_data {
 enum {
        IRQD_TRIGGER_MASK               = 0xf,
        IRQD_SETAFFINITY_PENDING        = (1 <<  8),
+       IRQD_ACTIVATED                  = (1 <<  9),
        IRQD_NO_BALANCING               = (1 << 10),
        IRQD_PER_CPU                    = (1 << 11),
        IRQD_AFFINITY_SET               = (1 << 12),
@@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
        return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
 }
 
+static inline bool irqd_is_activated(struct irq_data *d)
+{
+       return __irqd_to_state(d) & IRQD_ACTIVATED;
+}
+
+static inline void irqd_set_activated(struct irq_data *d)
+{
+       __irqd_to_state(d) |= IRQD_ACTIVATED;
+}
+
+static inline void irqd_clr_activated(struct irq_data *d)
+{
+       __irqd_to_state(d) &= ~IRQD_ACTIVATED;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
index fd7ff3d91e6a920ff084beca09d10b5b9abba981..ef3d4f67118ce0f60789e6e749a4773754e01e87 100644 (file)
@@ -203,6 +203,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
  *  ... and so on.
  */
 
-#define order_base_2(n) ilog2(roundup_pow_of_two(n))
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+       return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
 
+#define order_base_2(n)                                \
+(                                              \
+       __builtin_constant_p(n) ? (             \
+               ((n) == 0 || (n) == 1) ? 0 :    \
+               ilog2((n) - 1) + 1) :           \
+       __order_base_2(n)                       \
+)
 #endif /* _LINUX_LOG2_H */
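
The rework makes order_base_2() well defined for constant 0 and 1 (the old ilog2(roundup_pow_of_two(n)) form misbehaved for a constant n == 0) while still constant-folding. Spot checks, which both the constant branch and __order_base_2() satisfy:

        order_base_2(0);        /* == 0, previously not well defined */
        order_base_2(1);        /* == 0                              */
        order_base_2(2);        /* == 1                              */
        order_base_2(3);        /* == 2                              */
        order_base_2(4);        /* == 2, already a power of two      */
        order_base_2(5);        /* == 3                              */
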
index c1784c0b4f3585e0d20ca8253813c31d47f11c04..134a2f69c21abf7921181af0adff033bb459edc5 100644 (file)
@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
 /* VM interface that may be used by firmware interface */
 extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long, unsigned long);
+extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+       unsigned long *valid_start, unsigned long *valid_end);
 extern void __offline_isolated_pages(unsigned long, unsigned long);
 
 typedef void (*online_page_callback_t)(struct page *page);
index 7c84273d60b963d44c032761cac62194e82d1198..cc7cba219b207de5536f6f9e9353d1a20201e4af 100644 (file)
@@ -346,7 +346,7 @@ struct module {
 
        /* Exported symbols */
        const struct kernel_symbol *syms;
-       const unsigned long *crcs;
+       const s32 *crcs;
        unsigned int num_syms;
 
        /* Kernel parameters. */
@@ -359,18 +359,18 @@ struct module {
        /* GPL-only exported symbols. */
        unsigned int num_gpl_syms;
        const struct kernel_symbol *gpl_syms;
-       const unsigned long *gpl_crcs;
+       const s32 *gpl_crcs;
 
 #ifdef CONFIG_UNUSED_SYMBOLS
        /* unused exported symbols. */
        const struct kernel_symbol *unused_syms;
-       const unsigned long *unused_crcs;
+       const s32 *unused_crcs;
        unsigned int num_unused_syms;
 
        /* GPL-only, unused exported symbols. */
        unsigned int num_unused_gpl_syms;
        const struct kernel_symbol *unused_gpl_syms;
-       const unsigned long *unused_gpl_crcs;
+       const s32 *unused_gpl_crcs;
 #endif
 
 #ifdef CONFIG_MODULE_SIG
@@ -382,7 +382,7 @@ struct module {
 
        /* symbols that will be GPL-only in the near future. */
        const struct kernel_symbol *gpl_future_syms;
-       const unsigned long *gpl_future_crcs;
+       const s32 *gpl_future_crcs;
        unsigned int num_gpl_future_syms;
 
        /* Exception table */
@@ -523,7 +523,7 @@ struct module *find_module(const char *name);
 
 struct symsearch {
        const struct kernel_symbol *start, *stop;
-       const unsigned long *crcs;
+       const s32 *crcs;
        enum {
                NOT_GPL_ONLY,
                GPL_ONLY,
@@ -539,7 +539,7 @@ struct symsearch {
  */
 const struct kernel_symbol *find_symbol(const char *name,
                                        struct module **owner,
-                                       const unsigned long **crc,
+                                       const s32 **crc,
                                        bool gplok,
                                        bool warn);
 
index 9bde9558b59672a866bd763039d326bde2af0f81..70ad0291d517b41cd1d4a3c2c7fc046f13e27980 100644 (file)
@@ -866,11 +866,15 @@ struct netdev_xdp {
  *     of useless work if you return NETDEV_TX_BUSY.
  *     Required; cannot be NULL.
  *
- * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
- *             netdev_features_t features);
- *     Adjusts the requested feature flags according to device-specific
- *     constraints, and returns the resulting flags. Must not modify
- *     the device state.
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ *                                        struct net_device *dev,
+ *                                        netdev_features_t features);
+ *     Called by core transmit path to determine if device is capable of
+ *     performing offload operations on a given packet. This is to give
+ *     the device an opportunity to implement any restrictions that cannot
+ *     be otherwise expressed by feature flags. The check is called with
+ *     the set of features that the stack has calculated and it returns
+ *     those the driver believes to be appropriate.
  *
  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
  *                         void *accel_priv, select_queue_fallback_t fallback);
@@ -1028,6 +1032,12 @@ struct netdev_xdp {
  *     Called to release previously enslaved netdev.
  *
  *      Feature/offload setting functions.
+ * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
+ *             netdev_features_t features);
+ *     Adjusts the requested feature flags according to device-specific
+ *     constraints, and returns the resulting flags. Must not modify
+ *     the device state.
+ *
  * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
  *     Called to update device configuration to new features. Passed
  *     feature set might be less than what was returned by ndo_fix_features().
@@ -1100,15 +1110,6 @@ struct netdev_xdp {
  *     Callback to use for xmit over the accelerated station. This
  *     is used in place of ndo_start_xmit on accelerated net
  *     devices.
- * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
- *                                        struct net_device *dev
- *                                        netdev_features_t features);
- *     Called by core transmit path to determine if device is capable of
- *     performing offload operations on a given packet. This is to give
- *     the device an opportunity to implement any restrictions that cannot
- *     be otherwise expressed by feature flags. The check is called with
- *     the set of features that the stack has calculated and it returns
- *     those the driver believes to be appropriate.
  * int (*ndo_set_tx_maxrate)(struct net_device *dev,
  *                          int queue_index, u32 maxrate);
  *     Called when a user wants to set a max-rate limitation of specific
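
The relocated kernel-doc spells out the contract for .ndo_features_check(): take the feature set the stack computed, clear whatever the hardware cannot honour for this particular skb, and return the rest. A hedged sketch of a typical implementation; the driver callback and its 128-byte header limit are hypothetical:

static netdev_features_t
my_features_check(struct sk_buff *skb, struct net_device *dev,
                  netdev_features_t features)
{
        /* e.g. hardware that cannot offload packets with deep headers */
        if (skb_transport_offset(skb) > 128)
                features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

        return features;
}
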
index 6a7fc50510999eb330982c3d7b4452cb373063e2..13394ac83c66a70946d5a269ca8014c15477f942 100644 (file)
@@ -31,17 +31,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
 
 #endif /* CONFIG_OF_IOMMU */
 
-static inline void of_iommu_set_ops(struct device_node *np,
-                                   const struct iommu_ops *ops)
-{
-       iommu_register_instance(&np->fwnode, ops);
-}
-
-static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
-{
-       return iommu_get_instance(&np->fwnode);
-}
-
 extern struct of_device_id __iommu_of_table;
 
 typedef int (*of_iommu_init_fn)(struct device_node *);
index 1c7eec09e5eba7ae8c0cc8e82172791f992bb361..3a481a49546ef1c85d8f88bf7668f8a2c8e8c0f1 100644 (file)
@@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
        unsigned long __percpu *percpu_count;
-       int ret;
+       bool ret;
 
        rcu_read_lock_sched();
 
@@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
        unsigned long __percpu *percpu_count;
-       int ret = false;
+       bool ret = false;
 
        rcu_read_lock_sched();
 
index 7afe991e900e25838c3e66f2ff185a5226f790ff..dbf0abba33b8da21be05abf6e719f69542da80fc 100644 (file)
@@ -776,6 +776,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
 {
        u32 hash;
 
+       /* @flowlabel may include more than a flow label, eg, the traffic class.
+        * Here we want only the flow label value.
+        */
+       flowlabel &= IPV6_FLOWLABEL_MASK;
+
        if (flowlabel ||
            net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
            (!autolabel &&
index f0db7788f887b9947e0e1aa78d48a9980e80bddf..3dc91a46e8b8da0b243a12a168bbf205e5a87916 100644 (file)
@@ -1384,6 +1384,8 @@ enum ethtool_link_mode_bit_indices {
        ETHTOOL_LINK_MODE_10000baseLR_Full_BIT  = 44,
        ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
        ETHTOOL_LINK_MODE_10000baseER_Full_BIT  = 46,
+       ETHTOOL_LINK_MODE_2500baseT_Full_BIT    = 47,
+       ETHTOOL_LINK_MODE_5000baseT_Full_BIT    = 48,
 
 
        /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
@@ -1393,7 +1395,7 @@ enum ethtool_link_mode_bit_indices {
         */
 
        __ETHTOOL_LINK_MODE_LAST
-         = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+         = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
 };
 
 #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name)     \
index e1a937348a3ed2bb3a76820e1ffa6a542f6aa9fb..4dd8bd232a1d4efd012fab8757887426ece9c0aa 100644 (file)
@@ -1987,6 +1987,10 @@ config MODVERSIONS
          make them incompatible with the kernel you are running.  If
          unsure, say N.
 
+config MODULE_REL_CRCS
+       bool
+       depends on MODVERSIONS
+
 config MODULE_SRCVERSION_ALL
        bool "Source checksum for all modules"
        help
index 2ee9ec3051b20774b118a57e4609f30e87bf82be..688dd02af9857e6fe739e0b4258060820acb41e3 100644 (file)
@@ -5221,6 +5221,11 @@ err_free_css:
        return ERR_PTR(err);
 }
 
+/*
+ * The returned cgroup is fully initialized including its control mask, but
+ * it isn't associated with its kernfs_node and doesn't have the control
+ * mask applied.
+ */
 static struct cgroup *cgroup_create(struct cgroup *parent)
 {
        struct cgroup_root *root = parent->root;
@@ -5288,11 +5293,6 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 
        cgroup_propagate_control(cgrp);
 
-       /* @cgrp doesn't have dir yet so the following will only create csses */
-       ret = cgroup_apply_control_enable(cgrp);
-       if (ret)
-               goto out_destroy;
-
        return cgrp;
 
 out_cancel_ref:
@@ -5300,9 +5300,6 @@ out_cancel_ref:
 out_free_cgrp:
        kfree(cgrp);
        return ERR_PTR(ret);
-out_destroy:
-       cgroup_destroy_locked(cgrp);
-       return ERR_PTR(ret);
 }
 
 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
index 110b38a58493ee4ba4c19763d2678dae8815e1af..e5aaa806702de888b63a82bdcb72766f7a317563 100644 (file)
@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-
        lockdep_assert_held(&ctx->lock);
 
        WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
 {
        struct perf_event *group_leader = event->group_leader, *pos;
 
+       lockdep_assert_held(&event->ctx->lock);
+
        /*
         * We can have double attach due to group movement in perf_event_open.
         */
@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
        struct perf_event *sibling, *tmp;
        struct list_head *list = NULL;
 
+       lockdep_assert_held(&event->ctx->lock);
+
        /*
         * We can have double detach due to exit/hot-unplug + close.
         */
@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
  */
 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 {
-       lockdep_assert_held(&event->ctx->mutex);
+       struct perf_event_context *ctx = event->ctx;
+
+       lockdep_assert_held(&ctx->mutex);
 
        event_function_call(event, __perf_remove_from_context, (void *)flags);
+
+       /*
+        * The above event_function_call() can NO-OP when it hits
+        * TASK_TOMBSTONE. In that case we must already have been detached
+        * from the context (by perf_event_exit_event()) but the grouping
+        * might still be intact.
+        */
+       WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
+       if ((flags & DETACH_GROUP) &&
+           (event->attach_state & PERF_ATTACH_GROUP)) {
+               /*
+                * Since in that case we cannot possibly be scheduled, simply
+                * detach now.
+                */
+               raw_spin_lock_irq(&ctx->lock);
+               perf_group_detach(event);
+               raw_spin_unlock_irq(&ctx->lock);
+       }
 }
 
 /*
@@ -6609,6 +6632,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
        char *buf = NULL;
        char *name;
 
+       if (vma->vm_flags & VM_READ)
+               prot |= PROT_READ;
+       if (vma->vm_flags & VM_WRITE)
+               prot |= PROT_WRITE;
+       if (vma->vm_flags & VM_EXEC)
+               prot |= PROT_EXEC;
+
+       if (vma->vm_flags & VM_MAYSHARE)
+               flags = MAP_SHARED;
+       else
+               flags = MAP_PRIVATE;
+
+       if (vma->vm_flags & VM_DENYWRITE)
+               flags |= MAP_DENYWRITE;
+       if (vma->vm_flags & VM_MAYEXEC)
+               flags |= MAP_EXECUTABLE;
+       if (vma->vm_flags & VM_LOCKED)
+               flags |= MAP_LOCKED;
+       if (vma->vm_flags & VM_HUGETLB)
+               flags |= MAP_HUGETLB;
+
        if (file) {
                struct inode *inode;
                dev_t dev;
@@ -6635,27 +6679,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
                maj = MAJOR(dev);
                min = MINOR(dev);
 
-               if (vma->vm_flags & VM_READ)
-                       prot |= PROT_READ;
-               if (vma->vm_flags & VM_WRITE)
-                       prot |= PROT_WRITE;
-               if (vma->vm_flags & VM_EXEC)
-                       prot |= PROT_EXEC;
-
-               if (vma->vm_flags & VM_MAYSHARE)
-                       flags = MAP_SHARED;
-               else
-                       flags = MAP_PRIVATE;
-
-               if (vma->vm_flags & VM_DENYWRITE)
-                       flags |= MAP_DENYWRITE;
-               if (vma->vm_flags & VM_MAYEXEC)
-                       flags |= MAP_EXECUTABLE;
-               if (vma->vm_flags & VM_LOCKED)
-                       flags |= MAP_LOCKED;
-               if (vma->vm_flags & VM_HUGETLB)
-                       flags |= MAP_HUGETLB;
-
                goto got_name;
        } else {
                if (vma->vm_ops && vma->vm_ops->name) {
index 80c4f931218710eab830f2e83711007b06bf147c..31805f237396bdfb6f4d72a906c0dcb957b6ceaf 100644 (file)
@@ -1371,6 +1371,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
 
+static void __irq_domain_activate_irq(struct irq_data *irq_data)
+{
+       if (irq_data && irq_data->domain) {
+               struct irq_domain *domain = irq_data->domain;
+
+               if (irq_data->parent_data)
+                       __irq_domain_activate_irq(irq_data->parent_data);
+               if (domain->ops->activate)
+                       domain->ops->activate(domain, irq_data);
+       }
+}
+
+static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
+{
+       if (irq_data && irq_data->domain) {
+               struct irq_domain *domain = irq_data->domain;
+
+               if (domain->ops->deactivate)
+                       domain->ops->deactivate(domain, irq_data);
+               if (irq_data->parent_data)
+                       __irq_domain_deactivate_irq(irq_data->parent_data);
+       }
+}
+
 /**
  * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
  *                          interrupt
@@ -1381,13 +1405,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
  */
 void irq_domain_activate_irq(struct irq_data *irq_data)
 {
-       if (irq_data && irq_data->domain) {
-               struct irq_domain *domain = irq_data->domain;
-
-               if (irq_data->parent_data)
-                       irq_domain_activate_irq(irq_data->parent_data);
-               if (domain->ops->activate)
-                       domain->ops->activate(domain, irq_data);
+       if (!irqd_is_activated(irq_data)) {
+               __irq_domain_activate_irq(irq_data);
+               irqd_set_activated(irq_data);
        }
 }
 
@@ -1401,13 +1421,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data)
  */
 void irq_domain_deactivate_irq(struct irq_data *irq_data)
 {
-       if (irq_data && irq_data->domain) {
-               struct irq_domain *domain = irq_data->domain;
-
-               if (domain->ops->deactivate)
-                       domain->ops->deactivate(domain, irq_data);
-               if (irq_data->parent_data)
-                       irq_domain_deactivate_irq(irq_data->parent_data);
+       if (irqd_is_activated(irq_data)) {
+               __irq_domain_deactivate_irq(irq_data);
+               irqd_clr_activated(irq_data);
        }
 }
 
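With the IRQD_ACTIVATED guard in place, activation and deactivation become idempotent at the top level, while the recursion through parent domains moves into the new __-prefixed helpers. In caller terms:

        irq_domain_activate_irq(irq_data);      /* walks the hierarchy, sets IRQD_ACTIVATED */
        irq_domain_activate_irq(irq_data);      /* flag already set: no-op                  */

        irq_domain_deactivate_irq(irq_data);    /* tears down, clears the flag              */
        irq_domain_deactivate_irq(irq_data);    /* already clear: no-op                     */
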
index 38d4270925d4d13619d725052aa3f9844f23bc96..3d8f126208e3ae04eeff3fd1b1e00044c0e3d0d2 100644 (file)
@@ -389,16 +389,16 @@ extern const struct kernel_symbol __start___ksymtab_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_gpl[];
 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
-extern const unsigned long __start___kcrctab[];
-extern const unsigned long __start___kcrctab_gpl[];
-extern const unsigned long __start___kcrctab_gpl_future[];
+extern const s32 __start___kcrctab[];
+extern const s32 __start___kcrctab_gpl[];
+extern const s32 __start___kcrctab_gpl_future[];
 #ifdef CONFIG_UNUSED_SYMBOLS
 extern const struct kernel_symbol __start___ksymtab_unused[];
 extern const struct kernel_symbol __stop___ksymtab_unused[];
 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
-extern const unsigned long __start___kcrctab_unused[];
-extern const unsigned long __start___kcrctab_unused_gpl[];
+extern const s32 __start___kcrctab_unused[];
+extern const s32 __start___kcrctab_unused_gpl[];
 #endif
 
 #ifndef CONFIG_MODVERSIONS
@@ -497,7 +497,7 @@ struct find_symbol_arg {
 
        /* Output */
        struct module *owner;
-       const unsigned long *crc;
+       const s32 *crc;
        const struct kernel_symbol *sym;
 };
 
@@ -563,7 +563,7 @@ static bool find_symbol_in_section(const struct symsearch *syms,
  * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
 const struct kernel_symbol *find_symbol(const char *name,
                                        struct module **owner,
-                                       const unsigned long **crc,
+                                       const s32 **crc,
                                        bool gplok,
                                        bool warn)
 {
@@ -1249,23 +1249,17 @@ static int try_to_force_load(struct module *mod, const char *reason)
 }
 
 #ifdef CONFIG_MODVERSIONS
-/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
-static unsigned long maybe_relocated(unsigned long crc,
-                                    const struct module *crc_owner)
+
+static u32 resolve_rel_crc(const s32 *crc)
 {
-#ifdef ARCH_RELOCATES_KCRCTAB
-       if (crc_owner == NULL)
-               return crc - (unsigned long)reloc_start;
-#endif
-       return crc;
+       return *(u32 *)((void *)crc + *crc);
 }
 
 static int check_version(Elf_Shdr *sechdrs,
                         unsigned int versindex,
                         const char *symname,
                         struct module *mod,
-                        const unsigned long *crc,
-                        const struct module *crc_owner)
+                        const s32 *crc)
 {
        unsigned int i, num_versions;
        struct modversion_info *versions;
@@ -1283,13 +1277,19 @@ static int check_version(Elf_Shdr *sechdrs,
                / sizeof(struct modversion_info);
 
        for (i = 0; i < num_versions; i++) {
+               u32 crcval;
+
                if (strcmp(versions[i].name, symname) != 0)
                        continue;
 
-               if (versions[i].crc == maybe_relocated(*crc, crc_owner))
+               if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
+                       crcval = resolve_rel_crc(crc);
+               else
+                       crcval = *crc;
+               if (versions[i].crc == crcval)
                        return 1;
-               pr_debug("Found checksum %lX vs module %lX\n",
-                      maybe_relocated(*crc, crc_owner), versions[i].crc);
+               pr_debug("Found checksum %X vs module %lX\n",
+                        crcval, versions[i].crc);
                goto bad_version;
        }
 
@@ -1307,7 +1307,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
                                          unsigned int versindex,
                                          struct module *mod)
 {
-       const unsigned long *crc;
+       const s32 *crc;
 
        /*
         * Since this should be found in kernel (which can't be removed), no
@@ -1321,8 +1321,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
        }
        preempt_enable();
        return check_version(sechdrs, versindex,
-                            VMLINUX_SYMBOL_STR(module_layout), mod, crc,
-                            NULL);
+                            VMLINUX_SYMBOL_STR(module_layout), mod, crc);
 }
 
 /* First part is kernel version, which we ignore if module has crcs. */
@@ -1340,8 +1339,7 @@ static inline int check_version(Elf_Shdr *sechdrs,
                                unsigned int versindex,
                                const char *symname,
                                struct module *mod,
-                               const unsigned long *crc,
-                               const struct module *crc_owner)
+                               const s32 *crc)
 {
        return 1;
 }
@@ -1368,7 +1366,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
 {
        struct module *owner;
        const struct kernel_symbol *sym;
-       const unsigned long *crc;
+       const s32 *crc;
        int err;
 
        /*
@@ -1383,8 +1381,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
        if (!sym)
                goto unlock;
 
-       if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
-                          owner)) {
+       if (!check_version(info->sechdrs, info->index.vers, name, mod, crc)) {
                sym = ERR_PTR(-EINVAL);
                goto getname;
        }
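
resolve_rel_crc() is the loader-side counterpart of the ".long __crc_<sym> - ." emission earlier in this commit: the s32 table entry holds the distance from itself to the CRC, so adding the entry's own address back recovers the value. A minimal userspace model of the scheme (names and layout illustrative only):

#include <stdint.h>
#include <stdio.h>

static struct {
        int32_t  rel;   /* the table slot "const s32 *crcs" points at */
        uint32_t crc;   /* stands in for the value of __crc_<sym>     */
} entry = { .rel = sizeof(int32_t), .crc = 0x1badc0de };

static uint32_t resolve_rel_crc(const int32_t *crc)
{
        return *(const uint32_t *)((const char *)crc + *crc);
}

int main(void)
{
        printf("0x%x\n", resolve_rel_crc(&entry.rel)); /* 0x1badc0de */
        return 0;
}
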
index 775569ec50d03fbf0ca4f755f79d106a63acaf1a..af344a1bf0d0e6270e5e659ffa160753e148cdd9 100644 (file)
@@ -266,7 +266,7 @@ out:
 static struct cpumask save_cpumask;
 static bool disable_migrate;
 
-static void move_to_next_cpu(void)
+static void move_to_next_cpu(bool initmask)
 {
        static struct cpumask *current_mask;
        int next_cpu;
@@ -275,7 +275,7 @@ static void move_to_next_cpu(void)
                return;
 
        /* Just pick the first CPU on first iteration */
-       if (!current_mask) {
+       if (initmask) {
                current_mask = &save_cpumask;
                get_online_cpus();
                cpumask_and(current_mask, cpu_online_mask, tracing_buffer_mask);
@@ -330,10 +330,12 @@ static void move_to_next_cpu(void)
 static int kthread_fn(void *data)
 {
        u64 interval;
+       bool initmask = true;
 
        while (!kthread_should_stop()) {
 
-               move_to_next_cpu();
+               move_to_next_cpu(initmask);
+               initmask = false;
 
                local_irq_disable();
                get_sample();
index a133ecd741e437d938ca377cf5e4358bcb586aa4..7ad9e53ad174bc6cdb0f99490f87e419e9381b02 100644 (file)
@@ -1372,7 +1372,7 @@ kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
        return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
-static struct __init trace_event_file *
+static __init struct trace_event_file *
 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
 {
        struct trace_event_file *file;
index b772a33ef640ab0d6770bb3d249a6fe6f16eeebc..3f9afded581be1a013bda4db2c0ec3a721323364 100644 (file)
@@ -1791,6 +1791,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
 
                cond_resched();
 find_page:
+               if (fatal_signal_pending(current)) {
+                       error = -EINTR;
+                       goto out;
+               }
+
                page = find_get_page(mapping, index);
                if (!page) {
                        page_cache_sync_readahead(mapping,
index b82b3e2151574ae1abbb2cd57624114727975410..f479365530b6484bbd5cae42064521fed362961e 100644 (file)
@@ -13,6 +13,7 @@
  *
  */
 
+#include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/printk.h>
@@ -300,6 +301,8 @@ void kasan_report(unsigned long addr, size_t size,
        if (likely(!kasan_report_enabled()))
                return;
 
+       disable_trace_on_warning();
+
        info.access_addr = (void *)addr;
        info.access_size = size;
        info.is_write = is_write;
index ca2723d4733849eab01b323a50e6b1bc609e308c..b8c11e063ff0746316fb792f4fe1dde0094cb828 100644 (file)
@@ -1483,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 }
 
 /*
- * Confirm all pages in a range [start, end) is belongs to the same zone.
+ * Confirm all pages in a range [start, end) belong to the same zone.
+ * When true, return its valid [start, end).
  */
-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+                        unsigned long *valid_start, unsigned long *valid_end)
 {
        unsigned long pfn, sec_end_pfn;
+       unsigned long start, end;
        struct zone *zone = NULL;
        struct page *page;
        int i;
-       for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
+       for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
             pfn < end_pfn;
-            pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
+            pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
                /* Make sure the memory section is present first */
                if (!present_section_nr(pfn_to_section_nr(pfn)))
                        continue;
@@ -1509,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
                        page = pfn_to_page(pfn + i);
                        if (zone && page_zone(page) != zone)
                                return 0;
+                       if (!zone)
+                               start = pfn + i;
                        zone = page_zone(page);
+                       end = pfn + MAX_ORDER_NR_PAGES;
                }
        }
-       return 1;
+
+       if (zone) {
+               *valid_start = start;
+               *valid_end = end;
+               return 1;
+       } else {
+               return 0;
+       }
 }
 
 /*
@@ -1839,6 +1852,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
        long offlined_pages;
        int ret, drain, retry_max, node;
        unsigned long flags;
+       unsigned long valid_start, valid_end;
        struct zone *zone;
        struct memory_notify arg;
 
@@ -1849,10 +1863,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
                return -EINVAL;
        /* This makes hotplug much easier...and readable.
           we assume this for now. .*/
-       if (!test_pages_in_a_zone(start_pfn, end_pfn))
+       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                return -EINVAL;
 
-       zone = page_zone(pfn_to_page(start_pfn));
+       zone = page_zone(pfn_to_page(valid_start));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;
 
index bb53285a1d99666676e85697330f1a052f7c3cc0..3a7587a0314dc73fb4929a824a74f9b8948ea502 100644 (file)
@@ -415,6 +415,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
 {
        LIST_HEAD(list), *pos, *next;
+       LIST_HEAD(to_remove);
        struct inode *inode;
        struct shmem_inode_info *info;
        struct page *page;
@@ -441,9 +442,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                /* Check if there's anything to gain */
                if (round_up(inode->i_size, PAGE_SIZE) ==
                                round_up(inode->i_size, HPAGE_PMD_SIZE)) {
-                       list_del_init(&info->shrinklist);
+                       list_move(&info->shrinklist, &to_remove);
                        removed++;
-                       iput(inode);
                        goto next;
                }
 
@@ -454,6 +454,13 @@ next:
        }
        spin_unlock(&sbinfo->shrinklist_lock);
 
+       list_for_each_safe(pos, next, &to_remove) {
+               info = list_entry(pos, struct shmem_inode_info, shrinklist);
+               inode = &info->vfs_inode;
+               list_del_init(&info->shrinklist);
+               iput(inode);
+       }
+
        list_for_each_safe(pos, next, &list) {
                int ret;
 
index 067a0d62f31841d16913d36e38531a277ab59b01..cabf09e0128beebdee2b8a959361fe6464fb3469 100644 (file)
@@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry;
 
 /* Enable/disable zswap (disabled by default) */
 static bool zswap_enabled;
-module_param_named(enabled, zswap_enabled, bool, 0644);
+static int zswap_enabled_param_set(const char *,
+                                  const struct kernel_param *);
+static struct kernel_param_ops zswap_enabled_param_ops = {
+       .set =          zswap_enabled_param_set,
+       .get =          param_get_bool,
+};
+module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
 
 /* Crypto compressor to use */
 #define ZSWAP_COMPRESSOR_DEFAULT "lzo"
@@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 /* used by param callback function */
 static bool zswap_init_started;
 
+/* fatal error during init */
+static bool zswap_init_failed;
+
 /*********************************
 * helpers and fwd declarations
 **********************************/
@@ -624,6 +633,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
        char *s = strstrip((char *)val);
        int ret;
 
+       if (zswap_init_failed) {
+               pr_err("can't set param, initialization failed\n");
+               return -ENODEV;
+       }
+
        /* no change required */
        if (!strcmp(s, *(char **)kp->arg))
                return 0;
@@ -703,6 +717,17 @@ static int zswap_zpool_param_set(const char *val,
        return __zswap_param_set(val, kp, NULL, zswap_compressor);
 }
 
+static int zswap_enabled_param_set(const char *val,
+                                  const struct kernel_param *kp)
+{
+       if (zswap_init_failed) {
+               pr_err("can't enable, initialization failed\n");
+               return -ENODEV;
+       }
+
+       return param_set_bool(val, kp);
+}
+
 /*********************************
 * writeback code
 **********************************/
@@ -1201,6 +1226,9 @@ hp_fail:
 dstmem_fail:
        zswap_entry_cache_destroy();
 cache_fail:
+       /* if built-in, we aren't unloaded on failure; don't allow use */
+       zswap_init_failed = true;
+       zswap_enabled = false;
        return -ENOMEM;
 }
 /* must be late so crypto has time to come up */
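
The same guarded-setter idea generalizes to any boolean module parameter that must be refused once init has failed. A sketch of the shape used here, with hypothetical names:

static bool my_flag;
static bool my_init_failed;

static int my_flag_set(const char *val, const struct kernel_param *kp)
{
        if (my_init_failed)
                return -ENODEV;         /* refuse: module is unusable */
        return param_set_bool(val, kp);
}

static const struct kernel_param_ops my_flag_ops = {
        .set = my_flag_set,
        .get = param_get_bool,
};
module_param_cb(my_flag, &my_flag_ops, &my_flag, 0644);
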
index 1108079d934f8383a599d7997b08100fca0465e9..5488e4a6ccd062e6f6e7e2b841dde5ef055d4337 100644 (file)
@@ -445,6 +445,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  * @func: callback function on filter match
  * @data: returned parameter for callback function
  * @ident: string for calling module identification
+ * @sk: socket pointer (might be NULL)
  *
  * Description:
  *  Invokes the callback function with the received sk_buff and the given
@@ -468,7 +469,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
  */
 int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
                    void (*func)(struct sk_buff *, void *), void *data,
-                   char *ident)
+                   char *ident, struct sock *sk)
 {
        struct receiver *r;
        struct hlist_head *rl;
@@ -496,6 +497,7 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
                r->func    = func;
                r->data    = data;
                r->ident   = ident;
+               r->sk      = sk;
 
                hlist_add_head_rcu(&r->list, rl);
                d->entries++;
@@ -520,8 +522,11 @@ EXPORT_SYMBOL(can_rx_register);
 static void can_rx_delete_receiver(struct rcu_head *rp)
 {
        struct receiver *r = container_of(rp, struct receiver, rcu);
+       struct sock *sk = r->sk;
 
        kmem_cache_free(rcv_cache, r);
+       if (sk)
+               sock_put(sk);
 }
 
 /**
@@ -596,8 +601,11 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
        spin_unlock(&can_rcvlists_lock);
 
        /* schedule the receiver item for deletion */
-       if (r)
+       if (r) {
+               if (r->sk)
+                       sock_hold(r->sk);
                call_rcu(&r->rcu, can_rx_delete_receiver);
+       }
 }
 EXPORT_SYMBOL(can_rx_unregister);
 
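This is the standard way to keep a refcounted object alive across an RCU grace period: take the reference before call_rcu(), drop it in the callback after the dying object's last use. In generic form, with hypothetical names:

        /* unregister path: pin the backing object before scheduling the free */
        if (obj->backing)
                get_ref(obj->backing);
        call_rcu(&obj->rcu, free_obj_rcu);

        /* RCU callback: free the wrapper, then drop the pin */
        static void free_obj_rcu(struct rcu_head *rp)
        {
                struct wrapper *obj = container_of(rp, struct wrapper, rcu);
                struct backing *b = obj->backing;

                kmem_cache_free(obj_cache, obj);
                if (b)
                        put_ref(b);
        }
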
index fca0fe9fc45a497cdf3da82d5414e846e7cc61b7..b86f5129e8385fe84ef671bb914e8e05c2977ca0 100644 (file)
 
 struct receiver {
        struct hlist_node list;
-       struct rcu_head rcu;
        canid_t can_id;
        canid_t mask;
        unsigned long matches;
        void (*func)(struct sk_buff *, void *);
        void *data;
        char *ident;
+       struct sock *sk;
+       struct rcu_head rcu;
 };
 
 #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
index 21ac75390e3d64f795faad074b515d34ce0bbfa3..95d13b233c65161cf3595a8b0036207f5c2892e3 100644 (file)
@@ -734,14 +734,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops,
 
 static void bcm_remove_op(struct bcm_op *op)
 {
-       hrtimer_cancel(&op->timer);
-       hrtimer_cancel(&op->thrtimer);
-
-       if (op->tsklet.func)
-               tasklet_kill(&op->tsklet);
+       if (op->tsklet.func) {
+               while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
+                      test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
+                      hrtimer_active(&op->timer)) {
+                       hrtimer_cancel(&op->timer);
+                       tasklet_kill(&op->tsklet);
+               }
+       }
 
-       if (op->thrtsklet.func)
-               tasklet_kill(&op->thrtsklet);
+       if (op->thrtsklet.func) {
+               while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
+                      test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
+                      hrtimer_active(&op->thrtimer)) {
+                       hrtimer_cancel(&op->thrtimer);
+                       tasklet_kill(&op->thrtsklet);
+               }
+       }
 
        if ((op->frames) && (op->frames != &op->sframe))
                kfree(op->frames);
@@ -1216,7 +1225,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                                err = can_rx_register(dev, op->can_id,
                                                      REGMASK(op->can_id),
                                                      bcm_rx_handler, op,
-                                                     "bcm");
+                                                     "bcm", sk);
 
                                op->rx_reg_dev = dev;
                                dev_put(dev);
@@ -1225,7 +1234,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
                } else
                        err = can_rx_register(NULL, op->can_id,
                                              REGMASK(op->can_id),
-                                             bcm_rx_handler, op, "bcm");
+                                             bcm_rx_handler, op, "bcm", sk);
                if (err) {
                        /* this bcm rx op is broken -> remove it */
                        list_del(&op->list);
index a54ab0c821048ab2034bf32cef3c1f35e0dc82a5..7056a1a2bb70098e691ce557f05e5bc1f27cb42f 100644 (file)
@@ -442,7 +442,7 @@ static inline int cgw_register_filter(struct cgw_job *gwj)
 {
        return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
                               gwj->ccgw.filter.can_mask, can_can_gw_rcv,
-                              gwj, "gw");
+                              gwj, "gw", NULL);
 }
 
 static inline void cgw_unregister_filter(struct cgw_job *gwj)
index b075f028d7e23958e9433a4b19f4475ad930b547..6dc546a06673ff41fc121c546ebd0567bb0da05f 100644 (file)
@@ -190,7 +190,7 @@ static int raw_enable_filters(struct net_device *dev, struct sock *sk,
        for (i = 0; i < count; i++) {
                err = can_rx_register(dev, filter[i].can_id,
                                      filter[i].can_mask,
-                                     raw_rcv, sk, "raw");
+                                     raw_rcv, sk, "raw", sk);
                if (err) {
                        /* clean up successfully registered filters */
                        while (--i >= 0)
@@ -211,7 +211,7 @@ static int raw_enable_errfilter(struct net_device *dev, struct sock *sk,
 
        if (err_mask)
                err = can_rx_register(dev, 0, err_mask | CAN_ERR_FLAG,
-                                     raw_rcv, sk, "raw");
+                                     raw_rcv, sk, "raw", sk);
 
        return err;
 }
index 1d5331a1b1dc2677316148ba9852c191e7ed0fd4..8ce50dc3ab8cac821b8a2c3e0d31f0aa42f5c9d5 100644 (file)
@@ -2518,9 +2518,11 @@ u32 __tcp_select_window(struct sock *sk)
        int full_space = min_t(int, tp->window_clamp, allowed_space);
        int window;
 
-       if (mss > full_space)
+       if (unlikely(mss > full_space)) {
                mss = full_space;
-
+               if (mss <= 0)
+                       return 0;
+       }
        if (free_space < (full_space >> 1)) {
                icsk->icsk_ack.quick = 0;
 
index 2c0df09e90365ad38b5362f77c6e33a24fc062f0..b6a94ff0bbd0d5f42c78b7bcd6ea9fbe5ac9a595 100644 (file)
@@ -1344,7 +1344,7 @@ emsgsize:
         */
        if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
            headersize == sizeof(struct ipv6hdr) &&
-           length < mtu - headersize &&
+           length <= mtu - headersize &&
            !(flags & MSG_MORE) &&
            rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
                csummode = CHECKSUM_PARTIAL;
index ff8ee06491c335d209e86bb15f2526ab1915df3b..75fac933c209a0f430279dea10b5dd2426a7ed31 100644 (file)
@@ -441,7 +441,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
                                if (i + sizeof(*tel) > optlen)
                                        break;
 
-                               tel = (struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i;
+                               tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
                                /* return index of option if found and valid */
                                if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
                                    tel->length == 1)
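
The bug here is pure C precedence: a cast binds tighter than '+', so the old expression advanced the pointer in units of sizeof(struct ipv6_tlv_tnl_enc_lim) rather than bytes. A self-contained illustration:

#include <stdio.h>

struct tlv { unsigned char type, length; };

int main(void)
{
        unsigned char buf[16];
        struct tlv *wrong = (struct tlv *)buf + 3;   /* buf + 3 * sizeof(struct tlv) */
        struct tlv *right = (struct tlv *)(buf + 3); /* buf + 3 bytes */

        printf("wrong=%p right=%p\n", (void *)wrong, (void *)right);
        return 0;
}
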
index 970db7a41684aa2a494b97663f91ca932308de05..5752789acc135250c312199c2d6e5e15d05fdea0 100644 (file)
@@ -568,9 +568,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
                               &mask->icmp.type,
                               TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
                               sizeof(key->icmp.type));
-               fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
+               fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
                               &mask->icmp.code,
-                              TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
+                              TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
                               sizeof(key->icmp.code));
        }
 
index f935429bd5ef1fcbe6a4272876b76e2ebb574c4b..b12bc2abea931a7defd1e23eb86a20fe09e76388 100644 (file)
 #include <net/sch_generic.h>
 #include <net/pkt_cls.h>
 
-struct cls_mall_filter {
+struct cls_mall_head {
        struct tcf_exts exts;
        struct tcf_result res;
        u32 handle;
-       struct rcu_head rcu;
        u32 flags;
-};
-
-struct cls_mall_head {
-       struct cls_mall_filter *filter;
        struct rcu_head rcu;
 };
 
@@ -33,38 +28,29 @@ static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                         struct tcf_result *res)
 {
        struct cls_mall_head *head = rcu_dereference_bh(tp->root);
-       struct cls_mall_filter *f = head->filter;
 
-       if (tc_skip_sw(f->flags))
+       if (tc_skip_sw(head->flags))
                return -1;
 
-       return tcf_exts_exec(skb, &f->exts, res);
+       return tcf_exts_exec(skb, &head->exts, res);
 }
 
 static int mall_init(struct tcf_proto *tp)
 {
-       struct cls_mall_head *head;
-
-       head = kzalloc(sizeof(*head), GFP_KERNEL);
-       if (!head)
-               return -ENOBUFS;
-
-       rcu_assign_pointer(tp->root, head);
-
        return 0;
 }
 
-static void mall_destroy_filter(struct rcu_head *head)
+static void mall_destroy_rcu(struct rcu_head *rcu)
 {
-       struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);
+       struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
+                                                 rcu);
 
-       tcf_exts_destroy(&f->exts);
-
-       kfree(f);
+       tcf_exts_destroy(&head->exts);
+       kfree(head);
 }
 
 static int mall_replace_hw_filter(struct tcf_proto *tp,
-                                 struct cls_mall_filter *f,
+                                 struct cls_mall_head *head,
                                  unsigned long cookie)
 {
        struct net_device *dev = tp->q->dev_queue->dev;
@@ -74,7 +60,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
        offload.type = TC_SETUP_MATCHALL;
        offload.cls_mall = &mall_offload;
        offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
-       offload.cls_mall->exts = &f->exts;
+       offload.cls_mall->exts = &head->exts;
        offload.cls_mall->cookie = cookie;
 
        return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
@@ -82,7 +68,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 }
 
 static void mall_destroy_hw_filter(struct tcf_proto *tp,
-                                  struct cls_mall_filter *f,
+                                  struct cls_mall_head *head,
                                   unsigned long cookie)
 {
        struct net_device *dev = tp->q->dev_queue->dev;
@@ -103,29 +89,20 @@ static bool mall_destroy(struct tcf_proto *tp, bool force)
 {
        struct cls_mall_head *head = rtnl_dereference(tp->root);
        struct net_device *dev = tp->q->dev_queue->dev;
-       struct cls_mall_filter *f = head->filter;
 
-       if (!force && f)
-               return false;
+       if (!head)
+               return true;
 
-       if (f) {
-               if (tc_should_offload(dev, tp, f->flags))
-                       mall_destroy_hw_filter(tp, f, (unsigned long) f);
+       if (tc_should_offload(dev, tp, head->flags))
+               mall_destroy_hw_filter(tp, head, (unsigned long) head);
 
-               call_rcu(&f->rcu, mall_destroy_filter);
-       }
-       kfree_rcu(head, rcu);
+       call_rcu(&head->rcu, mall_destroy_rcu);
        return true;
 }
 
 static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
 {
-       struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *f = head->filter;
-
-       if (f && f->handle == handle)
-               return (unsigned long) f;
-       return 0;
+       return 0UL;
 }
 
 static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
@@ -134,7 +111,7 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
 };
 
 static int mall_set_parms(struct net *net, struct tcf_proto *tp,
-                         struct cls_mall_filter *f,
+                         struct cls_mall_head *head,
                          unsigned long base, struct nlattr **tb,
                          struct nlattr *est, bool ovr)
 {
@@ -147,11 +124,11 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
                return err;
 
        if (tb[TCA_MATCHALL_CLASSID]) {
-               f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
-               tcf_bind_filter(tp, &f->res, base);
+               head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
+               tcf_bind_filter(tp, &head->res, base);
        }
 
-       tcf_exts_change(tp, &f->exts, &e);
+       tcf_exts_change(tp, &head->exts, &e);
 
        return 0;
 }
@@ -162,21 +139,17 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
                       unsigned long *arg, bool ovr)
 {
        struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
        struct net_device *dev = tp->q->dev_queue->dev;
-       struct cls_mall_filter *f;
        struct nlattr *tb[TCA_MATCHALL_MAX + 1];
+       struct cls_mall_head *new;
        u32 flags = 0;
        int err;
 
        if (!tca[TCA_OPTIONS])
                return -EINVAL;
 
-       if (head->filter)
-               return -EBUSY;
-
-       if (fold)
-               return -EINVAL;
+       if (head)
+               return -EEXIST;
 
        err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
                               tca[TCA_OPTIONS], mall_policy);
@@ -189,23 +162,23 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
                        return -EINVAL;
        }
 
-       f = kzalloc(sizeof(*f), GFP_KERNEL);
-       if (!f)
+       new = kzalloc(sizeof(*new), GFP_KERNEL);
+       if (!new)
                return -ENOBUFS;
 
-       tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);
+       tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
 
        if (!handle)
                handle = 1;
-       f->handle = handle;
-       f->flags = flags;
+       new->handle = handle;
+       new->flags = flags;
 
-       err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
+       err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
        if (err)
                goto errout;
 
        if (tc_should_offload(dev, tp, flags)) {
-               err = mall_replace_hw_filter(tp, f, (unsigned long) f);
+               err = mall_replace_hw_filter(tp, new, (unsigned long) new);
                if (err) {
                        if (tc_skip_sw(flags))
                                goto errout;
@@ -214,39 +187,29 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
                }
        }
 
-       *arg = (unsigned long) f;
-       rcu_assign_pointer(head->filter, f);
-
+       *arg = (unsigned long) head;
+       rcu_assign_pointer(tp->root, new);
+       if (head)
+               call_rcu(&head->rcu, mall_destroy_rcu);
        return 0;
 
 errout:
-       kfree(f);
+       kfree(new);
        return err;
 }
 
 static int mall_delete(struct tcf_proto *tp, unsigned long arg)
 {
-       struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
-       struct net_device *dev = tp->q->dev_queue->dev;
-
-       if (tc_should_offload(dev, tp, f->flags))
-               mall_destroy_hw_filter(tp, f, (unsigned long) f);
-
-       RCU_INIT_POINTER(head->filter, NULL);
-       tcf_unbind_filter(tp, &f->res);
-       call_rcu(&f->rcu, mall_destroy_filter);
-       return 0;
+       return -EOPNOTSUPP;
 }
 
 static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
        struct cls_mall_head *head = rtnl_dereference(tp->root);
-       struct cls_mall_filter *f = head->filter;
 
        if (arg->count < arg->skip)
                goto skip;
-       if (arg->fn(tp, (unsigned long) f, arg) < 0)
+       if (arg->fn(tp, (unsigned long) head, arg) < 0)
                arg->stop = 1;
 skip:
        arg->count++;
@@ -255,28 +218,28 @@ skip:
 static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                     struct sk_buff *skb, struct tcmsg *t)
 {
-       struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
+       struct cls_mall_head *head = (struct cls_mall_head *) fh;
        struct nlattr *nest;
 
-       if (!f)
+       if (!head)
                return skb->len;
 
-       t->tcm_handle = f->handle;
+       t->tcm_handle = head->handle;
 
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (!nest)
                goto nla_put_failure;
 
-       if (f->res.classid &&
-           nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
+       if (head->res.classid &&
+           nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
                goto nla_put_failure;
 
-       if (tcf_exts_dump(skb, &f->exts))
+       if (tcf_exts_dump(skb, &head->exts))
                goto nla_put_failure;
 
        nla_nest_end(skb, nest);
 
-       if (tcf_exts_dump_stats(skb, &f->exts) < 0)
+       if (tcf_exts_dump_stats(skb, &head->exts) < 0)
                goto nla_put_failure;
 
        return skb->len;
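
The rewrite above folds the single matchall filter into the head object itself: mall_change() now builds a fully initialized head, publishes it with rcu_assign_pointer(), and frees a replaced head only after a grace period via call_rcu() (configuration over an existing head is refused with -EEXIST). A single-threaded userspace sketch of that publish-then-reclaim pattern, using C11 atomics as a stand-in for the RCU primitives (real readers would additionally need rcu_read_lock()/rcu_dereference()):

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct head {
	unsigned int handle;
	unsigned int flags;
};

static _Atomic(struct head *) root;

static int replace(unsigned int handle, unsigned int flags)
{
	struct head *new = calloc(1, sizeof(*new));	/* kzalloc() analogue */
	struct head *old;

	if (!new)
		return -1;
	new->handle = handle;				/* initialize fully... */
	new->flags = flags;

	old = atomic_load_explicit(&root, memory_order_relaxed);
	/* ...then publish: a release store, the analogue of rcu_assign_pointer() */
	atomic_store_explicit(&root, new, memory_order_release);
	free(old);	/* the kernel defers this: call_rcu(&old->rcu, mall_destroy_rcu) */
	return 0;
}

int main(void)
{
	replace(1, 0);
	printf("handle=%u\n", atomic_load(&root)->handle);
	return 0;
}

The point of the pattern is that a concurrent reader always sees either the complete old object or the complete new one, never a half-initialized head.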
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index dc6fb79a361f1ca3ab9869fc02ba05c1a533ad9b..25d9a9cf7b66b7f4e501d38d91f6a1908830972e 100644 (file)
@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
        if (!oa->data)
                return -ENOMEM;
 
-       creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
+       creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
        if (!creds) {
                kfree(oa->data);
                return -ENOMEM;
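
kzalloc() is kmalloc() plus zeroing, so every svc_cred field the decoder never explicitly sets is now zero rather than stale heap contents. A userspace stand-in for the two allocators (struct and field names here are hypothetical, not the sunrpc layout):

#include <stdio.h>
#include <stdlib.h>

static void *kmalloc_sim(size_t n) { return malloc(n); }	/* indeterminate bytes */
static void *kzalloc_sim(size_t n) { return calloc(1, n); }	/* zeroed bytes */

struct cred_sim { long uid; void *group_info; };		/* toy layout */

int main(void)
{
	struct cred_sim *a = kmalloc_sim(sizeof(*a));	/* reading a->uid is undefined */
	struct cred_sim *b = kzalloc_sim(sizeof(*b));	/* b->uid is guaranteed 0 */

	printf("b->uid = %ld\n", b ? b->uid : -1L);
	free(a);
	free(b);
	return 0;
}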
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index eadcd4d359d91fc7823a75263c44c520e05f900b..d883116ebaa452d9c2f6c657de53121ebd9d50bd 100644 (file)
@@ -164,6 +164,7 @@ cmd_gensymtypes_c =                                                         \
     $(CPP) -D__GENKSYMS__ $(c_flags) $< |                                   \
     $(GENKSYMS) $(if $(1), -T $(2))                                         \
      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))             \
+     $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS))                             \
      $(if $(KBUILD_PRESERVE),-p)                                            \
      -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
 
@@ -337,6 +338,7 @@ cmd_gensymtypes_S =                                                         \
     $(CPP) -D__GENKSYMS__ $(c_flags) -xc - |                                \
     $(GENKSYMS) $(if $(1), -T $(2))                                         \
      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))             \
+     $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS))                             \
      $(if $(KBUILD_PRESERVE),-p)                                            \
      -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
 
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index 06121ce524a76006072459d352d727b4aebdf203..c9235d8340f1e7ba33eacfaee94642c18f5fd211 100644 (file)
@@ -44,7 +44,7 @@ char *cur_filename, *source_file;
 int in_source_file;
 
 static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types,
-          flag_preserve, flag_warnings;
+          flag_preserve, flag_warnings, flag_rel_crcs;
 static const char *mod_prefix = "";
 
 static int errors;
@@ -693,7 +693,10 @@ void export_symbol(const char *name)
                        fputs(">\n", debugfile);
 
                /* Used as a linker script. */
-               printf("%s__crc_%s = 0x%08lx ;\n", mod_prefix, name, crc);
+               printf(!flag_rel_crcs ? "%s__crc_%s = 0x%08lx;\n" :
+                      "SECTIONS { .rodata : ALIGN(4) { "
+                      "%s__crc_%s = .; LONG(0x%08lx); } }\n",
+                      mod_prefix, name, crc);
        }
 }
 
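With -R, genksyms stops emitting the CRC as an absolute linker-script symbol and instead places the 32-bit value in .rodata, leaving __crc_<sym> as a section-relative marker pointing at it. A standalone program that reuses the printf above to show both emitted forms for a hypothetical symbol foo with CRC 0x12345678:

#include <stdio.h>

int main(void)
{
	const char *mod_prefix = "";
	unsigned long crc = 0x12345678;
	int flag_rel_crcs;

	for (flag_rel_crcs = 0; flag_rel_crcs <= 1; flag_rel_crcs++)
		printf(!flag_rel_crcs ? "%s__crc_%s = 0x%08lx;\n" :
		       "SECTIONS { .rodata : ALIGN(4) { "
		       "%s__crc_%s = .; LONG(0x%08lx); } }\n",
		       mod_prefix, "foo", crc);
	return 0;
}

Output:

__crc_foo = 0x12345678;
SECTIONS { .rodata : ALIGN(4) { __crc_foo = .; LONG(0x12345678); } }
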
@@ -730,7 +733,7 @@ void error_with_pos(const char *fmt, ...)
 
 static void genksyms_usage(void)
 {
-       fputs("Usage:\n" "genksyms [-adDTwqhV] > /path/to/.tmp_obj.ver\n" "\n"
+       fputs("Usage:\n" "genksyms [-adDTwqhVR] > /path/to/.tmp_obj.ver\n" "\n"
 #ifdef __GNU_LIBRARY__
              "  -s, --symbol-prefix   Select symbol prefix\n"
              "  -d, --debug           Increment the debug level (repeatable)\n"
@@ -742,6 +745,7 @@ static void genksyms_usage(void)
              "  -q, --quiet           Disable warnings (default)\n"
              "  -h, --help            Print this message\n"
              "  -V, --version         Print the release version\n"
+             "  -R, --relative-crc    Emit section relative symbol CRCs\n"
 #else                          /* __GNU_LIBRARY__ */
              "  -s                    Select symbol prefix\n"
              "  -d                    Increment the debug level (repeatable)\n"
@@ -753,6 +757,7 @@ static void genksyms_usage(void)
              "  -q                    Disable warnings (default)\n"
              "  -h                    Print this message\n"
              "  -V                    Print the release version\n"
+             "  -R                    Emit section relative symbol CRCs\n"
 #endif                         /* __GNU_LIBRARY__ */
              , stderr);
 }
@@ -774,13 +779,14 @@ int main(int argc, char **argv)
                {"preserve", 0, 0, 'p'},
                {"version", 0, 0, 'V'},
                {"help", 0, 0, 'h'},
+               {"relative-crc", 0, 0, 'R'},
                {0, 0, 0, 0}
        };
 
-       while ((o = getopt_long(argc, argv, "s:dwqVDr:T:ph",
+       while ((o = getopt_long(argc, argv, "s:dwqVDr:T:phR",
                                &long_opts[0], NULL)) != EOF)
 #else                          /* __GNU_LIBRARY__ */
-       while ((o = getopt(argc, argv, "s:dwqVDr:T:ph")) != EOF)
+       while ((o = getopt(argc, argv, "s:dwqVDr:T:phR")) != EOF)
 #endif                         /* __GNU_LIBRARY__ */
                switch (o) {
                case 's':
@@ -823,6 +829,9 @@ int main(int argc, char **argv)
                case 'h':
                        genksyms_usage();
                        return 0;
+               case 'R':
+                       flag_rel_crcs = 1;
+                       break;
                default:
                        genksyms_usage();
                        return 1;
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 299b92ca1ae092d82e9a0e3bffaec45988ebcc37..5d554419170b7d54ec82ddb1d31093d3eab0aa7d 100644 (file)
@@ -219,6 +219,10 @@ static int symbol_valid(struct sym_entry *s)
                "_SDA2_BASE_",          /* ppc */
                NULL };
 
+       static char *special_prefixes[] = {
+               "__crc_",               /* modversions */
+               NULL };
+
        static char *special_suffixes[] = {
                "_veneer",              /* arm */
                "_from_arm",            /* arm */
@@ -259,6 +263,14 @@ static int symbol_valid(struct sym_entry *s)
                if (strcmp(sym_name, special_symbols[i]) == 0)
                        return 0;
 
+       for (i = 0; special_prefixes[i]; i++) {
+               int l = strlen(special_prefixes[i]);
+
+               if (l <= strlen(sym_name) &&
+                   strncmp(sym_name, special_prefixes[i], l) == 0)
+                       return 0;
+       }
+
        for (i = 0; special_suffixes[i]; i++) {
                int l = strlen(sym_name) - strlen(special_suffixes[i]);
 
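The new loop drops any symbol whose name starts with a listed prefix; with relative CRCs the __crc_* entries become real section-relative symbols that would otherwise pollute the kallsyms table. A standalone reduction of that filter:

#include <stdio.h>
#include <string.h>

static const char *special_prefixes[] = { "__crc_", NULL };

static int symbol_valid(const char *sym_name)
{
	size_t i;

	for (i = 0; special_prefixes[i]; i++) {
		size_t l = strlen(special_prefixes[i]);

		if (l <= strlen(sym_name) &&
		    strncmp(sym_name, special_prefixes[i], l) == 0)
			return 0;
	}
	return 1;
}

int main(void)
{
	printf("%d %d\n", symbol_valid("__crc_printk"),	/* 0: filtered out */
	       symbol_valid("printk"));			/* 1: kept */
	return 0;
}
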
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 29c89a6bad3d3ac34e539189e83769f1c63ddab3..4dedd0d3d3a7fda58af2bc6150b9f6b6195d2cac 100644 (file)
@@ -621,6 +621,16 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
        if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
                is_crc = true;
                crc = (unsigned int) sym->st_value;
+               if (sym->st_shndx != SHN_UNDEF && sym->st_shndx != SHN_ABS) {
+                       unsigned int *crcp;
+
+                       /* symbol points to the CRC in the ELF object */
+                       crcp = (void *)info->hdr + sym->st_value +
+                              info->sechdrs[sym->st_shndx].sh_offset -
+                              (info->hdr->e_type != ET_REL ?
+                               info->sechdrs[sym->st_shndx].sh_addr : 0);
+                       crc = *crcp;
+               }
                sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
                                export);
        }
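
When the CRC symbol is defined in a section rather than absolute, st_value is no longer the CRC itself but the location of the CRC, so modpost translates it into a file offset and loads the 32-bit value from there. For ET_REL objects st_value is already a section offset; for linked objects it is an address, hence the sh_addr subtraction. Toy arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long st_value  = 0xffff0040;	/* hypothetical symbol address */
	unsigned long sh_addr   = 0xffff0000;	/* section's load address */
	unsigned long sh_offset = 0x2000;	/* section's position in the file */
	int et_rel = 0;				/* pretend a linked (non-ET_REL) object */

	unsigned long file_off = st_value + sh_offset -
				 (!et_rel ? sh_addr : 0);

	printf("CRC read from file offset 0x%lx\n", file_off);	/* 0x2040 */
	return 0;
}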
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 5e0dea2cdc01f65849f49f10392293a21b3a468d..039636ffb6c8a3edb6c14fd9a2b3a854ab82f982 100644 (file)
@@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
                *type = INSN_RETURN;
                break;
 
-       case 0xc5: /* iret */
        case 0xca: /* retf */
        case 0xcb: /* retf */
+       case 0xcf: /* iret */
                *type = INSN_CONTEXT_SWITCH;
                break;
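
The opcode table had IRET under the wrong byte: 0xc5 is LDS (reused as a VEX prefix in 64-bit mode), while IRET is 0xcf. A toy classifier mirroring the corrected single-byte table:

#include <stdio.h>

static const char *classify(unsigned char op)
{
	switch (op) {
	case 0xc2:		/* ret imm16 */
	case 0xc3:		/* ret */
		return "return";
	case 0xca:		/* retf imm16 */
	case 0xcb:		/* retf */
	case 0xcf:		/* iret */
		return "context switch";
	default:
		return "other";
	}
}

int main(void)
{
	printf("0xc5 -> %s\n", classify(0xc5));	/* other: not a context switch */
	printf("0xcf -> %s\n", classify(0xcf));	/* context switch */
	return 0;
}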