git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Tue, 7 Feb 2017 21:29:30 +0000 (16:29 -0500)
committer David S. Miller <davem@davemloft.net>
Tue, 7 Feb 2017 21:29:30 +0000 (16:29 -0500)
The conflict was an interaction between a bug fix in the
netvsc driver in 'net' and an optimization of the RX path
in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
176 files changed:
Documentation/media/uapi/cec/cec-func-close.rst
Documentation/media/uapi/cec/cec-func-ioctl.rst
Documentation/media/uapi/cec/cec-func-open.rst
Documentation/media/uapi/cec/cec-func-poll.rst
Documentation/media/uapi/cec/cec-intro.rst
Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
Documentation/media/uapi/cec/cec-ioc-dqevent.rst
Documentation/media/uapi/cec/cec-ioc-g-mode.rst
Documentation/media/uapi/cec/cec-ioc-receive.rst
MAINTAINERS
Makefile
arch/arc/kernel/unaligned.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/cpu_has_feature.h
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/module.h
arch/powerpc/include/asm/stackprotector.h [deleted file]
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/mm/pgtable-radix.c
arch/x86/crypto/aesni-intel_glue.c
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore.c
arch/x86/include/asm/microcode.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/hpet.c
arch/x86/kvm/x86.c
arch/x86/platform/efi/efi_64.c
arch/xtensa/kernel/setup.c
crypto/algif_aead.c
drivers/acpi/nfit/core.c
drivers/base/firmware_class.c
drivers/base/memory.c
drivers/base/power/runtime.c
drivers/cpufreq/brcmstb-avs-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/ccp/ccp-dev-v5.c
drivers/crypto/ccp/ccp-dev.h
drivers/crypto/ccp/ccp-dmaengine.c
drivers/crypto/chelsio/chcr_algo.c
drivers/crypto/chelsio/chcr_core.c
drivers/crypto/chelsio/chcr_crypto.h
drivers/crypto/qat/qat_c62x/adf_drv.c
drivers/crypto/qat/qat_common/adf_accel_devices.h
drivers/crypto/qat/qat_common/qat_hal.c
drivers/firmware/efi/libstub/fdt.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/nouveau/dispnv04/hw.c
drivers/gpu/drm/nouveau/nouveau_fence.h
drivers/gpu/drm/nouveau/nouveau_led.h
drivers/gpu/drm/nouveau/nouveau_usif.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nv84_fence.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/hv/ring_buffer.c
drivers/iio/adc/palmas_gpadc.c
drivers/iio/health/afe4403.c
drivers/iio/health/afe4404.c
drivers/iio/health/max30100.c
drivers/iio/humidity/dht11.c
drivers/md/dm-crypt.c
drivers/md/dm-mpath.c
drivers/md/dm-rq.c
drivers/media/cec/cec-adap.c
drivers/mmc/host/sdhci.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/hyperv/netvsc.c
drivers/net/macvtap.c
drivers/net/phy/phy_device.c
drivers/net/tun.c
drivers/net/usb/catc.c
drivers/net/usb/pegasus.c
drivers/net/usb/rtl8150.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pfn_devs.c
drivers/pci/pcie/aspm.c
drivers/regulator/axp20x-regulator.c
drivers/regulator/fixed.c
drivers/regulator/twl6030-regulator.c
drivers/scsi/virtio_scsi.c
drivers/staging/greybus/timesync_platform.c
drivers/usb/core/quirks.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/qcserial.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vhost/vhost.c
drivers/virtio/virtio_ring.c
fs/dax.c
fs/iomap.c
fs/nfsd/nfs4layouts.c
fs/nfsd/nfs4state.c
fs/nfsd/state.h
fs/nfsd/vfs.c
fs/proc/page.c
include/asm-generic/export.h
include/drm/drmP.h
include/drm/drm_connector.h
include/linux/cpuhotplug.h
include/linux/export.h
include/linux/hyperv.h
include/linux/irq.h
include/linux/log2.h
include/linux/memory_hotplug.h
include/linux/module.h
include/net/cipso_ipv4.h
include/net/sock.h
include/uapi/linux/seg6.h
init/Kconfig
kernel/events/core.c
kernel/irq/irqdomain.c
kernel/module.c
kernel/trace/trace_kprobe.c
mm/filemap.c
mm/kasan/report.c
mm/memory_hotplug.c
mm/shmem.c
mm/zswap.c
net/core/datagram.c
net/core/dev.c
net/core/ethtool.c
net/ipv4/cipso_ipv4.c
net/ipv4/ip_sockglue.c
net/ipv4/tcp.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/exthdrs.c
net/ipv6/ip6_gre.c
net/ipv6/seg6_hmac.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/fils_aead.c
net/mac80211/mesh.c
net/sctp/socket.c
net/sunrpc/auth_gss/gss_rpc_xdr.c
net/wireless/nl80211.c
scripts/Makefile.build
scripts/genksyms/genksyms.c
scripts/kallsyms.c
scripts/mod/modpost.c
tools/objtool/arch/x86/decode.c

index 8267c31b317dc9ac8e86007c56de62c0f79ebcba..895d9c2d1c04302f9452350b7fffe3678af1ee61 100644 (file)
@@ -33,11 +33,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 Closes the cec device. Resources associated with the file descriptor are
 freed. The device configuration remain unchanged.
 
index 9e8dbb118d6a3d518391caa1a086f05a545b57c2..7dcfd178fb243c33f6ba23bb331236496018809c 100644 (file)
@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 The :c:func:`ioctl()` function manipulates cec device parameters. The
 argument ``fd`` must be an open file descriptor.
 
index af3f5b5c24c646dfe61dea9b8afed05a9cb84228..0304388cd15976a7ff00eb46ca9bc9ef5f0a3c94 100644 (file)
@@ -46,11 +46,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To open a cec device applications call :c:func:`open()` with the
 desired device name. The function has no side effects; the device
 configuration remain unchanged.
index cfb73e6027a55734747255df676619e2e55441f7..6a863cfda6e05172be7e60e930fbd0ff885b3ce4 100644 (file)
@@ -39,11 +39,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 With the :c:func:`poll()` function applications can wait for CEC
 events.
 
index 4a19ea5323a97d6ac577b62caf015e6cf2653f63..07ee2b8f89d6a320d66f1a9ed3d20977aa60c34b 100644 (file)
@@ -3,11 +3,6 @@
 Introduction
 ============
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 HDMI connectors provide a single pin for use by the Consumer Electronics
 Control protocol. This protocol allows different devices connected by an
 HDMI cable to communicate. The protocol for CEC version 1.4 is defined
@@ -31,3 +26,15 @@ control just the CEC pin.
 Drivers that support CEC will create a CEC device node (/dev/cecX) to
 give userspace access to the CEC adapter. The
 :ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do.
+
+In order to check the support and test it, it is suggested to download
+the `v4l-utils <https://git.linuxtv.org/v4l-utils.git/>`_ package. It
+provides three tools to handle CEC:
+
+- cec-ctl: the Swiss army knife of CEC. Allows you to configure, transmit
+  and monitor CEC messages.
+
+- cec-compliance: does a CEC compliance test of a remote CEC device to
+  determine how compliant the CEC implementation is.
+
+- cec-follower: emulates a CEC follower.
index 2b0ddb14b280e6fddcd8aae1bc67f60560e8a9e4..a0e961f11017c74d9d8ca36fa57124910cd6dbdd 100644 (file)
@@ -29,11 +29,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query
 device information, applications call the ioctl with a pointer to a
 struct :c:type:`cec_caps`. The driver fills the structure and
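
For reference, a minimal userspace sketch of the CEC_ADAP_G_CAPS query described in the hunk above (not part of this patch; it assumes a /dev/cec0 node and the uapi <linux/cec.h> header):

/*
 * Illustrative sketch only: open a CEC adapter and let the driver
 * fill in struct cec_caps, as the documentation above describes.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cec.h>

int main(void)
{
	struct cec_caps caps;
	int fd = open("/dev/cec0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/cec0");
		return 1;
	}
	memset(&caps, 0, sizeof(caps));
	if (ioctl(fd, CEC_ADAP_G_CAPS, &caps)) {
		perror("CEC_ADAP_G_CAPS");
		close(fd);
		return 1;
	}
	/* The driver reports its name and a capability bitmask. */
	printf("driver %s (%s), caps 0x%08x, max log addrs %u\n",
	       caps.driver, caps.name, caps.capabilities,
	       caps.available_log_addrs);
	close(fd);
	return 0;
}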
index b878637e91b3d8dc78aebf0ccb94823ef6b96ac5..09f09bbe28d4ffb1d5b3291221c4b678e4e38c6e 100644 (file)
@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current CEC logical addresses, applications call
 :ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a
 struct :c:type:`cec_log_addrs` where the driver stores the logical addresses.
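
A short sketch of the CEC_ADAP_G_LOG_ADDRS call described above (not from this patch; it assumes an fd opened on /dev/cecX as in the previous example):

/* Illustrative only: dump the logical addresses the adapter claimed. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

static void show_log_addrs(int fd)
{
	struct cec_log_addrs laddrs = {};
	unsigned int i;

	if (ioctl(fd, CEC_ADAP_G_LOG_ADDRS, &laddrs))
		return;
	printf("claimed %u logical address(es), mask 0x%04x\n",
	       laddrs.num_log_addrs, laddrs.log_addr_mask);
	for (i = 0; i < laddrs.num_log_addrs; i++)
		printf("  log_addr[%u] = %u\n", i, laddrs.log_addr[i]);
}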
index 3357deb43c85afb7c577a2fe34127f210903ae73..a3cdc75cec3e3c0754027869cee43ad8ca019260 100644 (file)
@@ -35,11 +35,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To query the current physical address applications call
 :ref:`ioctl CEC_ADAP_G_PHYS_ADDR <CEC_ADAP_G_PHYS_ADDR>` with a pointer to a __u16 where the
 driver stores the physical address.
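
A sketch of the CEC_ADAP_G_PHYS_ADDR call described above (not from this patch; the fd is assumed to be an open /dev/cecX handle):

/* Illustrative only: print the physical address in a.b.c.d form. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

static void show_phys_addr(int fd)
{
	__u16 pa = 0;

	if (ioctl(fd, CEC_ADAP_G_PHYS_ADDR, &pa))
		return;
	/* Each nibble is one level of the HDMI topology. */
	printf("physical address: %x.%x.%x.%x\n",
	       pa >> 12, (pa >> 8) & 0xf, (pa >> 4) & 0xf, pa & 0xf);
}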
index e256c6605de7f7b7242a1ad4fbd303d5b5309369..6e589a1fae1704c0eea188c662537d37ea6142b5 100644 (file)
@@ -30,11 +30,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 CEC devices can send asynchronous events. These can be retrieved by
 calling :c:func:`CEC_DQEVENT`. If the file descriptor is in
 non-blocking mode and no event is pending, then it will return -1 and
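
A sketch of draining one event with CEC_DQEVENT as described above (not from this patch; fd is an open /dev/cecX handle):

/* Illustrative only: fetch and decode a single pending CEC event. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

static void show_event(int fd)
{
	struct cec_event ev;

	/* With O_NONBLOCK this returns -1 when no event is pending. */
	if (ioctl(fd, CEC_DQEVENT, &ev))
		return;
	if (ev.event == CEC_EVENT_STATE_CHANGE)
		printf("state change: phys addr 0x%04x, log addr mask 0x%04x\n",
		       ev.state_change.phys_addr,
		       ev.state_change.log_addr_mask);
	else if (ev.event == CEC_EVENT_LOST_MSGS)
		printf("lost %u message(s)\n", ev.lost_msgs.lost_msgs);
}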
index 4f5818b9d27724163781713a14241dca38dca0cb..e4ded9df0a84a3be52a4f2a5fb09359e1d4fe285 100644 (file)
@@ -31,11 +31,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent
 applications from stepping on each others toes it must be possible to
 obtain exclusive access to the CEC adapter. This ioctl sets the
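
A sketch of requesting exclusive access as described above (not from this patch; CEC_S_MODE and the mode constant come from <linux/cec.h>, and the EBUSY behaviour noted in the comment is an assumption based on the exclusivity rule described in the text):

/* Illustrative only: ask for exclusive initiator access on this fd. */
#include <sys/ioctl.h>
#include <linux/cec.h>

static int become_exclusive_initiator(int fd)
{
	__u32 mode = CEC_MODE_EXCL_INITIATOR;

	/* Expected to fail (e.g. EBUSY) if another fd holds exclusivity. */
	return ioctl(fd, CEC_S_MODE, &mode);
}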
index bdf015b1d1dc0a677b89cdc05b7637687370a496..dc2adb391c0a2d2183e3e00bc90af520e727c6dd 100644 (file)
@@ -34,11 +34,6 @@ Arguments
 Description
 ===========
 
-.. note::
-
-   This documents the proposed CEC API. This API is not yet finalized
-   and is currently only available as a staging kernel module.
-
 To receive a CEC message the application has to fill in the
 ``timeout`` field of struct :c:type:`cec_msg` and pass it to
 :ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
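
A sketch of the receive flow described above (not from this patch): set the ``timeout`` field of struct cec_msg, then call CEC_RECEIVE on an open /dev/cecX fd.

/* Illustrative only: wait up to one second for a single CEC message. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

static void receive_one(int fd)
{
	struct cec_msg msg = {};

	msg.timeout = 1000;	/* milliseconds */
	if (ioctl(fd, CEC_RECEIVE, &msg))
		return;		/* timeout or another error */
	/* msg[0] carries initiator/destination nibbles, msg[1] the opcode. */
	printf("from %u to %u, opcode 0x%02x, len %u\n",
	       msg.msg[0] >> 4, msg.msg[0] & 0xf,
	       msg.len > 1 ? msg.msg[1] : 0, msg.len);
}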
index a9368bba9b37de1079b9394f9446c9a145d4ab9b..93a41ef97f18cb0ac7fcc4a713211c087f3cfc10 100644 (file)
@@ -13101,7 +13101,7 @@ F:      drivers/input/serio/userio.c
 F:     include/uapi/linux/userio.h
 
 VIRTIO CONSOLE DRIVER
-M:     Amit Shah <amit.shah@redhat.com>
+M:     Amit Shah <amit@kernel.org>
 L:     virtualization@lists.linux-foundation.org
 S:     Maintained
 F:     drivers/char/virtio_console.c
index 96b27a888285c5258dc57f666b2876fcece1a1aa..8e223e081c9d3b28f1b19c9472c1970bc0c19234 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -797,7 +797,7 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
 KBUILD_ARFLAGS := $(call ar-option,D)
 
 # check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
        KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
        KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
index 91ebe382147f6d915a58f94027fa748ca8319f30..5f69c3bd59bba47babf92c97a9c7e5a0174c5648 100644 (file)
@@ -243,7 +243,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 
        /* clear any remanants of delay slot */
        if (delay_mode(regs)) {
-               regs->ret = regs->bta ~1U;
+               regs->ret = regs->bta & ~1U;
                regs->status32 &= ~STATUS_DE_MASK;
        } else {
                regs->ret += state.instr_len;
index a8ee573fe610bd5e2d8191b4dffb05e134a6d3c2..281f4f1fcd1f68ab2fbc613afa3f2597bd090550 100644 (file)
@@ -164,7 +164,6 @@ config PPC
        select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
        select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_KERNEL_GZIP
-       select HAVE_CC_STACKPROTECTOR
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
@@ -484,6 +483,7 @@ config RELOCATABLE
        bool "Build a relocatable kernel"
        depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
        select NONSTATIC_KERNEL
+       select MODULE_REL_CRCS if MODVERSIONS
        help
          This builds a kernel image that is capable of running at the
          location the kernel is loaded at. For ppc32, there is no any
index b312b152461b0539a22c5939ba7728a38f5d8d32..6e834caa37206a476792823463e81ac4c9617f9c 100644 (file)
@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
 {
        int i;
 
+#ifndef __clang__ /* clang can't cope with this */
        BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
        if (!static_key_initialized) {
index a34c764ca8dd83435faf75307e30e5149e55de4b..233a7e8cc8e32d6cf0ac904b3f02b2f340e3883b 100644 (file)
@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
 {
        int i;
 
+#ifndef __clang__ /* clang can't cope with this */
        BUILD_BUG_ON(!__builtin_constant_p(feature));
+#endif
 
 #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
        if (!static_key_initialized) {
index cc12c61ef315fc6ca5d43233bd54889cd19a01a0..53885512b8d31b12acec28dc3ba6688e57fb9615 100644 (file)
@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec
 }
 #endif
 
-#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
-#define ARCH_RELOCATES_KCRCTAB
-#define reloc_start PHYSICAL_START
-#endif
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
deleted file mode 100644 (file)
index 6720190..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * GCC stack protector support.
- *
- * Stack protector works by putting predefined pattern at the start of
- * the stack frame and verifying that it hasn't been overwritten when
- * returning from the function.  The pattern is called stack canary
- * and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on PPC.  This unfortunately means that on SMP
- * we cannot have a different canary value per task.
- */
-
-#ifndef _ASM_STACKPROTECTOR_H
-#define _ASM_STACKPROTECTOR_H
-
-#include <linux/random.h>
-#include <linux/version.h>
-#include <asm/reg.h>
-
-extern unsigned long __stack_chk_guard;
-
-/*
- * Initialize the stackprotector canary value.
- *
- * NOTE: this must only be called from functions that never return,
- * and it must always be inlined.
- */
-static __always_inline void boot_init_stack_canary(void)
-{
-       unsigned long canary;
-
-       /* Try to get a semi random initial value. */
-       get_random_bytes(&canary, sizeof(canary));
-       canary ^= mftb();
-       canary ^= LINUX_VERSION_CODE;
-
-       current->stack_canary = canary;
-       __stack_chk_guard = current->stack_canary;
-}
-
-#endif /* _ASM_STACKPROTECTOR_H */
index 23f8082d7bfad95f4c9fbb8201e3e58c3104928f..f4c2b52e58b36eb44bcb6be2a2428bc0458f7660 100644 (file)
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
 
-# -fstack-protector triggers protection checks in this code,
-# but it is being used too early to link to meaningful stack_chk logic.
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
-
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
index 0601e6a7297c64ea4b2129011d32ae42a662ac07..195a9fc8f81c8fc41fca8b05239948d4f28d54af 100644 (file)
@@ -91,9 +91,6 @@ int main(void)
        DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
 #endif
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-       DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
-#endif
        DEFINE(KSP, offsetof(struct thread_struct, ksp));
        DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE
index d88573bdd0907c6682cf03395bc296155cea9124..b94887165a101557c97fd6a53139ae7df67dbbd9 100644 (file)
@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
 static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
 {
        struct eeh_pe *pe = (struct eeh_pe *)data;
-       bool *clear_sw_state = flag;
+       bool clear_sw_state = *(bool *)flag;
        int i, rc = 1;
 
        for (i = 0; rc && i < 3; i++)
index 5742dbdbee4677924ebf0019b891e43879410131..3841d749a430069f4d4f2705c4199c08609b3757 100644 (file)
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION
        mtspr   SPRN_SPEFSCR,r0         /* restore SPEFSCR reg */
 END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 #endif /* CONFIG_SPE */
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-       lwz     r0,TSK_STACK_CANARY(r2)
-       lis     r4,__stack_chk_guard@ha
-       stw     r0,__stack_chk_guard@l(r4)
-#endif
+
        lwz     r0,_CCR(r1)
        mtcrf   0xFF,r0
        /* r3-r12 are destroyed -- Cort */
index bb1807184bad5da5f9b65ce69087184e67534c03..0b0f89685b679745895251b011d0db522d4558e1 100644 (file)
@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,
        for (end = (void *)vers + size; vers < end; vers++)
                if (vers->name[0] == '.') {
                        memmove(vers->name, vers->name+1, strlen(vers->name));
-#ifdef ARCH_RELOCATES_KCRCTAB
-                       /* The TOC symbol has no CRC computed. To avoid CRC
-                        * check failing, we must force it to the expected
-                        * value (see CRC check in module.c).
-                        */
-                       if (!strcmp(vers->name, "TOC."))
-                               vers->crc = -(unsigned long)reloc_start;
-#endif
                }
 }
 
index 04885cec24df1413f90121115cf6569e4aa444e6..5dd056df0baaec576431adb4cbe8ab370d8f44c1 100644 (file)
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 
-#ifdef CONFIG_CC_STACKPROTECTOR
-#include <linux/stackprotector.h>
-unsigned long __stack_chk_guard __read_mostly;
-EXPORT_SYMBOL(__stack_chk_guard);
-#endif
-
 /* Transactional Memory debug */
 #ifdef TM_DEBUG_SW
 #define TM_DEBUG(x...) printk(KERN_INFO x)
index ec47a939cbdd6dd81c6c05ed6707f28e12d9f0ea..ac83eb04a8b871293c53e7bd6ff4d439b89704a9 100644 (file)
@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)
 
        cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
 
+       if (!PHANDLE_VALID(cpu_pkg))
+               return;
+
        prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
        prom.cpu = be32_to_cpu(rval);
 
index cfa53ccc8bafc908e80532a4a64e44ad358dffc7..34f1a0dbc898ee4a28a6adcd41c0ce5d3fe61198 100644 (file)
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                if (!pmdp)
                        return -ENOMEM;
                if (map_page_size == PMD_SIZE) {
-                       ptep = (pte_t *)pudp;
+                       ptep = pmdp_ptep(pmdp);
                        goto set_the_pte;
                }
                ptep = pte_alloc_kernel(pmdp, ea);
@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                }
                pmdp = pmd_offset(pudp, ea);
                if (map_page_size == PMD_SIZE) {
-                       ptep = (pte_t *)pudp;
+                       ptep = pmdp_ptep(pmdp);
                        goto set_the_pte;
                }
                if (!pmd_present(*pmdp)) {
index 6ef688a1ef3e0f022032e5317662b9011c8f74c4..7ff1b0c86a8e5a630844c3ade200b592cbad413d 100644 (file)
@@ -1085,9 +1085,9 @@ static void aesni_free_simds(void)
                    aesni_simd_skciphers[i]; i++)
                simd_skcipher_free(aesni_simd_skciphers[i]);
 
-       for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
-                   aesni_simd_skciphers2[i].simd; i++)
-               simd_skcipher_free(aesni_simd_skciphers2[i].simd);
+       for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
+               if (aesni_simd_skciphers2[i].simd)
+                       simd_skcipher_free(aesni_simd_skciphers2[i].simd);
 }
 
 static int __init aesni_init(void)
@@ -1168,7 +1168,7 @@ static int __init aesni_init(void)
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
-                       goto unregister_simds;
+                       continue;
 
                aesni_simd_skciphers2[i].simd = simd;
        }
index 17c3564d087a48bc24e41417fe6f128b5d7b9f0d..22ef4f72cf320adf510545ce08fd342bc89839d2 100644 (file)
@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;
 
 static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
 {
-       return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+       unsigned int pkgid = topology_logical_package_id(cpu);
+
+       /*
+        * The unsigned check also catches the '-1' return value for non
+        * existent mappings in the topology map.
+        */
+       return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
 }
 
 static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)
 
        /* must be done before validate_group */
        pmu = cpu_to_rapl_pmu(event->cpu);
+       if (!pmu)
+               return -EINVAL;
        event->cpu = pmu->cpu;
        event->pmu_private = pmu;
        event->hw.event_base = msr;
@@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
        struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
        int target;
 
+       if (!pmu) {
+               pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+               if (!pmu)
+                       return -ENOMEM;
+
+               raw_spin_lock_init(&pmu->lock);
+               INIT_LIST_HEAD(&pmu->active_list);
+               pmu->pmu = &rapl_pmus->pmu;
+               pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+               rapl_hrtimer_init(pmu);
+
+               rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+       }
+
        /*
         * Check if there is an online cpu in the package which collects rapl
         * events already.
@@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
        return 0;
 }
 
-static int rapl_cpu_prepare(unsigned int cpu)
-{
-       struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
-       if (pmu)
-               return 0;
-
-       pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
-       if (!pmu)
-               return -ENOMEM;
-
-       raw_spin_lock_init(&pmu->lock);
-       INIT_LIST_HEAD(&pmu->active_list);
-       pmu->pmu = &rapl_pmus->pmu;
-       pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
-       pmu->cpu = -1;
-       rapl_hrtimer_init(pmu);
-       rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
-       return 0;
-}
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
        u64 msr_rapl_power_unit_bits;
@@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
        /*
         * Install callbacks. Core will call them for each online cpu.
         */
-
-       ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
-                               rapl_cpu_prepare, NULL);
-       if (ret)
-               goto out;
-
        ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
                                "perf/x86/rapl:online",
                                rapl_cpu_online, rapl_cpu_offline);
        if (ret)
-               goto out1;
+               goto out;
 
        ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
        if (ret)
-               goto out2;
+               goto out1;
 
        rapl_advertise();
        return 0;
 
-out2:
-       cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out1:
-       cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
 out:
        pr_warn("Initialization failed (%d), disabled\n", ret);
        cleanup_rapl_pmus();
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
 static void __exit intel_rapl_exit(void)
 {
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
-       cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
        perf_pmu_unregister(&rapl_pmus->pmu);
        cleanup_rapl_pmus();
 }
index 8c4ccdc3a3f3607ee0af4f4006029df3000e0839..1ab45976474d52597700b291e24b0b0a235dff03 100644 (file)
@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
 
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-       return pmu->boxes[topology_logical_package_id(cpu)];
+       unsigned int pkgid = topology_logical_package_id(cpu);
+
+       /*
+        * The unsigned check also catches the '-1' return value for non
+        * existent mappings in the topology map.
+        */
+       return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 }
 
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
@@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
        pmu->registered = false;
 }
 
-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
-       struct intel_uncore_pmu *pmu = type->pmus;
-       struct intel_uncore_box *box;
-       int i, pkg;
-
-       if (pmu) {
-               pkg = topology_physical_package_id(cpu);
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[pkg];
-                       if (box)
-                               uncore_box_exit(box);
-               }
-       }
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
-       struct intel_uncore_type **types;
-
-       for (types = uncore_msr_uncores; *types; types++)
-               __uncore_exit_boxes(*types++, smp_processor_id());
-}
-
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
        int pkg;
@@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
        }
 }
 
-static int uncore_cpu_dying(unsigned int cpu)
-{
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
-       struct intel_uncore_pmu *pmu;
-       struct intel_uncore_box *box;
-       int i, pkg;
-
-       pkg = topology_logical_package_id(cpu);
-       for (; *types; types++) {
-               type = *types;
-               pmu = type->pmus;
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[pkg];
-                       if (box && atomic_dec_return(&box->refcnt) == 0)
-                               uncore_box_exit(box);
-               }
-       }
-       return 0;
-}
-
-static int first_init;
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
-       struct intel_uncore_pmu *pmu;
-       struct intel_uncore_box *box;
-       int i, pkg, ncpus = 1;
-
-       if (first_init) {
-               /*
-                * On init we get the number of online cpus in the package
-                * and set refcount for all of them.
-                */
-               ncpus = cpumask_weight(topology_core_cpumask(cpu));
-       }
-
-       pkg = topology_logical_package_id(cpu);
-       for (; *types; types++) {
-               type = *types;
-               pmu = type->pmus;
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       box = pmu->boxes[pkg];
-                       if (!box)
-                               continue;
-                       /* The first cpu on a package activates the box */
-                       if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
-                               uncore_box_init(box);
-               }
-       }
-
-       return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
-       struct intel_uncore_type *type, **types = uncore_msr_uncores;
-       struct intel_uncore_pmu *pmu;
-       struct intel_uncore_box *box;
-       int i, pkg;
-
-       pkg = topology_logical_package_id(cpu);
-       for (; *types; types++) {
-               type = *types;
-               pmu = type->pmus;
-               for (i = 0; i < type->num_boxes; i++, pmu++) {
-                       if (pmu->boxes[pkg])
-                               continue;
-                       /* First cpu of a package allocates the box */
-                       box = uncore_alloc_box(type, cpu_to_node(cpu));
-                       if (!box)
-                               return -ENOMEM;
-                       box->pmu = pmu;
-                       box->pkgid = pkg;
-                       pmu->boxes[pkg] = box;
-               }
-       }
-       return 0;
-}
-
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
                                   int new_cpu)
 {
@@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 
 static int uncore_event_cpu_offline(unsigned int cpu)
 {
-       int target;
+       struct intel_uncore_type *type, **types = uncore_msr_uncores;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, pkg, target;
 
        /* Check if exiting cpu is used for collecting uncore events */
        if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-               return 0;
-
+               goto unref;
        /* Find a new cpu to collect uncore events */
        target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 
@@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 
        uncore_change_context(uncore_msr_uncores, cpu, target);
        uncore_change_context(uncore_pci_uncores, cpu, target);
+
+unref:
+       /* Clear the references */
+       pkg = topology_logical_package_id(cpu);
+       for (; *types; types++) {
+               type = *types;
+               pmu = type->pmus;
+               for (i = 0; i < type->num_boxes; i++, pmu++) {
+                       box = pmu->boxes[pkg];
+                       if (box && atomic_dec_return(&box->refcnt) == 0)
+                               uncore_box_exit(box);
+               }
+       }
        return 0;
 }
 
+static int allocate_boxes(struct intel_uncore_type **types,
+                        unsigned int pkg, unsigned int cpu)
+{
+       struct intel_uncore_box *box, *tmp;
+       struct intel_uncore_type *type;
+       struct intel_uncore_pmu *pmu;
+       LIST_HEAD(allocated);
+       int i;
+
+       /* Try to allocate all required boxes */
+       for (; *types; types++) {
+               type = *types;
+               pmu = type->pmus;
+               for (i = 0; i < type->num_boxes; i++, pmu++) {
+                       if (pmu->boxes[pkg])
+                               continue;
+                       box = uncore_alloc_box(type, cpu_to_node(cpu));
+                       if (!box)
+                               goto cleanup;
+                       box->pmu = pmu;
+                       box->pkgid = pkg;
+                       list_add(&box->active_list, &allocated);
+               }
+       }
+       /* Install them in the pmus */
+       list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+               list_del_init(&box->active_list);
+               box->pmu->boxes[pkg] = box;
+       }
+       return 0;
+
+cleanup:
+       list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+               list_del_init(&box->active_list);
+               kfree(box);
+       }
+       return -ENOMEM;
+}
+
 static int uncore_event_cpu_online(unsigned int cpu)
 {
-       int target;
+       struct intel_uncore_type *type, **types = uncore_msr_uncores;
+       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_box *box;
+       int i, ret, pkg, target;
+
+       pkg = topology_logical_package_id(cpu);
+       ret = allocate_boxes(types, pkg, cpu);
+       if (ret)
+               return ret;
+
+       for (; *types; types++) {
+               type = *types;
+               pmu = type->pmus;
+               for (i = 0; i < type->num_boxes; i++, pmu++) {
+                       box = pmu->boxes[pkg];
+                       if (!box && atomic_inc_return(&box->refcnt) == 1)
+                               uncore_box_init(box);
+               }
+       }
 
        /*
         * Check if there is an online cpu in the package
@@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
        if (cret && pret)
                return -ENODEV;
 
-       /*
-        * Install callbacks. Core will call them for each online cpu.
-        *
-        * The first online cpu of each package allocates and takes
-        * the refcounts for all other online cpus in that package.
-        * If msrs are not enabled no allocation is required and
-        * uncore_cpu_prepare() is not called for each online cpu.
-        */
-       if (!cret) {
-              ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
-                                      "perf/x86/intel/uncore:prepare",
-                                      uncore_cpu_prepare, NULL);
-               if (ret)
-                       goto err;
-       } else {
-               cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
-                                         "perf/x86/intel/uncore:prepare",
-                                         uncore_cpu_prepare, NULL);
-       }
-       first_init = 1;
-       cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
-                         "perf/x86/uncore:starting",
-                         uncore_cpu_starting, uncore_cpu_dying);
-       first_init = 0;
-       cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
-                         "perf/x86/uncore:online",
-                         uncore_event_cpu_online, uncore_event_cpu_offline);
+       /* Install hotplug callbacks to setup the targets for each package */
+       ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+                               "perf/x86/intel/uncore:online",
+                               uncore_event_cpu_online,
+                               uncore_event_cpu_offline);
+       if (ret)
+               goto err;
        return 0;
 
 err:
-       /* Undo box->init_box() */
-       on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
        uncore_types_exit(uncore_msr_uncores);
        uncore_pci_exit();
        return ret;
@@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-       cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
-       cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
-       cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
+       cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
        uncore_types_exit(uncore_msr_uncores);
        uncore_pci_exit();
 }
index 38711df3bcb56b6939f2f84af16b920f0409049b..2266f864b7477a3ff88fee26f07df62eb5073ab4 100644 (file)
@@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 void reload_early_microcode(void);
 extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
+extern bool initrd_gone;
 #else
 static inline int __init microcode_init(void)                  { return 0; };
 static inline void __init load_ucode_bsp(void)                 { }
index 1e35dd06b090ee91189cb5a52fdf026f2ca5e74b..52f352b063fdf372bddd62d5887296fa43661931 100644 (file)
@@ -2117,6 +2117,7 @@ static inline void __init check_timer(void)
                        if (idx != -1 && irq_trigger(idx))
                                unmask_ioapic_irq(irq_get_chip_data(0));
                }
+               irq_domain_deactivate_irq(irq_data);
                irq_domain_activate_irq(irq_data);
                if (timer_irq_works()) {
                        if (disable_timer_pin_1 > 0)
@@ -2138,6 +2139,7 @@ static inline void __init check_timer(void)
                 * legacy devices should be connected to IO APIC #0
                 */
                replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+               irq_domain_deactivate_irq(irq_data);
                irq_domain_activate_irq(irq_data);
                legacy_pic->unmask(0);
                if (timer_irq_works()) {
index 00ef43233e034b0cde9b2adc88b8003ddc42d00b..537c6647d84ca3e7cca771feb6c98bda94ef8c70 100644 (file)
@@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
 
 static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
 
-static void __restart_timer(struct timer_list *t, unsigned long interval)
+static void __start_timer(struct timer_list *t, unsigned long interval)
 {
        unsigned long when = jiffies + interval;
        unsigned long flags;
 
        local_irq_save(flags);
 
-       if (timer_pending(t)) {
-               if (time_before(when, t->expires))
-                       mod_timer(t, when);
-       } else {
-               t->expires = round_jiffies(when);
-               add_timer_on(t, smp_processor_id());
-       }
+       if (!timer_pending(t) || time_before(when, t->expires))
+               mod_timer(t, round_jiffies(when));
 
        local_irq_restore(flags);
 }
@@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)
 
 done:
        __this_cpu_write(mce_next_interval, iv);
-       __restart_timer(t, iv);
+       __start_timer(t, iv);
 }
 
 /*
@@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
        struct timer_list *t = this_cpu_ptr(&mce_timer);
        unsigned long iv = __this_cpu_read(mce_next_interval);
 
-       __restart_timer(t, interval);
+       __start_timer(t, interval);
 
        if (interval < iv)
                __this_cpu_write(mce_next_interval, interval);
@@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
        }
 }
 
-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(struct timer_list *t)
 {
        unsigned long iv = check_interval * HZ;
 
        if (mca_cfg.ignore_ce || !iv)
                return;
 
-       per_cpu(mce_next_interval, cpu) = iv;
-
-       t->expires = round_jiffies(jiffies + iv);
-       add_timer_on(t, cpu);
+       this_cpu_write(mce_next_interval, iv);
+       __start_timer(t, iv);
 }
 
 static void __mcheck_cpu_setup_timer(void)
@@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
        unsigned int cpu = smp_processor_id();
 
        setup_pinned_timer(t, mce_timer_fn, cpu);
-       mce_start_timer(cpu, t);
+       mce_start_timer(t);
 }
 
 /* Handle unconfigured int18 (should never happen) */
@@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)
 
 static int mce_cpu_online(unsigned int cpu)
 {
-       struct timer_list *t = &per_cpu(mce_timer, cpu);
+       struct timer_list *t = this_cpu_ptr(&mce_timer);
        int ret;
 
        mce_device_create(cpu);
@@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
                return ret;
        }
        mce_reenable_cpu();
-       mce_start_timer(cpu, t);
+       mce_start_timer(t);
        return 0;
 }
 
 static int mce_cpu_pre_down(unsigned int cpu)
 {
-       struct timer_list *t = &per_cpu(mce_timer, cpu);
+       struct timer_list *t = this_cpu_ptr(&mce_timer);
 
        mce_disable_cpu();
        del_timer_sync(t);
index 6a31e2691f3aa0ac68620459c371ca42912c4475..079e81733a58950486a7012b70e06f45e71af688 100644 (file)
@@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
 reget:
                if (!get_builtin_microcode(&cp, family)) {
 #ifdef CONFIG_BLK_DEV_INITRD
-                       cp = find_cpio_data(ucode_path, (void *)initrd_start,
-                                           initrd_end - initrd_start, NULL);
+                       if (!initrd_gone)
+                               cp = find_cpio_data(ucode_path, (void *)initrd_start,
+                                                   initrd_end - initrd_start, NULL);
 #endif
                        if (!(cp.data && cp.size)) {
                                /*
index 2af69d27da629a5c802498e692300f9980862a2a..73102d932760b871896a956a88ff178362cc5a94 100644 (file)
@@ -46,6 +46,8 @@
 static struct microcode_ops    *microcode_ops;
 static bool dis_ucode_ldr = true;
 
+bool initrd_gone;
+
 LIST_HEAD(microcode_cache);
 
 /*
@@ -190,21 +192,24 @@ void load_ucode_ap(void)
 static int __init save_microcode_in_initrd(void)
 {
        struct cpuinfo_x86 *c = &boot_cpu_data;
+       int ret = -EINVAL;
 
        switch (c->x86_vendor) {
        case X86_VENDOR_INTEL:
                if (c->x86 >= 6)
-                       return save_microcode_in_initrd_intel();
+                       ret = save_microcode_in_initrd_intel();
                break;
        case X86_VENDOR_AMD:
                if (c->x86 >= 0x10)
-                       return save_microcode_in_initrd_amd(c->x86);
+                       ret = save_microcode_in_initrd_amd(c->x86);
                break;
        default:
                break;
        }
 
-       return -EINVAL;
+       initrd_gone = true;
+
+       return ret;
 }
 
 struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
@@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
         * has the virtual address of the beginning of the initrd. It also
         * possibly relocates the ramdisk. In either case, initrd_start contains
         * the updated address so use that instead.
+        *
+        * initrd_gone is for the hotplug case where we've thrown out initrd
+        * already.
         */
-       if (!use_pa && initrd_start)
-               start = initrd_start;
+       if (!use_pa) {
+               if (initrd_gone)
+                       return (struct cpio_data){ NULL, 0, "" };
+               if (initrd_start)
+                       start = initrd_start;
+       }
 
        return find_cpio_data(path, (void *)start, size, NULL);
 #else /* !CONFIG_BLK_DEV_INITRD */
index 3f329b74e040c23b6b85dfd12a85f80d630c63ac..8325d8a09ab0768dd08156b8e4c5b755b78c10f9 100644 (file)
@@ -41,7 +41,7 @@
 
 static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";
 
-/* Current microcode patch used in early patching */
+/* Current microcode patch used in early patching on the APs. */
 struct microcode_intel *intel_ucode_patch;
 
 static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
@@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
        struct ucode_cpu_info uci;
        struct cpio_data cp;
 
-       /*
-        * AP loading didn't find any microcode patch, no need to save anything.
-        */
-       if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
-               return 0;
-
        if (!load_builtin_intel_microcode(&cp))
                cp = find_microcode_in_initrd(ucode_path, false);
 
@@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
        return 0;
 }
 
-
 /*
  * @res_patch, output: a pointer to the patch we found.
  */
index e4e97a5355ce852ac49937fd180f62b614c1286c..de7234401275b56760f27573eea5669e2bda4f68 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/fpu/regset.h>
 #include <asm/fpu/signal.h>
 #include <asm/fpu/types.h>
+#include <asm/fpu/xstate.h>
 #include <asm/traps.h>
 
 #include <linux/hardirq.h>
@@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
         * it will #GP. Make sure it is replaced after the memset().
         */
        if (static_cpu_has(X86_FEATURE_XSAVES))
-               state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
+               state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
+                                              xfeatures_mask;
 
        if (static_cpu_has(X86_FEATURE_FXSR))
                fpstate_init_fxstate(&state->fxsave);
index 85e87b46c318026ed28d87056c516aec3e5fb9ed..dc6ba5bda9fc83630c773a80c4adea6871db0a59 100644 (file)
@@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
        } else {
                struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
 
+               irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
                irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
                disable_irq(hdev->irq);
                irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
index d153be8929a68440ae5e5894497cbb7fa1ab9913..e52c9088660fac47d6da377b39412378ff0157b0 100644 (file)
@@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
        memcpy(dest, xsave, XSAVE_HDR_OFFSET);
 
        /* Set XSTATE_BV */
+       xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
        *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;
 
        /*
index 319148bd4b05091d24576a7535b10aad7bec0c2d..2f25a363068cf9723e8b418e8c1942a6d3ca4029 100644 (file)
@@ -268,6 +268,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
        efi_scratch.use_pgd = true;
 
+       /*
+        * Certain firmware versions are way too sentimential and still believe
+        * they are exclusive and unquestionable owners of the first physical page,
+        * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
+        * (but then write-access it later during SetVirtualAddressMap()).
+        *
+        * Create a 1:1 mapping for this page, to avoid triple faults during early
+        * boot with such firmware. We are free to hand this page to the BIOS,
+        * as trim_bios_range() will reserve the first page and isolate it away
+        * from memory allocators anyway.
+        */
+       if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
+               pr_err("Failed to create 1:1 mapping for the first page!\n");
+               return 1;
+       }
+
        /*
         * When making calls to the firmware everything needs to be 1:1
         * mapped and addressable with 32-bit pointers. Map the kernel
index 848e8568fb3c4a90c2eb89783c0420f8b5526cd6..8fd4be610607c2683b16a3e0da4249f4aea732e4 100644 (file)
@@ -419,7 +419,7 @@ subsys_initcall(topology_init);
 
 void cpu_reset(void)
 {
-#if XCHAL_HAVE_PTP_MMU
+#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
        local_irq_disable();
        /*
         * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
index f849311e9fd4c94e57d81ba97279ec5fb0cb0ded..533265f110e0297b9fc1e14a7a215a54d634b8d8 100644 (file)
@@ -661,9 +661,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
 unlock:
        list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
                af_alg_free_sg(&rsgl->sgl);
+               list_del(&rsgl->list);
                if (rsgl != &ctx->first_rsgl)
                        sock_kfree_s(sk, rsgl, sizeof(*rsgl));
-               list_del(&rsgl->list);
        }
        INIT_LIST_HEAD(&ctx->list);
        aead_wmem_wakeup(sk);
index 2f82b8eba360e7f369338b7d7a340060d6519f4f..7361d00818e2bb61f5d280c6817db2a2e8d01fc4 100644 (file)
@@ -2704,6 +2704,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
        struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
        struct device *dev = acpi_desc->dev;
        struct acpi_nfit_flush_work flush;
+       int rc;
 
        /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
        device_lock(dev);
@@ -2716,7 +2717,10 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
        INIT_WORK_ONSTACK(&flush.work, flush_probe);
        COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
        queue_work(nfit_wq, &flush.work);
-       return wait_for_completion_interruptible(&flush.cmp);
+
+       rc = wait_for_completion_interruptible(&flush.cmp);
+       cancel_work_sync(&flush.work);
+       return rc;
 }
 
 static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
index 4497d263209fb861e08e1650ddf654a98f2102e3..ac350c518e0c9479c05c4d9ff9f6ae918f26b96c 100644 (file)
@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
        struct firmware_buf *buf = fw_priv->buf;
 
        __fw_load_abort(buf);
-
-       /* avoid user action after loading abort */
-       fw_priv->buf = NULL;
 }
 
 static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
 
        mutex_lock(&fw_lock);
        fw_buf = fw_priv->buf;
-       if (!fw_buf)
+       if (fw_state_is_aborted(&fw_buf->fw_st))
                goto out;
 
        switch (loading) {
index dacb6a8418aa927e8d75a86470b35b414bf48598..fa26ffd25fa61bae95bd441699a54ee8e16818d2 100644 (file)
@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
 {
        struct memory_block *mem = to_memory_block(dev);
        unsigned long start_pfn, end_pfn;
+       unsigned long valid_start, valid_end, valid_pages;
        unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
-       struct page *first_page;
        struct zone *zone;
        int zone_shift = 0;
 
        start_pfn = section_nr_to_pfn(mem->start_section_nr);
        end_pfn = start_pfn + nr_pages;
-       first_page = pfn_to_page(start_pfn);
 
        /* The block contains more than one zone can not be offlined. */
-       if (!test_pages_in_a_zone(start_pfn, end_pfn))
+       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                return sprintf(buf, "none\n");
 
-       zone = page_zone(first_page);
+       zone = page_zone(pfn_to_page(valid_start));
+       valid_pages = valid_end - valid_start;
 
        /* MMOP_ONLINE_KEEP */
        sprintf(buf, "%s", zone->name);
 
        /* MMOP_ONLINE_KERNEL */
-       zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
+       zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
        }
 
        /* MMOP_ONLINE_MOVABLE */
-       zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
+       zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
index 872eac4cb1dfdcd069136856e681f741332708e2..a14fac6a01d316a0249fdd45de3e676e0dffed7c 100644 (file)
@@ -966,13 +966,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
        unsigned long flags;
        int retval;
 
-       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }
 
+       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_idle(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -998,13 +998,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
        unsigned long flags;
        int retval;
 
-       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
        if (rpmflags & RPM_GET_PUT) {
                if (!atomic_dec_and_test(&dev->power.usage_count))
                        return 0;
        }
 
+       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
        spin_lock_irqsave(&dev->power.lock, flags);
        retval = rpm_suspend(dev, rpmflags);
        spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1029,7 +1029,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
        unsigned long flags;
        int retval;
 
-       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+       might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+                       dev->power.runtime_status != RPM_ACTIVE);
 
        if (rpmflags & RPM_GET_PUT)
                atomic_inc(&dev->power.usage_count);
index 4fda623e55bb728e910ac0f0995cc7d8cb34f330..c94360671f41e54db9861a8ca86a8da7ddc2e5fe 100644 (file)
@@ -784,8 +784,19 @@ static int brcm_avs_target_index(struct cpufreq_policy *policy,
 static int brcm_avs_suspend(struct cpufreq_policy *policy)
 {
        struct private_data *priv = policy->driver_data;
+       int ret;
+
+       ret = brcm_avs_get_pmap(priv, &priv->pmap);
+       if (ret)
+               return ret;
 
-       return brcm_avs_get_pmap(priv, &priv->pmap);
+       /*
+        * We can't use the P-state returned by brcm_avs_get_pmap(), since
+        * that's the initial P-state from when the P-map was downloaded to the
+        * AVS co-processor, not necessarily the P-state we are running at now.
+        * So, we get the current P-state explicitly.
+        */
+       return brcm_avs_get_pstate(priv, &priv->pmap.state);
 }
 
 static int brcm_avs_resume(struct cpufreq_policy *policy)
@@ -954,9 +965,9 @@ static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy, char *buf)
        brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
        brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
 
-       return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n",
+       return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u %u %u\n",
                pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
-               mdiv_p3, mdiv_p4);
+               mdiv_p3, mdiv_p4, pmap.mode, pmap.state);
 }
 
 static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
index a54d65aa776d064f025036cf9aa35790d83d82e8..50bd6d987fc3a0bf2e1a344a44b9b41d084899cb 100644 (file)
@@ -1235,6 +1235,25 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
                cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
 }
 
+#define MSR_IA32_POWER_CTL_BIT_EE      19
+
+/* Disable energy efficiency optimization */
+static void intel_pstate_disable_ee(int cpu)
+{
+       u64 power_ctl;
+       int ret;
+
+       ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
+       if (ret)
+               return;
+
+       if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
+               pr_info("Disabling energy efficiency optimization\n");
+               power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
+               wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
+       }
+}
+
 static int atom_get_min_pstate(void)
 {
        u64 value;
@@ -1845,6 +1864,11 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
        {}
 };
 
+static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
+       ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
+       {}
+};
+
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
        struct cpudata *cpu;
@@ -1875,6 +1899,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
        cpu->cpu = cpunum;
 
        if (hwp_active) {
+               const struct x86_cpu_id *id;
+
+               id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
+               if (id)
+                       intel_pstate_disable_ee(cpunum);
+
                intel_pstate_hwp_enable(cpu);
                pid_params.sample_rate_ms = 50;
                pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
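
intel_pstate_disable_ee() is a read-modify-write of a single bit in MSR_IA32_POWER_CTL, applied only on CPUs matched by the new intel_pstate_cpu_ee_disable_ids table. The bit manipulation itself, separated from the rdmsrl_on_cpu()/wrmsrl_on_cpu() plumbing, is just this:

    #include <stdint.h>
    #include <stdio.h>

    #define POWER_CTL_BIT_EE 19                 /* bit 19 of the register */
    #define BIT64(n)         (1ULL << (n))

    /* Set the bit only if it is not already set; return the new value. */
    static uint64_t disable_ee(uint64_t power_ctl)
    {
        if (!(power_ctl & BIT64(POWER_CTL_BIT_EE)))
            power_ctl |= BIT64(POWER_CTL_BIT_EE);
        return power_ctl;
    }

    int main(void)
    {
        printf("0x%llx\n", (unsigned long long)disable_ee(0));          /* 0x80000     */
        printf("0x%llx\n", (unsigned long long)disable_ee(BIT64(19)));  /* already set */
        return 0;
    }
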
index e2ce8190ecc9a5c0baf3f6e642a4cff17f89862c..612898b4aaad045f7e96429ac325ecbec91a4295 100644 (file)
@@ -959,7 +959,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
 static void ccp5_config(struct ccp_device *ccp)
 {
        /* Public side */
-       iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
+       iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
 }
 
 static void ccp5other_config(struct ccp_device *ccp)
index 830f35e6005f0e002457d57fab70a8557fb85e24..649e5610a5cea7d08a95767af1c8f8e0153e0b7a 100644 (file)
@@ -238,6 +238,7 @@ struct ccp_dma_chan {
        struct ccp_device *ccp;
 
        spinlock_t lock;
+       struct list_head created;
        struct list_head pending;
        struct list_head active;
        struct list_head complete;
index 6553912804f73f1c061aec9bdd3afd0f0d7426bc..e5d9278f40197427e913993fe9249d405585fe87 100644 (file)
@@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan)
        ccp_free_desc_resources(chan->ccp, &chan->complete);
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
+       ccp_free_desc_resources(chan->ccp, &chan->created);
 
        spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
        spin_lock_irqsave(&chan->lock, flags);
 
        cookie = dma_cookie_assign(tx_desc);
+       list_del(&desc->entry);
        list_add_tail(&desc->entry, &chan->pending);
 
        spin_unlock_irqrestore(&chan->lock, flags);
@@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
 
        spin_lock_irqsave(&chan->lock, sflags);
 
-       list_add_tail(&desc->entry, &chan->pending);
+       list_add_tail(&desc->entry, &chan->created);
 
        spin_unlock_irqrestore(&chan->lock, sflags);
 
@@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
        /*TODO: Purge the complete list? */
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
+       ccp_free_desc_resources(chan->ccp, &chan->created);
 
        spin_unlock_irqrestore(&chan->lock, flags);
 
@@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
                chan->ccp = ccp;
 
                spin_lock_init(&chan->lock);
+               INIT_LIST_HEAD(&chan->created);
                INIT_LIST_HEAD(&chan->pending);
                INIT_LIST_HEAD(&chan->active);
                INIT_LIST_HEAD(&chan->complete);
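
The new 'created' list gives a descriptor somewhere to live between ccp_create_desc() and ccp_tx_submit(); submit then unlinks it and re-queues it on 'pending', and the terminate/free paths can now reclaim descriptors that were created but never submitted. A small userspace sketch of that move, using BSD sys/queue.h lists purely as a stand-in for the kernel list API:

    #include <stdio.h>
    #include <sys/queue.h>

    struct desc {
        int id;
        TAILQ_ENTRY(desc) entry;
    };
    TAILQ_HEAD(desc_list, desc);

    int main(void)
    {
        struct desc_list created, pending;
        struct desc d = { .id = 1 };

        TAILQ_INIT(&created);
        TAILQ_INIT(&pending);

        /* create: park the descriptor on 'created' */
        TAILQ_INSERT_TAIL(&created, &d, entry);

        /* submit: unlink, then queue on 'pending'
           (mirrors the list_del() + list_add_tail() in ccp_tx_submit) */
        TAILQ_REMOVE(&created, &d, entry);
        TAILQ_INSERT_TAIL(&pending, &d, entry);

        printf("created empty: %d, pending has id %d\n",
               TAILQ_EMPTY(&created), TAILQ_FIRST(&pending)->id);
        return 0;
    }
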
index 2ed1e24b44a8ba07b24a1ef4a9d7cca4e57522d4..b4b78b37f8a684698d79bc053be0b2f9db7e654f 100644 (file)
@@ -158,7 +158,7 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
        case CRYPTO_ALG_TYPE_AEAD:
                ctx_req.req.aead_req = (struct aead_request *)req;
                ctx_req.ctx.reqctx = aead_request_ctx(ctx_req.req.aead_req);
-               dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.aead_req->dst,
+               dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.ctx.reqctx->dst,
                             ctx_req.ctx.reqctx->dst_nents, DMA_FROM_DEVICE);
                if (ctx_req.ctx.reqctx->skb) {
                        kfree_skb(ctx_req.ctx.reqctx->skb);
@@ -1362,8 +1362,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct phys_sge_parm sg_param;
-       struct scatterlist *src, *dst;
-       struct scatterlist src_sg[2], dst_sg[2];
+       struct scatterlist *src;
        unsigned int frags = 0, transhdr_len;
        unsigned int ivsize = crypto_aead_ivsize(tfm), dst_size = 0;
        unsigned int   kctx_len = 0;
@@ -1383,19 +1382,21 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
 
        if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
                goto err;
-       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-       dst = src;
+       src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+       reqctx->dst = src;
+
        if (req->src != req->dst) {
                err = chcr_copy_assoc(req, aeadctx);
                if (err)
                        return ERR_PTR(err);
-               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+               reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+                                              req->assoclen);
        }
        if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
                null = 1;
                assoclen = 0;
        }
-       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+       reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
                                             (op_type ? -authsize : authsize));
        if (reqctx->dst_nents <= 0) {
                pr_err("AUTHENC:Invalid Destination sg entries\n");
@@ -1460,7 +1461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
        sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
        sg_param.qid = qid;
        sg_param.align = 0;
-       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
                                  &sg_param))
                goto dstmap_fail;
 
@@ -1711,8 +1712,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct phys_sge_parm sg_param;
-       struct scatterlist *src, *dst;
-       struct scatterlist src_sg[2], dst_sg[2];
+       struct scatterlist *src;
        unsigned int frags = 0, transhdr_len, ivsize = AES_BLOCK_SIZE;
        unsigned int dst_size = 0, kctx_len;
        unsigned int sub_type;
@@ -1728,17 +1728,19 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
                goto err;
        sub_type = get_aead_subtype(tfm);
-       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-       dst = src;
+       src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+       reqctx->dst = src;
+
        if (req->src != req->dst) {
                err = chcr_copy_assoc(req, aeadctx);
                if (err) {
                        pr_err("AAD copy to destination buffer fails\n");
                        return ERR_PTR(err);
                }
-               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+               reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+                                              req->assoclen);
        }
-       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+       reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
                                             (op_type ? -authsize : authsize));
        if (reqctx->dst_nents <= 0) {
                pr_err("CCM:Invalid Destination sg entries\n");
@@ -1777,7 +1779,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
        sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
        sg_param.qid = qid;
        sg_param.align = 0;
-       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
                                  &sg_param))
                goto dstmap_fail;
 
@@ -1809,8 +1811,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct phys_sge_parm sg_param;
-       struct scatterlist *src, *dst;
-       struct scatterlist src_sg[2], dst_sg[2];
+       struct scatterlist *src;
        unsigned int frags = 0, transhdr_len;
        unsigned int ivsize = AES_BLOCK_SIZE;
        unsigned int dst_size = 0, kctx_len;
@@ -1832,13 +1833,14 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        if (sg_nents_for_len(req->src, req->assoclen + req->cryptlen) < 0)
                goto err;
 
-       src = scatterwalk_ffwd(src_sg, req->src, req->assoclen);
-       dst = src;
+       src = scatterwalk_ffwd(reqctx->srcffwd, req->src, req->assoclen);
+       reqctx->dst = src;
        if (req->src != req->dst) {
                err = chcr_copy_assoc(req, aeadctx);
                if (err)
                        return  ERR_PTR(err);
-               dst = scatterwalk_ffwd(dst_sg, req->dst, req->assoclen);
+               reqctx->dst = scatterwalk_ffwd(reqctx->dstffwd, req->dst,
+                                              req->assoclen);
        }
 
        if (!req->cryptlen)
@@ -1848,7 +1850,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
                crypt_len = AES_BLOCK_SIZE;
        else
                crypt_len = req->cryptlen;
-       reqctx->dst_nents = sg_nents_for_len(dst, req->cryptlen +
+       reqctx->dst_nents = sg_nents_for_len(reqctx->dst, req->cryptlen +
                                             (op_type ? -authsize : authsize));
        if (reqctx->dst_nents <= 0) {
                pr_err("GCM:Invalid Destination sg entries\n");
@@ -1923,7 +1925,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
        sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
        sg_param.qid = qid;
        sg_param.align = 0;
-       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, dst,
+       if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, reqctx->dst,
                                  &sg_param))
                goto dstmap_fail;
 
@@ -1937,7 +1939,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
                write_sg_to_skb(skb, &frags, src, req->cryptlen);
        } else {
                aes_gcm_empty_pld_pad(req->dst, authsize - 1);
-               write_sg_to_skb(skb, &frags, dst, crypt_len);
+               write_sg_to_skb(skb, &frags, reqctx->dst, crypt_len);
+
        }
 
        create_wreq(ctx, chcr_req, req, skb, kctx_len, size, 1,
@@ -2189,8 +2192,8 @@ static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
        unsigned int ck_size;
        int ret = 0, key_ctx_size = 0;
 
-       if (get_aead_subtype(aead) ==
-           CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+       if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
+           keylen > 3) {
                keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
                memcpy(aeadctx->salt, key + keylen, 4);
        }
index 918da8e6e2d8a587ef7e397e149a9779a8c4a21f..1c65f07e1cc9a56f9dc88b30c10ba57e13b875ed 100644 (file)
@@ -52,6 +52,7 @@ static struct cxgb4_uld_info chcr_uld_info = {
 int assign_chcr_device(struct chcr_dev **dev)
 {
        struct uld_ctx *u_ctx;
+       int ret = -ENXIO;
 
        /*
         * Which device to use if multiple devices are available TODO
@@ -59,15 +60,14 @@ int assign_chcr_device(struct chcr_dev **dev)
         * must go to the same device to maintain the ordering.
         */
        mutex_lock(&dev_mutex); /* TODO ? */
-       u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
-       if (!u_ctx) {
-               mutex_unlock(&dev_mutex);
-               return -ENXIO;
+       list_for_each_entry(u_ctx, &uld_ctx_list, entry)
+               if (u_ctx && u_ctx->dev) {
+                       *dev = u_ctx->dev;
+                       ret = 0;
+                       break;
        }
-
-       *dev = u_ctx->dev;
        mutex_unlock(&dev_mutex);
-       return 0;
+       return ret;
 }
 
 static int chcr_dev_add(struct uld_ctx *u_ctx)
@@ -202,10 +202,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
 
 static int __init chcr_crypto_init(void)
 {
-       if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
+       if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
                pr_err("ULD register fail: No chcr crypto support in cxgb4");
-               return -1;
-       }
 
        return 0;
 }
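
assign_chcr_device() now walks uld_ctx_list and returns the first context that actually has a device attached; the old list_first_entry() does not return NULL for an empty list, so the !u_ctx check it was paired with could not catch that case. The shape of the scan, reduced to plain C:

    #include <stddef.h>
    #include <stdio.h>

    struct uld_ctx { struct uld_ctx *next; void *dev; };

    /* Return the first context with a non-NULL dev, or NULL (the -ENXIO case). */
    static void *first_usable_dev(struct uld_ctx *head)
    {
        for (struct uld_ctx *u = head; u; u = u->next)
            if (u->dev)
                return u->dev;
        return NULL;
    }

    int main(void)
    {
        struct uld_ctx b = { NULL, "dev-b" };
        struct uld_ctx a = { &b, NULL };                /* first entry has no dev */
        printf("%s\n", (char *)first_usable_dev(&a));   /* prints dev-b           */
        return 0;
    }
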
index d5af7d64a763bc8ad4ce45d60bada6226059bff1..7ec0a8f124753d1fbcdf7b248d14177330bab977 100644 (file)
@@ -158,6 +158,9 @@ struct ablk_ctx {
 };
 struct chcr_aead_reqctx {
        struct  sk_buff *skb;
+       struct scatterlist *dst;
+       struct scatterlist srcffwd[2];
+       struct scatterlist dstffwd[2];
        short int dst_nents;
        u16 verify;
        u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
index bc5cbc193aae5c6167559ea3d2b9975acc7e6a5a..5b2d78a5b5aaaffd9ba3a858269c8664cd4f89b7 100644 (file)
@@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                              &hw_data->accel_capabilities_mask);
 
        /* Find and map all the device's BARS */
-       i = 0;
+       i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
        for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
                         ADF_PCI_MAX_BARS * 2) {
index e8822536530b99666f2f5137c25b36f7b451fe06..33f0a6251e385c8f5f72fc0f384106c368329c86 100644 (file)
@@ -69,6 +69,7 @@
 #define ADF_ERRSOU5 (0x3A000 + 0xD8)
 #define ADF_DEVICE_FUSECTL_OFFSET 0x40
 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
+#define ADF_DEVICE_FUSECTL_MASK 0x80000000
 #define ADF_PCI_MAX_BARS 3
 #define ADF_DEVICE_NAME_LENGTH 32
 #define ADF_ETR_MAX_RINGS_PER_BANK 16
index 1e480f140663530a699d8bf51b402bce394a935e..8c4fd255a601b2d6c60e2300f5c878f8a512f57d 100644 (file)
@@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
        unsigned int csr_val;
        int times = 30;
 
-       if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID)
+       if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
                return 0;
 
        csr_val = ADF_CSR_RD(csr_addr, 0);
@@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
                (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
                                 LOCAL_TO_XFER_REG_OFFSET);
        handle->pci_dev = pci_info->pci_dev;
-       if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) {
+       if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
                sram_bar =
                        &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
                handle->hal_sram_addr_v = sram_bar->virt_addr;
index 921dfa047202952c9064cd39971e68e0e3c28b49..260c4b4b492ec38735715859522068da40c21381 100644 (file)
@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
 struct exit_boot_struct {
        efi_memory_desc_t *runtime_map;
        int *runtime_entry_count;
+       void *new_fdt_addr;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
        efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
                        p->runtime_map, p->runtime_entry_count);
 
-       return EFI_SUCCESS;
+       return update_fdt_memmap(p->new_fdt_addr, map);
 }
 
 /*
@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 
        priv.runtime_map = runtime_map;
        priv.runtime_entry_count = &runtime_entry_count;
+       priv.new_fdt_addr = (void *)*new_fdt_addr;
        status = efi_exit_boot_services(sys_table, handle, &map, &priv,
                                        exit_boot_func);
 
        if (status == EFI_SUCCESS) {
                efi_set_virtual_address_map_t *svam;
 
-               status = update_fdt_memmap((void *)*new_fdt_addr, &map);
-               if (status != EFI_SUCCESS) {
-                       /*
-                        * The kernel won't get far without the memory map, but
-                        * may still be able to print something meaningful so
-                        * return success here.
-                        */
-                       return EFI_SUCCESS;
-               }
-
                /* Install the new virtual address map */
                svam = sys_table->runtime->set_virtual_address_map;
                status = svam(runtime_entry_count * desc_size, desc_size,
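
Moving update_fdt_memmap() into exit_boot_func() means the device tree is patched with the memory map captured at ExitBootServices() time rather than an earlier copy that may already be stale; new_fdt_addr travels into the callback through the priv structure. The general shape of passing extra state to a callback via a context struct, as a standalone sketch (names here are illustrative, not the EFI stub API):

    #include <stdio.h>

    struct exit_boot_ctx {
        void *new_fdt_addr;                    /* state the callback needs */
    };

    typedef int (*exit_cb)(void *priv, const char *final_map);

    /* Stand-in for efi_exit_boot_services(): builds the final map, then hands
       it to the caller-supplied callback together with priv. */
    static int exit_boot_services(void *priv, exit_cb cb)
    {
        const char *final_map = "memory-map@exit-time";
        return cb(priv, final_map);
    }

    static int exit_boot_func(void *priv, const char *final_map)
    {
        struct exit_boot_ctx *p = priv;
        printf("patch fdt %p with %s\n", p->new_fdt_addr, final_map);
        return 0;
    }

    int main(void)
    {
        struct exit_boot_ctx priv = { .new_fdt_addr = (void *)0x1000 };
        return exit_boot_services(&priv, exit_boot_func);
    }
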
index e2b0b1646f995fd94d12e17a8cb1258bec34061f..0635829b18cf3aed41239079e4208336b00cda0f 100644 (file)
@@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
        }
        WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
+       if (adev->mode_info.num_crtc)
+               amdgpu_display_set_vga_render_state(adev, false);
+
        gmc_v6_0_mc_stop(adev, &save);
 
        if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
                dev_warn(adev->dev, "Wait for MC idle timedout !\n");
        }
        gmc_v6_0_mc_resume(adev, &save);
-       amdgpu_display_set_vga_render_state(adev, false);
 }
 
 static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
index 50f5cf7b69d1dc55fd427efa61e7e8e5eeff80d0..fdfb1ec17e660efa0b1f2c1f7273fa8d6fd8567a 100644 (file)
@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
        }
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               struct drm_pending_vblank_event *event = crtc_state->event;
                /*
-                * TEST_ONLY and PAGE_FLIP_EVENT are mutually
-                * exclusive, if they weren't, this code should be
-                * called on success for TEST_ONLY too.
+                * Free the allocated event. drm_atomic_helper_setup_commit
+                * can allocate an event too, so only free it if it's ours
+                * to prevent a double free in drm_atomic_state_clear.
                 */
-               if (crtc_state->event)
-                       drm_event_cancel_free(dev, &crtc_state->event->base);
+               if (event && (event->base.fence || event->base.file_priv)) {
+                       drm_event_cancel_free(dev, &event->base);
+                       crtc_state->event = NULL;
+               }
        }
 
        if (!fence_state)
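
The replacement comment spells out the rule: only free an event that this path allocated, which the code detects by the event having a fence or a file_priv attached; events set up elsewhere (for example by drm_atomic_helper_setup_commit) are left to their owner, and clearing crtc_state->event is what keeps drm_atomic_state_clear() from freeing it again. The ownership-checked cleanup pattern on its own, in plain C rather than the DRM structures:

    #include <stdio.h>
    #include <stdlib.h>

    struct event {
        void *fence;        /* set only when this path created the event */
        void *file_priv;
    };

    /* Free the event only if we own it, and clear the caller's pointer so a
       later generic cleanup cannot free it a second time. */
    static void cancel_event_if_ours(struct event **slot)
    {
        struct event *e = *slot;

        if (e && (e->fence || e->file_priv)) {
            free(e);
            *slot = NULL;
        }
    }

    int main(void)
    {
        struct event *ours = calloc(1, sizeof(*ours));
        struct event *theirs = calloc(1, sizeof(*theirs));   /* helper-owned */

        ours->fence = (void *)0x1;              /* marks it as ours          */

        cancel_event_if_ours(&ours);            /* freed, slot set to NULL   */
        cancel_event_if_ours(&theirs);          /* untouched                 */
        printf("ours=%p theirs=%p\n", (void *)ours, (void *)theirs);
        free(theirs);
        return 0;
    }
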
index 34f757bcabae8d88382f18392467e67f4e0f6100..4594477dee00bc0ffb7847da556985848df717e7 100644 (file)
@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 
                funcs = plane->helper_private;
 
-               if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-                       continue;
-
                if (funcs->prepare_fb) {
                        ret = funcs->prepare_fb(plane, plane_state);
                        if (ret)
@@ -1685,9 +1682,6 @@ fail:
                if (j >= i)
                        continue;
 
-               if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
-                       continue;
-
                funcs = plane->helper_private;
 
                if (funcs->cleanup_fb)
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
        for_each_plane_in_state(old_state, plane, plane_state, i) {
                const struct drm_plane_helper_funcs *funcs;
 
-               if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
-                       continue;
-
                funcs = plane->helper_private;
 
                if (funcs->cleanup_fb)
index 5a452628939272969c54239498b9a1ca86cd4a67..7a7019ac93884eeeba046ad62b3c81037796beb7 100644 (file)
@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
 
        INIT_LIST_HEAD(&connector->probed_modes);
        INIT_LIST_HEAD(&connector->modes);
+       mutex_init(&connector->mutex);
        connector->edid_blob_ptr = NULL;
        connector->status = connector_status_unknown;
 
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
                connector->funcs->atomic_destroy_state(connector,
                                                       connector->state);
 
+       mutex_destroy(&connector->mutex);
+
        memset(connector, 0, sizeof(*connector));
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
  */
 int drm_connector_register(struct drm_connector *connector)
 {
-       int ret;
+       int ret = 0;
 
-       if (connector->registered)
+       if (!connector->dev->registered)
                return 0;
 
+       mutex_lock(&connector->mutex);
+       if (connector->registered)
+               goto unlock;
+
        ret = drm_sysfs_connector_add(connector);
        if (ret)
-               return ret;
+               goto unlock;
 
        ret = drm_debugfs_connector_add(connector);
        if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
        drm_mode_object_register(connector->dev, &connector->base);
 
        connector->registered = true;
-       return 0;
+       goto unlock;
 
 err_debugfs:
        drm_debugfs_connector_remove(connector);
 err_sysfs:
        drm_sysfs_connector_remove(connector);
+unlock:
+       mutex_unlock(&connector->mutex);
        return ret;
 }
 EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
  */
 void drm_connector_unregister(struct drm_connector *connector)
 {
-       if (!connector->registered)
+       mutex_lock(&connector->mutex);
+       if (!connector->registered) {
+               mutex_unlock(&connector->mutex);
                return;
+       }
 
        if (connector->funcs->early_unregister)
                connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
        drm_debugfs_connector_remove(connector);
 
        connector->registered = false;
+       mutex_unlock(&connector->mutex);
 }
 EXPORT_SYMBOL(drm_connector_unregister);
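
drm_connector_register()/drm_connector_unregister() are now serialized by a per-connector mutex, short-circuit on the registered flag, and registration is skipped entirely until the device itself has been registered (dev->registered). The locking skeleton on its own, with a pthread mutex standing in for the kernel mutex:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct connector {
        pthread_mutex_t lock;
        bool registered;
    };

    static int connector_register(struct connector *c, bool dev_registered)
    {
        int ret = 0;

        if (!dev_registered)        /* too early: the device registers all
                                       connectors later                    */
            return 0;

        pthread_mutex_lock(&c->lock);
        if (c->registered)
            goto unlock;            /* already done, nothing to do */

        /* sysfs/debugfs setup would go here; set ret on failure */
        c->registered = true;
    unlock:
        pthread_mutex_unlock(&c->lock);
        return ret;
    }

    static void connector_unregister(struct connector *c)
    {
        pthread_mutex_lock(&c->lock);
        if (c->registered) {
            /* teardown would go here */
            c->registered = false;
        }
        pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
        struct connector c = { PTHREAD_MUTEX_INITIALIZER, false };

        connector_register(&c, true);
        connector_register(&c, true);    /* harmless second call */
        connector_unregister(&c);
        printf("registered=%d\n", c.registered);
        return 0;
    }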
 
index a525751b4559e9f3cc99850d5aac6e5c3eb6f16c..6594b4088f11bc8e5aa6a6c308ba3fdf633921a3 100644 (file)
@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
        if (ret)
                goto err_minors;
 
+       dev->registered = true;
+
        if (dev->driver->load) {
                ret = dev->driver->load(dev, flags);
                if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
 
        drm_lastclose(dev);
 
+       dev->registered = false;
+
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                drm_modeset_unregister_all(dev);
 
index 69bc3b0c43905eccf19ad142cf32c7064a09feb0..8493e19b563a134ba588e9df39c3fb5785a25a69 100644 (file)
@@ -1012,6 +1012,8 @@ struct intel_fbc {
        struct work_struct underrun_work;
 
        struct intel_fbc_state_cache {
+               struct i915_vma *vma;
+
                struct {
                        unsigned int mode_flags;
                        uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@ struct intel_fbc {
                } plane;
 
                struct {
-                       u64 ilk_ggtt_offset;
                        uint32_t pixel_format;
                        unsigned int stride;
-                       int fence_reg;
-                       unsigned int tiling_mode;
                } fb;
        } state_cache;
 
        struct intel_fbc_reg_params {
+               struct i915_vma *vma;
+
                struct {
                        enum pipe pipe;
                        enum plane plane;
@@ -1041,10 +1042,8 @@ struct intel_fbc {
                } crtc;
 
                struct {
-                       u64 ggtt_offset;
                        uint32_t pixel_format;
                        unsigned int stride;
-                       int fence_reg;
                } fb;
 
                int cfb_size;
@@ -3168,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
        return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
 }
 
-static inline unsigned long
-i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
-                           const struct i915_ggtt_view *view)
-{
-       return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
-}
-
 /* i915_gem_fence_reg.c */
 int __must_check i915_vma_get_fence(struct i915_vma *vma);
 int __must_check i915_vma_put_fence(struct i915_vma *vma);
index dbe9fb41ae535449f996ab36e51bd626be6651fc..8d3e515f27bade27acda9544a30d975628ac8ab3 100644 (file)
@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
 
        __drm_atomic_helper_plane_duplicate_state(plane, state);
 
+       intel_state->vma = NULL;
+
        return state;
 }
 
@@ -100,6 +102,24 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
                          struct drm_plane_state *state)
 {
+       struct i915_vma *vma;
+
+       vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
+
+       /*
+        * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
+        * We currently don't clear all planes during driver unload, so we have
+        * to be able to unpin vma here for now.
+        *
+        * Normally this can only happen during unload when kmscon is disabled
+        * and userspace doesn't attempt to set a framebuffer at all.
+        */
+       if (vma) {
+               mutex_lock(&plane->dev->struct_mutex);
+               intel_unpin_fb_vma(vma);
+               mutex_unlock(&plane->dev->struct_mutex);
+       }
+
        drm_atomic_helper_plane_destroy_state(plane, state);
 }
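
intel_plane_destroy_state() takes the vma out of the state with fetch_and_zero() before unpinning, so whichever path gets there first owns the pointer and every later reader sees NULL and does nothing. The idiom is read, clear, then act on the local copy; a plain-C sketch of the same idea:

    #include <stddef.h>
    #include <stdio.h>

    /* Take ownership of *slot: return the old value and leave NULL behind. */
    static void *take(void **slot)
    {
        void *old = *slot;
        *slot = NULL;
        return old;
    }

    static void unpin(void *vma) { printf("unpin %p\n", vma); }

    int main(void)
    {
        void *state_vma = (void *)0x1234;

        void *vma = take(&state_vma);
        if (vma)
            unpin(vma);              /* runs exactly once                 */

        vma = take(&state_vma);      /* a second caller gets NULL instead */
        if (vma)
            unpin(vma);
        return 0;
    }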
 
index f0b9aa7a0483d1928aecec439d342fa2c85b78d9..f1e4a21d46643b53a3cf492671b872ed03215105 100644 (file)
@@ -2235,27 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
                        i915_vma_pin_fence(vma);
        }
 
+       i915_vma_get(vma);
 err:
        intel_runtime_pm_put(dev_priv);
        return vma;
 }
 
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_vma(struct i915_vma *vma)
 {
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct i915_ggtt_view view;
-       struct i915_vma *vma;
-
-       WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
-
-       intel_fill_fb_ggtt_view(&view, fb, rotation);
-       vma = i915_gem_object_to_ggtt(obj, &view);
+       lockdep_assert_held(&vma->vm->dev->struct_mutex);
 
        if (WARN_ON_ONCE(!vma))
                return;
 
        i915_vma_unpin_fence(vma);
        i915_gem_object_unpin_from_display_plane(vma);
+       i915_vma_put(vma);
 }
 
 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2750,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
-       struct intel_crtc *i;
        struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
@@ -2775,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
-               i = to_intel_crtc(c);
+               struct intel_plane_state *state;
 
                if (c == &intel_crtc->base)
                        continue;
 
-               if (!i->active)
+               if (!to_intel_crtc(c)->active)
                        continue;
 
-               fb = c->primary->fb;
-               if (!fb)
+               state = to_intel_plane_state(c->primary->state);
+               if (!state->vma)
                        continue;
 
-               obj = intel_fb_obj(fb);
-               if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
+               if (intel_plane_ggtt_offset(state) == plane_config->base) {
+                       fb = c->primary->fb;
                        drm_framebuffer_reference(fb);
                        goto valid_fb;
                }
@@ -2809,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        return;
 
 valid_fb:
+       mutex_lock(&dev->struct_mutex);
+       intel_state->vma =
+               intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+       mutex_unlock(&dev->struct_mutex);
+       if (IS_ERR(intel_state->vma)) {
+               DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
+                         intel_crtc->pipe, PTR_ERR(intel_state->vma));
+
+               intel_state->vma = NULL;
+               drm_framebuffer_unreference(fb);
+               return;
+       }
+
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
@@ -3104,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DSPSURF(plane),
-                          intel_fb_gtt_offset(fb, rotation) +
+                          intel_plane_ggtt_offset(plane_state) +
                           intel_crtc->dspaddr_offset);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
                I915_WRITE(DSPLINOFF(plane), linear_offset);
        } else {
                I915_WRITE(DSPADDR(plane),
-                          intel_fb_gtt_offset(fb, rotation) +
+                          intel_plane_ggtt_offset(plane_state) +
                           intel_crtc->dspaddr_offset);
        }
        POSTING_READ(reg);
@@ -3207,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        I915_WRITE(DSPSURF(plane),
-                  intel_fb_gtt_offset(fb, rotation) +
+                  intel_plane_ggtt_offset(plane_state) +
                   intel_crtc->dspaddr_offset);
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3230,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
        }
 }
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
-                       unsigned int rotation)
-{
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct i915_ggtt_view view;
-       struct i915_vma *vma;
-
-       intel_fill_fb_ggtt_view(&view, fb, rotation);
-
-       vma = i915_gem_object_to_ggtt(obj, &view);
-       if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
-                view.type))
-               return -1;
-
-       return i915_ggtt_offset(vma);
-}
-
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 {
        struct drm_device *dev = intel_crtc->base.dev;
@@ -3441,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
        }
 
        I915_WRITE(PLANE_SURF(pipe, 0),
-                  intel_fb_gtt_offset(fb, rotation) + surf_addr);
+                  intel_plane_ggtt_offset(plane_state) + surf_addr);
 
        POSTING_READ(PLANE_SURF(pipe, 0));
 }
@@ -11536,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
                flush_work(&work->mmio_work);
 
        mutex_lock(&dev->struct_mutex);
-       intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
+       intel_unpin_fb_vma(work->old_vma);
        i915_gem_object_put(work->pending_flip_obj);
        mutex_unlock(&dev->struct_mutex);
 
@@ -12246,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                goto cleanup_pending;
        }
 
-       work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
-       work->gtt_offset += intel_crtc->dspaddr_offset;
+       work->old_vma = to_intel_plane_state(primary->state)->vma;
+       to_intel_plane_state(primary->state)->vma = vma;
+
+       work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
        work->rotation = crtc->primary->state->rotation;
 
        /*
@@ -12301,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 cleanup_request:
        i915_add_request_no_flush(request);
 cleanup_unpin:
-       intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
+       to_intel_plane_state(primary->state)->vma = work->old_vma;
+       intel_unpin_fb_vma(vma);
 cleanup_pending:
        atomic_dec(&intel_crtc->unpin_work_count);
 unlock:
@@ -14794,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                        DRM_DEBUG_KMS("failed to pin object\n");
                        return PTR_ERR(vma);
                }
+
+               to_intel_plane_state(new_state)->vma = vma;
        }
 
        return 0;
@@ -14812,19 +14807,12 @@ void
 intel_cleanup_plane_fb(struct drm_plane *plane,
                       struct drm_plane_state *old_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-       struct intel_plane_state *old_intel_state;
-       struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
-       struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
-
-       old_intel_state = to_intel_plane_state(old_state);
-
-       if (!obj && !old_obj)
-               return;
+       struct i915_vma *vma;
 
-       if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
-           !INTEL_INFO(dev_priv)->cursor_needs_physical))
-               intel_unpin_fb_obj(old_state->fb, old_state->rotation);
+       /* Should only be called after a successful intel_prepare_plane_fb()! */
+       vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
+       if (vma)
+               intel_unpin_fb_vma(vma);
 }
 
 int
@@ -15166,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
        if (!obj)
                addr = 0;
        else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
-               addr = i915_gem_object_ggtt_offset(obj, NULL);
+               addr = intel_plane_ggtt_offset(state);
        else
                addr = obj->phys_handle->busaddr;
 
@@ -17066,41 +17054,12 @@ void intel_display_resume(struct drm_device *dev)
 void intel_modeset_gem_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_crtc *c;
-       struct drm_i915_gem_object *obj;
 
        intel_init_gt_powersave(dev_priv);
 
        intel_modeset_init_hw(dev);
 
        intel_setup_overlay(dev_priv);
-
-       /*
-        * Make sure any fbs we allocated at startup are properly
-        * pinned & fenced.  When we do the allocation it's too early
-        * for this.
-        */
-       for_each_crtc(dev, c) {
-               struct i915_vma *vma;
-
-               obj = intel_fb_obj(c->primary->fb);
-               if (obj == NULL)
-                       continue;
-
-               mutex_lock(&dev->struct_mutex);
-               vma = intel_pin_and_fence_fb_obj(c->primary->fb,
-                                                c->primary->state->rotation);
-               mutex_unlock(&dev->struct_mutex);
-               if (IS_ERR(vma)) {
-                       DRM_ERROR("failed to pin boot fb on pipe %d\n",
-                                 to_intel_crtc(c)->pipe);
-                       drm_framebuffer_unreference(c->primary->fb);
-                       c->primary->fb = NULL;
-                       c->primary->crtc = c->primary->state->crtc = NULL;
-                       update_state_fb(c->primary);
-                       c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
-               }
-       }
 }
 
 int intel_connector_register(struct drm_connector *connector)
index cd72ae171eeb673de11f1ca6dcc6f1ab06fde81c..03a2112004f91e1d5ac011cabc255a36867e0e71 100644 (file)
@@ -377,6 +377,7 @@ struct intel_atomic_state {
 struct intel_plane_state {
        struct drm_plane_state base;
        struct drm_rect clip;
+       struct i915_vma *vma;
 
        struct {
                u32 offset;
@@ -1046,6 +1047,7 @@ struct intel_flip_work {
        struct work_struct mmio_work;
 
        struct drm_crtc *crtc;
+       struct i915_vma *old_vma;
        struct drm_framebuffer *old_fb;
        struct drm_i915_gem_object *pending_flip_obj;
        struct drm_pending_vblank_event *event;
@@ -1273,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct drm_modeset_acquire_ctx *ctx);
 struct i915_vma *
 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
+void intel_unpin_fb_vma(struct i915_vma *vma);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
                           struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1362,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
+{
+       return i915_ggtt_offset(state->vma);
+}
 
 u32 skl_plane_ctl_format(uint32_t pixel_format);
 u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
index 62f215b12eb5274b8251d3f46d2a4fdbfc590e96..f3a1d6a5cabe9fcf76f5812dc526781b678f7e41 100644 (file)
@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
        if (IS_I945GM(dev_priv))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-       fbc_ctl |= params->fb.fence_reg;
+       fbc_ctl |= params->vma->fence->id;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
 }
 
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
        else
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
-               dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
+       if (params->vma->fence) {
+               dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
                I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
        } else {
                I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
                break;
        }
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+       if (params->vma->fence) {
                dpfc_ctl |= DPFC_CTL_FENCE_EN;
                if (IS_GEN5(dev_priv))
-                       dpfc_ctl |= params->fb.fence_reg;
+                       dpfc_ctl |= params->vma->fence->id;
                if (IS_GEN6(dev_priv)) {
                        I915_WRITE(SNB_DPFC_CTL_SA,
-                                  SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+                                  SNB_CPU_FENCE_ENABLE |
+                                  params->vma->fence->id);
                        I915_WRITE(DPFC_CPU_FENCE_OFFSET,
                                   params->crtc.fence_y_offset);
                }
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
        }
 
        I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
-       I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
+       I915_WRITE(ILK_FBC_RT_BASE,
+                  i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
                break;
        }
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+       if (params->vma->fence) {
                dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
                I915_WRITE(SNB_DPFC_CTL_SA,
-                          SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+                          SNB_CPU_FENCE_ENABLE |
+                          params->vma->fence->id);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
        } else {
                I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
        return effective_w <= max_w && effective_h <= max_h;
 }
 
-/* XXX replace me when we have VMA tracking for intel_plane_state */
-static int get_fence_id(struct drm_framebuffer *fb)
-{
-       struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
-
-       return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
-}
-
 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                                         struct intel_crtc_state *crtc_state,
                                         struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;
        struct drm_framebuffer *fb = plane_state->base.fb;
-       struct drm_i915_gem_object *obj;
+
+       cache->vma = NULL;
 
        cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        if (!cache->plane.visible)
                return;
 
-       obj = intel_fb_obj(fb);
-
-       /* FIXME: We lack the proper locking here, so only run this on the
-        * platforms that need. */
-       if (IS_GEN(dev_priv, 5, 6))
-               cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
        cache->fb.pixel_format = fb->pixel_format;
        cache->fb.stride = fb->pitches[0];
-       cache->fb.fence_reg = get_fence_id(fb);
-       cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
+
+       cache->vma = plane_state->vma;
 }
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
                return false;
        }
 
-       if (!cache->plane.visible) {
+       if (!cache->vma) {
                fbc->no_fbc_reason = "primary plane not visible";
                return false;
        }
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
         * so have no fence associated with it) due to aperture constaints
         * at the time of pinning.
         */
-       if (cache->fb.tiling_mode != I915_TILING_X ||
-           cache->fb.fence_reg == I915_FENCE_REG_NONE) {
+       if (!cache->vma->fence) {
                fbc->no_fbc_reason = "framebuffer not tiled or fenced";
                return false;
        }
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
         * zero. */
        memset(params, 0, sizeof(*params));
 
+       params->vma = cache->vma;
+
        params->crtc.pipe = crtc->pipe;
        params->crtc.plane = crtc->plane;
        params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
 
        params->fb.pixel_format = cache->fb.pixel_format;
        params->fb.stride = cache->fb.stride;
-       params->fb.fence_reg = cache->fb.fence_reg;
 
        params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
-
-       params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
 }
 
 static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
index 8cf2d80f22540a35dc4245a842d8473ce7d852d9..f4a8c4fc57c4e654a1af91903275189c841b35bd 100644 (file)
@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
        drm_fb_helper_release_fbi(helper);
 out_unpin:
-       intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+       intel_unpin_fb_vma(vma);
 out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 
        if (ifbdev->fb) {
                mutex_lock(&ifbdev->helper.dev->struct_mutex);
-               intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+               intel_unpin_fb_vma(ifbdev->vma);
                mutex_unlock(&ifbdev->helper.dev->struct_mutex);
 
                drm_framebuffer_remove(&ifbdev->fb->base);
index 8f131a08d440cf02cbcd9ffe255b94e3f4eb6077..242a73e66d82862bea3ba881474efe327cd02d6e 100644 (file)
@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
 
        I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
        I915_WRITE(PLANE_SURF(pipe, plane),
-                  intel_fb_gtt_offset(fb, rotation) + surf_addr);
+                  intel_plane_ggtt_offset(plane_state) + surf_addr);
        POSTING_READ(PLANE_SURF(pipe, plane));
 }
 
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
        I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
        I915_WRITE(SPCNTR(pipe, plane), sprctl);
        I915_WRITE(SPSURF(pipe, plane),
-                  intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
        POSTING_READ(SPSURF(pipe, plane));
 }
 
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
                I915_WRITE(SPRSCALE(pipe), sprscale);
        I915_WRITE(SPRCTL(pipe), sprctl);
        I915_WRITE(SPRSURF(pipe),
-                  intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
        POSTING_READ(SPRSURF(pipe));
 }
 
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
        I915_WRITE(DVSSCALE(pipe), dvsscale);
        I915_WRITE(DVSCNTR(pipe), dvscntr);
        I915_WRITE(DVSSURF(pipe),
-                  intel_fb_gtt_offset(fb, rotation) + dvssurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
        POSTING_READ(DVSSURF(pipe));
 }
 
index 74856a8b8f35943b08a59f8aed5a546e98058d3c..e64f52464ecf55b83a17f25b434cb9e1474c10e1 100644 (file)
@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
                uint32_t mpllP;
 
                pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+               mpllP = (mpllP >> 8) & 0xf;
                if (!mpllP)
                        mpllP = 4;
 
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
                uint32_t clock;
 
                pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
-               return clock;
+               return clock / 1000;
        }
 
        ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
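
The two nv04 clock fixes are about field extraction and units: the P divisor sits in bits 11:8 of the config-space dword (hence the new shift and mask, with 4 as the fallback when the field reads zero), and the other path's raw value needed dividing by 1000 to match the units the caller expects. The extraction on its own:

    #include <stdint.h>
    #include <stdio.h>

    /* Pull a 4-bit field out of bits 11:8 of a config-space dword, with a
       fallback when the field reads as zero (mirrors the mpllP handling). */
    static unsigned mpll_p(uint32_t dword)
    {
        unsigned p = (dword >> 8) & 0xf;
        return p ? p : 4;
    }

    int main(void)
    {
        printf("%u\n", mpll_p(0x00000300));   /* bits 11:8 = 3     */
        printf("%u\n", mpll_p(0x00000000));   /* falls back to 4   */
        return 0;
    }
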
index ccdce1b4eec4b8bf183235ebae2eb5395a306430..d5e58a38f160182354b8c9a126bdcbb5d459f40a 100644 (file)
@@ -99,6 +99,7 @@ struct nv84_fence_priv {
        struct nouveau_bo *bo;
        struct nouveau_bo *bo_gart;
        u32 *suspend;
+       struct mutex mutex;
 };
 
 int  nv84_fence_context_new(struct nouveau_channel *);
index 187ecdb8200273baa77c41a42fbc65c3bcb93db6..21a5775028cc612e9a6c81e280777329f18233fd 100644 (file)
@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev)
 }
 
 /* nouveau_led.c */
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
 int  nouveau_led_init(struct drm_device *dev);
 void nouveau_led_suspend(struct drm_device *dev);
 void nouveau_led_resume(struct drm_device *dev);
index 08f9c6fa0f7f210d3e3fd5a0fbe8f11ff40b1972..1fba3862274474f0001deaec9fefaf6b0fd324b4 100644 (file)
@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
        if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
                /* block access to objects not created via this interface */
                owner = argv->v0.owner;
-               if (argv->v0.object == 0ULL)
+               if (argv->v0.object == 0ULL &&
+                   argv->v0.type != NVIF_IOCTL_V0_DEL)
                        argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
                else
                        argv->v0.owner = NVDRM_OBJECT_USIF;
index 2c2c645076614b4f9c187d24a5e9d2667ea27778..32097fd615fd1e3a5954019c02dec36bd2e8db8c 100644 (file)
@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
                }
        }
 
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               if (crtc->state->event)
+                       drm_crtc_vblank_get(crtc);
+       }
+
        /* Update plane(s). */
        for_each_plane_in_state(state, plane, plane_state, i) {
                struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
                        drm_crtc_send_vblank_event(crtc, crtc->state->event);
                        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
                        crtc->state->event = NULL;
+                       drm_crtc_vblank_put(crtc);
                }
        }
 
index 52b87ae83e7b4d0df54e003d58783eddd8deb6f5..f0b322bec7df22de23bbae372333fb484ac6d9ed 100644 (file)
@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
        struct nv84_fence_chan *fctx = chan->fence;
 
        nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+       mutex_lock(&priv->mutex);
        nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
        nouveau_bo_vma_del(priv->bo, &fctx->vma);
+       mutex_unlock(&priv->mutex);
        nouveau_fence_context_del(&fctx->base);
        chan->fence = NULL;
        nouveau_fence_context_free(&fctx->base);
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
        fctx->base.sync32 = nv84_fence_sync32;
        fctx->base.sequence = nv84_fence_read(chan);
 
+       mutex_lock(&priv->mutex);
        ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
        if (ret == 0) {
                ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
                                        &fctx->vma_gart);
        }
+       mutex_unlock(&priv->mutex);
 
        if (ret)
                nv84_fence_context_del(chan);
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm)
        priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
        priv->base.uevent = true;
 
+       mutex_init(&priv->mutex);
+
        /* Use VRAM if there is any ; otherwise fallback to system memory */
        domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
                         /*
index 6f0436df021953337ba29f0ff9c02c507214b07b..f8f2f16c22a2a2502bf283c63a5d1fc124d0ff89 100644 (file)
@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
                        );
                }
                for (i = 0; i < size; i++)
-                       nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
+                       nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
                for (; i < 0x60; i++)
                        nvkm_wr32(device, 0x61c440 + soff, (i << 8));
                nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
index 567466f93cd5d9645020e3b89c282808d88397e4..0db8efbf1c2e2e9cd84b689098bdba9d666843bc 100644 (file)
@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
        case 0x94:
        case 0x96:
        case 0x98:
-       case 0xaa:
-       case 0xac:
                return true;
        default:
                break;
index e0c143b865f39cb36074b5e524638530cadeede9..30bd4a6a9d466e11755bf95cee6929473ae65adf 100644 (file)
  *   2.46.0 - Add PFP_SYNC_ME support on evergreen
  *   2.47.0 - Add UVD_NO_OP register support
  *   2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
+ *   2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
  */
 #define KMS_DRIVER_MAJOR       2
-#define KMS_DRIVER_MINOR       48
+#define KMS_DRIVER_MINOR       49
 #define KMS_DRIVER_PATCHLEVEL  0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
index 0bcffd8a7bd3ceac0de37cd0b44344011963be77..96683f5b2b1b722db08de97550aa8cdf99444a1c 100644 (file)
@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 
        man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
-       args->vram_size = rdev->mc.real_vram_size;
-       args->vram_visible = (u64)man->size << PAGE_SHIFT;
+       args->vram_size = (u64)man->size << PAGE_SHIFT;
+       args->vram_visible = rdev->mc.visible_vram_size;
        args->vram_visible -= rdev->vram_pin_size;
        args->gart_size = rdev->mc.gtt_size;
        args->gart_size -= rdev->gart_pin_size;
index cd49cb17eb7fb385ddbf507ef14b2ea2be090159..308dbda700ebdaeb02f222aa46dc7bb79c24c0da 100644 (file)
@@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
                return ret;
        }
 
+       init_cached_read_index(channel);
        next_read_location = hv_get_next_read_location(inring_info);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
                                                    sizeof(desc),
index 2bbf0c521bebb5c44840a8ce5296024910d8d73d..7d61b566e148dddd21ce2986cba233567802f93c 100644 (file)
@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
 
 static int palmas_gpadc_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct palmas_gpadc *adc = iio_priv(indio_dev);
        int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
        int ret;
@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
 
 static int palmas_gpadc_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct palmas_gpadc *adc = iio_priv(indio_dev);
        int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
        int ret;
index 9a081465c42f4225d26747a734c5daceea68c842..6bb23a49e81eb8cf304a2f81f50c5e36a91e2d48 100644 (file)
@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
 
 static int __maybe_unused afe4403_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
        struct afe4403_data *afe = iio_priv(indio_dev);
        int ret;
 
@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
 
 static int __maybe_unused afe4403_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
        struct afe4403_data *afe = iio_priv(indio_dev);
        int ret;
 
index 45266404f7e3b5bd7c7790a0b05ff298fc24d5a9..964f5231a831c437c277e4bcfe292720104043f7 100644 (file)
@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
 
 static int __maybe_unused afe4404_suspend(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct afe4404_data *afe = iio_priv(indio_dev);
        int ret;
 
@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
 
 static int __maybe_unused afe4404_resume(struct device *dev)
 {
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
        struct afe4404_data *afe = iio_priv(indio_dev);
        int ret;
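
The palmas_gpadc, afe4403 and afe4404 changes are all the same fix: in these dev_pm_ops callbacks the struct device is the raw platform/SPI/I2C device, not the iio_dev's own device, so dev_to_iio_dev() yielded a bogus pointer; the iio_dev has to be recovered from the driver data that probe() stored. A generic sketch of that store-in-probe, fetch-in-suspend pattern (hypothetical names, not the IIO API):

    #include <stdio.h>

    struct device   { void *driver_data; };
    struct my_state { int value; };

    /* probe(): remember our state on the raw bus device */
    static void probe(struct device *dev, struct my_state *st)
    {
        dev->driver_data = st;                    /* like spi_set_drvdata() */
    }

    /* suspend(): the callback only gets the bus device back, so the state
       must come from driver_data, not from casting dev to something else */
    static int suspend(struct device *dev)
    {
        struct my_state *st = dev->driver_data;   /* like dev_get_drvdata() */
        printf("suspending, value=%d\n", st->value);
        return 0;
    }

    int main(void)
    {
        struct device spi_dev = { 0 };
        struct my_state st = { .value = 42 };

        probe(&spi_dev, &st);
        return suspend(&spi_dev);
    }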
 
index 90ab8a2d2846f8a8591ee6b1615dce2c984020ef..183c14329d6e350f6325b0e77a95dcb56de892dd 100644 (file)
@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
 
        mutex_lock(&data->lock);
 
-       while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
+       while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
                ret = max30100_read_measurement(data);
                if (ret)
                        break;
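
The max30100 change is a one-parenthesis precedence fix: '>' binds tighter than '=', so the old condition assigned the result of the comparison (0 or 1) to cnt rather than the FIFO count. A minimal demonstration:

    #include <stdio.h>

    static int fifo_count(void) { return 5; }

    int main(void)
    {
        int cnt;

        cnt = fifo_count() > 0;             /* parses as cnt = (5 > 0), i.e. 1 */
        printf("without parens: %d\n", cnt);

        if ((cnt = fifo_count()) > 0)       /* assign first, then compare      */
            printf("with parens:    %d\n", cnt);
        return 0;
    }
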
index 9c47bc98f3acdea4cb4b56b23a72e582c34a413b..2a22ad92033306d02eec60f288c00a65d5186c37 100644 (file)
@@ -71,7 +71,8 @@
  * a) select an implementation using busy loop polling on those systems
  * b) use the checksum to do some probabilistic decoding
  */
-#define DHT11_START_TRANSMISSION       18  /* ms */
+#define DHT11_START_TRANSMISSION_MIN   18000  /* us */
+#define DHT11_START_TRANSMISSION_MAX   20000  /* us */
 #define DHT11_MIN_TIMERES      34000  /* ns */
 #define DHT11_THRESHOLD                49000  /* ns */
 #define DHT11_AMBIG_LOW                23000  /* ns */
@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
                ret = gpio_direction_output(dht11->gpio, 0);
                if (ret)
                        goto err;
-               msleep(DHT11_START_TRANSMISSION);
+               usleep_range(DHT11_START_TRANSMISSION_MIN,
+                            DHT11_START_TRANSMISSION_MAX);
                ret = gpio_direction_input(dht11->gpio);
                if (ret)
                        goto err;
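
Note: the dht11 change swaps msleep() for usleep_range(): for short waits msleep() rounds up to jiffies and can overshoot noticeably, while usleep_range() is hrtimer-backed and lets the caller state an explicit min/max window. A sketch of the idiom, reusing the 18-20 ms window from the hunk above (my_start_pulse is hypothetical):

	#include <linux/delay.h>

	#define START_PULSE_MIN_US	18000	/* values taken from the patch above */
	#define START_PULSE_MAX_US	20000

	static void my_start_pulse(void)
	{
		/* explicit window; msleep(18) could legally sleep several
		 * extra jiffies on a coarse HZ configuration */
		usleep_range(START_PULSE_MIN_US, START_PULSE_MAX_US);
	}
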
index 7c6c57216bf29f301690d270ca5f05a55dea3d3f..8a9f742d8ed72c810d49115ff802706d2b9d7e7f 100644 (file)
@@ -1534,18 +1534,18 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
                return PTR_ERR(key);
        }
 
-       rcu_read_lock();
+       down_read(&key->sem);
 
        ukp = user_key_payload(key);
        if (!ukp) {
-               rcu_read_unlock();
+               up_read(&key->sem);
                key_put(key);
                kzfree(new_key_string);
                return -EKEYREVOKED;
        }
 
        if (cc->key_size != ukp->datalen) {
-               rcu_read_unlock();
+               up_read(&key->sem);
                key_put(key);
                kzfree(new_key_string);
                return -EINVAL;
@@ -1553,7 +1553,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
 
        memcpy(cc->key, ukp->data, cc->key_size);
 
-       rcu_read_unlock();
+       up_read(&key->sem);
        key_put(key);
 
        /* clear the flag since following operations may invalidate previously valid key */
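
Note: the dm-crypt hunks switch payload access from an RCU read-side section to holding the key's semaphore for reading; down_read(&key->sem) keeps the payload from being updated or revoked while it is validated and copied. A condensed sketch of the resulting pattern (key, dst and expected_len stand in for the driver's own state; error handling trimmed):

	struct user_key_payload *ukp;

	down_read(&key->sem);
	ukp = user_key_payload(key);		/* stable while key->sem is held */
	if (!ukp || ukp->datalen != expected_len) {
		up_read(&key->sem);
		key_put(key);
		return ukp ? -EINVAL : -EKEYREVOKED;
	}
	memcpy(dst, ukp->data, ukp->datalen);
	up_read(&key->sem);
	key_put(key);
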
index 6400cffb986df21be7289dce8d2a4d4ff228a9a2..3570bcb7a4a4e5cade63c39b9071ed610da1e4ae 100644 (file)
@@ -427,7 +427,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
        unsigned long flags;
        struct priority_group *pg;
        struct pgpath *pgpath;
-       bool bypassed = true;
+       unsigned bypassed = 1;
 
        if (!atomic_read(&m->nr_valid_paths)) {
                clear_bit(MPATHF_QUEUE_IO, &m->flags);
@@ -466,7 +466,7 @@ check_current_pg:
         */
        do {
                list_for_each_entry(pg, &m->priority_groups, list) {
-                       if (pg->bypassed == bypassed)
+                       if (pg->bypassed == !!bypassed)
                                continue;
                        pgpath = choose_path_in_pg(m, pg, nr_bytes);
                        if (!IS_ERR_OR_NULL(pgpath)) {
index 9d7275fb541ad422b171cbe0ee43335e3e1efc27..6e702fc69a83cb27f6bc1792d4871ad18d708f71 100644 (file)
@@ -779,6 +779,10 @@ static void dm_old_request_fn(struct request_queue *q)
                int srcu_idx;
                struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 
+               if (unlikely(!map)) {
+                       dm_put_live_table(md, srcu_idx);
+                       return;
+               }
                ti = dm_table_find_target(map, pos);
                dm_put_live_table(md, srcu_idx);
        }
index ebb5e391b800e0bbbca2fd9da926a4ff191839e0..87a6b65ed3af5f72f176625bcc2bfad7e21b0885 100644 (file)
@@ -1206,7 +1206,7 @@ static int cec_config_thread_func(void *arg)
                las->log_addr[i] = CEC_LOG_ADDR_INVALID;
                if (last_la == CEC_LOG_ADDR_INVALID ||
                    last_la == CEC_LOG_ADDR_UNREGISTERED ||
-                   !(last_la & type2mask[type]))
+                   !((1 << last_la) & type2mask[type]))
                        last_la = la_list[0];
 
                err = cec_config_log_addr(adap, i, last_la);
index 23909804ffb840d3187f21f67180a634a769425e..0def99590d162ebcfb86a16a6b9d5adf96f19cb6 100644 (file)
@@ -2733,7 +2733,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
                if (intmask & SDHCI_INT_RETUNE)
                        mmc_retune_needed(host->mmc);
 
-               if (intmask & SDHCI_INT_CARD_INT) {
+               if ((intmask & SDHCI_INT_CARD_INT) &&
+                   (host->ier & SDHCI_INT_CARD_INT)) {
                        sdhci_enable_sdio_irq_nolock(host, false);
                        host->thread_isr |= SDHCI_INT_CARD_INT;
                        result = IRQ_WAKE_THREAD;
index 87226685f74215a2093e59a99bd8042a2e2585c5..8fa18fc17cd2e25f2e3458e608abe6f5a96b60d9 100644 (file)
 
 static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 {
-       u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-       writel(value, reg_addr + reg);
+       writel(value, base + reg);
 }
 
 #define dsaf_write_dev(a, reg, value) \
@@ -1024,9 +1022,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 
 static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
 {
-       u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-       return readl(reg_addr + reg);
+       return readl(base + reg);
 }
 
 static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
index ca730d4abbb4bde8b80d635d7e86a0669480e20e..c4d714fcc7dae759998a49a1f90f9ab1ee9bdda3 100644 (file)
@@ -1113,7 +1113,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
        memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
        new_prof.tx_ring_size = tx_size;
        new_prof.rx_ring_size = rx_size;
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
        if (err)
                goto out;
 
@@ -1788,7 +1788,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
        new_prof.tx_ring_num[TX_XDP] = xdp_count;
        new_prof.rx_ring_num = channel->rx_count;
 
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
        if (err)
                goto out;
 
index 60a021c34881fecd1e1b3c6104051323e4413232..748e9f65c386b6e49ce8112cda6f47af6047cba8 100644 (file)
@@ -2048,6 +2048,8 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
                        if (priv->tx_cq[t] && priv->tx_cq[t][i])
                                mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
                }
+               kfree(priv->tx_ring[t]);
+               kfree(priv->tx_cq[t]);
        }
 
        for (i = 0; i < priv->rx_ring_num; i++) {
@@ -2190,9 +2192,11 @@ static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
 
 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                                struct mlx4_en_priv *tmp,
-                               struct mlx4_en_port_profile *prof)
+                               struct mlx4_en_port_profile *prof,
+                               bool carry_xdp_prog)
 {
-       int t;
+       struct bpf_prog *xdp_prog;
+       int i, t;
 
        mlx4_en_copy_priv(tmp, priv, prof);
 
@@ -2206,6 +2210,23 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                }
                return -ENOMEM;
        }
+
+       /* All rx_rings has the same xdp_prog.  Pick the first one. */
+       xdp_prog = rcu_dereference_protected(
+               priv->rx_ring[0]->xdp_prog,
+               lockdep_is_held(&priv->mdev->state_lock));
+
+       if (xdp_prog && carry_xdp_prog) {
+               xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
+               if (IS_ERR(xdp_prog)) {
+                       mlx4_en_free_resources(tmp);
+                       return PTR_ERR(xdp_prog);
+               }
+               for (i = 0; i < tmp->rx_ring_num; i++)
+                       rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
+                                          xdp_prog);
+       }
+
        return 0;
 }
 
@@ -2220,7 +2241,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
-       int t;
 
        en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
@@ -2254,11 +2274,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
        mlx4_en_free_resources(priv);
        mutex_unlock(&mdev->state_lock);
 
-       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
-               kfree(priv->tx_ring[t]);
-               kfree(priv->tx_cq[t]);
-       }
-
        free_netdev(dev);
 }
 
@@ -2761,7 +2776,7 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
                en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
        }
 
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
        if (err) {
                if (prog)
                        bpf_prog_sub(prog, priv->rx_ring_num - 1);
@@ -3505,7 +3520,7 @@ int mlx4_en_reset_config(struct net_device *dev,
        memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
        memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
 
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
        if (err)
                goto out;
 
index f15ddba3659aac38471059c6bcbf050717946c4e..d85e6446f9d99e38c75b97d7fba29bd057e0a16f 100644 (file)
@@ -515,8 +515,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
                return;
 
        for (ring = 0; ring < priv->rx_ring_num; ring++) {
-               if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+               if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
+                       local_bh_disable();
                        napi_reschedule(&priv->rx_cq[ring]->napi);
+                       local_bh_enable();
+               }
        }
 }
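
Note: napi_reschedule() ends up raising NET_RX_SOFTIRQ, which is only safe from interrupt/BH context or with bottom halves disabled; wrapping the call in local_bh_disable()/local_bh_enable() on this process-context recovery path also guarantees the raised softirq is processed when BHs are re-enabled. The shape of the fix, reduced to its essentials (my_napi is a stand-in for the ring's napi_struct):

	/* process context: make the NAPI kick look like BH context */
	local_bh_disable();
	napi_reschedule(&my_napi);	/* raises NET_RX_SOFTIRQ */
	local_bh_enable();		/* pending softirqs run here */
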
 
index ba1c6cd0cc79590075f4420a930b613c9fdedc62..cec59bc264c9ac197048fd7c98bcd5cf25de0efd 100644 (file)
@@ -679,7 +679,8 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
 
 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                                struct mlx4_en_priv *tmp,
-                               struct mlx4_en_port_profile *prof);
+                               struct mlx4_en_port_profile *prof,
+                               bool carry_xdp_prog);
 void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
                                    struct mlx4_en_priv *tmp);
 
index 5cfdb1a1b4c17c793c4b5e605b38919c6840904f..fd6ebbefd919344e0c7dab94316488a43bc3e2bc 100644 (file)
@@ -1254,6 +1254,9 @@ void netvsc_channel_cb(void *context)
            netvsc_channel_idle(net_device, q_idx))
                return;
 
+       /* commit_rd_index() -> hv_signal_on_read() needs this. */
+       init_cached_read_index(channel);
+
        while ((desc = get_next_pkt_raw(channel)) != NULL) {
                netvsc_process_raw_pkt(device, channel, net_device,
                                       ndev, desc->trans_id, desc);
index 4026185658381df004a7d641e2be7bcb9a45b509..c27011bbe30c52d2eb892ab0d86f8cf3d6f4deb9 100644 (file)
@@ -681,7 +681,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
        size_t linear;
 
        if (q->flags & IFF_VNET_HDR) {
-               vnet_hdr_len = q->vnet_hdr_sz;
+               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
 
                err = -EINVAL;
                if (len < vnet_hdr_len)
@@ -820,7 +820,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 
        if (q->flags & IFF_VNET_HDR) {
                struct virtio_net_hdr vnet_hdr;
-               vnet_hdr_len = q->vnet_hdr_sz;
+               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
                if (iov_iter_count(iter) < vnet_hdr_len)
                        return -EINVAL;
 
index 92b08383cafa8b88e8d5b79ea3a5c0da9998770f..0d8f4d3847f668d9bf8176e679cc26d9487f59d8 100644 (file)
@@ -920,6 +920,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
                return -EIO;
        }
 
+       if (!try_module_get(d->driver->owner)) {
+               dev_err(&dev->dev, "failed to get the device driver module\n");
+               return -EIO;
+       }
+
        get_device(d);
 
        /* Assume that if there is no driver, that it doesn't
@@ -977,6 +982,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 error:
        phy_detach(phydev);
        put_device(d);
+       module_put(d->driver->owner);
        if (ndev_owner != bus->owner)
                module_put(bus->owner);
        return err;
@@ -1059,6 +1065,7 @@ void phy_detach(struct phy_device *phydev)
        bus = phydev->mdio.bus;
 
        put_device(&phydev->mdio.dev);
+       module_put(phydev->mdio.dev.driver->owner);
        if (ndev_owner != bus->owner)
                module_put(bus->owner);
 }
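
Note: the phylib hunks pin the PHY driver's module for as long as a device is attached: take a reference with try_module_get() before relying on the driver, drop it on the attach error path and again in phy_detach(). Generic form of the pattern (my_attach and my_do_attach are hypothetical):

	static int my_attach(struct device_driver *drv)
	{
		int err;

		if (!try_module_get(drv->owner))	/* driver module may be unloading */
			return -EIO;

		err = my_do_attach();			/* hypothetical attach work */
		if (err)
			module_put(drv->owner);		/* balance the reference on failure */
		return err;
	}

	/* the matching module_put() belongs in the detach path, as in phy_detach() above */
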
index 8a7d6b905362389ace72731c2a8e1f163c298c47..30863e378925b3555dea6eadb99a02678779cfd5 100644 (file)
@@ -1207,9 +1207,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        }
 
        if (tun->flags & IFF_VNET_HDR) {
-               if (len < tun->vnet_hdr_sz)
+               int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+
+               if (len < vnet_hdr_sz)
                        return -EINVAL;
-               len -= tun->vnet_hdr_sz;
+               len -= vnet_hdr_sz;
 
                if (!copy_from_iter_full(&gso, sizeof(gso), from))
                        return -EFAULT;
@@ -1220,7 +1222,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
                if (tun16_to_cpu(tun, gso.hdr_len) > len)
                        return -EINVAL;
-               iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
+               iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
        }
 
        if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
@@ -1371,7 +1373,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                vlan_hlen = VLAN_HLEN;
 
        if (tun->flags & IFF_VNET_HDR)
-               vnet_hdr_sz = tun->vnet_hdr_sz;
+               vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
 
        total = skb->len + vlan_hlen + vnet_hdr_sz;
 
index 3daa41bdd4eae0e5d44451458cb456175f8aedb2..0acc9b640419a2e94bc9a2d3d43a5a2a65800c8b 100644 (file)
@@ -776,7 +776,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        struct net_device *netdev;
        struct catc *catc;
        u8 broadcast[ETH_ALEN];
-       int i, pktsz;
+       int pktsz, ret;
 
        if (usb_set_interface(usbdev,
                        intf->altsetting->desc.bInterfaceNumber, 1)) {
@@ -811,12 +811,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        if ((!catc->ctrl_urb) || (!catc->tx_urb) || 
            (!catc->rx_urb) || (!catc->irq_urb)) {
                dev_err(&intf->dev, "No free urbs available.\n");
-               usb_free_urb(catc->ctrl_urb);
-               usb_free_urb(catc->tx_urb);
-               usb_free_urb(catc->rx_urb);
-               usb_free_urb(catc->irq_urb);
-               free_netdev(netdev);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto fail_free;
        }
 
        /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
@@ -844,15 +840,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                 catc->irq_buf, 2, catc_irq_done, catc, 1);
 
        if (!catc->is_f5u011) {
+               u32 *buf;
+               int i;
+
                dev_dbg(dev, "Checking memory size\n");
 
-               i = 0x12345678;
-               catc_write_mem(catc, 0x7a80, &i, 4);
-               i = 0x87654321; 
-               catc_write_mem(catc, 0xfa80, &i, 4);
-               catc_read_mem(catc, 0x7a80, &i, 4);
+               buf = kmalloc(4, GFP_KERNEL);
+               if (!buf) {
+                       ret = -ENOMEM;
+                       goto fail_free;
+               }
+
+               *buf = 0x12345678;
+               catc_write_mem(catc, 0x7a80, buf, 4);
+               *buf = 0x87654321;
+               catc_write_mem(catc, 0xfa80, buf, 4);
+               catc_read_mem(catc, 0x7a80, buf, 4);
          
-               switch (i) {
+               switch (*buf) {
                case 0x12345678:
                        catc_set_reg(catc, TxBufCount, 8);
                        catc_set_reg(catc, RxBufCount, 32);
@@ -867,6 +872,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                        dev_dbg(dev, "32k Memory\n");
                        break;
                }
+
+               kfree(buf);
          
                dev_dbg(dev, "Getting MAC from SEEROM.\n");
          
@@ -913,16 +920,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        usb_set_intfdata(intf, catc);
 
        SET_NETDEV_DEV(netdev, &intf->dev);
-       if (register_netdev(netdev) != 0) {
-               usb_set_intfdata(intf, NULL);
-               usb_free_urb(catc->ctrl_urb);
-               usb_free_urb(catc->tx_urb);
-               usb_free_urb(catc->rx_urb);
-               usb_free_urb(catc->irq_urb);
-               free_netdev(netdev);
-               return -EIO;
-       }
+       ret = register_netdev(netdev);
+       if (ret)
+               goto fail_clear_intfdata;
+
        return 0;
+
+fail_clear_intfdata:
+       usb_set_intfdata(intf, NULL);
+fail_free:
+       usb_free_urb(catc->ctrl_urb);
+       usb_free_urb(catc->tx_urb);
+       usb_free_urb(catc->rx_urb);
+       usb_free_urb(catc->irq_urb);
+       free_netdev(netdev);
+       return ret;
 }
 
 static void catc_disconnect(struct usb_interface *intf)
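
Note: besides moving the probe-time scratch word off the stack into kmalloc'd memory (USB transfer buffers must be DMA-able, so on-stack data is not safe), the catc rework replaces the duplicated clean-up blocks with a single goto ladder, which is the usual kernel idiom for unwinding partially acquired resources. A generic sketch of such a ladder (my_probe, my_register and the two buffers are hypothetical):

	#include <linux/slab.h>

	static int my_probe(void)
	{
		void *a, *b;
		int ret;

		a = kmalloc(64, GFP_KERNEL);
		if (!a)
			return -ENOMEM;

		b = kmalloc(64, GFP_KERNEL);
		if (!b) {
			ret = -ENOMEM;
			goto fail_free_a;
		}

		ret = my_register(b);		/* hypothetical final step */
		if (ret)
			goto fail_free_b;
		return 0;

	fail_free_b:				/* unwind in reverse order of acquisition */
		kfree(b);
	fail_free_a:
		kfree(a);
		return ret;
	}
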
index 24e803fe9a534c2e23dee6418496dd2755ff8de9..36674484c6fb9b73011619824f7bc60c50b9c1ad 100644 (file)
@@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb)
 
 static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
 {
+       u8 *buf;
        int ret;
 
+       buf = kmalloc(size, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
        ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
                              PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
-                             indx, data, size, 1000);
+                             indx, buf, size, 1000);
        if (ret < 0)
                netif_dbg(pegasus, drv, pegasus->net,
                          "%s returned %d\n", __func__, ret);
+       else if (ret <= size)
+               memcpy(data, buf, ret);
+       kfree(buf);
        return ret;
 }
 
-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
+static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
+                        const void *data)
 {
+       u8 *buf;
        int ret;
 
+       buf = kmemdup(data, size, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
        ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
                              PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
-                             indx, data, size, 100);
+                             indx, buf, size, 100);
        if (ret < 0)
                netif_dbg(pegasus, drv, pegasus->net,
                          "%s returned %d\n", __func__, ret);
+       kfree(buf);
        return ret;
 }
 
 static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
 {
+       u8 *buf;
        int ret;
 
+       buf = kmemdup(&data, 1, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
        ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
                              PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
-                             indx, &data, 1, 1000);
+                             indx, buf, 1, 1000);
        if (ret < 0)
                netif_dbg(pegasus, drv, pegasus->net,
                          "%s returned %d\n", __func__, ret);
+       kfree(buf);
        return ret;
 }
 
index 95b7bd0d7abcac85482da6067626e034a57b56b3..c81c79110cefca9443d614679d8e7cdd4b3295c3 100644 (file)
@@ -155,16 +155,36 @@ static const char driver_name [] = "rtl8150";
 */
 static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
 {
-       return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
-                              RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
-                              indx, 0, data, size, 500);
+       void *buf;
+       int ret;
+
+       buf = kmalloc(size, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+                             RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
+                             indx, 0, buf, size, 500);
+       if (ret > 0 && ret <= size)
+               memcpy(data, buf, ret);
+       kfree(buf);
+       return ret;
 }
 
-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
+static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
 {
-       return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
-                              RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
-                              indx, 0, data, size, 500);
+       void *buf;
+       int ret;
+
+       buf = kmemdup(data, size, GFP_NOIO);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+                             RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
+                             indx, 0, buf, size, 500);
+       kfree(buf);
+       return ret;
 }
 
 static void async_set_reg_cb(struct urb *urb)
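
Note: the pegasus and rtl8150 changes are the same fix: usb_control_msg() hands its buffer to the USB core for DMA, so the data must not live on the caller's stack; both drivers now bounce through kmalloc()/kmemdup(), using GFP_NOIO presumably so the allocation cannot recurse into I/O on these register-access paths. A condensed write-side sketch following the hunks above (the request value and my_set_registers are hypothetical):

	#include <linux/slab.h>
	#include <linux/usb.h>

	static int my_set_registers(struct usb_device *udev, u16 indx,
				    u16 size, const void *data)
	{
		void *buf;
		int ret;

		buf = kmemdup(data, size, GFP_NOIO);	/* DMA-able copy, not the caller's stack */
		if (!buf)
			return -ENOMEM;

		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				      0x05 /* hypothetical request */,
				      USB_TYPE_VENDOR | USB_DIR_OUT,
				      indx, 0, buf, size, 500);
		kfree(buf);
		return ret;
	}
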
index efedd918d10ad13fdd08b4b8b70bd418d3275078..bcbb0c60f1f12bc9d1cd706510d5a97dbc5e3ae3 100644 (file)
@@ -92,7 +92,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       char *fw_name = "rtlwifi/rtl8192cfwU.bin";
+       char *fw_name;
 
        rtl8192ce_bt_reg_init(hw);
 
@@ -161,8 +161,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
        }
 
        /* request fw */
-       if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
+       if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+           !IS_92C_SERIAL(rtlhal->version))
+               fw_name = "rtlwifi/rtl8192cfwU.bin";
+       else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
                fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+       else
+               fw_name = "rtlwifi/rtl8192cfw.bin";
 
        rtlpriv->max_fw_size = 0x4000;
        pr_info("Using firmware %s\n", fw_name);
index a518cb1b59d4238b675fccd695f45003af380296..ce3e8dfa10ad5ccc5285621e5c5b27b5c557e16b 100644 (file)
@@ -52,17 +52,17 @@ static void namespace_blk_release(struct device *dev)
        kfree(nsblk);
 }
 
-static struct device_type namespace_io_device_type = {
+static const struct device_type namespace_io_device_type = {
        .name = "nd_namespace_io",
        .release = namespace_io_release,
 };
 
-static struct device_type namespace_pmem_device_type = {
+static const struct device_type namespace_pmem_device_type = {
        .name = "nd_namespace_pmem",
        .release = namespace_pmem_release,
 };
 
-static struct device_type namespace_blk_device_type = {
+static const struct device_type namespace_blk_device_type = {
        .name = "nd_namespace_blk",
        .release = namespace_blk_release,
 };
@@ -962,8 +962,8 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
        struct nvdimm_drvdata *ndd;
        struct nd_label_id label_id;
        u32 flags = 0, remainder;
+       int rc, i, id = -1;
        u8 *uuid = NULL;
-       int rc, i;
 
        if (dev->driver || ndns->claim)
                return -EBUSY;
@@ -972,11 +972,13 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
 
                uuid = nspm->uuid;
+               id = nspm->id;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
 
                uuid = nsblk->uuid;
                flags = NSLABEL_FLAG_LOCAL;
+               id = nsblk->id;
        }
 
        /*
@@ -1039,10 +1041,11 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 
        /*
         * Try to delete the namespace if we deleted all of its
-        * allocation, this is not the seed device for the region, and
-        * it is not actively claimed by a btt instance.
+        * allocation, this is not the seed or 0th device for the
+        * region, and it is not actively claimed by a btt, pfn, or dax
+        * instance.
         */
-       if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
+       if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
                nd_device_unregister(dev, ND_ASYNC);
 
        return rc;
index a2ac9e641aa9341f2fcca9fa8b968fd874e80a90..6c033c9a2f06921feb49e3ba396d6cf150e19801 100644 (file)
@@ -627,15 +627,12 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
        size = resource_size(&nsio->res);
        npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
        if (nd_pfn->mode == PFN_MODE_PMEM) {
-               unsigned long memmap_size;
-
                /*
                 * vmemmap_populate_hugepages() allocates the memmap array in
                 * HPAGE_SIZE chunks.
                 */
-               memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
-               offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve,
-                               nd_pfn->align) - start;
+               offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
+                               max(nd_pfn->align, HPAGE_SIZE)) - start;
        } else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = ALIGN(start + SZ_8K + dax_label_reserve,
                                nd_pfn->align) - start;
index 17ac1dce32867051298a5489841de8b636835a68..3dd8bcbb3011babd4ad4271d6f6f64733bd9b3f1 100644 (file)
@@ -532,25 +532,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
        link = kzalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                return NULL;
+
        INIT_LIST_HEAD(&link->sibling);
        INIT_LIST_HEAD(&link->children);
        INIT_LIST_HEAD(&link->link);
        link->pdev = pdev;
-       if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
+
+       /*
+        * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
+        * hierarchies.
+        */
+       if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+           pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+               link->root = link;
+       } else {
                struct pcie_link_state *parent;
+
                parent = pdev->bus->parent->self->link_state;
                if (!parent) {
                        kfree(link);
                        return NULL;
                }
+
                link->parent = parent;
+               link->root = link->parent->root;
                list_add(&link->link, &parent->children);
        }
-       /* Setup a pointer to the root port link */
-       if (!link->parent)
-               link->root = link;
-       else
-               link->root = link->parent->root;
 
        list_add(&link->sibling, &link_list);
        pdev->link_state = link;
index e6a512ebeae2762812212ac5b9264f92bb8252be..a3ade9e4ef478ed90311365a4e86db0cbbc394d5 100644 (file)
@@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = {
                        64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
                        BIT(3)),
        AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
-                AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
+                AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
        AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
                 AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
        AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
index a43b0e8a438d305a959d3d745c7c65bf796ca9f9..988a7472c2ab568c3d1c03d1092c0713073a6d28 100644 (file)
@@ -30,9 +30,6 @@
 #include <linux/of_gpio.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/regulator/machine.h>
-#include <linux/acpi.h>
-#include <linux/property.h>
-#include <linux/gpio/consumer.h>
 
 struct fixed_voltage_data {
        struct regulator_desc desc;
@@ -97,44 +94,6 @@ of_get_fixed_voltage_config(struct device *dev,
        return config;
 }
 
-/**
- * acpi_get_fixed_voltage_config - extract fixed_voltage_config structure info
- * @dev: device requesting for fixed_voltage_config
- * @desc: regulator description
- *
- * Populates fixed_voltage_config structure by extracting data through ACPI
- * interface, returns a pointer to the populated structure of NULL if memory
- * alloc fails.
- */
-static struct fixed_voltage_config *
-acpi_get_fixed_voltage_config(struct device *dev,
-                             const struct regulator_desc *desc)
-{
-       struct fixed_voltage_config *config;
-       const char *supply_name;
-       struct gpio_desc *gpiod;
-       int ret;
-
-       config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
-       if (!config)
-               return ERR_PTR(-ENOMEM);
-
-       ret = device_property_read_string(dev, "supply-name", &supply_name);
-       if (!ret)
-               config->supply_name = supply_name;
-
-       gpiod = gpiod_get(dev, "gpio", GPIOD_ASIS);
-       if (IS_ERR(gpiod))
-               return ERR_PTR(-ENODEV);
-
-       config->gpio = desc_to_gpio(gpiod);
-       config->enable_high = device_property_read_bool(dev,
-                                                       "enable-active-high");
-       gpiod_put(gpiod);
-
-       return config;
-}
-
 static struct regulator_ops fixed_voltage_ops = {
 };
 
@@ -155,11 +114,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
                                                     &drvdata->desc);
                if (IS_ERR(config))
                        return PTR_ERR(config);
-       } else if (ACPI_HANDLE(&pdev->dev)) {
-               config = acpi_get_fixed_voltage_config(&pdev->dev,
-                                                      &drvdata->desc);
-               if (IS_ERR(config))
-                       return PTR_ERR(config);
        } else {
                config = dev_get_platdata(&pdev->dev);
        }
index 4864b9d742c0f7915cc792aaacd692c2a7f305b0..716191046a70782b0007033dda6e1402c0d68ea3 100644 (file)
@@ -452,7 +452,7 @@ static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV,
                        vsel = 62;
                else if ((min_uV > 1800000) && (min_uV <= 1900000))
                        vsel = 61;
-               else if ((min_uV > 1350000) && (min_uV <= 1800000))
+               else if ((min_uV > 1500000) && (min_uV <= 1800000))
                        vsel = 60;
                else if ((min_uV > 1350000) && (min_uV <= 1500000))
                        vsel = 59;
index ec91bd07f00a307337283cbea6f72c6e370a0170..c680d76413116c00b80193f5e7db9de2e13441b1 100644 (file)
@@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
 {
        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
        struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+       unsigned long flags;
        int req_size;
+       int ret;
 
        BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
 
@@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
                req_size = sizeof(cmd->req.cmd);
        }
 
-       if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+       ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+       if (ret == -EIO) {
+               cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+               spin_lock_irqsave(&req_vq->vq_lock, flags);
+               virtscsi_complete_cmd(vscsi, cmd);
+               spin_unlock_irqrestore(&req_vq->vq_lock, flags);
+       } else if (ret != 0) {
                return SCSI_MLQUEUE_HOST_BUSY;
+       }
        return 0;
 }
 
index 113f3d6c4b3a6cdeda3fce3abe729fbc927fd9d9..27f75b17679b8f19a5f6100a769dd7b1b0f77455 100644 (file)
@@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void)
 
 int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
 {
+       if (!arche_platform_change_state_cb)
+               return 0;
+
        return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
                                              pdata);
 }
 
 void gb_timesync_platform_unlock_bus(void)
 {
+       if (!arche_platform_change_state_cb)
+               return;
+
        arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
 }
 
index d2e50a27140c9254be2a80b6c6ae69bc71a93b4a..24f9f98968a5d860f83920287a5b7deb4c98bed6 100644 (file)
@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* CBM - Flash disk */
        { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* WORLDE easy key (easykey.25) MIDI controller  */
+       { USB_DEVICE(0x0218, 0x0401), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* HP 5300/5370C scanner */
        { USB_DEVICE(0x03f0, 0x0701), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
index 5490fc51638ede3c565eff9036ff3beaf884d3a9..fd80c1b9c8234cf4de8371c7ca4e528bc4712fc3 100644 (file)
@@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
                if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
                        return -EINVAL;
                length = le32_to_cpu(d->dwSize);
+               if (len < length)
+                       return -EINVAL;
                type = le32_to_cpu(d->dwPropertyDataType);
                if (type < USB_EXT_PROP_UNICODE ||
                    type > USB_EXT_PROP_UNICODE_MULTI) {
@@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
                        return -EINVAL;
                }
                pnl = le16_to_cpu(d->wPropertyNameLength);
+               if (length < 14 + pnl) {
+                       pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
+                                 length, pnl, type);
+                       return -EINVAL;
+               }
                pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
                if (length != 14 + pnl + pdl) {
                        pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
@@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
                }
        }
        if (flags & (1 << i)) {
+               if (len < 4) {
+                       goto error;
+               }
                os_descs_count = get_unaligned_le32(data);
                data += 4;
                len -= 4;
@@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
 
        ENTER();
 
-       if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+       if (unlikely(len < 16 ||
+                    get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
                     get_unaligned_le32(data + 4) != len))
                goto error;
        str_count  = get_unaligned_le32(data + 8);
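
Note: the f_fs hunks all add the same kind of check: every length field in the user-supplied OS-descriptor blob is validated against the data actually remaining before anything behind it is dereferenced (len < length, length < 14 + pnl, len < 4, len < 16). The general rule for parsing untrusted length-prefixed data looks roughly like this (my_hdr and my_parse are hypothetical):

	struct my_hdr {
		__le32	size;		/* total length claimed by the blob */
		__le16	name_len;
		/* name and value data follow */
	} __packed;

	static int my_parse(const u8 *data, size_t len)
	{
		const struct my_hdr *h = (const void *)data;
		u32 size;
		u16 name_len;

		if (len < sizeof(*h))			/* enough bytes for the header itself? */
			return -EINVAL;
		size = le32_to_cpu(h->size);
		if (size > len)				/* claimed length must fit in what we got */
			return -EINVAL;
		name_len = le16_to_cpu(h->name_len);
		if (size < sizeof(*h) + name_len)	/* inner fields must fit inside the claim */
			return -EINVAL;
		/* only now is it safe to read past the header */
		return 0;
	}
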
index fca288bbc8009580ba96198ce1a2a49330074d20..772f1582124255d749ab470a42394498af8f9741 100644 (file)
@@ -594,11 +594,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                                                | MUSB_PORT_STAT_RESUME;
                                musb->rh_timer = jiffies
                                        + msecs_to_jiffies(USB_RESUME_TIMEOUT);
-                               musb->need_finish_resume = 1;
-
                                musb->xceiv->otg->state = OTG_STATE_A_HOST;
                                musb->is_active = 1;
                                musb_host_resume_root_hub(musb);
+                               schedule_delayed_work(&musb->finish_resume_work,
+                                       msecs_to_jiffies(USB_RESUME_TIMEOUT));
                                break;
                        case OTG_STATE_B_WAIT_ACON:
                                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
@@ -1925,6 +1925,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
 static void musb_irq_work(struct work_struct *data)
 {
        struct musb *musb = container_of(data, struct musb, irq_work.work);
+       int error;
+
+       error = pm_runtime_get_sync(musb->controller);
+       if (error < 0) {
+               dev_err(musb->controller, "Could not enable: %i\n", error);
+
+               return;
+       }
 
        musb_pm_runtime_check_session(musb);
 
@@ -1932,6 +1940,9 @@ static void musb_irq_work(struct work_struct *data)
                musb->xceiv_old_state = musb->xceiv->otg->state;
                sysfs_notify(&musb->controller->kobj, NULL, "mode");
        }
+
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 }
 
 static void musb_recover_from_babble(struct musb *musb)
@@ -2710,11 +2721,6 @@ static int musb_resume(struct device *dev)
        mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
        if ((devctl & mask) != (musb->context.devctl & mask))
                musb->port1_status = 0;
-       if (musb->need_finish_resume) {
-               musb->need_finish_resume = 0;
-               schedule_delayed_work(&musb->finish_resume_work,
-                                     msecs_to_jiffies(USB_RESUME_TIMEOUT));
-       }
 
        /*
         * The USB HUB code expects the device to be in RPM_ACTIVE once it came
@@ -2766,12 +2772,6 @@ static int musb_runtime_resume(struct device *dev)
 
        musb_restore_context(musb);
 
-       if (musb->need_finish_resume) {
-               musb->need_finish_resume = 0;
-               schedule_delayed_work(&musb->finish_resume_work,
-                               msecs_to_jiffies(USB_RESUME_TIMEOUT));
-       }
-
        spin_lock_irqsave(&musb->lock, flags);
        error = musb_run_resume_work(musb);
        if (error)
index ade902ea1221e18543de05a5188c715d86af7398..ce5a18c98c6d1134231a29fa9229cf6804a0ae6e 100644 (file)
@@ -410,7 +410,6 @@ struct musb {
 
        /* is_suspended means USB B_PERIPHERAL suspend */
        unsigned                is_suspended:1;
-       unsigned                need_finish_resume :1;
 
        /* may_wakeup means remote wakeup is enabled */
        unsigned                may_wakeup:1;
index 7ce31a4c7e7fd3d186e8e05b20b9a3ca52700b6c..42cc72e54c051b2115c358bcee8bfc534258d206 100644 (file)
@@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index 46fca6b7584686744a9e79aae0bd78db08192813..1db4b61bdf7bd710d7be6e3ff81b97102d76fbe9 100644 (file)
@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
        { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
        { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+       { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
        { USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
        { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
        { USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
index e3b7af8adfb73ccefa92d4ba3c2c927a044dfa43..09d9be88209e1ce6b1f53dc052a53c5e4c491336 100644 (file)
@@ -27,6 +27,7 @@
 #define ATEN_VENDOR_ID         0x0557
 #define ATEN_VENDOR_ID2                0x0547
 #define ATEN_PRODUCT_ID                0x2008
+#define ATEN_PRODUCT_ID2       0x2118
 
 #define IODATA_VENDOR_ID       0x04bb
 #define IODATA_PRODUCT_ID      0x0a03
index 1bc6089b90083a05e0ef3e4ff71c0ef752e3831a..696458db7e3c45e661a9825d05df0fe25dc0a832 100644 (file)
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
        {USB_DEVICE(0x1410, 0xa021)},   /* Novatel Gobi 3000 Composite */
        {USB_DEVICE(0x413c, 0x8193)},   /* Dell Gobi 3000 QDL */
        {USB_DEVICE(0x413c, 0x8194)},   /* Dell Gobi 3000 Composite */
+       {USB_DEVICE(0x413c, 0x81a6)},   /* Dell DW5570 QDL (MC8805) */
        {USB_DEVICE(0x1199, 0x68a4)},   /* Sierra Wireless QDL */
        {USB_DEVICE(0x1199, 0x68a5)},   /* Sierra Wireless Modem */
        {USB_DEVICE(0x1199, 0x68a8)},   /* Sierra Wireless QDL */
index 128d10282d1632693dc40819ff8b39485ba1e1de..7690e5bf3cf134fc56a0a17d07e57d397d293346 100644 (file)
@@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data,
                mutex_lock(&container->lock);
 
                ret = tce_iommu_create_default_window(container);
-               if (ret)
-                       return ret;
-
-               ret = tce_iommu_create_window(container, create.page_shift,
-                               create.window_size, create.levels,
-                               &create.start_addr);
+               if (!ret)
+                       ret = tce_iommu_create_window(container,
+                                       create.page_shift,
+                                       create.window_size, create.levels,
+                                       &create.start_addr);
 
                mutex_unlock(&container->lock);
 
index 9f118388a5b7cc6f70b727f8bafdfadf9a401ac2..4269e621e254ab7acc38c81f2aafe1775e9dbd81 100644 (file)
@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
 
 static void vhost_init_is_le(struct vhost_virtqueue *vq)
 {
-       if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
-               vq->is_le = true;
+       vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
+               || virtio_legacy_is_little_endian();
 }
 #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
 
 static void vhost_reset_is_le(struct vhost_virtqueue *vq)
 {
-       vq->is_le = virtio_legacy_is_little_endian();
+       vhost_init_is_le(vq);
 }
 
 struct vhost_flush_struct {
@@ -1714,10 +1714,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
        int r;
        bool is_le = vq->is_le;
 
-       if (!vq->private_data) {
-               vhost_reset_is_le(vq);
+       if (!vq->private_data)
                return 0;
-       }
 
        vhost_init_is_le(vq);
 
index 7e38ed79c3fc0f2c095164d480f75b31630a6694..409aeaa49246a0edd7c6da07ca38b58c3f876109 100644 (file)
@@ -159,13 +159,6 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
        if (xen_domain())
                return true;
 
-       /*
-        * On ARM-based machines, the DMA ops will do the right thing,
-        * so always use them with legacy devices.
-        */
-       if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
-               return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
-
        return false;
 }
 
index 3af2da5e64ce77fa8ae4b3f294c82882d350120f..c45598b912e14c981fdeb002b01c1535218c0ff2 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1031,6 +1031,11 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct blk_dax_ctl dax = { 0 };
                ssize_t map_len;
 
+               if (fatal_signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+
                dax.sector = dax_iomap_sector(iomap, pos);
                dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
                map_len = dax_map_atomic(iomap->bdev, &dax);
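
Note: both the dax and iomap hunks insert the same escape hatch: a long copy loop (or a write_begin that may block on page allocation) checks fatal_signal_pending() so a task that has already been SIGKILLed stops generating I/O instead of grinding through the rest of the request. The idiom, reduced to a loop skeleton (my_long_copy and my_copy_chunk are hypothetical):

	static ssize_t my_long_copy(size_t todo)
	{
		ssize_t done = 0;

		while (todo > 0) {
			ssize_t chunk;

			if (fatal_signal_pending(current))	/* SIGKILL already queued */
				return done ? done : -EINTR;

			chunk = my_copy_chunk(&todo);		/* hypothetical per-iteration work */
			if (chunk < 0)
				return done ? done : chunk;
			done += chunk;
		}
		return done;
	}
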
index 354a123f170e534a016f74ca7006458e3b823ef8..a51cb4c07d4d8cd3a09715361c84126cecae2ca2 100644 (file)
@@ -114,6 +114,9 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 
        BUG_ON(pos + len > iomap->offset + iomap->length);
 
+       if (fatal_signal_pending(current))
+               return -EINTR;
+
        page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
        if (!page)
                return -ENOMEM;
index 596205d939a1f43f1faa292d31680377045a8589..1fc07a9c70e9c6028342e8c97d183dfe914a343b 100644 (file)
@@ -223,10 +223,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
        struct nfs4_layout_stateid *ls;
        struct nfs4_stid *stp;
 
-       stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
+       stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
+                                       nfsd4_free_layout_stateid);
        if (!stp)
                return NULL;
-       stp->sc_free = nfsd4_free_layout_stateid;
+
        get_nfs4_file(fp);
        stp->sc_file = fp;
 
index 4b4beaaa4eaac01233f874c7dfdb8d1a6d7cd3d6..a0dee8ae9f97f16a18e40ba19f8e84a45ad1a02b 100644 (file)
@@ -633,8 +633,8 @@ out:
        return co;
 }
 
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-                                        struct kmem_cache *slab)
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+                                 void (*sc_free)(struct nfs4_stid *))
 {
        struct nfs4_stid *stid;
        int new_id;
@@ -650,6 +650,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
        idr_preload_end();
        if (new_id < 0)
                goto out_free;
+
+       stid->sc_free = sc_free;
        stid->sc_client = cl;
        stid->sc_stateid.si_opaque.so_id = new_id;
        stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
@@ -675,15 +677,12 @@ out_free:
 static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
 {
        struct nfs4_stid *stid;
-       struct nfs4_ol_stateid *stp;
 
-       stid = nfs4_alloc_stid(clp, stateid_slab);
+       stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
        if (!stid)
                return NULL;
 
-       stp = openlockstateid(stid);
-       stp->st_stid.sc_free = nfs4_free_ol_stateid;
-       return stp;
+       return openlockstateid(stid);
 }
 
 static void nfs4_free_deleg(struct nfs4_stid *stid)
@@ -781,11 +780,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
                goto out_dec;
        if (delegation_blocked(&current_fh->fh_handle))
                goto out_dec;
-       dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
+       dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
        if (dp == NULL)
                goto out_dec;
 
-       dp->dl_stid.sc_free = nfs4_free_deleg;
        /*
         * delegation seqid's are never incremented.  The 4.1 special
         * meaning of seqid 0 isn't meaningful, really, but let's avoid
@@ -5580,7 +5578,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
        stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
        get_nfs4_file(fp);
        stp->st_stid.sc_file = fp;
-       stp->st_stid.sc_free = nfs4_free_lock_stateid;
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = open_stp->st_deny_bmap;
        stp->st_openstp = open_stp;
@@ -5623,7 +5620,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
        lst = find_lock_stateid(lo, fi);
        if (lst == NULL) {
                spin_unlock(&clp->cl_lock);
-               ns = nfs4_alloc_stid(clp, stateid_slab);
+               ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
                if (ns == NULL)
                        return NULL;
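
Note: the nfsd change moves the sc_free destructor into nfs4_alloc_stid() itself, so every stateid leaves the allocator with its release function already set instead of each caller assigning sc_free afterwards. The underlying pattern, with hypothetical names (my_obj, my_alloc_obj):

	#include <linux/slab.h>

	struct my_obj {
		void (*free)(struct my_obj *);
		/* ... payload ... */
	};

	static struct my_obj *my_alloc_obj(void (*free_fn)(struct my_obj *))
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return NULL;
		obj->free = free_fn;	/* set before the object is visible anywhere */
		return obj;
	}

	/* callers pass their type-specific destructor, as in
	 *   nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid) above */
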
 
index c9399366f9dfc73b343d079fbad2dc2127927aae..4516e8b7d776305d94fb89f86256ee3fc54dec27 100644 (file)
@@ -603,8 +603,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
 __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
                     stateid_t *stateid, unsigned char typemask,
                     struct nfs4_stid **s, struct nfsd_net *nn);
-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
-               struct kmem_cache *slab);
+struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
+                                 void (*sc_free)(struct nfs4_stid *));
 void nfs4_unhash_stid(struct nfs4_stid *s);
 void nfs4_put_stid(struct nfs4_stid *s);
 void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
index 26c6fdb4bf67cf1e3e3a843e8e816d7a76eae265..ca13236dbb1f33afb1c6664c030d63a5bfcfacc5 100644 (file)
@@ -332,37 +332,6 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
        }
 }
 
-static __be32
-nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
-               struct iattr *iap)
-{
-       struct inode *inode = d_inode(fhp->fh_dentry);
-       int host_err;
-
-       if (iap->ia_size < inode->i_size) {
-               __be32 err;
-
-               err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
-                               NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
-               if (err)
-                       return err;
-       }
-
-       host_err = get_write_access(inode);
-       if (host_err)
-               goto out_nfserrno;
-
-       host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
-       if (host_err)
-               goto out_put_write_access;
-       return 0;
-
-out_put_write_access:
-       put_write_access(inode);
-out_nfserrno:
-       return nfserrno(host_err);
-}
-
 /*
  * Set various file attributes.  After this call fhp needs an fh_put.
  */
@@ -377,7 +346,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
        __be32          err;
        int             host_err;
        bool            get_write_count;
-       int             size_change = 0;
 
        if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
                accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
@@ -390,11 +358,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
        /* Get inode */
        err = fh_verify(rqstp, fhp, ftype, accmode);
        if (err)
-               goto out;
+               return err;
        if (get_write_count) {
                host_err = fh_want_write(fhp);
                if (host_err)
-                       return nfserrno(host_err);
+                       goto out_host_err;
        }
 
        dentry = fhp->fh_dentry;
@@ -405,50 +373,59 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
                iap->ia_valid &= ~ATTR_MODE;
 
        if (!iap->ia_valid)
-               goto out;
+               return 0;
 
        nfsd_sanitize_attrs(inode, iap);
 
+       if (check_guard && guardtime != inode->i_ctime.tv_sec)
+               return nfserr_notsync;
+
        /*
         * The size case is special, it changes the file in addition to the
-        * attributes.
+        * attributes, and file systems don't expect it to be mixed with
+        * "random" attribute changes.  We thus split out the size change
+        * into a separate call for vfs_truncate, and do the rest as a
+        * a separate setattr call.
         */
        if (iap->ia_valid & ATTR_SIZE) {
-               err = nfsd_get_write_access(rqstp, fhp, iap);
-               if (err)
-                       goto out;
-               size_change = 1;
+               struct path path = {
+                       .mnt    = fhp->fh_export->ex_path.mnt,
+                       .dentry = dentry,
+               };
+               bool implicit_mtime = false;
 
                /*
-                * RFC5661, Section 18.30.4:
-                *   Changing the size of a file with SETATTR indirectly
-                *   changes the time_modify and change attributes.
-                *
-                * (and similar for the older RFCs)
+                * vfs_truncate implicity updates the mtime IFF the file size
+                * actually changes.  Avoid the additional seattr call below if
+                * the only other attribute that the client sends is the mtime.
                 */
-               if (iap->ia_size != i_size_read(inode))
-                       iap->ia_valid |= ATTR_MTIME;
-       }
+               if (iap->ia_size != i_size_read(inode) &&
+                   ((iap->ia_valid & ~(ATTR_SIZE | ATTR_MTIME)) == 0))
+                       implicit_mtime = true;
 
-       iap->ia_valid |= ATTR_CTIME;
+               host_err = vfs_truncate(&path, iap->ia_size);
+               if (host_err)
+                       goto out_host_err;
 
-       if (check_guard && guardtime != inode->i_ctime.tv_sec) {
-               err = nfserr_notsync;
-               goto out_put_write_access;
+               iap->ia_valid &= ~ATTR_SIZE;
+               if (implicit_mtime)
+                       iap->ia_valid &= ~ATTR_MTIME;
+               if (!iap->ia_valid)
+                       goto done;
        }
 
+       iap->ia_valid |= ATTR_CTIME;
+
        fh_lock(fhp);
        host_err = notify_change(dentry, iap, NULL);
        fh_unlock(fhp);
-       err = nfserrno(host_err);
+       if (host_err)
+               goto out_host_err;
 
-out_put_write_access:
-       if (size_change)
-               put_write_access(inode);
-       if (!err)
-               err = nfserrno(commit_metadata(fhp));
-out:
-       return err;
+done:
+       host_err = commit_metadata(fhp);
+out_host_err:
+       return nfserrno(host_err);
 }
 
 #if defined(CONFIG_NFSD_V4)
index a2066e6dee9058ac41b3c51cfcad83f0796e0598..2726536489b19a30394226a27e7a0183690e6b4c 100644 (file)
@@ -173,7 +173,8 @@ u64 stable_page_flags(struct page *page)
        u |= kpf_copy_bit(k, KPF_ACTIVE,        PG_active);
        u |= kpf_copy_bit(k, KPF_RECLAIM,       PG_reclaim);
 
-       u |= kpf_copy_bit(k, KPF_SWAPCACHE,     PG_swapcache);
+       if (PageSwapCache(page))
+               u |= 1 << KPF_SWAPCACHE;
        u |= kpf_copy_bit(k, KPF_SWAPBACKED,    PG_swapbacked);
 
        u |= kpf_copy_bit(k, KPF_UNEVICTABLE,   PG_unevictable);
index 63554e9f6e0c68595943e27d2734ad4fa8271007..719db1968d8177a91028fd5e8bd6068f5d71c491 100644 (file)
@@ -9,18 +9,15 @@
 #ifndef KSYM_ALIGN
 #define KSYM_ALIGN 8
 #endif
-#ifndef KCRC_ALIGN
-#define KCRC_ALIGN 8
-#endif
 #else
 #define __put .long
 #ifndef KSYM_ALIGN
 #define KSYM_ALIGN 4
 #endif
+#endif
 #ifndef KCRC_ALIGN
 #define KCRC_ALIGN 4
 #endif
-#endif
 
 #ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
 #define KSYM(name) _##name
@@ -52,7 +49,11 @@ KSYM(__kstrtab_\name):
        .section ___kcrctab\sec+\name,"a"
        .balign KCRC_ALIGN
 KSYM(__kcrctab_\name):
-       __put KSYM(__crc_\name)
+#if defined(CONFIG_MODULE_REL_CRCS)
+       .long KSYM(__crc_\name) - .
+#else
+       .long KSYM(__crc_\name)
+#endif
        .weak KSYM(__crc_\name)
        .previous
 #endif
index 192016e2b5183c7a22fd13fd21372de5862f44d8..9c4ee144b5f6b799baf92a9722cddcaee3eb2b4f 100644 (file)
@@ -517,6 +517,7 @@ struct drm_device {
        struct drm_minor *control;              /**< Control node */
        struct drm_minor *primary;              /**< Primary node */
        struct drm_minor *render;               /**< Render node */
+       bool registered;
 
        /* currently active master for this device. Protected by master_mutex */
        struct drm_master *master;
index a9b95246e26efcf3d44cd5afc85f7031f0fa77fe..045a97cbeba24f44eb1b1d5582b4145801fcafdd 100644 (file)
@@ -381,6 +381,8 @@ struct drm_connector_funcs {
         * core drm connector interfaces. Everything added from this callback
         * should be unregistered in the early_unregister callback.
         *
+        * This is called while holding drm_connector->mutex.
+        *
         * Returns:
         *
         * 0 on success, or a negative error code on failure.
@@ -395,6 +397,8 @@ struct drm_connector_funcs {
         * late_register(). It is called from drm_connector_unregister(),
         * early in the driver unload sequence to disable userspace access
         * before data structures are torndown.
+        *
+        * This is called while holding drm_connector->mutex.
         */
        void (*early_unregister)(struct drm_connector *connector);
 
@@ -559,7 +563,6 @@ struct drm_cmdline_mode {
  * @interlace_allowed: can this connector handle interlaced modes?
  * @doublescan_allowed: can this connector handle doublescan?
  * @stereo_allowed: can this connector handle stereo modes?
- * @registered: is this connector exposed (registered) with userspace?
  * @modes: modes available on this connector (from fill_modes() + user)
  * @status: one of the drm_connector_status enums (connected, not, or unknown)
  * @probed_modes: list of modes derived directly from the display
@@ -607,6 +610,13 @@ struct drm_connector {
 
        char *name;
 
+       /**
+        * @mutex: Lock for general connector state, but currently only protects
+        * @registered. Most of the connector state is still protected by the
+        * mutex in &drm_mode_config.
+        */
+       struct mutex mutex;
+
        /**
         * @index: Compacted connector index, which matches the position inside
         * the mode_config.list for drivers not supporting hot-add/removing. Can
@@ -620,6 +630,10 @@ struct drm_connector {
        bool interlace_allowed;
        bool doublescan_allowed;
        bool stereo_allowed;
+       /**
+        * @registered: Is this connector exposed (registered) with userspace?
+        * Protected by @mutex.
+        */
        bool registered;
        struct list_head modes; /* list of modes on this connector */
 
index d936a0021839cca651e19ec43e71b8f21cb69cf0..921acaaa16017979df0722fb9803b204d77c0be0 100644 (file)
@@ -8,9 +8,7 @@ enum cpuhp_state {
        CPUHP_CREATE_THREADS,
        CPUHP_PERF_PREPARE,
        CPUHP_PERF_X86_PREPARE,
-       CPUHP_PERF_X86_UNCORE_PREP,
        CPUHP_PERF_X86_AMD_UNCORE_PREP,
-       CPUHP_PERF_X86_RAPL_PREP,
        CPUHP_PERF_BFIN,
        CPUHP_PERF_POWER,
        CPUHP_PERF_SUPERH,
@@ -86,7 +84,6 @@ enum cpuhp_state {
        CPUHP_AP_IRQ_ARMADA_XP_STARTING,
        CPUHP_AP_IRQ_BCM2836_STARTING,
        CPUHP_AP_ARM_MVEBU_COHERENCY,
-       CPUHP_AP_PERF_X86_UNCORE_STARTING,
        CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
        CPUHP_AP_PERF_X86_STARTING,
        CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
index 2a0f61fbc7310e61f5927c31250e208d217c3e26..1a1dfdb2a5c6d8806d11e2cd58722e304f293054 100644 (file)
@@ -43,12 +43,19 @@ extern struct module __this_module;
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate a checksums for some symbols */
+#if defined(CONFIG_MODULE_REL_CRCS)
 #define __CRC_SYMBOL(sym, sec)                                         \
-       extern __visible void *__crc_##sym __attribute__((weak));       \
-       static const unsigned long __kcrctab_##sym                      \
-       __used                                                          \
-       __attribute__((section("___kcrctab" sec "+" #sym), used))       \
-       = (unsigned long) &__crc_##sym;
+       asm("   .section \"___kcrctab" sec "+" #sym "\", \"a\"  \n"     \
+           "   .weak   " VMLINUX_SYMBOL_STR(__crc_##sym) "     \n"     \
+           "   .long   " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n"     \
+           "   .previous                                       \n");
+#else
+#define __CRC_SYMBOL(sym, sec)                                         \
+       asm("   .section \"___kcrctab" sec "+" #sym "\", \"a\"  \n"     \
+           "   .weak   " VMLINUX_SYMBOL_STR(__crc_##sym) "     \n"     \
+           "   .long   " VMLINUX_SYMBOL_STR(__crc_##sym) "     \n"     \
+           "   .previous                                       \n");
+#endif
 #else
 #define __CRC_SYMBOL(sym, sec)
 #endif
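
The __CRC_SYMBOL() change replaces the old C-level initializer (an unsigned long holding &__crc_sym) with inline assembly, so each kcrctab entry is emitted as a 32-bit .long, either place-relative (CONFIG_MODULE_REL_CRCS) or absolute. Expanding the macro by hand for a hypothetical export "foo" in the default section, and assuming no CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX underscore, the emitted assembly is:

	/* with CONFIG_MODULE_REL_CRCS */
	.section "___kcrctab+foo", "a"
	.weak   __crc_foo
	.long   __crc_foo - .
	.previous

	/* without */
	.section "___kcrctab+foo", "a"
	.weak   __crc_foo
	.long   __crc_foo
	.previous

Either way the entry is now 4 bytes, which is why the crcs pointers elsewhere in this merge become const s32 * instead of const unsigned long *.
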
index 42fe43fb0c80605f9553c746f04310f43914f683..183efde54269e18c5d4d1eda7dc448717fe85800 100644 (file)
@@ -128,6 +128,7 @@ struct hv_ring_buffer_info {
        u32 ring_data_startoffset;
        u32 priv_write_index;
        u32 priv_read_index;
+       u32 cached_read_index;
 };
 
 /*
@@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
        return write;
 }
 
+static inline u32 hv_get_cached_bytes_to_write(
+       const struct hv_ring_buffer_info *rbi)
+{
+       u32 read_loc, write_loc, dsize, write;
+
+       dsize = rbi->ring_datasize;
+       read_loc = rbi->cached_read_index;
+       write_loc = rbi->ring_buffer->write_index;
+
+       write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
+               read_loc - write_loc;
+       return write;
+}
 /*
  * VMBUS version is 32 bit entity broken up into
  * two 16 bit quantities: major_number. minor_number.
@@ -1488,7 +1502,7 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
 
 static inline  void hv_signal_on_read(struct vmbus_channel *channel)
 {
-       u32 cur_write_sz;
+       u32 cur_write_sz, cached_write_sz;
        u32 pending_sz;
        struct hv_ring_buffer_info *rbi = &channel->inbound;
 
@@ -1512,12 +1526,24 @@ static inline  void hv_signal_on_read(struct vmbus_channel *channel)
 
        cur_write_sz = hv_get_bytes_to_write(rbi);
 
-       if (cur_write_sz >= pending_sz)
+       if (cur_write_sz < pending_sz)
+               return;
+
+       cached_write_sz = hv_get_cached_bytes_to_write(rbi);
+       if (cached_write_sz < pending_sz)
                vmbus_setevent(channel);
 
        return;
 }
 
+static inline void
+init_cached_read_index(struct vmbus_channel *channel)
+{
+       struct hv_ring_buffer_info *rbi = &channel->inbound;
+
+       rbi->cached_read_index = rbi->ring_buffer->read_index;
+}
+
 /*
  * An API to support in-place processing of incoming VMBUS packets.
  */
@@ -1569,6 +1595,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
  * This call commits the read index and potentially signals the host.
  * Here is the pattern for using the "in-place" consumption APIs:
  *
+ * init_cached_read_index();
+ *
  * while (get_next_pkt_raw() {
  *     process the packet "in-place";
  *     put_pkt_raw();
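
hv_get_cached_bytes_to_write() computes ring free space from the cached (pre-read) read index, and hv_signal_on_read() now returns early while current free space is still below the host's pending-send size, signalling only when the cached value was below that threshold, i.e. only when this read pass is what made room. The free-space arithmetic itself is the usual circular-buffer formula; a standalone sketch in plain C (names illustrative, not the kernel's):

	#include <assert.h>
	#include <stdint.h>

	/* Free bytes in a ring of size dsize given read/write offsets. */
	static uint32_t ring_bytes_to_write(uint32_t read_loc, uint32_t write_loc,
					    uint32_t dsize)
	{
		return write_loc >= read_loc ? dsize - (write_loc - read_loc)
					     : read_loc - write_loc;
	}

	int main(void)
	{
		/* reader and writer level: the whole data area is free */
		assert(ring_bytes_to_write(0, 0, 4096) == 4096);
		/* writer 100 bytes ahead of the reader */
		assert(ring_bytes_to_write(0, 100, 4096) == 3996);
		/* writer wrapped around behind the reader */
		assert(ring_bytes_to_write(3000, 1000, 4096) == 2000);
		return 0;
	}
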
index e79875574b393f33ed183fdc8f44277a49c26ba8..39e3254e5769d7da1d0f74cdfdf90bd9b40cfc8a 100644 (file)
@@ -184,6 +184,7 @@ struct irq_data {
  *
  * IRQD_TRIGGER_MASK           - Mask for the trigger type bits
  * IRQD_SETAFFINITY_PENDING    - Affinity setting is pending
+ * IRQD_ACTIVATED              - Interrupt has already been activated
  * IRQD_NO_BALANCING           - Balancing disabled for this IRQ
  * IRQD_PER_CPU                        - Interrupt is per cpu
  * IRQD_AFFINITY_SET           - Interrupt affinity was set
@@ -202,6 +203,7 @@ struct irq_data {
 enum {
        IRQD_TRIGGER_MASK               = 0xf,
        IRQD_SETAFFINITY_PENDING        = (1 <<  8),
+       IRQD_ACTIVATED                  = (1 <<  9),
        IRQD_NO_BALANCING               = (1 << 10),
        IRQD_PER_CPU                    = (1 << 11),
        IRQD_AFFINITY_SET               = (1 << 12),
@@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
        return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
 }
 
+static inline bool irqd_is_activated(struct irq_data *d)
+{
+       return __irqd_to_state(d) & IRQD_ACTIVATED;
+}
+
+static inline void irqd_set_activated(struct irq_data *d)
+{
+       __irqd_to_state(d) |= IRQD_ACTIVATED;
+}
+
+static inline void irqd_clr_activated(struct irq_data *d)
+{
+       __irqd_to_state(d) &= ~IRQD_ACTIVATED;
+}
+
 #undef __irqd_to_state
 
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
index fd7ff3d91e6a920ff084beca09d10b5b9abba981..ef3d4f67118ce0f60789e6e749a4773754e01e87 100644 (file)
@@ -203,6 +203,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
  *  ... and so on.
  */
 
-#define order_base_2(n) ilog2(roundup_pow_of_two(n))
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+       return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
 
+#define order_base_2(n)                                \
+(                                              \
+       __builtin_constant_p(n) ? (             \
+               ((n) == 0 || (n) == 1) ? 0 :    \
+               ilog2((n) - 1) + 1) :           \
+       __order_base_2(n)                       \
+)
 #endif /* _LINUX_LOG2_H */
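
order_base_2() now special-cases 0 and 1 (both give order 0) and computes constant arguments directly as ilog2(n - 1) + 1 instead of going through roundup_pow_of_two(), with the small out-of-line __order_base_2() helper for runtime values. The arithmetic, sketched in user-space C with a clz-based ilog2 (illustrative, not the kernel's implementation; assumes a GCC/Clang builtin):

	#include <assert.h>

	static int ilog2_ul(unsigned long n)		/* floor(log2(n)), n > 0 */
	{
		return (int)(sizeof(n) * 8 - 1) - __builtin_clzl(n);
	}

	static int order_base_2_ul(unsigned long n)	/* smallest b with 2^b >= n */
	{
		return n > 1 ? ilog2_ul(n - 1) + 1 : 0;
	}

	int main(void)
	{
		assert(order_base_2_ul(0) == 0);	/* special-cased */
		assert(order_base_2_ul(1) == 0);
		assert(order_base_2_ul(5) == 3);	/* 2^3 = 8 >= 5 */
		assert(order_base_2_ul(8) == 3);	/* exact power of two */
		assert(order_base_2_ul(9) == 4);
		return 0;
	}
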
index c1784c0b4f3585e0d20ca8253813c31d47f11c04..134a2f69c21abf7921181af0adff033bb459edc5 100644 (file)
@@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
 extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
 /* VM interface that may be used by firmware interface */
 extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long, unsigned long);
+extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+       unsigned long *valid_start, unsigned long *valid_end);
 extern void __offline_isolated_pages(unsigned long, unsigned long);
 
 typedef void (*online_page_callback_t)(struct page *page);
index 7c84273d60b963d44c032761cac62194e82d1198..cc7cba219b207de5536f6f9e9353d1a20201e4af 100644 (file)
@@ -346,7 +346,7 @@ struct module {
 
        /* Exported symbols */
        const struct kernel_symbol *syms;
-       const unsigned long *crcs;
+       const s32 *crcs;
        unsigned int num_syms;
 
        /* Kernel parameters. */
@@ -359,18 +359,18 @@ struct module {
        /* GPL-only exported symbols. */
        unsigned int num_gpl_syms;
        const struct kernel_symbol *gpl_syms;
-       const unsigned long *gpl_crcs;
+       const s32 *gpl_crcs;
 
 #ifdef CONFIG_UNUSED_SYMBOLS
        /* unused exported symbols. */
        const struct kernel_symbol *unused_syms;
-       const unsigned long *unused_crcs;
+       const s32 *unused_crcs;
        unsigned int num_unused_syms;
 
        /* GPL-only, unused exported symbols. */
        unsigned int num_unused_gpl_syms;
        const struct kernel_symbol *unused_gpl_syms;
-       const unsigned long *unused_gpl_crcs;
+       const s32 *unused_gpl_crcs;
 #endif
 
 #ifdef CONFIG_MODULE_SIG
@@ -382,7 +382,7 @@ struct module {
 
        /* symbols that will be GPL-only in the near future. */
        const struct kernel_symbol *gpl_future_syms;
-       const unsigned long *gpl_future_crcs;
+       const s32 *gpl_future_crcs;
        unsigned int num_gpl_future_syms;
 
        /* Exception table */
@@ -523,7 +523,7 @@ struct module *find_module(const char *name);
 
 struct symsearch {
        const struct kernel_symbol *start, *stop;
-       const unsigned long *crcs;
+       const s32 *crcs;
        enum {
                NOT_GPL_ONLY,
                GPL_ONLY,
@@ -539,7 +539,7 @@ struct symsearch {
  */
 const struct kernel_symbol *find_symbol(const char *name,
                                        struct module **owner,
-                                       const unsigned long **crc,
+                                       const s32 **crc,
                                        bool gplok,
                                        bool warn);
 
index 3ebb168b9afc68ad639b5d32f6182a845c83d759..a34b141f125f0032662f147b598c9fef4fb4bcef 100644 (file)
@@ -309,6 +309,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
        }
 
        for (opt_iter = 6; opt_iter < opt_len;) {
+               if (opt_iter + 1 == opt_len) {
+                       err_offset = opt_iter;
+                       goto out;
+               }
                tag_len = opt[opt_iter + 1];
                if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
                        err_offset = opt_iter + 1;
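
The added test rejects an option block whose last byte is a tag type with no room for the length octet, so opt[opt_iter + 1] is never read past opt_len; the same guard is added to the out-of-line cipso_v4_validate() in net/ipv4/cipso_ipv4.c further down. A generic type/length walk with that bounds check (illustrative sketch, not the CIPSO parser; as in CIPSO, the length here covers the whole tag including its two header octets):

	#include <stdio.h>

	/* Walk tags in buf[0..len); return 0 on success, or the offset of
	 * the first malformed byte. */
	static int walk_tlv(const unsigned char *buf, int len)
	{
		int iter = 0;

		while (iter < len) {
			int tag_len;

			if (iter + 1 == len)		/* type present, length missing */
				return iter;
			tag_len = buf[iter + 1];
			if (tag_len == 0 || tag_len > len - iter)
				return iter + 1;	/* length octet is bogus */
			iter += tag_len;
		}
		return 0;
	}

	int main(void)
	{
		const unsigned char good[] = { 0x01, 0x04, 0xaa, 0xbb, 0x07, 0x02 };
		const unsigned char bad[]  = { 0x01, 0x04, 0xaa, 0xbb, 0x07 }; /* truncated */

		printf("good: %d\n", walk_tlv(good, sizeof(good)));	/* 0 */
		printf("bad:  %d\n", walk_tlv(bad, sizeof(bad)));	/* 4 */
		return 0;
	}
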
index 6f83e78eaa5a8928b34ecc63d49b1baf19722c10..9ccefa5c5487863e69831c3bf45e4f0edf78eb65 100644 (file)
@@ -2035,7 +2035,9 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
 
 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
-                       unsigned int flags);
+                       unsigned int flags,
+                       void (*destructor)(struct sock *sk,
+                                          struct sk_buff *skb));
 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
index 33496595064c1e5a7b656e06347aae5ef4e54562..61df8d392f41f4c197419b2260481d1a41897318 100644 (file)
@@ -25,14 +25,12 @@ struct ipv6_sr_hdr {
        __u8    type;
        __u8    segments_left;
        __u8    first_segment;
-       __u8    flag_1;
-       __u8    flag_2;
-       __u8    reserved;
+       __u8    flags;
+       __u16   reserved;
 
        struct in6_addr segments[0];
 };
 
-#define SR6_FLAG1_CLEANUP      (1 << 7)
 #define SR6_FLAG1_PROTECTED    (1 << 6)
 #define SR6_FLAG1_OAM          (1 << 5)
 #define SR6_FLAG1_ALERT                (1 << 4)
@@ -44,8 +42,7 @@ struct ipv6_sr_hdr {
 #define SR6_TLV_PADDING                4
 #define SR6_TLV_HMAC           5
 
-#define sr_has_cleanup(srh) ((srh)->flag_1 & SR6_FLAG1_CLEANUP)
-#define sr_has_hmac(srh) ((srh)->flag_1 & SR6_FLAG1_HMAC)
+#define sr_has_hmac(srh) ((srh)->flags & SR6_FLAG1_HMAC)
 
 struct sr6_tlv {
        __u8 type;
index e1a937348a3ed2bb3a76820e1ffa6a542f6aa9fb..4dd8bd232a1d4efd012fab8757887426ece9c0aa 100644 (file)
@@ -1987,6 +1987,10 @@ config MODVERSIONS
          make them incompatible with the kernel you are running.  If
          unsure, say N.
 
+config MODULE_REL_CRCS
+       bool
+       depends on MODVERSIONS
+
 config MODULE_SRCVERSION_ALL
        bool "Source checksum for all modules"
        help
index 110b38a58493ee4ba4c19763d2678dae8815e1af..e5aaa806702de888b63a82bdcb72766f7a317563 100644 (file)
@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-
        lockdep_assert_held(&ctx->lock);
 
        WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
 {
        struct perf_event *group_leader = event->group_leader, *pos;
 
+       lockdep_assert_held(&event->ctx->lock);
+
        /*
         * We can have double attach due to group movement in perf_event_open.
         */
@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
        struct perf_event *sibling, *tmp;
        struct list_head *list = NULL;
 
+       lockdep_assert_held(&event->ctx->lock);
+
        /*
         * We can have double detach due to exit/hot-unplug + close.
         */
@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
  */
 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 {
-       lockdep_assert_held(&event->ctx->mutex);
+       struct perf_event_context *ctx = event->ctx;
+
+       lockdep_assert_held(&ctx->mutex);
 
        event_function_call(event, __perf_remove_from_context, (void *)flags);
+
+       /*
+        * The above event_function_call() can NO-OP when it hits
+        * TASK_TOMBSTONE. In that case we must already have been detached
+        * from the context (by perf_event_exit_event()) but the grouping
+        * might still be in-tact.
+        */
+       WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
+       if ((flags & DETACH_GROUP) &&
+           (event->attach_state & PERF_ATTACH_GROUP)) {
+               /*
+                * Since in that case we cannot possibly be scheduled, simply
+                * detach now.
+                */
+               raw_spin_lock_irq(&ctx->lock);
+               perf_group_detach(event);
+               raw_spin_unlock_irq(&ctx->lock);
+       }
 }
 
 /*
@@ -6609,6 +6632,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
        char *buf = NULL;
        char *name;
 
+       if (vma->vm_flags & VM_READ)
+               prot |= PROT_READ;
+       if (vma->vm_flags & VM_WRITE)
+               prot |= PROT_WRITE;
+       if (vma->vm_flags & VM_EXEC)
+               prot |= PROT_EXEC;
+
+       if (vma->vm_flags & VM_MAYSHARE)
+               flags = MAP_SHARED;
+       else
+               flags = MAP_PRIVATE;
+
+       if (vma->vm_flags & VM_DENYWRITE)
+               flags |= MAP_DENYWRITE;
+       if (vma->vm_flags & VM_MAYEXEC)
+               flags |= MAP_EXECUTABLE;
+       if (vma->vm_flags & VM_LOCKED)
+               flags |= MAP_LOCKED;
+       if (vma->vm_flags & VM_HUGETLB)
+               flags |= MAP_HUGETLB;
+
        if (file) {
                struct inode *inode;
                dev_t dev;
@@ -6635,27 +6679,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
                maj = MAJOR(dev);
                min = MINOR(dev);
 
-               if (vma->vm_flags & VM_READ)
-                       prot |= PROT_READ;
-               if (vma->vm_flags & VM_WRITE)
-                       prot |= PROT_WRITE;
-               if (vma->vm_flags & VM_EXEC)
-                       prot |= PROT_EXEC;
-
-               if (vma->vm_flags & VM_MAYSHARE)
-                       flags = MAP_SHARED;
-               else
-                       flags = MAP_PRIVATE;
-
-               if (vma->vm_flags & VM_DENYWRITE)
-                       flags |= MAP_DENYWRITE;
-               if (vma->vm_flags & VM_MAYEXEC)
-                       flags |= MAP_EXECUTABLE;
-               if (vma->vm_flags & VM_LOCKED)
-                       flags |= MAP_LOCKED;
-               if (vma->vm_flags & VM_HUGETLB)
-                       flags |= MAP_HUGETLB;
-
                goto got_name;
        } else {
                if (vma->vm_ops && vma->vm_ops->name) {
index 8c0a0ae43521c7f8b9e97964912cc3ab5e10fd15..b59e6768c5e94ad831e78b539437cfd99576e780 100644 (file)
@@ -1346,6 +1346,30 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
 
+static void __irq_domain_activate_irq(struct irq_data *irq_data)
+{
+       if (irq_data && irq_data->domain) {
+               struct irq_domain *domain = irq_data->domain;
+
+               if (irq_data->parent_data)
+                       __irq_domain_activate_irq(irq_data->parent_data);
+               if (domain->ops->activate)
+                       domain->ops->activate(domain, irq_data);
+       }
+}
+
+static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
+{
+       if (irq_data && irq_data->domain) {
+               struct irq_domain *domain = irq_data->domain;
+
+               if (domain->ops->deactivate)
+                       domain->ops->deactivate(domain, irq_data);
+               if (irq_data->parent_data)
+                       __irq_domain_deactivate_irq(irq_data->parent_data);
+       }
+}
+
 /**
  * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
  *                          interrupt
@@ -1356,13 +1380,9 @@ EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
  */
 void irq_domain_activate_irq(struct irq_data *irq_data)
 {
-       if (irq_data && irq_data->domain) {
-               struct irq_domain *domain = irq_data->domain;
-
-               if (irq_data->parent_data)
-                       irq_domain_activate_irq(irq_data->parent_data);
-               if (domain->ops->activate)
-                       domain->ops->activate(domain, irq_data);
+       if (!irqd_is_activated(irq_data)) {
+               __irq_domain_activate_irq(irq_data);
+               irqd_set_activated(irq_data);
        }
 }
 
@@ -1376,13 +1396,9 @@ void irq_domain_activate_irq(struct irq_data *irq_data)
  */
 void irq_domain_deactivate_irq(struct irq_data *irq_data)
 {
-       if (irq_data && irq_data->domain) {
-               struct irq_domain *domain = irq_data->domain;
-
-               if (domain->ops->deactivate)
-                       domain->ops->deactivate(domain, irq_data);
-               if (irq_data->parent_data)
-                       irq_domain_deactivate_irq(irq_data->parent_data);
+       if (irqd_is_activated(irq_data)) {
+               __irq_domain_deactivate_irq(irq_data);
+               irqd_clr_activated(irq_data);
        }
 }
 
index 38d4270925d4d13619d725052aa3f9844f23bc96..3d8f126208e3ae04eeff3fd1b1e00044c0e3d0d2 100644 (file)
@@ -389,16 +389,16 @@ extern const struct kernel_symbol __start___ksymtab_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_gpl[];
 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
-extern const unsigned long __start___kcrctab[];
-extern const unsigned long __start___kcrctab_gpl[];
-extern const unsigned long __start___kcrctab_gpl_future[];
+extern const s32 __start___kcrctab[];
+extern const s32 __start___kcrctab_gpl[];
+extern const s32 __start___kcrctab_gpl_future[];
 #ifdef CONFIG_UNUSED_SYMBOLS
 extern const struct kernel_symbol __start___ksymtab_unused[];
 extern const struct kernel_symbol __stop___ksymtab_unused[];
 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
-extern const unsigned long __start___kcrctab_unused[];
-extern const unsigned long __start___kcrctab_unused_gpl[];
+extern const s32 __start___kcrctab_unused[];
+extern const s32 __start___kcrctab_unused_gpl[];
 #endif
 
 #ifndef CONFIG_MODVERSIONS
@@ -497,7 +497,7 @@ struct find_symbol_arg {
 
        /* Output */
        struct module *owner;
-       const unsigned long *crc;
+       const s32 *crc;
        const struct kernel_symbol *sym;
 };
 
@@ -563,7 +563,7 @@ static bool find_symbol_in_section(const struct symsearch *syms,
  * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
 const struct kernel_symbol *find_symbol(const char *name,
                                        struct module **owner,
-                                       const unsigned long **crc,
+                                       const s32 **crc,
                                        bool gplok,
                                        bool warn)
 {
@@ -1249,23 +1249,17 @@ static int try_to_force_load(struct module *mod, const char *reason)
 }
 
 #ifdef CONFIG_MODVERSIONS
-/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
-static unsigned long maybe_relocated(unsigned long crc,
-                                    const struct module *crc_owner)
+
+static u32 resolve_rel_crc(const s32 *crc)
 {
-#ifdef ARCH_RELOCATES_KCRCTAB
-       if (crc_owner == NULL)
-               return crc - (unsigned long)reloc_start;
-#endif
-       return crc;
+       return *(u32 *)((void *)crc + *crc);
 }
 
 static int check_version(Elf_Shdr *sechdrs,
                         unsigned int versindex,
                         const char *symname,
                         struct module *mod,
-                        const unsigned long *crc,
-                        const struct module *crc_owner)
+                        const s32 *crc)
 {
        unsigned int i, num_versions;
        struct modversion_info *versions;
@@ -1283,13 +1277,19 @@ static int check_version(Elf_Shdr *sechdrs,
                / sizeof(struct modversion_info);
 
        for (i = 0; i < num_versions; i++) {
+               u32 crcval;
+
                if (strcmp(versions[i].name, symname) != 0)
                        continue;
 
-               if (versions[i].crc == maybe_relocated(*crc, crc_owner))
+               if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
+                       crcval = resolve_rel_crc(crc);
+               else
+                       crcval = *crc;
+               if (versions[i].crc == crcval)
                        return 1;
-               pr_debug("Found checksum %lX vs module %lX\n",
-                      maybe_relocated(*crc, crc_owner), versions[i].crc);
+               pr_debug("Found checksum %X vs module %lX\n",
+                        crcval, versions[i].crc);
                goto bad_version;
        }
 
@@ -1307,7 +1307,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
                                          unsigned int versindex,
                                          struct module *mod)
 {
-       const unsigned long *crc;
+       const s32 *crc;
 
        /*
         * Since this should be found in kernel (which can't be removed), no
@@ -1321,8 +1321,7 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
        }
        preempt_enable();
        return check_version(sechdrs, versindex,
-                            VMLINUX_SYMBOL_STR(module_layout), mod, crc,
-                            NULL);
+                            VMLINUX_SYMBOL_STR(module_layout), mod, crc);
 }
 
 /* First part is kernel version, which we ignore if module has crcs. */
@@ -1340,8 +1339,7 @@ static inline int check_version(Elf_Shdr *sechdrs,
                                unsigned int versindex,
                                const char *symname,
                                struct module *mod,
-                               const unsigned long *crc,
-                               const struct module *crc_owner)
+                               const s32 *crc)
 {
        return 1;
 }
@@ -1368,7 +1366,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
 {
        struct module *owner;
        const struct kernel_symbol *sym;
-       const unsigned long *crc;
+       const s32 *crc;
        int err;
 
        /*
@@ -1383,8 +1381,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
        if (!sym)
                goto unlock;
 
-       if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
-                          owner)) {
+       if (!check_version(info->sechdrs, info->index.vers, name, mod, crc)) {
                sym = ERR_PTR(-EINVAL);
                goto getname;
        }
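
With CONFIG_MODULE_REL_CRCS a kcrctab entry is a signed 32-bit place-relative offset, and resolve_rel_crc() recovers the CRC by adding the entry's own address back to the offset it stores; check_version() then compares that against the module's recorded CRC. The indirection in a standalone form (illustrative sketch; it assumes, as the kernel layout does, that the entry and the CRC live within ±2 GiB of each other):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t the_crc = 0xdeadbeef;	/* stands in for the __crc_<sym> value */
	static int32_t  rel_entry;		/* stands in for one kcrctab slot */

	/* The slot stores "address of CRC - address of slot"; adding the
	 * slot's own address back recovers the CRC location. */
	static uint32_t resolve_rel_crc(const int32_t *crc)
	{
		return *(const uint32_t *)((uintptr_t)crc + *crc);
	}

	int main(void)
	{
		rel_entry = (int32_t)((uintptr_t)&the_crc - (uintptr_t)&rel_entry);
		assert(resolve_rel_crc(&rel_entry) == 0xdeadbeefu);
		return 0;
	}
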
index a133ecd741e437d938ca377cf5e4358bcb586aa4..7ad9e53ad174bc6cdb0f99490f87e419e9381b02 100644 (file)
@@ -1372,7 +1372,7 @@ kprobe_trace_selftest_target(int a1, int a2, int a3, int a4, int a5, int a6)
        return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
-static struct __init trace_event_file *
+static __init struct trace_event_file *
 find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
 {
        struct trace_event_file *file;
index b772a33ef640ab0d6770bb3d249a6fe6f16eeebc..3f9afded581be1a013bda4db2c0ec3a721323364 100644 (file)
@@ -1791,6 +1791,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
 
                cond_resched();
 find_page:
+               if (fatal_signal_pending(current)) {
+                       error = -EINTR;
+                       goto out;
+               }
+
                page = find_get_page(mapping, index);
                if (!page) {
                        page_cache_sync_readahead(mapping,
index b82b3e2151574ae1abbb2cd57624114727975410..f479365530b6484bbd5cae42064521fed362961e 100644 (file)
@@ -13,6 +13,7 @@
  *
  */
 
+#include <linux/ftrace.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/printk.h>
@@ -300,6 +301,8 @@ void kasan_report(unsigned long addr, size_t size,
        if (likely(!kasan_report_enabled()))
                return;
 
+       disable_trace_on_warning();
+
        info.access_addr = (void *)addr;
        info.access_size = size;
        info.is_write = is_write;
index ca2723d4733849eab01b323a50e6b1bc609e308c..b8c11e063ff0746316fb792f4fe1dde0094cb828 100644 (file)
@@ -1483,17 +1483,20 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 }
 
 /*
- * Confirm all pages in a range [start, end) is belongs to the same zone.
+ * Confirm all pages in a range [start, end) belong to the same zone.
+ * When true, return its valid [start, end).
  */
-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+                        unsigned long *valid_start, unsigned long *valid_end)
 {
        unsigned long pfn, sec_end_pfn;
+       unsigned long start, end;
        struct zone *zone = NULL;
        struct page *page;
        int i;
-       for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
+       for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
             pfn < end_pfn;
-            pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
+            pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
                /* Make sure the memory section is present first */
                if (!present_section_nr(pfn_to_section_nr(pfn)))
                        continue;
@@ -1509,10 +1512,20 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
                        page = pfn_to_page(pfn + i);
                        if (zone && page_zone(page) != zone)
                                return 0;
+                       if (!zone)
+                               start = pfn + i;
                        zone = page_zone(page);
+                       end = pfn + MAX_ORDER_NR_PAGES;
                }
        }
-       return 1;
+
+       if (zone) {
+               *valid_start = start;
+               *valid_end = end;
+               return 1;
+       } else {
+               return 0;
+       }
 }
 
 /*
@@ -1839,6 +1852,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
        long offlined_pages;
        int ret, drain, retry_max, node;
        unsigned long flags;
+       unsigned long valid_start, valid_end;
        struct zone *zone;
        struct memory_notify arg;
 
@@ -1849,10 +1863,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
                return -EINVAL;
        /* This makes hotplug much easier...and readable.
           we assume this for now. .*/
-       if (!test_pages_in_a_zone(start_pfn, end_pfn))
+       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                return -EINVAL;
 
-       zone = page_zone(pfn_to_page(start_pfn));
+       zone = page_zone(pfn_to_page(valid_start));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;
 
index bb53285a1d99666676e85697330f1a052f7c3cc0..3a7587a0314dc73fb4929a824a74f9b8948ea502 100644 (file)
@@ -415,6 +415,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
 {
        LIST_HEAD(list), *pos, *next;
+       LIST_HEAD(to_remove);
        struct inode *inode;
        struct shmem_inode_info *info;
        struct page *page;
@@ -441,9 +442,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                /* Check if there's anything to gain */
                if (round_up(inode->i_size, PAGE_SIZE) ==
                                round_up(inode->i_size, HPAGE_PMD_SIZE)) {
-                       list_del_init(&info->shrinklist);
+                       list_move(&info->shrinklist, &to_remove);
                        removed++;
-                       iput(inode);
                        goto next;
                }
 
@@ -454,6 +454,13 @@ next:
        }
        spin_unlock(&sbinfo->shrinklist_lock);
 
+       list_for_each_safe(pos, next, &to_remove) {
+               info = list_entry(pos, struct shmem_inode_info, shrinklist);
+               inode = &info->vfs_inode;
+               list_del_init(&info->shrinklist);
+               iput(inode);
+       }
+
        list_for_each_safe(pos, next, &list) {
                int ret;
 
index 067a0d62f31841d16913d36e38531a277ab59b01..cabf09e0128beebdee2b8a959361fe6464fb3469 100644 (file)
@@ -78,7 +78,13 @@ static u64 zswap_duplicate_entry;
 
 /* Enable/disable zswap (disabled by default) */
 static bool zswap_enabled;
-module_param_named(enabled, zswap_enabled, bool, 0644);
+static int zswap_enabled_param_set(const char *,
+                                  const struct kernel_param *);
+static struct kernel_param_ops zswap_enabled_param_ops = {
+       .set =          zswap_enabled_param_set,
+       .get =          param_get_bool,
+};
+module_param_cb(enabled, &zswap_enabled_param_ops, &zswap_enabled, 0644);
 
 /* Crypto compressor to use */
 #define ZSWAP_COMPRESSOR_DEFAULT "lzo"
@@ -176,6 +182,9 @@ static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 /* used by param callback function */
 static bool zswap_init_started;
 
+/* fatal error during init */
+static bool zswap_init_failed;
+
 /*********************************
 * helpers and fwd declarations
 **********************************/
@@ -624,6 +633,11 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
        char *s = strstrip((char *)val);
        int ret;
 
+       if (zswap_init_failed) {
+               pr_err("can't set param, initialization failed\n");
+               return -ENODEV;
+       }
+
        /* no change required */
        if (!strcmp(s, *(char **)kp->arg))
                return 0;
@@ -703,6 +717,17 @@ static int zswap_zpool_param_set(const char *val,
        return __zswap_param_set(val, kp, NULL, zswap_compressor);
 }
 
+static int zswap_enabled_param_set(const char *val,
+                                  const struct kernel_param *kp)
+{
+       if (zswap_init_failed) {
+               pr_err("can't enable, initialization failed\n");
+               return -ENODEV;
+       }
+
+       return param_set_bool(val, kp);
+}
+
 /*********************************
 * writeback code
 **********************************/
@@ -1201,6 +1226,9 @@ hp_fail:
 dstmem_fail:
        zswap_entry_cache_destroy();
 cache_fail:
+       /* if built-in, we aren't unloaded on failure; don't allow use */
+       zswap_init_failed = true;
+       zswap_enabled = false;
        return -ENOMEM;
 }
 /* must be late so crypto has time to come up */
index 662bea5871656f190a61e35b3c5cd21c2f132441..ea633342ab0d046cbc49e55b679440ef9e015c2d 100644 (file)
@@ -332,7 +332,9 @@ void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
 EXPORT_SYMBOL(__skb_free_datagram_locked);
 
 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
-                       unsigned int flags)
+                       unsigned int flags,
+                       void (*destructor)(struct sock *sk,
+                                          struct sk_buff *skb))
 {
        int err = 0;
 
@@ -342,6 +344,8 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
                if (skb == skb_peek(&sk->sk_receive_queue)) {
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        atomic_dec(&skb->users);
+                       if (destructor)
+                               destructor(sk, skb);
                        err = 0;
                }
                spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -375,7 +379,7 @@ EXPORT_SYMBOL(__sk_queue_drop_skb);
 
 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 {
-       int err = __sk_queue_drop_skb(sk, skb, flags);
+       int err = __sk_queue_drop_skb(sk, skb, flags, NULL);
 
        kfree_skb(skb);
        sk_mem_reclaim_partial(sk);
index 3e1a60102e64d1040867181a03605b8eca508557..0921609dfa81b70a13e0b4ca7852fde6ded7ed82 100644 (file)
@@ -1695,24 +1695,19 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
 
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call static_key_slow_dec() from irq context
- * If net_disable_timestamp() is called from irq context, defer the
- * static_key_slow_dec() calls.
- */
 static atomic_t netstamp_needed_deferred;
-#endif
-
-void net_enable_timestamp(void)
+static void netstamp_clear(struct work_struct *work)
 {
-#ifdef HAVE_JUMP_LABEL
        int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
 
-       if (deferred) {
-               while (--deferred)
-                       static_key_slow_dec(&netstamp_needed);
-               return;
-       }
+       while (deferred--)
+               static_key_slow_dec(&netstamp_needed);
+}
+static DECLARE_WORK(netstamp_work, netstamp_clear);
 #endif
+
+void net_enable_timestamp(void)
+{
        static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
@@ -1720,12 +1715,12 @@ EXPORT_SYMBOL(net_enable_timestamp);
 void net_disable_timestamp(void)
 {
 #ifdef HAVE_JUMP_LABEL
-       if (in_interrupt()) {
-               atomic_inc(&netstamp_needed_deferred);
-               return;
-       }
-#endif
+       /* net_disable_timestamp() can be called from non process context */
+       atomic_inc(&netstamp_needed_deferred);
+       schedule_work(&netstamp_work);
+#else
        static_key_slow_dec(&netstamp_needed);
+#endif
 }
 EXPORT_SYMBOL(net_disable_timestamp);
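
net_disable_timestamp() no longer calls static_key_slow_dec() directly (it can run in contexts where that is not allowed); it bumps an atomic counter and schedules a work item, and netstamp_clear() drains the counter with atomic_xchg() so each queued decrement is applied exactly once. The drain idiom with C11 atomics (illustrative; the kernel uses its own atomic_t and workqueue API):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int deferred;	/* decrements queued from restricted context */
	static int key_count = 3;	/* stands in for the static key's count */

	static void queue_dec(void)	/* cheap, safe to call from anywhere */
	{
		atomic_fetch_add(&deferred, 1);
		/* real code also does: schedule_work(&netstamp_work); */
	}

	static void drain_work(void)	/* runs later, in process context */
	{
		int n = atomic_exchange(&deferred, 0);

		while (n--)
			key_count--;	/* real code: static_key_slow_dec() */
	}

	int main(void)
	{
		queue_dec();
		queue_dec();
		drain_work();
		drain_work();			/* nothing left to apply */
		printf("%d\n", key_count);	/* 1 */
		return 0;
	}
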
 
index d5f412b3093d4b3ccef40d4808bedf353f483594..be7bab1adcde3d2f3228191163c55ca898d08e2f 100644 (file)
@@ -1404,9 +1404,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
        if (regs.len > reglen)
                regs.len = reglen;
 
-       regbuf = vzalloc(reglen);
-       if (reglen && !regbuf)
-               return -ENOMEM;
+       regbuf = NULL;
+       if (reglen) {
+               regbuf = vzalloc(reglen);
+               if (!regbuf)
+                       return -ENOMEM;
+       }
 
        ops->get_regs(dev, &regs, regbuf);
 
index 72d6f056d863603c959e1d04b9f863909a37c758..ae206163c273381ba6e8bd8a24fa050619a4a6ae 100644 (file)
@@ -1587,6 +1587,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
                                goto validate_return_locked;
                        }
 
+               if (opt_iter + 1 == opt_len) {
+                       err_offset = opt_iter;
+                       goto validate_return_locked;
+               }
                tag_len = tag[1];
                if (tag_len > (opt_len - opt_iter)) {
                        err_offset = opt_iter + 1;
index 8a4409dd390aac7fcb88383af1550a2f967dc3df..ce1386a67e2434203fdffe126958483ffc69fabd 100644 (file)
@@ -1243,7 +1243,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
                pktinfo->ipi_ifindex = 0;
                pktinfo->ipi_spec_dst.s_addr = 0;
        }
-       skb_dst_drop(skb);
+       /* We need to keep the dst for __ip_options_echo()
+        * We could restrict the test to opt.ts_needtime || opt.srr,
+        * but the following is good enough as IP options are not often used.
+        */
+       if (unlikely(IPCB(skb)->opt.optlen))
+               skb_dst_force(skb);
+       else
+               skb_dst_drop(skb);
 }
 
 int ip_setsockopt(struct sock *sk, int level,
index b751abc56935efa4e7e3ca9aa71e478f60b4b3fd..d44a6989e76d69aa44e2a26d37b4204376c94966 100644 (file)
@@ -773,6 +773,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                                ret = -EAGAIN;
                                break;
                        }
+                       /* if __tcp_splice_read() got nothing while we have
+                        * an skb in receive queue, we do not want to loop.
+                        * This might happen with URG data.
+                        */
+                       if (!skb_queue_empty(&sk->sk_receive_queue))
+                               break;
                        sk_wait_data(sk, &timeo, NULL);
                        if (signal_pending(current)) {
                                ret = sock_intr_errno(timeo);
index 4a1ba04565d11539d2be1fd6de968e826eb5cbce..ea6e4cff9fafe99af23fd8ea666cd979d5af9104 100644 (file)
@@ -1490,7 +1490,7 @@ try_again:
        return err;
 
 csum_copy_err:
-       if (!__sk_queue_drop_skb(sk, skb, flags)) {
+       if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
                UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
                UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        }
index 156ed578d3c09547d031963b268bb284d593824e..a69ae7d4e6f8178a3d476c897a459671559ed85a 100644 (file)
@@ -3393,9 +3393,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                        }
 
                        if (idev) {
-                               if (idev->if_flags & IF_READY)
-                                       /* device is already configured. */
+                               if (idev->if_flags & IF_READY) {
+                                       /* device is already configured -
+                                        * but resend MLD reports, we might
+                                        * have roamed and need to update
+                                        * multicast snooping switches
+                                        */
+                                       ipv6_mc_up(idev);
                                        break;
+                               }
                                idev->if_flags |= IF_READY;
                        }
 
index e4198502fd98ce55c1ab6d4d9767b8506b2cf707..275cac628a95066f0a27e93f5015ddeb0172c28c 100644 (file)
@@ -327,7 +327,6 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
        struct ipv6_sr_hdr *hdr;
        struct inet6_dev *idev;
        struct in6_addr *addr;
-       bool cleanup = false;
        int accept_seg6;
 
        hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
@@ -351,11 +350,7 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
 #endif
 
 looped_back:
-       if (hdr->segments_left > 0) {
-               if (hdr->nexthdr != NEXTHDR_IPV6 && hdr->segments_left == 1 &&
-                   sr_has_cleanup(hdr))
-                       cleanup = true;
-       } else {
+       if (hdr->segments_left == 0) {
                if (hdr->nexthdr == NEXTHDR_IPV6) {
                        int offset = (hdr->hdrlen + 1) << 3;
 
@@ -418,21 +413,6 @@ looped_back:
 
        ipv6_hdr(skb)->daddr = *addr;
 
-       if (cleanup) {
-               int srhlen = (hdr->hdrlen + 1) << 3;
-               int nh = hdr->nexthdr;
-
-               skb_pull_rcsum(skb, sizeof(struct ipv6hdr) + srhlen);
-               memmove(skb_network_header(skb) + srhlen,
-                       skb_network_header(skb),
-                       (unsigned char *)hdr - skb_network_header(skb));
-               skb->network_header += srhlen;
-               ipv6_hdr(skb)->nexthdr = nh;
-               ipv6_hdr(skb)->payload_len = htons(skb->len -
-                                                  sizeof(struct ipv6hdr));
-               skb_push_rcsum(skb, sizeof(struct ipv6hdr));
-       }
-
        skb_dst_drop(skb);
 
        ip6_route_input(skb);
@@ -453,13 +433,8 @@ looped_back:
                }
                ipv6_hdr(skb)->hop_limit--;
 
-               /* be sure that srh is still present before reinjecting */
-               if (!cleanup) {
-                       skb_pull(skb, sizeof(struct ipv6hdr));
-                       goto looped_back;
-               }
-               skb_set_transport_header(skb, sizeof(struct ipv6hdr));
-               IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
+               skb_pull(skb, sizeof(struct ipv6hdr));
+               goto looped_back;
        }
 
        dst_input(skb);
index 51b9835b3176ac2d05639fd318af89788655dd69..6fcb7cb49bb20dcfe518177177e3e0ac1c7d1091 100644 (file)
@@ -367,35 +367,37 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
 
 
 static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-               u8 type, u8 code, int offset, __be32 info)
+                      u8 type, u8 code, int offset, __be32 info)
 {
-       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
-       __be16 *p = (__be16 *)(skb->data + offset);
-       int grehlen = offset + 4;
+       const struct gre_base_hdr *greh;
+       const struct ipv6hdr *ipv6h;
+       int grehlen = sizeof(*greh);
        struct ip6_tnl *t;
+       int key_off = 0;
        __be16 flags;
+       __be32 key;
 
-       flags = p[0];
-       if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
-               if (flags&(GRE_VERSION|GRE_ROUTING))
-                       return;
-               if (flags&GRE_KEY) {
-                       grehlen += 4;
-                       if (flags&GRE_CSUM)
-                               grehlen += 4;
-               }
+       if (!pskb_may_pull(skb, offset + grehlen))
+               return;
+       greh = (const struct gre_base_hdr *)(skb->data + offset);
+       flags = greh->flags;
+       if (flags & (GRE_VERSION | GRE_ROUTING))
+               return;
+       if (flags & GRE_CSUM)
+               grehlen += 4;
+       if (flags & GRE_KEY) {
+               key_off = grehlen + offset;
+               grehlen += 4;
        }
 
-       /* If only 8 bytes returned, keyed message will be dropped here */
-       if (!pskb_may_pull(skb, grehlen))
+       if (!pskb_may_pull(skb, offset + grehlen))
                return;
        ipv6h = (const struct ipv6hdr *)skb->data;
-       p = (__be16 *)(skb->data + offset);
+       greh = (const struct gre_base_hdr *)(skb->data + offset);
+       key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
 
        t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
-                               flags & GRE_KEY ?
-                               *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
-                               p[1]);
+                                key, greh->protocol);
        if (!t)
                return;
 
index b274f1d95e037c93e8e0e6531cfc51709f71297c..f950cb53d5e3c9b460bff5f72f108af4f1b2d29a 100644 (file)
@@ -174,7 +174,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
         * hash function (RadioGatun) with up to 1216 bits
         */
 
-       /* saddr(16) + first_seg(1) + cleanup(1) + keyid(4) + seglist(16n) */
+       /* saddr(16) + first_seg(1) + flags(1) + keyid(4) + seglist(16n) */
        plen = 16 + 1 + 1 + 4 + (hdr->first_segment + 1) * 16;
 
        /* this limit allows for 14 segments */
@@ -186,7 +186,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
         *
         * 1. Source IPv6 address (128 bits)
         * 2. first_segment value (8 bits)
-        * 3. cleanup flag (8 bits: highest bit is cleanup value, others are 0)
+        * 3. Flags (8 bits)
         * 4. HMAC Key ID (32 bits)
         * 5. All segments in the segments list (n * 128 bits)
         */
@@ -202,8 +202,8 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
        /* first_segment value */
        *off++ = hdr->first_segment;
 
-       /* cleanup flag */
-       *off++ = !!(sr_has_cleanup(hdr)) << 7;
+       /* flags */
+       *off++ = hdr->flags;
 
        /* HMAC Key ID */
        memcpy(off, &hmackeyid, 4);
index 6b9fc63fd4d23f17a6716b3557098e09f4f1d439..b5d27212db2fe6d2147719c18850263cd9ecb611 100644 (file)
@@ -996,6 +996,16 @@ drop:
        return 0; /* don't send reset */
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+       /* We need to move header back to the beginning if xfrm6_policy_check()
+        * and tcp_v6_fill_cb() are going to be called again.
+        * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+        */
+       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+               sizeof(struct inet6_skb_parm));
+}
+
 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                         struct request_sock *req,
                                         struct dst_entry *dst,
@@ -1184,8 +1194,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
                                                      sk_gfp_mask(sk, GFP_ATOMIC));
                        consume_skb(ireq->pktopts);
                        ireq->pktopts = NULL;
-                       if (newnp->pktoptions)
+                       if (newnp->pktoptions) {
+                               tcp_v6_restore_cb(newnp->pktoptions);
                                skb_set_owner_r(newnp->pktoptions, newsk);
+                       }
                }
        }
 
@@ -1200,16 +1212,6 @@ out:
        return NULL;
 }
 
-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
-       /* We need to move header back to the beginning if xfrm6_policy_check()
-        * and tcp_v6_fill_cb() are going to be called again.
-        * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
-        */
-       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-               sizeof(struct inet6_skb_parm));
-}
-
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
index 51346fa70298a0864e3997a6c94845a5707fa305..df71ba05f41d646d461d2b67c5ad2909ac2a74c7 100644 (file)
@@ -454,7 +454,7 @@ try_again:
        return err;
 
 csum_copy_err:
-       if (!__sk_queue_drop_skb(sk, skb, flags)) {
+       if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
                if (is_udp4) {
                        UDP_INC_STATS(sock_net(sk),
                                      UDP_MIB_CSUMERRORS, is_udplite);
index ecfdd97758a386ed9d642a3fc141f2ec6da13db2..5c3af5eb405232167bbd62a13b4d1f37370d6bc0 100644 (file)
@@ -124,7 +124,7 @@ static int aes_siv_encrypt(const u8 *key, size_t key_len,
 
        /* CTR */
 
-       tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0);
+       tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm2)) {
                kfree(tmp);
                return PTR_ERR(tfm2);
@@ -183,7 +183,7 @@ static int aes_siv_decrypt(const u8 *key, size_t key_len,
 
        /* CTR */
 
-       tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0);
+       tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm2))
                return PTR_ERR(tfm2);
        /* K2 for CTR */
@@ -272,7 +272,7 @@ int fils_encrypt_assoc_req(struct sk_buff *skb,
        crypt_len = skb->data + skb->len - encr;
        skb_put(skb, AES_BLOCK_SIZE);
        return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
-                              encr, crypt_len, 1, addr, len, encr);
+                              encr, crypt_len, 5, addr, len, encr);
 }
 
 int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata,
index 9c23172feba07abd63d7fdf2ab1530cbca28b8b9..c28b0af9c1f21735915433aecd632165a4a82580 100644 (file)
@@ -335,7 +335,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
        /* fast-forward to vendor IEs */
        offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
 
-       if (offset) {
+       if (offset < ifmsh->ie_len) {
                len = ifmsh->ie_len - offset;
                data = ifmsh->ie + offset;
                if (skb_tailroom(skb) < len)
index a4609a0be76dd46ec7d1d9eabe5d72b8c1b841b3..a8b4252fe0842882fa4a067a25204689cd1b12cd 100644 (file)
@@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
        union sctp_addr *laddr = (union sctp_addr *)addr;
        struct sctp_transport *transport;
 
-       if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
+       if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
                return NULL;
 
        addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
@@ -7540,7 +7540,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
                 */
                release_sock(sk);
                current_timeo = schedule_timeout(current_timeo);
-               BUG_ON(sk != asoc->base.sk);
+               if (sk != asoc->base.sk)
+                       goto do_error;
                lock_sock(sk);
 
                *timeo_p = current_timeo;
index dc6fb79a361f1ca3ab9869fc02ba05c1a533ad9b..25d9a9cf7b66b7f4e501d38d91f6a1908830972e 100644 (file)
@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
        if (!oa->data)
                return -ENOMEM;
 
-       creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
+       creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
        if (!creds) {
                kfree(oa->data);
                return -ENOMEM;
index 63dfa60a29ef940cc4a2a0ca92a98bd5757b8dbe..3aee94b0c6c51657bb49b9fb878667a7b86ae616 100644 (file)
@@ -5921,6 +5921,7 @@ do {                                                                          \
                        break;
                }
                cfg->ht_opmode = ht_opmode;
+               mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
        }
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
                                  1, 65535, mask,
index eadcd4d359d91fc7823a75263c44c520e05f900b..d883116ebaa452d9c2f6c657de53121ebd9d50bd 100644 (file)
@@ -164,6 +164,7 @@ cmd_gensymtypes_c =                                                         \
     $(CPP) -D__GENKSYMS__ $(c_flags) $< |                                   \
     $(GENKSYMS) $(if $(1), -T $(2))                                         \
      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))             \
+     $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS))                             \
      $(if $(KBUILD_PRESERVE),-p)                                            \
      -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
 
@@ -337,6 +338,7 @@ cmd_gensymtypes_S =                                                         \
     $(CPP) -D__GENKSYMS__ $(c_flags) -xc - |                                \
     $(GENKSYMS) $(if $(1), -T $(2))                                         \
      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))             \
+     $(patsubst y,-R,$(CONFIG_MODULE_REL_CRCS))                             \
      $(if $(KBUILD_PRESERVE),-p)                                            \
      -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
 
index 06121ce524a76006072459d352d727b4aebdf203..c9235d8340f1e7ba33eacfaee94642c18f5fd211 100644 (file)
@@ -44,7 +44,7 @@ char *cur_filename, *source_file;
 int in_source_file;
 
 static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types,
-          flag_preserve, flag_warnings;
+          flag_preserve, flag_warnings, flag_rel_crcs;
 static const char *mod_prefix = "";
 
 static int errors;
@@ -693,7 +693,10 @@ void export_symbol(const char *name)
                        fputs(">\n", debugfile);
 
                /* Used as a linker script. */
-               printf("%s__crc_%s = 0x%08lx ;\n", mod_prefix, name, crc);
+               printf(!flag_rel_crcs ? "%s__crc_%s = 0x%08lx;\n" :
+                      "SECTIONS { .rodata : ALIGN(4) { "
+                      "%s__crc_%s = .; LONG(0x%08lx); } }\n",
+                      mod_prefix, name, crc);
        }
 }
 
@@ -730,7 +733,7 @@ void error_with_pos(const char *fmt, ...)
 
 static void genksyms_usage(void)
 {
-       fputs("Usage:\n" "genksyms [-adDTwqhV] > /path/to/.tmp_obj.ver\n" "\n"
+       fputs("Usage:\n" "genksyms [-adDTwqhVR] > /path/to/.tmp_obj.ver\n" "\n"
 #ifdef __GNU_LIBRARY__
              "  -s, --symbol-prefix   Select symbol prefix\n"
              "  -d, --debug           Increment the debug level (repeatable)\n"
@@ -742,6 +745,7 @@ static void genksyms_usage(void)
              "  -q, --quiet           Disable warnings (default)\n"
              "  -h, --help            Print this message\n"
              "  -V, --version         Print the release version\n"
+             "  -R, --relative-crc    Emit section relative symbol CRCs\n"
 #else                          /* __GNU_LIBRARY__ */
              "  -s                    Select symbol prefix\n"
              "  -d                    Increment the debug level (repeatable)\n"
@@ -753,6 +757,7 @@ static void genksyms_usage(void)
              "  -q                    Disable warnings (default)\n"
              "  -h                    Print this message\n"
              "  -V                    Print the release version\n"
+             "  -R                    Emit section relative symbol CRCs\n"
 #endif                         /* __GNU_LIBRARY__ */
              , stderr);
 }
@@ -774,13 +779,14 @@ int main(int argc, char **argv)
                {"preserve", 0, 0, 'p'},
                {"version", 0, 0, 'V'},
                {"help", 0, 0, 'h'},
+               {"relative-crc", 0, 0, 'R'},
                {0, 0, 0, 0}
        };
 
-       while ((o = getopt_long(argc, argv, "s:dwqVDr:T:ph",
+       while ((o = getopt_long(argc, argv, "s:dwqVDr:T:phR",
                                &long_opts[0], NULL)) != EOF)
 #else                          /* __GNU_LIBRARY__ */
-       while ((o = getopt(argc, argv, "s:dwqVDr:T:ph")) != EOF)
+       while ((o = getopt(argc, argv, "s:dwqVDr:T:phR")) != EOF)
 #endif                         /* __GNU_LIBRARY__ */
                switch (o) {
                case 's':
@@ -823,6 +829,9 @@ int main(int argc, char **argv)
                case 'h':
                        genksyms_usage();
                        return 0;
+               case 'R':
+                       flag_rel_crcs = 1;
+                       break;
                default:
                        genksyms_usage();
                        return 1;
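
In the default mode genksyms keeps emitting an absolute linker assignment per exported symbol; with the new -R option it emits a small SECTIONS fragment that stores the CRC value in .rodata and defines __crc_<sym> as a label on it, which is what lets the kcrctab's "__crc_<sym> - ." references resolve to 32-bit offsets. For a hypothetical export foo with CRC 0x12345678 (and no symbol prefix), the two printf formats in the hunk produce:

	__crc_foo = 0x12345678;

	SECTIONS { .rodata : ALIGN(4) { __crc_foo = .; LONG(0x12345678); } }
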
index 299b92ca1ae092d82e9a0e3bffaec45988ebcc37..5d554419170b7d54ec82ddb1d31093d3eab0aa7d 100644 (file)
@@ -219,6 +219,10 @@ static int symbol_valid(struct sym_entry *s)
                "_SDA2_BASE_",          /* ppc */
                NULL };
 
+       static char *special_prefixes[] = {
+               "__crc_",               /* modversions */
+               NULL };
+
        static char *special_suffixes[] = {
                "_veneer",              /* arm */
                "_from_arm",            /* arm */
@@ -259,6 +263,14 @@ static int symbol_valid(struct sym_entry *s)
                if (strcmp(sym_name, special_symbols[i]) == 0)
                        return 0;
 
+       for (i = 0; special_prefixes[i]; i++) {
+               int l = strlen(special_prefixes[i]);
+
+               if (l <= strlen(sym_name) &&
+                   strncmp(sym_name, special_prefixes[i], l) == 0)
+                       return 0;
+       }
+
        for (i = 0; special_suffixes[i]; i++) {
                int l = strlen(sym_name) - strlen(special_suffixes[i]);
 
index 29c89a6bad3d3ac34e539189e83769f1c63ddab3..4dedd0d3d3a7fda58af2bc6150b9f6b6195d2cac 100644 (file)
@@ -621,6 +621,16 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
        if (strncmp(symname, CRC_PFX, strlen(CRC_PFX)) == 0) {
                is_crc = true;
                crc = (unsigned int) sym->st_value;
+               if (sym->st_shndx != SHN_UNDEF && sym->st_shndx != SHN_ABS) {
+                       unsigned int *crcp;
+
+                       /* symbol points to the CRC in the ELF object */
+                       crcp = (void *)info->hdr + sym->st_value +
+                              info->sechdrs[sym->st_shndx].sh_offset -
+                              (info->hdr->e_type != ET_REL ?
+                               info->sechdrs[sym->st_shndx].sh_addr : 0);
+                       crc = *crcp;
+               }
                sym_update_crc(symname + strlen(CRC_PFX), mod, crc,
                                export);
        }
index 5e0dea2cdc01f65849f49f10392293a21b3a468d..039636ffb6c8a3edb6c14fd9a2b3a854ab84f982 100644 (file)
@@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
                *type = INSN_RETURN;
                break;
 
-       case 0xc5: /* iret */
        case 0xca: /* retf */
        case 0xcb: /* retf */
+       case 0xcf: /* iret */
                *type = INSN_CONTEXT_SWITCH;
                break;