From: Linus Torvalds
Date: Wed, 28 Oct 2015 09:59:53 +0000 (+0900)
Subject: Merge tag 'powerpc-4.3-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc...
X-Git-Tag: v4.3~16
X-Git-Url: https://git.kernelconcepts.de/?p=karo-tx-linux.git;a=commitdiff_plain;h=8a28d67457b613258aa0578ccece206d166f2b9f;hp=977bf062bba3eb8d03f66d5b4e227e5d7ebc1e08

Merge tag 'powerpc-4.3-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fix from Michael Ellerman:

 - powerpc/dma: dma_set_coherent_mask() should not be GPL only from Ben

* tag 'powerpc-4.3-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  powerpc/dma: dma_set_coherent_mask() should not be GPL only
---

diff --git a/.mailmap b/.mailmap
index 4b31af54ccd5..b1e9a97653dc 100644
--- a/.mailmap
+++ b/.mailmap
@@ -59,6 +59,7 @@ James Bottomley
 James Bottomley
 James E Wilson
 James Ketrenos
+Jean Tourrilhes
 Jeff Garzik
 Jens Axboe
diff --git a/Documentation/Changes b/Documentation/Changes
index 6d8863004858..f447f0516f07 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -43,7 +43,7 @@ o udev 081 # udevd --version
 o grub 0.93 # grub --version || grub-install --version
 o mcelog 0.6 # mcelog --version
 o iptables 1.4.2 # iptables -V
-o openssl & libcrypto 1.0.1k # openssl version
+o openssl & libcrypto 1.0.0 # openssl version

 Kernel compilation
diff --git a/Documentation/arm/OMAP/README b/Documentation/arm/OMAP/README
new file mode 100644
index 000000000000..75645c45d14a
--- /dev/null
+++ b/Documentation/arm/OMAP/README
@@ -0,0 +1,7 @@
+This file contains documentation for running mainline
+kernel on omaps.
+
+KERNEL NEW DEPENDENCIES
+v4.3+	Update is needed for custom .config files to make sure
+	CONFIG_REGULATOR_PBIAS is enabled for MMC1 to work
+	properly.
diff --git a/Documentation/device-mapper/snapshot.txt b/Documentation/device-mapper/snapshot.txt
index 0d5bc46dc167..ad6949bff2e3 100644
--- a/Documentation/device-mapper/snapshot.txt
+++ b/Documentation/device-mapper/snapshot.txt
@@ -41,9 +41,13 @@ useless and be disabled, returning errors.  So it is important to monitor
 the amount of free space and expand the before it fills up.

 is P (Persistent) or N (Not persistent - will not survive
-after reboot).
-The difference is that for transient snapshots less metadata must be
-saved on disk - they can be kept in memory by the kernel.
+after reboot). O (Overflow) can be added as a persistent store option
+to allow userspace to advertise its support for seeing "Overflow" in the
+snapshot status. So supported store types are "P", "PO" and "N".
+
+The difference between persistent and transient is with transient
+snapshots less metadata must be saved on disk - they can be kept in
+memory by the kernel.
* snapshot-merge diff --git a/Documentation/devicetree/bindings/input/cypress,cyapa.txt b/Documentation/devicetree/bindings/input/cypress,cyapa.txt index 635a3b036630..8d91ba9ff2fd 100644 --- a/Documentation/devicetree/bindings/input/cypress,cyapa.txt +++ b/Documentation/devicetree/bindings/input/cypress,cyapa.txt @@ -25,7 +25,7 @@ Example: /* Cypress Gen3 touchpad */ touchpad@67 { compatible = "cypress,cyapa"; - reg = <0x24>; + reg = <0x67>; interrupt-parent = <&gpio>; interrupts = <2 IRQ_TYPE_EDGE_FALLING>; /* GPIO 2 */ wakeup-source; diff --git a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt index 391717a68f3b..ec96b1f01478 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/qca,ath79-misc-intc.txt @@ -4,8 +4,8 @@ The MISC interrupt controller is a secondary controller for lower priority interrupt. Required Properties: -- compatible: has to be "qca,-cpu-intc", "qca,ar7100-misc-intc" - as fallback +- compatible: has to be "qca,-cpu-intc", "qca,ar7100-misc-intc" or + "qca,-cpu-intc", "qca,ar7240-misc-intc" - reg: Base address and size of the controllers memory area - interrupt-parent: phandle of the parent interrupt controller. - interrupts: Interrupt specifier for the controllers interrupt. @@ -13,6 +13,9 @@ Required Properties: - #interrupt-cells : Specifies the number of cells needed to encode interrupt source, should be 1 +Compatible fallback depends on the SoC. Use ar7100 for ar71xx and ar913x, +use ar7240 for all other SoCs. + Please refer to interrupts.txt in this directory for details of the common Interrupt Controllers bindings used by client devices. @@ -28,3 +31,16 @@ Example: interrupt-controller; #interrupt-cells = <1>; }; + +Another example: + + interrupt-controller@18060010 { + compatible = "qca,ar9331-misc-intc", qca,ar7240-misc-intc"; + reg = <0x18060010 0x4>; + + interrupt-parent = <&cpuintc>; + interrupts = <6>; + + interrupt-controller; + #interrupt-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt index 8f771441be60..705075da2f10 100644 --- a/Documentation/devicetree/bindings/spi/sh-msiof.txt +++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt @@ -51,7 +51,7 @@ Optional properties, deprecated for soctype-specific bindings: - renesas,tx-fifo-size : Overrides the default tx fifo size given in words (default is 64) - renesas,rx-fifo-size : Overrides the default rx fifo size given in words - (default is 64, or 256 on R-Car Gen2) + (default is 64) Pinctrl properties might be needed, too. See Documentation/devicetree/bindings/pinctrl/renesas,*. 
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt index 64a4ca6cf96f..7d48f63db44e 100644 --- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt +++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt @@ -5,6 +5,7 @@ Required properties: - "renesas,usbhs-r8a7790" - "renesas,usbhs-r8a7791" - "renesas,usbhs-r8a7794" + - "renesas,usbhs-r8a7795" - reg: Base address and length of the register for the USBHS - interrupts: Interrupt specifier for the USBHS - clocks: A list of phandle + clock specifier pairs diff --git a/Documentation/input/multi-touch-protocol.txt b/Documentation/input/multi-touch-protocol.txt index b85d000faeb4..c51f1146f3bd 100644 --- a/Documentation/input/multi-touch-protocol.txt +++ b/Documentation/input/multi-touch-protocol.txt @@ -361,7 +361,7 @@ For win8 devices with both T and C coordinates, the position mapping is ABS_MT_POSITION_X := T_X ABS_MT_POSITION_Y := T_Y ABS_MT_TOOL_X := C_X - ABS_MT_TOOL_X := C_Y + ABS_MT_TOOL_Y := C_Y Unfortunately, there is not enough information to specify both the touching ellipse and the tool ellipse, so one has to resort to approximations. One diff --git a/Documentation/power/pci.txt b/Documentation/power/pci.txt index 62328d76b55b..b0e911e0e8f5 100644 --- a/Documentation/power/pci.txt +++ b/Documentation/power/pci.txt @@ -979,20 +979,45 @@ every time right after the runtime_resume() callback has returned (alternatively, the runtime_suspend() callback will have to check if the device should really be suspended and return -EAGAIN if that is not the case). -The runtime PM of PCI devices is disabled by default. It is also blocked by -pci_pm_init() that runs the pm_runtime_forbid() helper function. If a PCI -driver implements the runtime PM callbacks and intends to use the runtime PM -framework provided by the PM core and the PCI subsystem, it should enable this -feature by executing the pm_runtime_enable() helper function. However, the -driver should not call the pm_runtime_allow() helper function unblocking -the runtime PM of the device. Instead, it should allow user space or some -platform-specific code to do that (user space can do it via sysfs), although -once it has called pm_runtime_enable(), it must be prepared to handle the +The runtime PM of PCI devices is enabled by default by the PCI core. PCI +device drivers do not need to enable it and should not attempt to do so. +However, it is blocked by pci_pm_init() that runs the pm_runtime_forbid() +helper function. In addition to that, the runtime PM usage counter of +each PCI device is incremented by local_pci_probe() before executing the +probe callback provided by the device's driver. + +If a PCI driver implements the runtime PM callbacks and intends to use the +runtime PM framework provided by the PM core and the PCI subsystem, it needs +to decrement the device's runtime PM usage counter in its probe callback +function. If it doesn't do that, the counter will always be different from +zero for the device and it will never be runtime-suspended. The simplest +way to do that is by calling pm_runtime_put_noidle(), but if the driver +wants to schedule an autosuspend right away, for example, it may call +pm_runtime_put_autosuspend() instead for this purpose. Generally, it +just needs to call a function that decrements the devices usage counter +from its probe routine to make runtime PM work for the device. 
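
As a minimal sketch of the scheme described above (the "foo" driver name,
device ID and callbacks below are purely illustrative and not taken from any
real driver), a PCI driver that wants its device to be runtime-suspendable
might do the following in its probe and remove paths:

	#include <linux/module.h>
	#include <linux/pci.h>
	#include <linux/pm_runtime.h>

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int ret;

		ret = pcim_enable_device(pdev);
		if (ret)
			return ret;

		/*
		 * local_pci_probe() has already incremented the runtime PM
		 * usage counter, so drop that reference here; the device can
		 * then be runtime-suspended once pm_runtime_allow() is run
		 * (e.g. by user space via sysfs).
		 */
		pm_runtime_put_noidle(&pdev->dev);
		return 0;
	}

	static void foo_remove(struct pci_dev *pdev)
	{
		/* Balance the pm_runtime_put_noidle() done in foo_probe(). */
		pm_runtime_get_noresume(&pdev->dev);
	}

	static const struct pci_device_id foo_ids[] = {
		{ PCI_DEVICE(0x1234, 0x5678) },	/* made-up vendor/device ID */
		{ }
	};
	MODULE_DEVICE_TABLE(pci, foo_ids);

	static struct pci_driver foo_driver = {
		.name		= "foo",
		.id_table	= foo_ids,
		.probe		= foo_probe,
		.remove		= foo_remove,
		/* .driver.pm would hold the runtime_suspend/resume callbacks */
	};
	module_pci_driver(foo_driver);
	MODULE_LICENSE("GPL");
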
+ +It is important to remember that the driver's runtime_suspend() callback +may be executed right after the usage counter has been decremented, because +user space may already have cuased the pm_runtime_allow() helper function +unblocking the runtime PM of the device to run via sysfs, so the driver must +be prepared to cope with that. + +The driver itself should not call pm_runtime_allow(), though. Instead, it +should let user space or some platform-specific code do that (user space can +do it via sysfs as stated above), but it must be prepared to handle the runtime PM of the device correctly as soon as pm_runtime_allow() is called -(which may happen at any time). [It also is possible that user space causes -pm_runtime_allow() to be called via sysfs before the driver is loaded, so in -fact the driver has to be prepared to handle the runtime PM of the device as -soon as it calls pm_runtime_enable().] +(which may happen at any time, even before the driver is loaded). + +When the driver's remove callback runs, it has to balance the decrementation +of the device's runtime PM usage counter at the probe time. For this reason, +if it has decremented the counter in its probe callback, it must run +pm_runtime_get_noresume() in its remove callback. [Since the core carries +out a runtime resume of the device and bumps up the device's usage counter +before running the driver's remove callback, the runtime PM of the device +is effectively disabled for the duration of the remove execution and all +runtime PM helper functions incrementing the device's usage counter are +then effectively equivalent to pm_runtime_get_noresume().] The runtime PM framework works by processing requests to suspend or resume devices, or to check if they are idle (in which cases it is reasonable to diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c index 2bc8abc57fa0..6c6247aaa7b9 100644 --- a/Documentation/ptp/testptp.c +++ b/Documentation/ptp/testptp.c @@ -18,6 +18,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #define _GNU_SOURCE +#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */ #include #include #include diff --git a/MAINTAINERS b/MAINTAINERS index 9f6685f6c5a9..9de185da5f5b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -894,11 +894,12 @@ M: Lennert Buytenhek L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -ARM/Allwinner A1X SoC support +ARM/Allwinner sunXi SoC support M: Maxime Ripard +M: Chen-Yu Tsai L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -N: sun[x4567]i +N: sun[x456789]i ARM/Allwinner SoC Clock Support M: Emilio López @@ -3591,6 +3592,13 @@ F: drivers/gpu/drm/i915/ F: include/drm/i915* F: include/uapi/drm/i915* +DRM DRIVERS FOR ATMEL HLCDC +M: Boris Brezillon +L: dri-devel@lists.freedesktop.org +S: Supported +F: drivers/gpu/drm/atmel-hlcdc/ +F: Documentation/devicetree/bindings/drm/atmel/ + DRM DRIVERS FOR EXYNOS M: Inki Dae M: Joonyoung Shim @@ -3619,6 +3627,14 @@ S: Maintained F: drivers/gpu/drm/imx/ F: Documentation/devicetree/bindings/drm/imx/ +DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets) +M: Patrik Jakobsson +L: dri-devel@lists.freedesktop.org +T: git git://github.com/patjak/drm-gma500 +S: Maintained +F: drivers/gpu/drm/gma500 +F: include/drm/gma500* + DRM DRIVERS FOR NVIDIA TEGRA M: Thierry Reding M: Terje Bergström @@ -4003,7 +4019,7 @@ S: Maintained F: sound/usb/misc/ua101.c EXTENSIBLE FIRMWARE INTERFACE (EFI) -M: Matt Fleming +M: Matt Fleming L: linux-efi@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git S: Maintained @@ -4018,7 +4034,7 @@ F: include/linux/efi*.h EFI VARIABLE FILESYSTEM M: Matthew Garrett M: Jeremy Kerr -M: Matt Fleming +M: Matt Fleming T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git L: linux-efi@vger.kernel.org S: Maintained @@ -5957,7 +5973,7 @@ F: virt/kvm/ KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V M: Joerg Roedel L: kvm@vger.kernel.org -W: http://kvm.qumranet.com +W: http://www.linux-kvm.org/ S: Maintained F: arch/x86/include/asm/svm.h F: arch/x86/kvm/svm.c @@ -5965,7 +5981,7 @@ F: arch/x86/kvm/svm.c KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC M: Alexander Graf L: kvm-ppc@vger.kernel.org -W: http://kvm.qumranet.com +W: http://www.linux-kvm.org/ T: git git://github.com/agraf/linux-2.6.git S: Supported F: arch/powerpc/include/asm/kvm* @@ -6778,7 +6794,6 @@ F: drivers/scsi/megaraid/ MELLANOX ETHERNET DRIVER (mlx4_en) M: Amir Vadai -M: Ido Shamay L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com @@ -9101,6 +9116,15 @@ S: Supported F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt F: drivers/net/ethernet/synopsys/dwc_eth_qos.c +SYNOPSYS DESIGNWARE I2C DRIVER +M: Andy Shevchenko +M: Jarkko Nikula +M: Mika Westerberg +L: linux-i2c@vger.kernel.org +S: Maintained +F: drivers/i2c/busses/i2c-designware-* +F: include/linux/platform_data/i2c-designware.h + SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER M: Seungwon Jeon M: Jaehoon Chung @@ -9914,7 +9938,6 @@ S: Maintained F: drivers/staging/lustre STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec) -M: Julian Andres Klode M: Marc Dietrich L: ac100@lists.launchpad.net (moderated for non-subscribers) L: linux-tegra@vger.kernel.org @@ -11378,15 +11401,6 @@ W: http://oops.ghostprotocols.net:81/blog S: Maintained F: drivers/net/wireless/wl3501* -WM97XX TOUCHSCREEN DRIVERS -M: Mark Brown -M: Liam Girdwood -L: linux-input@vger.kernel.org -W: https://github.com/CirrusLogic/linux-drivers/wiki -S: Supported -F: 
drivers/input/touchscreen/*wm97* -F: include/linux/wm97xx.h - WOLFSON MICROELECTRONICS DRIVERS L: patches@opensource.wolfsonmicro.com T: git https://github.com/CirrusLogic/linux-drivers.git @@ -11661,6 +11675,7 @@ F: drivers/tty/serial/zs.* ZSMALLOC COMPRESSED SLAB MEMORY ALLOCATOR M: Minchan Kim M: Nitin Gupta +R: Sergey Senozhatsky L: linux-mm@kvack.org S: Maintained F: mm/zsmalloc.c diff --git a/Makefile b/Makefile index 1d341eba143d..431067a41fcf 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ VERSION = 4 PATCHLEVEL = 3 SUBLEVEL = 0 -EXTRAVERSION = -rc3 -NAME = Hurr durr I'ma sheep +EXTRAVERSION = -rc7 +NAME = Blurry Fish Butt # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/arch/alpha/include/asm/word-at-a-time.h b/arch/alpha/include/asm/word-at-a-time.h index 6b340d0f1521..902e6ab00a06 100644 --- a/arch/alpha/include/asm/word-at-a-time.h +++ b/arch/alpha/include/asm/word-at-a-time.h @@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits) #endif } +#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1) + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 7611b10a2d23..0b10ef2a4372 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -48,4 +48,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 72ad724c67ae..639411f73ca9 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -645,6 +645,7 @@ config ARCH_SHMOBILE_LEGACY config ARCH_RPC bool "RiscPC" + depends on MMU select ARCH_ACORN select ARCH_MAY_HAVE_PC_FDC select ARCH_SPARSEMEM_ENABLE diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 233159d2eaab..bb8fa023d574 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -578,7 +578,7 @@ dtb-$(CONFIG_MACH_SUN4I) += \ sun4i-a10-hackberry.dtb \ sun4i-a10-hyundai-a7hd.dtb \ sun4i-a10-inet97fv2.dtb \ - sun4i-a10-itead-iteaduino-plus.dts \ + sun4i-a10-itead-iteaduino-plus.dtb \ sun4i-a10-jesurun-q5.dtb \ sun4i-a10-marsboard.dtb \ sun4i-a10-mini-xplus.dtb \ diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index 568adf5efde0..d55e3ea89fda 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -402,11 +402,12 @@ /* SMPS9 unused */ ldo1_reg: ldo1 { - /* VDD_SD */ + /* VDD_SD / VDDSHV8 */ regulator-name = "ldo1"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; regulator-boot-on; + regulator-always-on; }; ldo2_reg: ldo2 { diff --git a/arch/arm/boot/dts/armada-385-db-ap.dts b/arch/arm/boot/dts/armada-385-db-ap.dts index 89f5a95954ed..4047621b137e 100644 --- a/arch/arm/boot/dts/armada-385-db-ap.dts +++ b/arch/arm/boot/dts/armada-385-db-ap.dts @@ -46,7 +46,7 @@ / { model = "Marvell Armada 385 Access Point Development Board"; - compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada38x"; + compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada380"; chosen { stdout-path = "serial1:115200n8"; diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi index 63a48490e2f9..d4dbd28d348c 100644 --- a/arch/arm/boot/dts/berlin2q.dtsi +++ b/arch/arm/boot/dts/berlin2q.dtsi @@ -152,7 +152,7 @@ }; usb_phy2: phy@a2f400 { - compatible = "marvell,berlin2-usb-phy"; + compatible = "marvell,berlin2cd-usb-phy"; 
reg = <0xa2f400 0x128>; #phy-cells = <0>; resets = <&chip_rst 0x104 14>; @@ -170,7 +170,7 @@ }; usb_phy0: phy@b74000 { - compatible = "marvell,berlin2-usb-phy"; + compatible = "marvell,berlin2cd-usb-phy"; reg = <0xb74000 0x128>; #phy-cells = <0>; resets = <&chip_rst 0x104 12>; @@ -178,7 +178,7 @@ }; usb_phy1: phy@b78000 { - compatible = "marvell,berlin2-usb-phy"; + compatible = "marvell,berlin2cd-usb-phy"; reg = <0xb78000 0x128>; #phy-cells = <0>; resets = <&chip_rst 0x104 13>; diff --git a/arch/arm/boot/dts/exynos4412.dtsi b/arch/arm/boot/dts/exynos4412.dtsi index ca0e3c15977f..294cfe40388d 100644 --- a/arch/arm/boot/dts/exynos4412.dtsi +++ b/arch/arm/boot/dts/exynos4412.dtsi @@ -98,6 +98,7 @@ opp-hz = /bits/ 64 <800000000>; opp-microvolt = <1000000>; clock-latency-ns = <200000>; + opp-suspend; }; opp07 { opp-hz = /bits/ 64 <900000000>; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 15aea760c1da..c625e71217aa 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -197,6 +197,7 @@ regulator-name = "P1.8V_LDO_OUT10"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; + regulator-always-on; }; ldo11_reg: LDO11 { diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts index 8f4d76c5e11c..1b95da79293c 100644 --- a/arch/arm/boot/dts/exynos5420-peach-pit.dts +++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts @@ -915,6 +915,11 @@ }; }; +&pmu_system_controller { + assigned-clocks = <&pmu_system_controller 0>; + assigned-clock-parents = <&clock CLK_FIN_PLL>; +}; + &rtc { status = "okay"; clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>; diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi index df9aee92ecf4..1b3d6c769a3c 100644 --- a/arch/arm/boot/dts/exynos5420.dtsi +++ b/arch/arm/boot/dts/exynos5420.dtsi @@ -1117,7 +1117,7 @@ interrupt-parent = <&combiner>; interrupts = <3 0>; clock-names = "sysmmu", "master"; - clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>; + clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>; power-domains = <&disp_pd>; #iommu-cells = <0>; }; diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi index 79ffdfe712aa..3b43e57845ae 100644 --- a/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi +++ b/arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi @@ -472,7 +472,6 @@ */ pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>; pinctrl-names = "default"; - samsung,pwm-outputs = <0>; status = "okay"; }; diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts index 7d5b386b5ae6..8f40c7e549bd 100644 --- a/arch/arm/boot/dts/exynos5800-peach-pi.dts +++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts @@ -878,6 +878,11 @@ }; }; +&pmu_system_controller { + assigned-clocks = <&pmu_system_controller 0>; + assigned-clock-parents = <&clock CLK_FIN_PLL>; +}; + &rtc { status = "okay"; clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>; diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts index 66e47de5e826..96d7eede412e 100644 --- a/arch/arm/boot/dts/imx53-qsrb.dts +++ b/arch/arm/boot/dts/imx53-qsrb.dts @@ -36,7 +36,7 @@ pinctrl-0 = <&pinctrl_pmic>; reg = <0x08>; interrupt-parent = <&gpio5>; - interrupts = <23 0x8>; + interrupts = <23 IRQ_TYPE_LEVEL_HIGH>; regulators { sw1_reg: sw1a { regulator-name = "SW1"; diff --git 
a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index c3e3ca9362fb..cd170376eaca 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -15,6 +15,7 @@ #include #include #include +#include / { aliases { diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi index 3373fd958e95..a50356243888 100644 --- a/arch/arm/boot/dts/imx6qdl-rex.dtsi +++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi @@ -35,7 +35,6 @@ compatible = "regulator-fixed"; reg = <1>; pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usbh1>; regulator-name = "usbh1_vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; @@ -47,7 +46,6 @@ compatible = "regulator-fixed"; reg = <2>; pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_usbotg>; regulator-name = "usb_otg_vbus"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi index b738ce0f9d9b..6e444bb873f9 100644 --- a/arch/arm/boot/dts/imx7d.dtsi +++ b/arch/arm/boot/dts/imx7d.dtsi @@ -588,10 +588,10 @@ status = "disabled"; }; - uart2: serial@30870000 { + uart2: serial@30890000 { compatible = "fsl,imx7d-uart", "fsl,imx6q-uart"; - reg = <0x30870000 0x10000>; + reg = <0x30890000 0x10000>; interrupts = ; clocks = <&clks IMX7D_UART2_ROOT_CLK>, <&clks IMX7D_UART2_ROOT_CLK>; diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts index 91146c318798..5b0430041ec6 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts +++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts @@ -12,7 +12,7 @@ / { model = "LogicPD Zoom DM3730 Torpedo Development Kit"; - compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap36xx"; + compatible = "logicpd,dm3730-torpedo-devkit", "ti,omap3630", "ti,omap3"; gpio_keys { compatible = "gpio-keys"; diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index 548441384d2a..8c77c87660cd 100644 --- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi @@ -67,7 +67,7 @@ timer@c1109940 { compatible = "amlogic,meson6-timer"; - reg = <0xc1109940 0x14>; + reg = <0xc1109940 0x18>; interrupts = <0 10 1>; }; @@ -80,36 +80,37 @@ wdt: watchdog@c1109900 { compatible = "amlogic,meson6-wdt"; reg = <0xc1109900 0x8>; + interrupts = <0 0 1>; }; uart_AO: serial@c81004c0 { compatible = "amlogic,meson-uart"; - reg = <0xc81004c0 0x14>; + reg = <0xc81004c0 0x18>; interrupts = <0 90 1>; clocks = <&clk81>; status = "disabled"; }; - uart_A: serial@c81084c0 { + uart_A: serial@c11084c0 { compatible = "amlogic,meson-uart"; - reg = <0xc81084c0 0x14>; - interrupts = <0 90 1>; + reg = <0xc11084c0 0x18>; + interrupts = <0 26 1>; clocks = <&clk81>; status = "disabled"; }; - uart_B: serial@c81084dc { + uart_B: serial@c11084dc { compatible = "amlogic,meson-uart"; - reg = <0xc81084dc 0x14>; - interrupts = <0 90 1>; + reg = <0xc11084dc 0x18>; + interrupts = <0 75 1>; clocks = <&clk81>; status = "disabled"; }; - uart_C: serial@c8108700 { + uart_C: serial@c1108700 { compatible = "amlogic,meson-uart"; - reg = <0xc8108700 0x14>; - interrupts = <0 90 1>; + reg = <0xc1108700 0x18>; + interrupts = <0 93 1>; clocks = <&clk81>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/omap3-evm-37xx.dts b/arch/arm/boot/dts/omap3-evm-37xx.dts index 16e8ce350dda..bb339d1648e0 100644 --- a/arch/arm/boot/dts/omap3-evm-37xx.dts +++ b/arch/arm/boot/dts/omap3-evm-37xx.dts @@ -13,7 +13,7 @@ / { model = "TI OMAP37XX EVM (TMDSEVM3730)"; - compatible = 
"ti,omap3-evm-37xx", "ti,omap36xx"; + compatible = "ti,omap3-evm-37xx", "ti,omap3630", "ti,omap3"; memory { device_type = "memory"; diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi index a0b2a79cbfbd..4624d0f2a754 100644 --- a/arch/arm/boot/dts/r8a7790.dtsi +++ b/arch/arm/boot/dts/r8a7790.dtsi @@ -1627,6 +1627,7 @@ "mix.0", "mix.1", "dvc.0", "dvc.1", "clk_a", "clk_b", "clk_c", "clk_i"; + power-domains = <&cpg_clocks>; status = "disabled"; diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi index 831525dd39a6..1666c8a6b143 100644 --- a/arch/arm/boot/dts/r8a7791.dtsi +++ b/arch/arm/boot/dts/r8a7791.dtsi @@ -1677,6 +1677,7 @@ "mix.0", "mix.1", "dvc.0", "dvc.1", "clk_a", "clk_b", "clk_c", "clk_i"; + power-domains = <&cpg_clocks>; status = "disabled"; diff --git a/arch/arm/boot/dts/ste-hrefv60plus.dtsi b/arch/arm/boot/dts/ste-hrefv60plus.dtsi index 810cda743b6d..9c2387b34d0c 100644 --- a/arch/arm/boot/dts/ste-hrefv60plus.dtsi +++ b/arch/arm/boot/dts/ste-hrefv60plus.dtsi @@ -56,7 +56,7 @@ /* VMMCI level-shifter enable */ default_hrefv60_cfg2 { pins = "GPIO169_D22"; - ste,config = <&gpio_out_lo>; + ste,config = <&gpio_out_hi>; }; /* VMMCI level-shifter voltage select */ default_hrefv60_cfg3 { diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 2bebaa286f9a..391230c3dc93 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -107,7 +107,7 @@ 720000 1200000 528000 1100000 312000 1000000 - 144000 900000 + 144000 1000000 >; #cooling-cells = <2>; cooling-min-level = <0>; diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi index 9d4f86e9c50a..d845bd1448b5 100644 --- a/arch/arm/boot/dts/tegra114.dtsi +++ b/arch/arm/boot/dts/tegra114.dtsi @@ -234,7 +234,9 @@ gpio-controller; #interrupt-cells = <2>; interrupt-controller; + /* gpio-ranges = <&pinmux 0 0 246>; + */ }; apbmisc@70000800 { diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi index 1e204a6de12c..819e2ae2cabe 100644 --- a/arch/arm/boot/dts/tegra124.dtsi +++ b/arch/arm/boot/dts/tegra124.dtsi @@ -258,7 +258,9 @@ gpio-controller; #interrupt-cells = <2>; interrupt-controller; + /* gpio-ranges = <&pinmux 0 0 251>; + */ }; apbdma: dma@0,60020000 { diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index e058709e6d98..969b828505ae 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi @@ -244,7 +244,9 @@ gpio-controller; #interrupt-cells = <2>; interrupt-controller; + /* gpio-ranges = <&pinmux 0 0 224>; + */ }; apbmisc@70000800 { diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index fe04fb5e155f..c6938ad1b543 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi @@ -349,7 +349,9 @@ gpio-controller; #interrupt-cells = <2>; interrupt-controller; + /* gpio-ranges = <&pinmux 0 0 248>; + */ }; apbmisc@70000800 { diff --git a/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts b/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts index 33963acd7e8f..f80f772d99fb 100644 --- a/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts +++ b/arch/arm/boot/dts/uniphier-ph1-ld6b-ref.dts @@ -85,7 +85,7 @@ }; ðsc { - interrupts = <0 50 4>; + interrupts = <0 52 4>; }; &serial0 { diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig index 210eccadb69a..356970f3b25e 100644 --- a/arch/arm/kvm/Kconfig +++ b/arch/arm/kvm/Kconfig @@ -21,6 +21,7 @@ config KVM depends on MMU && OF select PREEMPT_NOTIFIERS select ANON_INODES 
+ select ARM_GIC select HAVE_KVM_CPU_RELAX_INTERCEPT select HAVE_KVM_ARCH_TLB_FLUSH_ALL select KVM_MMIO diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index dc017adfddc8..78b286994577 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -1080,7 +1080,7 @@ static int init_hyp_mode(void) */ err = kvm_timer_hyp_init(); if (err) - goto out_free_mappings; + goto out_free_context; #ifndef CONFIG_HOTPLUG_CPU free_boot_hyp_pgd(); diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c index 9bdf54795f05..56978199c479 100644 --- a/arch/arm/mach-exynos/mcpm-exynos.c +++ b/arch/arm/mach-exynos/mcpm-exynos.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "regs-pmu.h" #include "common.h" @@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster) cluster >= EXYNOS5420_NR_CLUSTERS) return -EINVAL; - exynos_cpu_power_up(cpunr); + if (!exynos_cpu_power_state(cpunr)) { + exynos_cpu_power_up(cpunr); + + /* + * This assumes the cluster number of the big cores(Cortex A15) + * is 0 and the Little cores(Cortex A7) is 1. + * When the system was booted from the Little core, + * they should be reset during power up cpu. + */ + if (cluster && + cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) { + /* + * Before we reset the Little cores, we should wait + * the SPARE2 register is set to 1 because the init + * codes of the iROM will set the register after + * initialization. + */ + while (!pmu_raw_readl(S5P_PMU_SPARE2)) + udelay(10); + + pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu), + EXYNOS_SWRESET); + } + } + return 0; } diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index 4a87e86dec45..7c21760f590f 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c @@ -200,15 +200,15 @@ no_clk: args.args_count = 0; child_domain = of_genpd_get_from_provider(&args); if (IS_ERR(child_domain)) - goto next_pd; + continue; if (of_parse_phandle_with_args(np, "power-domains", "#power-domain-cells", 0, &args) != 0) - goto next_pd; + continue; parent_domain = of_genpd_get_from_provider(&args); if (IS_ERR(parent_domain)) - goto next_pd; + continue; if (pm_genpd_add_subdomain(parent_domain, child_domain)) pr_warn("%s failed to add subdomain: %s\n", @@ -216,8 +216,6 @@ no_clk: else pr_info("%s has as child subdomain: %s.\n", parent_domain->name, child_domain->name); -next_pd: - of_node_put(np); } return 0; diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h index b7614333d296..fba9068ed260 100644 --- a/arch/arm/mach-exynos/regs-pmu.h +++ b/arch/arm/mach-exynos/regs-pmu.h @@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr) #define SPREAD_ENABLE 0xF #define SPREAD_USE_STANDWFI 0xF +#define EXYNOS5420_KFC_CORE_RESET0 BIT(8) +#define EXYNOS5420_KFC_ETM_RESET0 BIT(20) + +#define EXYNOS5420_KFC_CORE_RESET(_nr) \ + ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr)) + #define EXYNOS5420_BB_CON1 0x0784 #define EXYNOS5420_BB_SEL_EN BIT(31) #define EXYNOS5420_BB_PMOS_EN BIT(7) diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index b3a0dff67e3f..33d1460a5639 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -49,6 +49,7 @@ config SOC_OMAP5 select OMAP_INTERCONNECT select OMAP_INTERCONNECT_BARRIER select PM_OPP if PM + select ZONE_DMA if ARM_LPAE config SOC_AM33XX bool "TI AM33XX" @@ -78,6 +79,7 @@ config SOC_DRA7XX select OMAP_INTERCONNECT select 
OMAP_INTERCONNECT_BARRIER select PM_OPP if PM + select ZONE_DMA if ARM_LPAE config ARCH_OMAP2PLUS bool diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 6133eaac685d..fb219a30c10c 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -106,6 +106,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)") MACHINE_END static const char *const omap36xx_boards_compat[] __initconst = { + "ti,omap3630", "ti,omap36xx", NULL, }; @@ -243,6 +244,9 @@ static const char *const omap5_boards_compat[] __initconst = { }; DT_MACHINE_START(OMAP5_DT, "Generic OMAP5 (Flattened Device Tree)") +#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE) + .dma_zone_size = SZ_2G, +#endif .reserve = omap_reserve, .smp = smp_ops(omap4_smp_ops), .map_io = omap5_map_io, @@ -288,6 +292,9 @@ static const char *const dra74x_boards_compat[] __initconst = { }; DT_MACHINE_START(DRA74X_DT, "Generic DRA74X (Flattened Device Tree)") +#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE) + .dma_zone_size = SZ_2G, +#endif .reserve = omap_reserve, .smp = smp_ops(omap4_smp_ops), .map_io = dra7xx_map_io, @@ -308,6 +315,9 @@ static const char *const dra72x_boards_compat[] __initconst = { }; DT_MACHINE_START(DRA72X_DT, "Generic DRA72X (Flattened Device Tree)") +#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE) + .dma_zone_size = SZ_2G, +#endif .reserve = omap_reserve, .map_io = dra7xx_map_io, .init_early = dra7xx_init_early, diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index ea56397599c2..1dfe34654c43 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -559,7 +559,14 @@ static void pdata_quirks_check(struct pdata_init *quirks) void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table) { - omap_sdrc_init(NULL, NULL); + /* + * We still need this for omap2420 and omap3 PM to work, others are + * using drivers/misc/sram.c already. 
+ */ + if (of_machine_is_compatible("ti,omap2420") || + of_machine_is_compatible("ti,omap3")) + omap_sdrc_init(NULL, NULL); + pdata_quirks_check(auxdata_quirks); of_platform_populate(NULL, omap_dt_match_table, omap_auxdata_lookup, NULL); diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index 06005d3f2ba3..20ce2d386f17 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c @@ -42,10 +42,6 @@ #define PECR_IS(n) ((1 << ((n) * 2)) << 29) extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)); -#ifdef CONFIG_PM - -#define ISRAM_START 0x5c000000 -#define ISRAM_SIZE SZ_256K /* * NAND NFC: DFI bus arbitration subset @@ -54,6 +50,11 @@ extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)); #define NDCR_ND_ARB_EN (1 << 12) #define NDCR_ND_ARB_CNTL (1 << 19) +#ifdef CONFIG_PM + +#define ISRAM_START 0x5c000000 +#define ISRAM_SIZE SZ_256K + static void __iomem *sram; static unsigned long wakeup_src; diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 876060bcceeb..b8efb8cd1f73 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -614,6 +614,7 @@ load_common: case BPF_LD | BPF_B | BPF_IND: load_order = 0; load_ind: + update_on_xread(ctx); OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); goto load_common; case BPF_LDX | BPF_IMM: diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index 2235081a04ee..8861c367d061 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -495,7 +495,7 @@ void __init orion_ge00_switch_init(struct dsa_platform_data *d, int irq) d->netdev = &orion_ge00.dev; for (i = 0; i < d->nr_chips; i++) - d->chip[i].host_dev = &orion_ge00_shared.dev; + d->chip[i].host_dev = &orion_ge_mvmdio.dev; orion_switch_device.dev.platform_data = d; platform_device_register(&orion_switch_device); diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c index aedec81d1198..0cebd98cd88c 100644 --- a/arch/arm/vdso/vdsomunge.c +++ b/arch/arm/vdso/vdsomunge.c @@ -45,7 +45,6 @@ * it does. */ -#include #include #include #include @@ -59,6 +58,16 @@ #include #include +#define swab16(x) \ + ((((x) & 0x00ff) << 8) | \ + (((x) & 0xff00) >> 8)) + +#define swab32(x) \ + ((((x) & 0x000000ff) << 24) | \ + (((x) & 0x0000ff00) << 8) | \ + (((x) & 0x00ff0000) >> 8) | \ + (((x) & 0xff000000) << 24)) + #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define HOST_ORDER ELFDATA2LSB #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ @@ -104,17 +113,17 @@ static void cleanup(void) static Elf32_Word read_elf_word(Elf32_Word word, bool swap) { - return swap ? bswap_32(word) : word; + return swap ? swab32(word) : word; } static Elf32_Half read_elf_half(Elf32_Half half, bool swap) { - return swap ? bswap_16(half) : half; + return swap ? swab16(half) : half; } static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap) { - *dst = swap ? bswap_32(val) : val; + *dst = swap ? 
swab32(val) : val; } int main(int argc, char **argv) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index f9914d7c1bb0..d10b5d483022 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -42,7 +42,7 @@ endif CHECKFLAGS += -D__aarch64__ ifeq ($(CONFIG_ARM64_ERRATUM_843419), y) -CFLAGS_MODULE += -mcmodel=large +KBUILD_CFLAGS_MODULE += -mcmodel=large endif # Default value diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index b0329be95cb1..26b066690593 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -79,7 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) -#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) +#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) @@ -496,7 +496,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | - PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK; + PTE_PROT_NONE | PTE_VALID | PTE_WRITE; /* preserve the hardware dirty information */ if (pte_hw_dirty(pte)) pte = pte_mkdirty(pte); diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 3bc498c250dc..41e58fe3c041 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -44,7 +44,7 @@ #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) -#define __NR_compat_syscalls 388 +#define __NR_compat_syscalls 390 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index cef934a90f17..5b925b761a2a 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -797,3 +797,12 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create) __SYSCALL(__NR_bpf, sys_bpf) #define __NR_execveat 387 __SYSCALL(__NR_execveat, compat_sys_execveat) +#define __NR_userfaultfd 388 +__SYSCALL(__NR_userfaultfd, sys_userfaultfd) +#define __NR_membarrier 389 +__SYSCALL(__NR_membarrier, sys_membarrier) + +/* + * Please add new compat syscalls above this comment and update + * __NR_compat_syscalls in asm/unistd.h. + */ diff --git a/arch/arm64/include/uapi/asm/signal.h b/arch/arm64/include/uapi/asm/signal.h index 8d1e7236431b..991bf5db2ca1 100644 --- a/arch/arm64/include/uapi/asm/signal.h +++ b/arch/arm64/include/uapi/asm/signal.h @@ -19,6 +19,9 @@ /* Required for AArch32 compatibility. 
*/ #define SA_RESTORER 0x04000000 +#define MINSIGSTKSZ 5120 +#define SIGSTKSZ 16384 + #include #endif diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index cebf78661a55..253021ef2769 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c @@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook) } /* - * Call registered single step handers + * Call registered single step handlers * There is no Syndrome info to check for determining the handler. * So we call all the registered handlers, until the right handler is * found which returns zero. @@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr, * Use reader/writer locks instead of plain spinlock. */ static LIST_HEAD(break_hook); -static DEFINE_RWLOCK(break_hook_lock); +static DEFINE_SPINLOCK(break_hook_lock); void register_break_hook(struct break_hook *hook) { - write_lock(&break_hook_lock); - list_add(&hook->node, &break_hook); - write_unlock(&break_hook_lock); + spin_lock(&break_hook_lock); + list_add_rcu(&hook->node, &break_hook); + spin_unlock(&break_hook_lock); } void unregister_break_hook(struct break_hook *hook) { - write_lock(&break_hook_lock); - list_del(&hook->node); - write_unlock(&break_hook_lock); + spin_lock(&break_hook_lock); + list_del_rcu(&hook->node); + spin_unlock(&break_hook_lock); + synchronize_rcu(); } static int call_break_hook(struct pt_regs *regs, unsigned int esr) @@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) struct break_hook *hook; int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; - read_lock(&break_hook_lock); - list_for_each_entry(hook, &break_hook, node) + rcu_read_lock(); + list_for_each_entry_rcu(hook, &break_hook, node) if ((esr & hook->esr_mask) == hook->esr_val) fn = hook->fn; - read_unlock(&break_hook_lock); + rcu_read_unlock(); return fn ? fn(regs, esr) : DBG_HOOK_ERROR; } diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index e8ca6eaedd02..13671a9cf016 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -258,7 +258,8 @@ static bool __init efi_virtmap_init(void) */ if (!is_normal_ram(md)) prot = __pgprot(PROT_DEVICE_nGnRE); - else if (md->type == EFI_RUNTIME_SERVICES_CODE) + else if (md->type == EFI_RUNTIME_SERVICES_CODE || + !PAGE_ALIGNED(md->phys_addr)) prot = PAGE_KERNEL_EXEC; else prot = PAGE_KERNEL; diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index 08cafc518b9a..0f03a8fe2314 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S @@ -178,6 +178,24 @@ ENTRY(ftrace_stub) ENDPROC(ftrace_stub) #ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* save return value regs*/ + .macro save_return_regs + sub sp, sp, #64 + stp x0, x1, [sp] + stp x2, x3, [sp, #16] + stp x4, x5, [sp, #32] + stp x6, x7, [sp, #48] + .endm + + /* restore return value regs*/ + .macro restore_return_regs + ldp x0, x1, [sp] + ldp x2, x3, [sp, #16] + ldp x4, x5, [sp, #32] + ldp x6, x7, [sp, #48] + add sp, sp, #64 + .endm + /* * void ftrace_graph_caller(void) * @@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller) * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled. */ ENTRY(return_to_handler) - str x0, [sp, #-16]! 
+ save_return_regs mov x0, x29 // parent's fp bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp); mov x30, x0 // restore the original return address - ldr x0, [sp], #16 + restore_return_regs ret END(return_to_handler) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index f341866aa810..c08b9ad6f429 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn) aarch64_insn_is_bcond(insn)); } -static DEFINE_SPINLOCK(patch_lock); +static DEFINE_RAW_SPINLOCK(patch_lock); static void __kprobes *patch_map(void *addr, int fixmap) { @@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn) unsigned long flags = 0; int ret; - spin_lock_irqsave(&patch_lock, flags); + raw_spin_lock_irqsave(&patch_lock, flags); waddr = patch_map(addr, FIX_TEXT_POKE0); ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE); patch_unmap(FIX_TEXT_POKE0); - spin_unlock_irqrestore(&patch_lock, flags); + raw_spin_unlock_irqrestore(&patch_lock, flags); return ret; } diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 6bab21f84a9f..232247945b1c 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -364,6 +364,8 @@ static void __init relocate_initrd(void) to_free = ram_end - orig_start; size = orig_end - orig_start; + if (!size) + return; /* initrd needs to be relocated completely inside linear mapping */ new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn), diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index aba9ead1384c..9fadf6d7039b 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -287,6 +287,7 @@ retry: * starvation. */ mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; + mm_flags |= FAULT_FLAG_TRIED; goto retry; } } diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index f61f2dd67464..241b9b9729d8 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -20,4 +20,5 @@ generic-y += sections.h generic-y += topology.h generic-y += trace_clock.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index 61cd1e786a14..91d49c0a3118 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild @@ -46,4 +46,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index f17c4dc6050c..945544ec603e 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -59,4 +59,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index b7f68192d15b..1778805f6380 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -43,4 +43,5 @@ generic-y += topology.h generic-y += trace_clock.h generic-y += types.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index 8e47b832cc76..1fa084cf1a43 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild @@ -7,3 +7,4 @@ generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h +generic-y += word-at-a-time.h 
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index 70e6ae1e7006..373cb23301e3 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -73,4 +73,5 @@ generic-y += uaccess.h generic-y += ucontext.h generic-y += unaligned.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index daee37bd0999..db8ddabc6bd2 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -58,4 +58,5 @@ generic-y += types.h generic-y += ucontext.h generic-y += unaligned.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild index 9de3ba12f6b9..502a91d8dbbd 100644 --- a/arch/ia64/include/asm/Kbuild +++ b/arch/ia64/include/asm/Kbuild @@ -8,3 +8,4 @@ generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h generic-y += vtime.h +generic-y += word-at-a-time.h diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index e0eb704ca1fa..fd104bd221ce 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild @@ -9,3 +9,4 @@ generic-y += module.h generic-y += preempt.h generic-y += sections.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 0b6b40d37b95..5b4ec541ba7c 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -57,7 +58,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -67,10 +67,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -179,6 +181,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -206,6 +209,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -370,6 +375,7 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -537,6 +543,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index eeb3a8991fc4..6e5198e2c124 100644 --- a/arch/m68k/configs/apollo_defconfig +++ 
b/arch/m68k/configs/apollo_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -344,6 +349,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -495,6 +501,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 3a7006654ce9..f75600b0ca23 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -355,6 +360,7 @@ CONFIG_NE2000=y # CONFIG_NET_VENDOR_SEEQ is not set CONFIG_SMC91X=y # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # 
CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -517,6 +523,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 0586b323a673..a42d91c389a6 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -343,6 +348,7 @@ CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index ad1dbce07aa4..77f4a11083e9 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -55,7 +56,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -65,10 +65,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -177,6 +179,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -204,6 +207,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m 
+CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -269,6 +273,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -345,6 +350,7 @@ CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -497,6 +503,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index b44acacaecf4..5a329f77329b 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -54,7 +55,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -64,10 +64,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -176,6 +178,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -203,6 +206,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -271,6 +275,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -364,6 +369,7 @@ CONFIG_MAC8390=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -519,6 +525,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 8afca3753db1..83c80d2030ec 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -64,7 +65,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -74,10 +74,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m 
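/*
 * Illustrative sketch, not part of the patch: the defconfig hunks above
 * enable CONFIG_USERFAULTFD=y, matching the __NR_userfaultfd entries wired
 * up later in this diff.  A minimal user-space handshake with that
 * interface could look like the following; it assumes a libc without a
 * userfaultfd() wrapper, so the raw syscall is used.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
	/* Open a userfaultfd and negotiate the API version. */
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API };

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api) < 0) {
		perror("userfaultfd");
		return 1;
	}
	/* api.ioctls now advertises the supported UFFDIO_* operations. */
	printf("userfaultfd ready, ioctl mask 0x%llx\n",
	       (unsigned long long)api.ioctls);
	return 0;
}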
CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -186,6 +188,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -213,6 +216,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -281,6 +285,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -410,6 +415,7 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SEEQ is not set CONFIG_SMC91X=y # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m @@ -599,6 +605,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index ef00875994d9..6cb42c3bf5a2 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -52,7 +53,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -62,10 +62,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -174,6 +176,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -201,6 +204,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -266,6 +270,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -343,6 +348,7 @@ CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 387c2bd90ff1..c7508c30330c 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is 
not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -343,6 +348,7 @@ CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -488,6 +494,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 35355c1bc714..64b71664a303 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -53,7 +54,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -63,10 +63,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -175,6 +177,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -202,6 +205,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -267,6 +271,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -354,6 +359,7 @@ CONFIG_NE2000=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m @@ -510,6 +516,7 @@ 
CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 8442d267b877..9a4cab78a2ea 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -264,6 +268,7 @@ CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -341,6 +346,7 @@ CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m CONFIG_CRYPTO_MANAGER=y diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 0e1b542e1555..1a2eaac13dbd 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -10,6 +10,7 @@ CONFIG_LOG_BUF_SHIFT=16 # CONFIG_PID_NS is not set # CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y +CONFIG_USERFAULTFD=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y @@ -50,7 +51,6 @@ CONFIG_NET_IPGRE_DEMUX=m CONFIG_NET_IPGRE=m CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_GENEVE_CORE=m CONFIG_INET_AH=m CONFIG_INET_ESP=m CONFIG_INET_IPCOMP=m @@ -60,10 +60,12 @@ CONFIG_INET_XFRM_MODE_BEET=m # CONFIG_INET_LRO is not set CONFIG_INET_DIAG=m CONFIG_INET_UDP_DIAG=m +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m CONFIG_IPV6_GRE=m CONFIG_NETFILTER=y @@ -172,6 +174,7 @@ CONFIG_IP_SET_HASH_NETIFACE=m CONFIG_IP_SET_LIST_SET=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_NFT_CHAIN_ROUTE_IPV4=m +CONFIG_NFT_DUP_IPV4=m CONFIG_NF_TABLES_ARP=m CONFIG_NF_LOG_ARP=m CONFIG_NFT_CHAIN_NAT_IPV4=m @@ -199,6 +202,7 @@ CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m CONFIG_NFT_CHAIN_ROUTE_IPV6=m +CONFIG_NFT_DUP_IPV6=m CONFIG_NFT_CHAIN_NAT_IPV6=m CONFIG_NFT_MASQ_IPV6=m CONFIG_NFT_REDIR_IPV6=m @@ -264,6 +268,7 @@ 
CONFIG_NETLINK_DIAG=m CONFIG_MPLS=y CONFIG_NET_MPLS_GSO=m CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m # CONFIG_WIRELESS is not set # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -341,6 +346,7 @@ CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_SAMSUNG is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -489,6 +495,7 @@ CONFIG_TEST_USER_COPY=m CONFIG_TEST_BPF=m CONFIG_TEST_FIRMWARE=m CONFIG_TEST_UDELAY=m +CONFIG_TEST_STATIC_KEYS=m CONFIG_EARLY_PRINTK=y CONFIG_ENCRYPTED_KEYS=m CONFIG_CRYPTO_RSA=m diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h index 5a822bb790f7..066e74f666ae 100644 --- a/arch/m68k/include/asm/linkage.h +++ b/arch/m68k/include/asm/linkage.h @@ -4,4 +4,34 @@ #define __ALIGN .align 4 #define __ALIGN_STR ".align 4" +/* + * Make sure the compiler doesn't do anything stupid with the + * arguments on the stack - they are owned by the *caller*, not + * the callee. This just fools gcc into not spilling into them, + * and keeps it from doing tailcall recursion and/or using the + * stack slots for temporaries, since they are live and "used" + * all the way to the end of the function. + */ +#define asmlinkage_protect(n, ret, args...) \ + __asmlinkage_protect##n(ret, ##args) +#define __asmlinkage_protect_n(ret, args...) \ + __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args) +#define __asmlinkage_protect0(ret) \ + __asmlinkage_protect_n(ret) +#define __asmlinkage_protect1(ret, arg1) \ + __asmlinkage_protect_n(ret, "m" (arg1)) +#define __asmlinkage_protect2(ret, arg1, arg2) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2)) +#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3)) +#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ + "m" (arg4)) +#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ + "m" (arg4), "m" (arg5)) +#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \ + __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \ + "m" (arg4), "m" (arg5), "m" (arg6)) + #endif diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 244e0dbe45db..0793a7f17417 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include -#define NR_syscalls 356 +#define NR_syscalls 375 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 61fb6cb9d2ae..5e6fae6c275f 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -361,5 +361,24 @@ #define __NR_memfd_create 353 #define __NR_bpf 354 #define __NR_execveat 355 +#define __NR_socket 356 +#define __NR_socketpair 357 +#define __NR_bind 358 +#define __NR_connect 359 +#define __NR_listen 360 +#define __NR_accept4 361 +#define __NR_getsockopt 362 +#define __NR_setsockopt 363 +#define __NR_getsockname 364 +#define __NR_getpeername 365 +#define __NR_sendto 366 +#define __NR_sendmsg 367 +#define __NR_recvfrom 368 +#define __NR_recvmsg 369 +#define __NR_shutdown 370 +#define __NR_recvmmsg 371 +#define __NR_sendmmsg 372 +#define __NR_userfaultfd 373 +#define __NR_membarrier 374 #endif /* 
_UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index a0ec4303f2c8..5dd0e80042f5 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -376,4 +376,22 @@ ENTRY(sys_call_table) .long sys_memfd_create .long sys_bpf .long sys_execveat /* 355 */ - + .long sys_socket + .long sys_socketpair + .long sys_bind + .long sys_connect + .long sys_listen /* 360 */ + .long sys_accept4 + .long sys_getsockopt + .long sys_setsockopt + .long sys_getsockname + .long sys_getpeername /* 365 */ + .long sys_sendto + .long sys_sendmsg + .long sys_recvfrom + .long sys_recvmsg + .long sys_shutdown /* 370 */ + .long sys_recvmmsg + .long sys_sendmmsg + .long sys_userfaultfd + .long sys_membarrier diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index df31353fd200..29acb89daaaa 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild @@ -54,4 +54,5 @@ generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 2f222f355c4b..b0ae88c9fed9 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -10,3 +10,4 @@ generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += syscalls.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c index 15ecb4831e12..eeb3953ed8ac 100644 --- a/arch/mips/ath79/irq.c +++ b/arch/mips/ath79/irq.c @@ -293,8 +293,26 @@ static int __init ath79_misc_intc_of_init( return 0; } -IRQCHIP_DECLARE(ath79_misc_intc, "qca,ar7100-misc-intc", - ath79_misc_intc_of_init); + +static int __init ar7100_misc_intc_of_init( + struct device_node *node, struct device_node *parent) +{ + ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; + return ath79_misc_intc_of_init(node, parent); +} + +IRQCHIP_DECLARE(ar7100_misc_intc, "qca,ar7100-misc-intc", + ar7100_misc_intc_of_init); + +static int __init ar7240_misc_intc_of_init( + struct device_node *node, struct device_node *parent) +{ + ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; + return ath79_misc_intc_of_init(node, parent); +} + +IRQCHIP_DECLARE(ar7240_misc_intc, "qca,ar7240-misc-intc", + ar7240_misc_intc_of_init); static int __init ar79_cpu_intc_of_init( struct device_node *node, struct device_node *parent) diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 89a628455bc2..bd634259eab9 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -933,7 +933,7 @@ void __init plat_mem_setup(void) while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX) && (total < MAX_MEMORY)) { memory = cvmx_bootmem_phy_alloc(mem_alloc_size, - __pa_symbol(&__init_end), -1, + __pa_symbol(&_end), -1, 0x100000, CVMX_BOOTMEM_FLAG_NO_LOCKING); if (memory >= 0) { diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 40ec4ca3f946..c7fe4d01e79c 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -17,4 +17,5 @@ generic-y += segment.h generic-y += serial.h generic-y += trace_clock.h generic-y += user.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 9801ac982655..fe67f12ac239 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -20,6 +20,9 @@ 
#ifndef cpu_has_tlb #define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB) #endif +#ifndef cpu_has_ftlb +#define cpu_has_ftlb (cpu_data[0].options & MIPS_CPU_FTLB) +#endif #ifndef cpu_has_tlbinv #define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV) #endif diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index cd89e9855775..82ad15f11049 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -385,6 +385,7 @@ enum cpu_type_enum { #define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */ #define MIPS_CPU_BP_GHIST 0x8000000000ull /* R12K+ Branch Prediction Global History */ #define MIPS_CPU_SP 0x10000000000ull /* Small (1KB) page support */ +#define MIPS_CPU_FTLB 0x20000000000ull /* CPU has Fixed-page-size TLB */ /* * CPU ASE encodings diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 9e777cd42b67..d10fd80dbb7e 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -256,6 +256,7 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si */ #define ioremap_nocache(offset, size) \ __ioremap_mode((offset), (size), _CACHE_UNCACHED) +#define ioremap_uc ioremap_nocache /* * ioremap_cachable - map bus memory into CPU space diff --git a/arch/mips/include/asm/maar.h b/arch/mips/include/asm/maar.h index b02891f9caaf..21d9607c80d7 100644 --- a/arch/mips/include/asm/maar.h +++ b/arch/mips/include/asm/maar.h @@ -65,6 +65,15 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower, back_to_back_c0_hazard(); } +/** + * maar_init() - initialise MAARs + * + * Performs initialisation of MAARs for the current CPU, making use of the + * platforms implementation of platform_maar_init where necessary and + * duplicating the setup it provides on secondary CPUs. + */ +extern void maar_init(void); + /** * struct maar_config - MAAR configuration data * @lower: The lowest address that the MAAR pair will affect. Must be diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index d75b75e78ebb..1f1927ab4269 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -194,6 +194,7 @@ BUILD_CM_RW(reg3_mask, MIPS_CM_GCB_OFS + 0xc8) BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0) BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0) BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130) +BUILD_CM_RW(sys_config2, MIPS_CM_GCB_OFS + 0x150) /* Core Local & Core Other register accessor functions */ BUILD_CM_Cx_RW(reset_release, 0x00) @@ -316,6 +317,10 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80) #define CM_GCR_L2_CONFIG_ASSOC_SHF 0 #define CM_GCR_L2_CONFIG_ASSOC_MSK (_ULCAST_(0xff) << 0) +/* GCR_SYS_CONFIG2 register fields */ +#define CM_GCR_SYS_CONFIG2_MAXVPW_SHF 0 +#define CM_GCR_SYS_CONFIG2_MAXVPW_MSK (_ULCAST_(0xf) << 0) + /* GCR_Cx_COHERENCE register fields */ #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0 #define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0) @@ -405,4 +410,38 @@ static inline int mips_cm_revision(void) return read_gcr_rev(); } +/** + * mips_cm_max_vp_width() - return the width in bits of VP indices + * + * Return: the width, in bits, of VP indices in fields that combine core & VP + * indices. 
+ */ +static inline unsigned int mips_cm_max_vp_width(void) +{ + extern int smp_num_siblings; + + if (mips_cm_revision() >= CM_REV_CM3) + return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK; + + return smp_num_siblings; +} + +/** + * mips_cm_vp_id() - calculate the hardware VP ID for a CPU + * @cpu: the CPU whose VP ID to calculate + * + * Hardware such as the GIC uses identifiers for VPs which may not match the + * CPU numbers used by Linux. This function calculates the hardware VP + * identifier corresponding to a given CPU. + * + * Return: the VP ID for the CPU. + */ +static inline unsigned int mips_cm_vp_id(unsigned int cpu) +{ + unsigned int core = cpu_data[cpu].core; + unsigned int vp = cpu_vpe_id(&cpu_data[cpu]); + + return (core * mips_cm_max_vp_width()) + vp; +} + #endif /* __MIPS_ASM_MIPS_CM_H__ */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index d3cd8eac81e3..c64781cf649f 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -487,6 +487,8 @@ /* Bits specific to the MIPS32/64 PRA. */ #define MIPS_CONF_MT (_ULCAST_(7) << 7) +#define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7) +#define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7) #define MIPS_CONF_AR (_ULCAST_(7) << 10) #define MIPS_CONF_AT (_ULCAST_(3) << 13) #define MIPS_CONF_M (_ULCAST_(1) << 31) diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h index c4ddc4f0d2dc..23cd9b118c9e 100644 --- a/arch/mips/include/uapi/asm/swab.h +++ b/arch/mips/include/uapi/asm/swab.h @@ -13,16 +13,15 @@ #define __SWAB_64_THRU_32__ -#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \ - defined(_MIPS_ARCH_LOONGSON3A) +#if !defined(__mips16) && \ + ((defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \ + defined(_MIPS_ARCH_LOONGSON3A)) -static inline __attribute__((nomips16)) __attribute_const__ - __u16 __arch_swab16(__u16 x) +static inline __attribute_const__ __u16 __arch_swab16(__u16 x) { __asm__( " .set push \n" " .set arch=mips32r2 \n" - " .set nomips16 \n" " wsbh %0, %1 \n" " .set pop \n" : "=r" (x) @@ -32,13 +31,11 @@ static inline __attribute__((nomips16)) __attribute_const__ } #define __arch_swab16 __arch_swab16 -static inline __attribute__((nomips16)) __attribute_const__ - __u32 __arch_swab32(__u32 x) +static inline __attribute_const__ __u32 __arch_swab32(__u32 x) { __asm__( " .set push \n" " .set arch=mips32r2 \n" - " .set nomips16 \n" " wsbh %0, %1 \n" " rotr %0, %0, 16 \n" " .set pop \n" @@ -54,13 +51,11 @@ static inline __attribute__((nomips16)) __attribute_const__ * 64-bit kernel on r2 CPUs. 
*/ #ifdef __mips64 -static inline __attribute__((nomips16)) __attribute_const__ - __u64 __arch_swab64(__u64 x) +static inline __attribute_const__ __u64 __arch_swab64(__u64 x) { __asm__( " .set push \n" " .set arch=mips64r2 \n" - " .set nomips16 \n" " dsbh %0, %1 \n" " dshd %0, %0 \n" " .set pop \n" @@ -71,5 +66,5 @@ static inline __attribute__((nomips16)) __attribute_const__ } #define __arch_swab64 __arch_swab64 #endif /* __mips64 */ -#endif /* MIPS R2 or newer or Loongson 3A */ +#endif /* (not __mips16) and (MIPS R2 or newer or Loongson 3A) */ #endif /* _ASM_SWAB_H */ diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index c03088f9f514..cfabadb135d9 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -377,16 +377,18 @@ #define __NR_memfd_create (__NR_Linux + 354) #define __NR_bpf (__NR_Linux + 355) #define __NR_execveat (__NR_Linux + 356) +#define __NR_userfaultfd (__NR_Linux + 357) +#define __NR_membarrier (__NR_Linux + 358) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 356 +#define __NR_Linux_syscalls 358 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 356 +#define __NR_O32_Linux_syscalls 358 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -711,16 +713,18 @@ #define __NR_memfd_create (__NR_Linux + 314) #define __NR_bpf (__NR_Linux + 315) #define __NR_execveat (__NR_Linux + 316) +#define __NR_userfaultfd (__NR_Linux + 317) +#define __NR_membarrier (__NR_Linux + 318) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 316 +#define __NR_Linux_syscalls 318 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 316 +#define __NR_64_Linux_syscalls 318 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1049,15 +1053,17 @@ #define __NR_memfd_create (__NR_Linux + 318) #define __NR_bpf (__NR_Linux + 319) #define __NR_execveat (__NR_Linux + 320) +#define __NR_userfaultfd (__NR_Linux + 321) +#define __NR_membarrier (__NR_Linux + 322) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 320 +#define __NR_Linux_syscalls 322 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 320 +#define __NR_N32_Linux_syscalls 322 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c index 4e62bf85d0b0..459cb017306c 100644 --- a/arch/mips/jz4740/board-qi_lb60.c +++ b/arch/mips/jz4740/board-qi_lb60.c @@ -26,6 +26,7 @@ #include #include +#include #include #include #include diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c index a74e181058b0..8c6d76c9b2d6 100644 --- a/arch/mips/jz4740/gpio.c +++ b/arch/mips/jz4740/gpio.c @@ -28,6 +28,7 @@ #include #include +#include #define JZ4740_GPIO_BASE_A (32*0) #define JZ4740_GPIO_BASE_B (32*1) diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 9f71c06aebf6..209ded16806b 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -39,6 +39,7 @@ mfc0 \dest, CP0_CONFIG, 3 andi \dest, \dest, MIPS_CONF3_MT beqz \dest, \nomt + nop .endm .section .text.cps-vec @@ -223,10 +224,9 @@ LEAF(excep_ejtag) END(excep_ejtag) LEAF(mips_cps_core_init) -#ifdef CONFIG_MIPS_MT +#ifdef CONFIG_MIPS_MT_SMP /* Check that the core implements the MT ASE */ has_mt t0, 3f - nop .set push .set mips64r2 @@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes) PTR_ADDU t0, t0, t1 /* Calculate this VPEs ID. 
If the core doesn't support MT use 0 */ + li t9, 0 +#ifdef CONFIG_MIPS_MT_SMP has_mt ta2, 1f - li t9, 0 /* Find the number of VPEs present in the core */ mfc0 t1, CP0_MVPCONF0 @@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes) /* Retrieve the VPE ID from EBase.CPUNum */ mfc0 t9, $15, 1 and t9, t9, t1 +#endif 1: /* Calculate a pointer to this VPEs struct vpe_boot_config */ li t1, VPEBOOTCFG_SIZE @@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes) PTR_L ta3, COREBOOTCFG_VPECONFIG(t0) PTR_ADDU v0, v0, ta3 -#ifdef CONFIG_MIPS_MT +#ifdef CONFIG_MIPS_MT_SMP /* If the core doesn't support MT then return */ bnez ta2, 1f @@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes) 2: .set pop -#endif /* CONFIG_MIPS_MT */ +#endif /* CONFIG_MIPS_MT_SMP */ /* Return */ jr ra diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 571a8e6ea5bd..09a51d091941 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -410,16 +410,18 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable) static inline unsigned int decode_config0(struct cpuinfo_mips *c) { unsigned int config0; - int isa; + int isa, mt; config0 = read_c0_config(); /* * Look for Standard TLB or Dual VTLB and FTLB */ - if ((((config0 & MIPS_CONF_MT) >> 7) == 1) || - (((config0 & MIPS_CONF_MT) >> 7) == 4)) + mt = config0 & MIPS_CONF_MT; + if (mt == MIPS_CONF_MT_TLB) c->options |= MIPS_CPU_TLB; + else if (mt == MIPS_CONF_MT_FTLB) + c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB; isa = (config0 & MIPS_CONF_AT) >> 13; switch (isa) { @@ -559,15 +561,18 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c) if (cpu_has_tlb) { if (((config4 & MIPS_CONF4_IE) >> 29) == 2) c->options |= MIPS_CPU_TLBINV; + /* - * This is a bit ugly. R6 has dropped that field from - * config4 and the only valid configuration is VTLB+FTLB so - * set a good value for mmuextdef for that case. + * R6 has dropped the MMUExtDef field from config4. + * On R6 the fields always describe the FTLB, and only if it is + * present according to Config.MT. 
*/ - if (cpu_has_mips_r6) + if (!cpu_has_mips_r6) + mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; + else if (cpu_has_ftlb) mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT; else - mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; + mmuextdef = 0; switch (mmuextdef) { case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT: diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 423ae83af1fb..3375745b9198 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -18,7 +18,7 @@ .set pop /* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti) */ .align 7 LEAF(resume) @@ -28,30 +28,6 @@ cpu_save_nonscratch a0 LONG_S ra, THREAD_REG31(a0) - /* - * check if we need to save FPU registers - */ - .set push - .set noreorder - beqz a3, 1f - PTR_L t3, TASK_THREAD_INFO(a0) - .set pop - - /* - * clear saved user stack CU1 bit - */ - LONG_L t0, ST_OFF(t3) - li t1, ~ST0_CU1 - and t0, t0, t1 - LONG_S t0, ST_OFF(t3) - - .set push - .set arch=mips64r2 - fpu_save_double a0 t0 t1 # c0_status passed in t0 - # clobbers t1 - .set pop -1: - #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ dmfc0 t0, $11,7 /* CvmMemCtl */ diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S index 5087a4b72e6b..ac27ef7d4d0e 100644 --- a/arch/mips/kernel/r2300_switch.S +++ b/arch/mips/kernel/r2300_switch.S @@ -30,19 +30,9 @@ */ #define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) -/* - * FPU context is saved iff the process has used it's FPU in the current - * time slice as indicated by TIF_USEDFPU. In any case, the CU1 bit for user - * space STATUS register should be 0, so that a process *always* starts its - * userland with FPU disabled after each context switch. - * - * FPU will be enabled as soon as the process accesses FPU again, through - * do_cpu() trap. 
- */ - /* * task_struct *resume(task_struct *prev, task_struct *next, - * struct thread_info *next_ti, int usedfpu) + * struct thread_info *next_ti) */ LEAF(resume) mfc0 t1, CP0_STATUS @@ -50,22 +40,6 @@ LEAF(resume) cpu_save_nonscratch a0 sw ra, THREAD_REG31(a0) - beqz a3, 1f - - PTR_L t3, TASK_THREAD_INFO(a0) - - /* - * clear saved user stack CU1 bit - */ - lw t0, ST_OFF(t3) - li t1, ~ST0_CU1 - and t0, t0, t1 - sw t0, ST_OFF(t3) - - fpu_save_single a0, t0 # clobbers t0 - -1: - #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) PTR_LA t8, __stack_chk_guard LONG_L t9, TASK_STACK_CANARY(a1) diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 4cc13508d967..65a74e4f0f45 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -36,16 +36,8 @@ NESTED(handle_sys, PT_SIZE, sp) lw t1, PT_EPC(sp) # skip syscall on return subu v0, v0, __NR_O32_Linux # check syscall number - sltiu t0, v0, __NR_O32_Linux_syscalls + 1 addiu t1, 4 # skip to next instruction sw t1, PT_EPC(sp) - beqz t0, illegal_syscall - - sll t0, v0, 2 - la t1, sys_call_table - addu t1, t0 - lw t2, (t1) # syscall routine - beqz t2, illegal_syscall sw a3, PT_R26(sp) # save a3 for syscall restarting @@ -96,6 +88,16 @@ loads_done: li t1, _TIF_WORK_SYSCALL_ENTRY and t0, t1 bnez t0, syscall_trace_entry # -> yes +syscall_common: + sltiu t0, v0, __NR_O32_Linux_syscalls + 1 + beqz t0, illegal_syscall + + sll t0, v0, 2 + la t1, sys_call_table + addu t1, t0 + lw t2, (t1) # syscall routine + + beqz t2, illegal_syscall jalr t2 # Do The Real Thing (TM) @@ -116,7 +118,7 @@ o32_syscall_exit: syscall_trace_entry: SAVE_STATIC - move s0, t2 + move s0, v0 move a0, sp /* @@ -129,27 +131,18 @@ syscall_trace_entry: 1: jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall + + move v0, s0 # restore syscall - move t0, s0 RESTORE_STATIC lw a0, PT_R4(sp) # Restore argument registers lw a1, PT_R5(sp) lw a2, PT_R6(sp) lw a3, PT_R7(sp) - jalr t0 - - li t0, -EMAXERRNO - 1 # error? 
- sltu t0, t0, v0 - sw t0, PT_R7(sp) # set error flag - beqz t0, 1f - - lw t1, PT_R2(sp) # syscall number - negu v0 # error - sw t1, PT_R0(sp) # save it for syscall restarting -1: sw v0, PT_R2(sp) # result + j syscall_common -2: j syscall_exit +1: j syscall_exit /* ------------------------------------------------------------------------ */ @@ -599,3 +592,5 @@ EXPORT(sys_call_table) PTR sys_memfd_create PTR sys_bpf /* 4355 */ PTR sys_execveat + PTR sys_userfaultfd + PTR sys_membarrier diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index a6f6b762c47a..e732981cf99f 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -39,18 +39,11 @@ NESTED(handle_sys64, PT_SIZE, sp) .set at #endif - dsubu t0, v0, __NR_64_Linux # check syscall number - sltiu t0, t0, __NR_64_Linux_syscalls + 1 #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) ld t1, PT_EPC(sp) # skip syscall on return daddiu t1, 4 # skip to next instruction sd t1, PT_EPC(sp) #endif - beqz t0, illegal_syscall - - dsll t0, v0, 3 # offset into table - ld t2, (sys_call_table - (__NR_64_Linux * 8))(t0) - # syscall routine sd a3, PT_R26(sp) # save a3 for syscall restarting @@ -59,6 +52,17 @@ NESTED(handle_sys64, PT_SIZE, sp) and t0, t1, t0 bnez t0, syscall_trace_entry +syscall_common: + dsubu t2, v0, __NR_64_Linux + sltiu t0, t2, __NR_64_Linux_syscalls + 1 + beqz t0, illegal_syscall + + dsll t0, t2, 3 # offset into table + dla t2, sys_call_table + daddu t0, t2, t0 + ld t2, (t0) # syscall routine + beqz t2, illegal_syscall + jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -78,14 +82,14 @@ n64_syscall_exit: syscall_trace_entry: SAVE_STATIC - move s0, t2 + move s0, v0 move a0, sp move a1, v0 jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall - move t0, s0 + move v0, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) @@ -93,19 +97,9 @@ syscall_trace_entry: ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) - jalr t0 - - li t0, -EMAXERRNO - 1 # error? - sltu t0, t0, v0 - sd t0, PT_R7(sp) # set error flag - beqz t0, 1f - - ld t1, PT_R2(sp) # syscall number - dnegu v0 # error - sd t1, PT_R0(sp) # save it for syscall restarting -1: sd v0, PT_R2(sp) # result + j syscall_common -2: j syscall_exit +1: j syscall_exit illegal_syscall: /* This also isn't a 64-bit syscall, throw an error. */ @@ -436,4 +430,6 @@ EXPORT(sys_call_table) PTR sys_memfd_create PTR sys_bpf /* 5315 */ PTR sys_execveat + PTR sys_userfaultfd + PTR sys_membarrier .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 4b2010654c46..c79484397584 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -52,6 +52,7 @@ NESTED(handle_sysn32, PT_SIZE, sp) and t0, t1, t0 bnez t0, n32_syscall_trace_entry +syscall_common: jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -75,9 +76,9 @@ n32_syscall_trace_entry: move a1, v0 jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall - move t0, s0 + move t2, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) @@ -85,19 +86,9 @@ n32_syscall_trace_entry: ld a3, PT_R7(sp) ld a4, PT_R8(sp) ld a5, PT_R9(sp) - jalr t0 + j syscall_common - li t0, -EMAXERRNO - 1 # error? 
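/*
 * Sketch of the convention the deleted post-syscall blocks above duplicated
 * in the trace path, and which now lives only in the common syscall return
 * code: v0 (PT_R2) holds either the result or the positive errno, and
 * a3 (PT_R7) is set when the value is in the error range.  A user-space
 * wrapper sees roughly the following; the helper name is illustrative,
 * not taken from the patch.
 */
static inline long mips_syscall_return(long v0, long a3)
{
	if (a3) {
		/* errno = v0;  kernel flagged an error */
		return -1;
	}
	return v0;	/* plain result */
}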
- sltu t0, t0, v0 - sd t0, PT_R7(sp) # set error flag - beqz t0, 1f - - ld t1, PT_R2(sp) # syscall number - dnegu v0 # error - sd t1, PT_R0(sp) # save it for syscall restarting -1: sd v0, PT_R2(sp) # result - -2: j syscall_exit +1: j syscall_exit not_n32_scall: /* This is not an n32 compatibility syscall, pass it on to @@ -429,4 +420,6 @@ EXPORT(sysn32_call_table) PTR sys_memfd_create PTR sys_bpf PTR compat_sys_execveat /* 6320 */ + PTR sys_userfaultfd + PTR sys_membarrier .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index f543ff4feef9..6369cfd390c6 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -87,6 +87,7 @@ loads_done: and t0, t1, t0 bnez t0, trace_a_syscall +syscall_common: jalr t2 # Do The Real Thing (TM) li t0, -EMAXERRNO - 1 # error? @@ -130,9 +131,9 @@ trace_a_syscall: 1: jal syscall_trace_enter - bltz v0, 2f # seccomp failed? Skip syscall + bltz v0, 1f # seccomp failed? Skip syscall - move t0, s0 + move t2, s0 RESTORE_STATIC ld a0, PT_R4(sp) # Restore argument registers ld a1, PT_R5(sp) @@ -142,19 +143,9 @@ trace_a_syscall: ld a5, PT_R9(sp) ld a6, PT_R10(sp) ld a7, PT_R11(sp) # For indirect syscalls - jalr t0 + j syscall_common - li t0, -EMAXERRNO - 1 # error? - sltu t0, t0, v0 - sd t0, PT_R7(sp) # set error flag - beqz t0, 1f - - ld t1, PT_R2(sp) # syscall number - dnegu v0 # error - sd t1, PT_R0(sp) # save it for syscall restarting -1: sd v0, PT_R2(sp) # result - -2: j syscall_exit +1: j syscall_exit /* ------------------------------------------------------------------------ */ @@ -584,4 +575,6 @@ EXPORT(sys32_call_table) PTR sys_memfd_create PTR sys_bpf /* 4355 */ PTR compat_sys_execveat + PTR sys_userfaultfd + PTR sys_membarrier .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 35b8316002f8..479515109e5b 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -338,7 +338,7 @@ static void __init bootmem_init(void) if (end <= reserved_end) continue; #ifdef CONFIG_BLK_DEV_INITRD - /* mapstart should be after initrd_end */ + /* Skip zones before initrd and initrd itself */ if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end))) continue; #endif @@ -371,6 +371,14 @@ static void __init bootmem_init(void) max_low_pfn = PFN_DOWN(HIGHMEM_START); } +#ifdef CONFIG_BLK_DEV_INITRD + /* + * mapstart should be after initrd_end + */ + if (initrd_end) + mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end))); +#endif + /* * Initialize the boot-time allocator with low memory only. 
*/ diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index a31896c33716..bd4385a8e6e8 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -42,6 +42,7 @@ #include #include #include +#include cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ @@ -157,6 +158,7 @@ asmlinkage void start_secondary(void) mips_clockevent_init(); mp_ops->init_secondary(); cpu_report(); + maar_init(); /* * XXX parity protection should be folded in here when it's converted diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c index f6c44dd332e2..d6d07ad56180 100644 --- a/arch/mips/loongson64/common/env.c +++ b/arch/mips/loongson64/common/env.c @@ -64,6 +64,9 @@ void __init prom_init_env(void) } if (memsize == 0) memsize = 256; + + loongson_sysconf.nr_uarts = 1; + pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize); #else struct boot_params *boot_p; diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c index a914dc1cb6d1..d8117be729a2 100644 --- a/arch/mips/mm/dma-default.c +++ b/arch/mips/mm/dma-default.c @@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp) else #endif #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32) - if (dev->coherent_dma_mask < DMA_BIT_MASK(64)) + if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8)) dma_flag = __GFP_DMA; else #endif diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 66d0f49c5bec..8770e619185e 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -44,6 +44,7 @@ #include #include #include +#include /* * We have up to 8 empty zeroed pages so we can map one of the right colour @@ -252,6 +253,119 @@ void __init fixrange_init(unsigned long start, unsigned long end, #endif } +unsigned __weak platform_maar_init(unsigned num_pairs) +{ + struct maar_config cfg[BOOT_MEM_MAP_MAX]; + unsigned i, num_configured, num_cfg = 0; + phys_addr_t skip; + + for (i = 0; i < boot_mem_map.nr_map; i++) { + switch (boot_mem_map.map[i].type) { + case BOOT_MEM_RAM: + case BOOT_MEM_INIT_RAM: + break; + default: + continue; + } + + skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); + + cfg[num_cfg].lower = boot_mem_map.map[i].addr; + cfg[num_cfg].lower += skip; + + cfg[num_cfg].upper = cfg[num_cfg].lower; + cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; + cfg[num_cfg].upper -= skip; + + cfg[num_cfg].attrs = MIPS_MAAR_S; + num_cfg++; + } + + num_configured = maar_config(cfg, num_cfg, num_pairs); + if (num_configured < num_cfg) + pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", + num_pairs, num_cfg); + + return num_configured; +} + +void maar_init(void) +{ + unsigned num_maars, used, i; + phys_addr_t lower, upper, attr; + static struct { + struct maar_config cfgs[3]; + unsigned used; + } recorded = { { { 0 } }, 0 }; + + if (!cpu_has_maar) + return; + + /* Detect the number of MAARs */ + write_c0_maari(~0); + back_to_back_c0_hazard(); + num_maars = read_c0_maari() + 1; + + /* MAARs should be in pairs */ + WARN_ON(num_maars % 2); + + /* Set MAARs using values we recorded already */ + if (recorded.used) { + used = maar_config(recorded.cfgs, recorded.used, num_maars / 2); + BUG_ON(used != recorded.used); + } else { + /* Configure the required MAARs */ + used = platform_maar_init(num_maars / 2); + } + + /* Disable any further MAARs */ + for (i = (used * 2); i < num_maars; i++) { + write_c0_maari(i); + back_to_back_c0_hazard(); + write_c0_maar(0); + back_to_back_c0_hazard(); + } + + if (recorded.used) + return; + + 
pr_info("MAAR configuration:\n"); + for (i = 0; i < num_maars; i += 2) { + write_c0_maari(i); + back_to_back_c0_hazard(); + upper = read_c0_maar(); + + write_c0_maari(i + 1); + back_to_back_c0_hazard(); + lower = read_c0_maar(); + + attr = lower & upper; + lower = (lower & MIPS_MAAR_ADDR) << 4; + upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff; + + pr_info(" [%d]: ", i / 2); + if (!(attr & MIPS_MAAR_V)) { + pr_cont("disabled\n"); + continue; + } + + pr_cont("%pa-%pa", &lower, &upper); + + if (attr & MIPS_MAAR_S) + pr_cont(" speculate"); + + pr_cont("\n"); + + /* Record the setup for use on secondary CPUs */ + if (used <= ARRAY_SIZE(recorded.cfgs)) { + recorded.cfgs[recorded.used].lower = lower; + recorded.cfgs[recorded.used].upper = upper; + recorded.cfgs[recorded.used].attrs = attr; + recorded.used++; + } + } +} + #ifndef CONFIG_NEED_MULTIPLE_NODES int page_is_ram(unsigned long pagenr) { @@ -334,69 +448,6 @@ static inline void mem_init_free_highmem(void) #endif } -unsigned __weak platform_maar_init(unsigned num_pairs) -{ - struct maar_config cfg[BOOT_MEM_MAP_MAX]; - unsigned i, num_configured, num_cfg = 0; - phys_addr_t skip; - - for (i = 0; i < boot_mem_map.nr_map; i++) { - switch (boot_mem_map.map[i].type) { - case BOOT_MEM_RAM: - case BOOT_MEM_INIT_RAM: - break; - default: - continue; - } - - skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); - - cfg[num_cfg].lower = boot_mem_map.map[i].addr; - cfg[num_cfg].lower += skip; - - cfg[num_cfg].upper = cfg[num_cfg].lower; - cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; - cfg[num_cfg].upper -= skip; - - cfg[num_cfg].attrs = MIPS_MAAR_S; - num_cfg++; - } - - num_configured = maar_config(cfg, num_cfg, num_pairs); - if (num_configured < num_cfg) - pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n", - num_pairs, num_cfg); - - return num_configured; -} - -static void maar_init(void) -{ - unsigned num_maars, used, i; - - if (!cpu_has_maar) - return; - - /* Detect the number of MAARs */ - write_c0_maari(~0); - back_to_back_c0_hazard(); - num_maars = read_c0_maari() + 1; - - /* MAARs should be in pairs */ - WARN_ON(num_maars % 2); - - /* Configure the required MAARs */ - used = platform_maar_init(num_maars / 2); - - /* Disable any further MAARs */ - for (i = (used * 2); i < num_maars; i++) { - write_c0_maari(i); - back_to_back_c0_hazard(); - write_c0_maar(0); - back_to_back_c0_hazard(); - } -} - void __init mem_init(void) { #ifdef CONFIG_HIGHMEM diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S index e92726099be0..5d2e0c8d29c0 100644 --- a/arch/mips/net/bpf_jit_asm.S +++ b/arch/mips/net/bpf_jit_asm.S @@ -57,15 +57,28 @@ LEAF(sk_load_word) is_offset_negative(word) - .globl sk_load_word_positive -sk_load_word_positive: +FEXPORT(sk_load_word_positive) is_offset_in_header(4, word) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset + .set reorder lw $r_A, 0(t1) + .set noreorder #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) wsbh t0, $r_A rotr $r_A, t0, 16 +# else + sll t0, $r_A, 24 + srl t1, $r_A, 24 + srl t2, $r_A, 8 + or t0, t0, t1 + andi t2, t2, 0xff00 + andi t1, $r_A, 0xff00 + or t0, t0, t2 + sll t1, t1, 8 + or $r_A, t0, t1 +# endif #endif jr $r_ra move $r_ret, zero @@ -73,15 +86,24 @@ sk_load_word_positive: LEAF(sk_load_half) is_offset_negative(half) - .globl sk_load_half_positive -sk_load_half_positive: +FEXPORT(sk_load_half_positive) is_offset_in_header(2, half) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset + .set 
reorder lh $r_A, 0(t1) + .set noreorder #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) wsbh t0, $r_A seh $r_A, t0 +# else + sll t0, $r_A, 24 + andi t1, $r_A, 0xff00 + sra t0, t0, 16 + srl t1, t1, 8 + or $r_A, t0, t1 +# endif #endif jr $r_ra move $r_ret, zero @@ -89,8 +111,7 @@ sk_load_half_positive: LEAF(sk_load_byte) is_offset_negative(byte) - .globl sk_load_byte_positive -sk_load_byte_positive: +FEXPORT(sk_load_byte_positive) is_offset_in_header(1, byte) /* Offset within header boundaries */ PTR_ADDU t1, $r_skb_data, offset @@ -148,23 +169,47 @@ sk_load_byte_positive: NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) bpf_slow_path_common(4) #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) wsbh t0, $r_s0 jr $r_ra rotr $r_A, t0, 16 -#endif +# else + sll t0, $r_s0, 24 + srl t1, $r_s0, 24 + srl t2, $r_s0, 8 + or t0, t0, t1 + andi t2, t2, 0xff00 + andi t1, $r_s0, 0xff00 + or t0, t0, t2 + sll t1, t1, 8 + jr $r_ra + or $r_A, t0, t1 +# endif +#else jr $r_ra - move $r_A, $r_s0 + move $r_A, $r_s0 +#endif END(bpf_slow_path_word) NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) bpf_slow_path_common(2) #ifdef CONFIG_CPU_LITTLE_ENDIAN +# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2) jr $r_ra wsbh $r_A, $r_s0 -#endif +# else + sll t0, $r_s0, 8 + andi t1, $r_s0, 0xff00 + andi t0, t0, 0xff00 + srl t1, t1, 8 + jr $r_ra + or $r_A, t0, t1 +# endif +#else jr $r_ra move $r_A, $r_s0 +#endif END(bpf_slow_path_half) diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 6edb9ee6128e..1c8dd0f5cd5d 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -9,3 +9,4 @@ generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += sections.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index 914864eb5a25..d63330e88379 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild @@ -61,4 +61,5 @@ generic-y += types.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index 5b3a903adae6..e4396a7d0f7c 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h @@ -40,6 +40,11 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct return (val + c->high_bits) & ~rhs; } +static inline unsigned long zero_bytemask(unsigned long mask) +{ + return ~1ul << __fls(mask); +} + #else #ifdef CONFIG_64BIT diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index d4788111c161..fac6ac9790fa 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile @@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING -KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks +KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) KBUILD_CFLAGS += $(call cc-option,-ffreestanding) diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 0c98f1508542..ed7da281df66 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -381,7 +381,7 @@ CONFIG_ISCSI_TCP=m CONFIG_SCSI_DEBUG=m 
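/*
 * C equivalents of the pre-MIPS32r2 fallbacks added to
 * arch/mips/net/bpf_jit_asm.S above, which replace the r2-only
 * wsbh/rotr/seh sequences with plain shifts and masks (the in-header
 * half-word load variant additionally sign-extends its result).
 * Sketch for reference only; the function names are illustrative.
 */
static inline unsigned int bswap32_by_hand(unsigned int x)
{
	return (x << 24) | (x >> 24) |
	       ((x >> 8) & 0x0000ff00) |	/* byte 2 -> byte 1 */
	       ((x & 0x0000ff00) << 8);		/* byte 1 -> byte 2 */
}

static inline unsigned short bswap16_by_hand(unsigned short x)
{
	return ((x << 8) & 0xff00) | ((x & 0xff00) >> 8);
}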
CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_DH=m +CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_HP_SW=m CONFIG_SCSI_DH_EMC=m diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index 82083e1fbdc4..9858b14cde1e 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m CONFIG_SCSI_DEBUG=m CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_DH=m +CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_HP_SW=m CONFIG_SCSI_DH_EMC=m diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index c05c9e0821e3..7f14f80717d4 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m CONFIG_SCSI_DEBUG=m CONFIG_ZFCP=y CONFIG_SCSI_VIRTIO=m -CONFIG_SCSI_DH=m +CONFIG_SCSI_DH=y CONFIG_SCSI_DH_RDAC=m CONFIG_SCSI_DH_HP_SW=m CONFIG_SCSI_DH_EMC=m diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild index 5ad26dd94d77..9043d2e1e2ae 100644 --- a/arch/s390/include/asm/Kbuild +++ b/arch/s390/include/asm/Kbuild @@ -6,3 +6,4 @@ generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h generic-y += preempt.h generic-y += trace_clock.h +generic-y += word-at-a-time.h diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h index 2a0efc63b9e5..dc19ee0c92aa 100644 --- a/arch/s390/include/asm/numa.h +++ b/arch/s390/include/asm/numa.h @@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn); int __node_distance(int a, int b); void numa_update_cpu_topology(void); -extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +extern cpumask_t node_to_cpumask_map[MAX_NUMNODES]; extern int numa_debug_enabled; #else diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 27ebde643933..94fc55fc72ce 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu) #define cpumask_of_node cpumask_of_node static inline const struct cpumask *cpumask_of_node(int node) { - return node_to_cpumask_map[node]; + return &node_to_cpumask_map[node]; } /* diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 48c9af7a7683..3aeeb1b562c0 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -176,6 +176,7 @@ int main(void) DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); + DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset)); DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 09b039d7983d..582fe44ab07c 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -733,6 +733,14 @@ ENTRY(psw_idle) stg %r3,__SF_EMPTY(%r15) larl %r1,.Lpsw_idle_lpsw+4 stg %r1,__SF_EMPTY+8(%r15) +#ifdef CONFIG_SMP + larl %r1,smp_cpu_mtid + llgf %r1,0(%r1) + ltgr %r1,%r1 + jz .Lpsw_idle_stcctm + .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15) +.Lpsw_idle_stcctm: +#endif STCK __CLOCK_IDLE_ENTER(%r2) stpt __TIMER_IDLE_ENTER(%r2) .Lpsw_idle_lpsw: @@ -1159,7 +1167,27 @@ cleanup_critical: jhe 1f mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) mvc 
__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) -1: # account system time going idle +1: # calculate idle cycles +#ifdef CONFIG_SMP + clg %r9,BASED(.Lcleanup_idle_insn) + jl 3f + larl %r1,smp_cpu_mtid + llgf %r1,0(%r1) + ltgr %r1,%r1 + jz 3f + .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15) + larl %r3,mt_cycles + ag %r3,__LC_PERCPU_OFFSET + la %r4,__SF_EMPTY+16(%r15) +2: lg %r0,0(%r3) + slg %r0,0(%r4) + alg %r0,64(%r4) + stg %r0,0(%r3) + la %r3,8(%r3) + la %r4,8(%r4) + brct %r1,2b +#endif +3: # account system time going idle lg %r9,__LC_STEAL_TIMER alg %r9,__CLOCK_IDLE_ENTER(%r2) slg %r9,__LC_LAST_UPDATE_CLOCK diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index c8653435c70d..dafc44f519c3 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -25,7 +25,7 @@ static DEFINE_SPINLOCK(virt_timer_lock); static atomic64_t virt_timer_current; static atomic64_t virt_timer_elapsed; -static DEFINE_PER_CPU(u64, mt_cycles[32]); +DEFINE_PER_CPU(u64, mt_cycles[8]); static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 }; static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 }; static DEFINE_PER_CPU(u64, mt_scaling_jiffies); @@ -60,6 +60,34 @@ static inline int virt_timer_forward(u64 elapsed) return elapsed >= atomic64_read(&virt_timer_current); } +static void update_mt_scaling(void) +{ + u64 cycles_new[8], *cycles_old; + u64 delta, fac, mult, div; + int i; + + stcctm5(smp_cpu_mtid + 1, cycles_new); + cycles_old = this_cpu_ptr(mt_cycles); + fac = 1; + mult = div = 0; + for (i = 0; i <= smp_cpu_mtid; i++) { + delta = cycles_new[i] - cycles_old[i]; + div += delta; + mult *= i + 1; + mult += delta * fac; + fac *= i + 1; + } + div *= fac; + if (div > 0) { + /* Update scaling factor */ + __this_cpu_write(mt_scaling_mult, mult); + __this_cpu_write(mt_scaling_div, div); + memcpy(cycles_old, cycles_new, + sizeof(u64) * (smp_cpu_mtid + 1)); + } + __this_cpu_write(mt_scaling_jiffies, jiffies_64); +} + /* * Update process times based on virtual cpu times stored by entry.S * to the lowcore fields user_timer, system_timer & steal_clock. 
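For illustration only (not from the patch): the update_mt_scaling() helper introduced above folds the per-thread cycle deltas into a single mult/div pair; worked through, mult/div is a delta-weighted average of 1/(i+1), e.g. (delta0 + delta1/2) / (delta0 + delta1) on SMT-2. A standalone sketch of the same accumulation with made-up counter values:

#include <stdio.h>

int main(void)
{
	/* hypothetical per-thread cycle counters for one core, SMT-2 */
	unsigned long long cycles_old[2] = { 1000, 1000 };
	unsigned long long cycles_new[2] = { 1800, 1200 };
	unsigned long long delta, fac = 1, mult = 0, div = 0;
	int mtid = 1;	/* extra threads per core, like smp_cpu_mtid */

	for (int i = 0; i <= mtid; i++) {
		delta = cycles_new[i] - cycles_old[i];
		div += delta;
		mult *= i + 1;
		mult += delta * fac;
		fac *= i + 1;
	}
	div *= fac;
	/* mult/div == (800 + 200/2) / (800 + 200) == 0.9 */
	printf("mult=%llu div=%llu\n", mult, div);	/* 1800 2000 */
	return 0;
}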
@@ -69,7 +97,6 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) struct thread_info *ti = task_thread_info(tsk); u64 timer, clock, user, system, steal; u64 user_scaled, system_scaled; - int i; timer = S390_lowcore.last_update_timer; clock = S390_lowcore.last_update_clock; @@ -85,34 +112,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; - /* Do MT utilization calculation */ + /* Update MT utilization calculation */ if (smp_cpu_mtid && - time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) { - u64 cycles_new[32], *cycles_old; - u64 delta, fac, mult, div; - - cycles_old = this_cpu_ptr(mt_cycles); - if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) { - fac = 1; - mult = div = 0; - for (i = 0; i <= smp_cpu_mtid; i++) { - delta = cycles_new[i] - cycles_old[i]; - div += delta; - mult *= i + 1; - mult += delta * fac; - fac *= i + 1; - } - div *= fac; - if (div > 0) { - /* Update scaling factor */ - __this_cpu_write(mt_scaling_mult, mult); - __this_cpu_write(mt_scaling_div, div); - memcpy(cycles_old, cycles_new, - sizeof(u64) * (smp_cpu_mtid + 1)); - } - } - __this_cpu_write(mt_scaling_jiffies, jiffies_64); - } + time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) + update_mt_scaling(); user = S390_lowcore.user_timer - ti->user_timer; S390_lowcore.steal_timer -= user; @@ -181,6 +184,11 @@ void vtime_account_irq_enter(struct task_struct *tsk) S390_lowcore.last_update_timer = get_vtimer(); S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; + /* Update MT utilization calculation */ + if (smp_cpu_mtid && + time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) + update_mt_scaling(); + system = S390_lowcore.system_timer - ti->system_timer; S390_lowcore.steal_timer -= system; ti->system_timer = S390_lowcore.system_timer; diff --git a/arch/s390/numa/mode_emu.c b/arch/s390/numa/mode_emu.c index 7de4e2f780d7..30b2698a28e2 100644 --- a/arch/s390/numa/mode_emu.c +++ b/arch/s390/numa/mode_emu.c @@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core) cpumask_copy(&top->thread_mask, &core->mask); cpumask_copy(&top->core_mask, &core_mc(core)->mask); cpumask_copy(&top->book_mask, &core_book(core)->mask); - cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]); + cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]); top->node_id = core_node(core)->id; } } @@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa) /* Clear all node masks */ for (i = 0; i < MAX_NUMNODES; i++) - cpumask_clear(node_to_cpumask_map[i]); + cpumask_clear(&node_to_cpumask_map[i]); /* Rebuild all masks */ toptree_for_each(core, numa, CORE) diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index 09b1d2355bd9..43f32ce60aa3 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c @@ -23,7 +23,7 @@ pg_data_t *node_data[MAX_NUMNODES]; EXPORT_SYMBOL(node_data); -cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +cpumask_t node_to_cpumask_map[MAX_NUMNODES]; EXPORT_SYMBOL(node_to_cpumask_map); const struct numa_mode numa_mode_plain = { @@ -144,7 +144,7 @@ void __init numa_setup(void) static int __init numa_init_early(void) { /* Attach all possible CPUs to node 0 for now. 
*/ - cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask); + cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); return 0; } early_initcall(numa_init_early); diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 92ffe397b893..a05218ff3fe4 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -13,3 +13,4 @@ generic-y += sections.h generic-y += trace_clock.h generic-y += xor.h generic-y += serial.h +generic-y += word-at-a-time.h diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h index fe20d14ae051..ceb5201a30ed 100644 --- a/arch/sh/include/asm/page.h +++ b/arch/sh/include/asm/page.h @@ -59,6 +59,7 @@ pages_do_alias(unsigned long addr1, unsigned long addr2) #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) extern void copy_page(void *to, void *from); +#define copy_user_page(to, from, vaddr, pg) __copy_user(to, from, PAGE_SIZE) struct page; struct vm_area_struct; diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c index 2e48eb8813ff..c90930de76ba 100644 --- a/arch/sparc/crypto/aes_glue.c +++ b/arch/sparc/crypto/aes_glue.c @@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, .setkey = aes_set_key, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, @@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { { .blkcipher = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, .setkey = aes_set_key, .encrypt = ctr_crypt, .decrypt = ctr_crypt, diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c index 6bf2479a12fb..561a84d93cf6 100644 --- a/arch/sparc/crypto/camellia_glue.c +++ b/arch/sparc/crypto/camellia_glue.c @@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { { .blkcipher = { .min_keysize = CAMELLIA_MIN_KEY_SIZE, .max_keysize = CAMELLIA_MAX_KEY_SIZE, + .ivsize = CAMELLIA_BLOCK_SIZE, .setkey = camellia_set_key, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c index dd6a34fa6e19..61af794aa2d3 100644 --- a/arch/sparc/crypto/des_glue.c +++ b/arch/sparc/crypto/des_glue.c @@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { { .blkcipher = { .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, .setkey = des_set_key, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, @@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { { .blkcipher = { .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, .setkey = des3_ede_set_key, .encrypt = cbc3_encrypt, .decrypt = cbc3_decrypt, diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c index ee186e13dfe6..f102048d9c0e 100644 --- a/arch/tile/gxio/mpipe.c +++ b/arch/tile/gxio/mpipe.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -29,32 +30,6 @@ /* HACK: Avoid pointless "shadow" warnings. */ #define link link_shadow -/** - * strscpy - Copy a C-string into a sized buffer, but only if it fits - * @dest: Where to copy the string to - * @src: Where to copy the string from - * @size: size of destination buffer - * - * Use this routine to avoid copying too-long strings. - * The routine returns the total number of bytes copied - * (including the trailing NUL) or zero if the buffer wasn't - * big enough. 
To ensure that programmers pay attention - * to the return code, the destination has a single NUL - * written at the front (if size is non-zero) when the - * buffer is not big enough. - */ -static size_t strscpy(char *dest, const char *src, size_t size) -{ - size_t len = strnlen(src, size) + 1; - if (len > size) { - if (size) - dest[0] = '\0'; - return 0; - } - memcpy(dest, src, len); - return len; -} - int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index) { char file[32]; @@ -540,7 +515,7 @@ int gxio_mpipe_link_instance(const char *link_name) if (!context) return GXIO_ERR_NO_DEVICE; - if (strscpy(name.name, link_name, sizeof(name.name)) == 0) + if (strscpy(name.name, link_name, sizeof(name.name)) < 0) return GXIO_ERR_NO_DEVICE; return gxio_mpipe_info_instance_aux(context, name); @@ -559,7 +534,7 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac) rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac); if (rv >= 0) { - if (strscpy(link_name, name.name, sizeof(name.name)) == 0) + if (strscpy(link_name, name.name, sizeof(name.name)) < 0) return GXIO_ERR_INVAL_MEMORY_SIZE; memcpy(link_mac, mac.mac, sizeof(mac.mac)); } @@ -576,7 +551,7 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link, _gxio_mpipe_link_name_t name; int rv; - if (strscpy(name.name, link_name, sizeof(name.name)) == 0) + if (strscpy(name.name, link_name, sizeof(name.name)) < 0) return GXIO_ERR_NO_DEVICE; rv = gxio_mpipe_link_open_aux(context, name, flags); diff --git a/arch/tile/include/asm/word-at-a-time.h b/arch/tile/include/asm/word-at-a-time.h index 9e5ce0d7b292..b66a693c2c34 100644 --- a/arch/tile/include/asm/word-at-a-time.h +++ b/arch/tile/include/asm/word-at-a-time.h @@ -6,7 +6,7 @@ struct word_at_a_time { /* unused */ }; #define WORD_AT_A_TIME_CONSTANTS {} -/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */ +/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */ static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) { @@ -33,4 +33,10 @@ static inline long find_zero(unsigned long mask) #endif } +#ifdef __BIG_ENDIAN +#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask))) +#else +#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1) +#endif + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c index f0da5a237e94..9f1e05e12255 100644 --- a/arch/tile/kernel/usb.c +++ b/arch/tile/kernel/usb.c @@ -22,6 +22,7 @@ #include #include #include +#include #include static u64 ehci_dmamask = DMA_BIT_MASK(32); diff --git a/arch/um/Makefile b/arch/um/Makefile index 098ab3333e7c..e3abe6f3156d 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -70,8 +70,8 @@ KBUILD_AFLAGS += $(ARCH_INCLUDE) USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \ $(ARCH_INCLUDE) $(MODE_INCLUDE) $(filter -I%,$(CFLAGS)) \ - -D_FILE_OFFSET_BITS=64 -idirafter include \ - -D__KERNEL__ -D__UM_HOST__ + -D_FILE_OFFSET_BITS=64 -idirafter $(srctree)/include \ + -idirafter $(obj)/include -D__KERNEL__ -D__UM_HOST__ #This will adjust *FLAGS accordingly to the platform. 
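A reader-side sketch, not from the patch: the zero_bytemask() helpers added above (powerpc, tile, and the generic word-at-a-time.h wired into several Kbuild files) turn a has_zero() result into a mask that, ANDed with the loaded word, keeps the bytes preceding the first zero byte. The mask value below is made up; the macro is the tile little-endian variant.

#include <stdio.h>

/* little-endian variant as added for tile above; the 0x01 marker sits in the
 * position of the first zero byte */
#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)

int main(void)
{
	unsigned long mask = 0x0000000001000000ul;	/* zero byte at index 3 */

	/* prints 0x1ffffff: bytes 0..2 kept; the zero byte itself contributes nothing */
	printf("%#lx\n", zero_bytemask(mask));
	return 0;
}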
include $(ARCH_DIR)/Makefile-os-$(OS) diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 149ec55f9c46..904f3ebf4220 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -25,4 +25,5 @@ generic-y += preempt.h generic-y += switch_to.h generic-y += topology.h generic-y += trace_clock.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index d8a9fce6ee2e..98783dd0fa2e 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -220,7 +220,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, show_regs(container_of(regs, struct pt_regs, regs)); panic("Segfault with no mm"); } - else if (!is_user && address < TASK_SIZE) { + else if (!is_user && address > PAGE_SIZE && address < TASK_SIZE) { show_regs(container_of(regs, struct pt_regs, regs)); panic("Kernel tried to access user memory at addr 0x%lx, ip 0x%lx", address, ip); diff --git a/arch/um/os-Linux/helper.c b/arch/um/os-Linux/helper.c index e3ee4a51ef63..3f02d4232812 100644 --- a/arch/um/os-Linux/helper.c +++ b/arch/um/os-Linux/helper.c @@ -96,7 +96,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv) "ret = %d\n", -n); ret = n; } - CATCH_EINTR(waitpid(pid, NULL, __WCLONE)); + CATCH_EINTR(waitpid(pid, NULL, __WALL)); } out_free2: @@ -129,7 +129,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags, return err; } if (stack_out == NULL) { - CATCH_EINTR(pid = waitpid(pid, &status, __WCLONE)); + CATCH_EINTR(pid = waitpid(pid, &status, __WALL)); if (pid < 0) { err = -errno; printk(UM_KERN_ERR "run_helper_thread - wait failed, " @@ -148,7 +148,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags, int helper_wait(int pid) { int ret, status; - int wflags = __WCLONE; + int wflags = __WALL; CATCH_EINTR(ret = waitpid(pid, &status, wflags)); if (ret < 0) { diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 1fc7a286dc6f..256c45b3ae34 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -62,4 +62,5 @@ generic-y += ucontext.h generic-y += unaligned.h generic-y += user.h generic-y += vga.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 328c8352480c..96d058a87100 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1308,6 +1308,7 @@ config HIGHMEM config X86_PAE bool "PAE (Physical Address Extension) Support" depends on X86_32 && !HIGHMEM4G + select SWIOTLB ---help--- PAE is required for NX support, and furthermore enables larger swapspace support for non-overcommit purposes. 
It diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index ee1b6d346b98..db51c1f27446 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -667,6 +667,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto, bool conout_found = false; void *dummy = NULL; u32 h = handles[i]; + u32 current_fb_base; status = efi_call_early(handle_protocol, h, proto, (void **)&gop32); @@ -678,7 +679,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto, if (status == EFI_SUCCESS) conout_found = true; - status = __gop_query32(gop32, &info, &size, &fb_base); + status = __gop_query32(gop32, &info, &size, &current_fb_base); if (status == EFI_SUCCESS && (!first_gop || conout_found)) { /* * Systems that use the UEFI Console Splitter may @@ -692,6 +693,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto, pixel_format = info->pixel_format; pixel_info = info->pixel_information; pixels_per_scan_line = info->pixels_per_scan_line; + fb_base = current_fb_base; /* * Once we've found a GOP supporting ConOut, @@ -770,6 +772,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto, bool conout_found = false; void *dummy = NULL; u64 h = handles[i]; + u32 current_fb_base; status = efi_call_early(handle_protocol, h, proto, (void **)&gop64); @@ -781,7 +784,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto, if (status == EFI_SUCCESS) conout_found = true; - status = __gop_query64(gop64, &info, &size, &fb_base); + status = __gop_query64(gop64, &info, &size, &current_fb_base); if (status == EFI_SUCCESS && (!first_gop || conout_found)) { /* * Systems that use the UEFI Console Splitter may @@ -795,6 +798,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto, pixel_format = info->pixel_format; pixel_info = info->pixel_information; pixels_per_scan_line = info->pixels_per_scan_line; + fb_base = current_fb_base; /* * Once we've found a GOP supporting ConOut, diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c index 80a0e4389c9a..bacaa13acac5 100644 --- a/arch/x86/crypto/camellia_aesni_avx_glue.c +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c @@ -554,6 +554,11 @@ static int __init camellia_aesni_init(void) { const char *feature_name; + if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) { + pr_info("AVX or AES-NI instructions are not detected.\n"); + return -ENODEV; + } + if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) { pr_info("CPU feature '%s' is not supported.\n", feature_name); return -ENODEV; diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index e6cf2ad350d1..9727b3b48bd1 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -193,7 +193,7 @@ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */ -#define X86_FEATURE_HWP_NOITFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ +#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */ #define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */ #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */ #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */ diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index ab5f1d447ef9..ae68be92f755 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -86,6 +86,7 @@ extern u64 asmlinkage efi_call(void *fp, ...); extern void __iomem
*__init efi_ioremap(unsigned long addr, unsigned long size, u32 type, u64 attribute); +#ifdef CONFIG_KASAN /* * CONFIG_KASAN may redefine memset to __memset. __memset function is present * only in kernel binary. Since the EFI stub linked into a separate binary it @@ -95,6 +96,7 @@ extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, #undef memcpy #undef memset #undef memmove +#endif #endif /* CONFIG_X86_32 */ diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 2beee0382088..3a36ee704c30 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1226,10 +1226,8 @@ void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err); int kvm_is_in_guest(void); -int __x86_set_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem); -int x86_set_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem); +int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size); +int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size); bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu); bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu); diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index b98b471a3b7e..b8c14bb7fc8f 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -141,6 +141,8 @@ #define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) #define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) +#define MSR_PEBS_FRONTEND 0x000003f7 + #define MSR_IA32_POWER_CTL 0x000001fc #define MSR_IA32_MC0_CTL 0x00000400 diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h index 655e07a48f6c..67f08230103a 100644 --- a/arch/x86/include/asm/pvclock-abi.h +++ b/arch/x86/include/asm/pvclock-abi.h @@ -41,6 +41,7 @@ struct pvclock_wall_clock { #define PVCLOCK_TSC_STABLE_BIT (1 << 0) #define PVCLOCK_GUEST_STOPPED (1 << 1) +/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */ #define PVCLOCK_COUNTS_FROM_ZERO (1 << 2) #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_PVCLOCK_ABI_H */ diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index e4661196994e..ff8b9a17dc4b 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -27,12 +27,11 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t function. 
*/ #define __HAVE_ARCH_MEMCPY 1 +extern void *memcpy(void *to, const void *from, size_t len); extern void *__memcpy(void *to, const void *from, size_t len); #ifndef CONFIG_KMEMCHECK -#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) || __GNUC__ > 4 -extern void *memcpy(void *to, const void *from, size_t len); -#else +#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4 #define memcpy(dst, src, len) \ ({ \ size_t __len = (len); \ diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 83aea8055119..4c20dd333412 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -336,10 +336,10 @@ HYPERVISOR_update_descriptor(u64 ma, u64 desc) return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); } -static inline int +static inline long HYPERVISOR_memory_op(unsigned int cmd, void *arg) { - return _hypercall2(int, memory_op, cmd, arg); + return _hypercall2(long, memory_op, cmd, arg); } static inline int diff --git a/arch/x86/include/uapi/asm/bitsperlong.h b/arch/x86/include/uapi/asm/bitsperlong.h index b0ae1c4dc791..217909b4d6f5 100644 --- a/arch/x86/include/uapi/asm/bitsperlong.h +++ b/arch/x86/include/uapi/asm/bitsperlong.h @@ -1,7 +1,7 @@ #ifndef __ASM_X86_BITSPERLONG_H #define __ASM_X86_BITSPERLONG_H -#ifdef __x86_64__ +#if defined(__x86_64__) && !defined(__ILP32__) # define __BITS_PER_LONG 64 #else # define __BITS_PER_LONG 32 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 5c60bb162622..bb6bfc01cb82 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2907,6 +2907,7 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, struct irq_data *irq_data; struct mp_chip_data *data; struct irq_alloc_info *info = arg; + unsigned long flags; if (!info || nr_irqs > 1) return -EINVAL; @@ -2939,11 +2940,14 @@ int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, cfg = irqd_cfg(irq_data); add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin); + + local_irq_save(flags); if (info->ioapic_entry) mp_setup_entry(cfg, data, info->ioapic_entry); mp_register_handler(virq, data->trigger); if (virq < nr_legacy_irqs()) legacy_pic->mask(virq); + local_irq_restore(flags); apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n", diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 381c8b9b3a33..20e242ea1bc4 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -34,11 +34,10 @@ struct ms_hyperv_info ms_hyperv; EXPORT_SYMBOL_GPL(ms_hyperv); -static void (*hv_kexec_handler)(void); -static void (*hv_crash_handler)(struct pt_regs *regs); - #if IS_ENABLED(CONFIG_HYPERV) static void (*vmbus_handler)(void); +static void (*hv_kexec_handler)(void); +static void (*hv_crash_handler)(struct pt_regs *regs); void hyperv_vector_handler(struct pt_regs *regs) { @@ -96,8 +95,8 @@ void hv_remove_crash_handler(void) hv_crash_handler = NULL; } EXPORT_SYMBOL_GPL(hv_remove_crash_handler); -#endif +#ifdef CONFIG_KEXEC_CORE static void hv_machine_shutdown(void) { if (kexec_in_progress && hv_kexec_handler) @@ -111,7 +110,8 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs) hv_crash_handler(regs); native_machine_crash_shutdown(regs); } - +#endif /* CONFIG_KEXEC_CORE */ +#endif /* CONFIG_HYPERV */ static uint32_t __init ms_hyperv_platform(void) { @@ -186,8 +186,10 @@ static void __init ms_hyperv_init_platform(void) 
no_timer_check = 1; #endif +#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE) machine_ops.shutdown = hv_machine_shutdown; machine_ops.crash_shutdown = hv_machine_crash_shutdown; +#endif mark_tsc_unstable("running on Hyper-V"); } diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 5edf6d868fc1..165be83a7fa4 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -47,6 +47,7 @@ enum extra_reg_type { EXTRA_REG_RSP_1 = 1, /* offcore_response_1 */ EXTRA_REG_LBR = 2, /* lbr_select */ EXTRA_REG_LDLAT = 3, /* ld_lat_threshold */ + EXTRA_REG_FE = 4, /* fe_* */ EXTRA_REG_MAX /* number of entries needed */ }; diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 3fefebfbdf4b..f63360be2238 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -205,6 +205,11 @@ static struct extra_reg intel_skl_extra_regs[] __read_mostly = { INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), + /* + * Note the low 8 bits eventsel code is not a continuous field, containing + * some #GPing bits. These are masked out. + */ + INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE), EVENT_EXTRA_END }; @@ -250,7 +255,7 @@ struct event_constraint intel_bdw_event_constraints[] = { FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */ - INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */ + INTEL_UEVENT_CONSTRAINT(0x8a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */ EVENT_CONSTRAINT_END }; @@ -2891,6 +2896,8 @@ PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); PMU_FORMAT_ATTR(ldlat, "config1:0-15"); +PMU_FORMAT_ATTR(frontend, "config1:0-23"); + static struct attribute *intel_arch3_formats_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, @@ -2907,6 +2914,11 @@ static struct attribute *intel_arch3_formats_attr[] = { NULL, }; +static struct attribute *skl_format_attr[] = { + &format_attr_frontend.attr, + NULL, +}; + static __initconst const struct x86_pmu core_pmu = { .name = "core", .handle_irq = x86_pmu_handle_irq, @@ -3516,7 +3528,8 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.cpu_events = hsw_events_attrs; + x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, + skl_format_attr); WARN_ON(!x86_pmu.format_attrs); x86_pmu.cpu_events = hsw_events_attrs; pr_cont("Skylake events, "); diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c index 086b12eae794..f32ac13934f2 100644 --- a/arch/x86/kernel/cpu/perf_event_msr.c +++ b/arch/x86/kernel/cpu/perf_event_msr.c @@ -10,12 +10,12 @@ enum perf_msr_id { PERF_MSR_EVENT_MAX, }; -bool test_aperfmperf(int idx) +static bool test_aperfmperf(int idx) { return boot_cpu_has(X86_FEATURE_APERFMPERF); } -bool test_intel(int idx) +static bool test_intel(int idx) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || boot_cpu_data.x86 != 6) diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index 3d423a101fae..608fb26c7254 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -37,7 +37,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c) { 
X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 }, { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 }, - { X86_FEATURE_HWP_NOITFY, CR_EAX, 8, 0x00000006, 0 }, + { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 }, { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 }, { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 }, { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 }, diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index e068d6683dba..74ca2fe7a0b3 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs) } #ifdef CONFIG_KEXEC_FILE -static int get_nr_ram_ranges_callback(unsigned long start_pfn, - unsigned long nr_pfn, void *arg) +static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg) { - int *nr_ranges = arg; + unsigned int *nr_ranges = arg; (*nr_ranges)++; return 0; @@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced, ced->image = image; - walk_system_ram_range(0, -1, &nr_ranges, + walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback); ced->max_nr_ranges = nr_ranges; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 6d0e62ae8516..e28db181e4fc 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -506,3 +506,58 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) return randomize_range(mm->brk, range_end, 0) ? : mm->brk; } +/* + * Called from fs/proc with a reference on @p to find the function + * which called into schedule(). This needs to be done carefully + * because the task might wake up and we might look at a stack + * changing under us. + */ +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long start, bottom, top, sp, fp, ip; + int count = 0; + + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + + start = (unsigned long)task_stack_page(p); + if (!start) + return 0; + + /* + * Layout of the stack page: + * + * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long) + * PADDING + * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING + * stack + * ----------- bottom = start + sizeof(thread_info) + * thread_info + * ----------- start + * + * The tasks stack pointer points at the location where the + * framepointer is stored. The data on the stack is: + * ... IP FP ... IP FP + * + * We need to read FP and IP, so we need to adjust the upper + * bound by another unsigned long. 
+ */ + top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; + top -= 2 * sizeof(unsigned long); + bottom = start + sizeof(struct thread_info); + + sp = READ_ONCE(p->thread.sp); + if (sp < bottom || sp > top) + return 0; + + fp = READ_ONCE_NOCHECK(*(unsigned long *)sp); + do { + if (fp < bottom || fp > top) + return 0; + ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long))); + if (!in_sched_functions(ip)) + return ip; + fp = READ_ONCE_NOCHECK(*(unsigned long *)fp); + } while (count++ < 16 && p->state != TASK_RUNNING); + return 0; +} diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index c13df2c735f8..737527b40e5b 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -324,31 +324,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) return prev_p; } - -#define top_esp (THREAD_SIZE - sizeof(unsigned long)) -#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) - -unsigned long get_wchan(struct task_struct *p) -{ - unsigned long bp, sp, ip; - unsigned long stack_page; - int count = 0; - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - stack_page = (unsigned long)task_stack_page(p); - sp = p->thread.sp; - if (!stack_page || sp < stack_page || sp > top_esp+stack_page) - return 0; - /* include/asm-i386/system.h:switch_to() pushes bp last. */ - bp = *(unsigned long *) sp; - do { - if (bp < stack_page || bp > top_ebp+stack_page) - return 0; - ip = *(unsigned long *) (bp+4); - if (!in_sched_functions(ip)) - return ip; - bp = *(unsigned long *) bp; - } while (count++ < 16); - return 0; -} - diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 3c1bbcf12924..b35921a670b2 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -499,30 +499,6 @@ void set_personality_ia32(bool x32) } EXPORT_SYMBOL_GPL(set_personality_ia32); -unsigned long get_wchan(struct task_struct *p) -{ - unsigned long stack; - u64 fp, ip; - int count = 0; - - if (!p || p == current || p->state == TASK_RUNNING) - return 0; - stack = (unsigned long)task_stack_page(p); - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) - return 0; - fp = *(u64 *)(p->thread.sp); - do { - if (fp < (unsigned long)stack || - fp >= (unsigned long)stack+THREAD_SIZE) - return 0; - ip = *(u64 *)(fp+8); - if (!in_sched_functions(ip)) - return ip; - fp = *(u64 *)fp; - } while (count++ < 16); - return 0; -} - long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) { int ret = 0; diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index fdb7f2a2d328..a3cccbfc5f77 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1173,6 +1173,14 @@ void __init setup_arch(char **cmdline_p) clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY, swapper_pg_dir + KERNEL_PGD_BOUNDARY, KERNEL_PGD_PTRS); + + /* + * sync back low identity map too. It is used for example + * in the 32-bit EFI stub. 
+ */ + clone_pgd_range(initial_page_table, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); #endif tboot_probe(); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index e0c198e5f920..892ee2e5ecbc 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid) */ #define UDELAY_10MS_DEFAULT 10000 -static unsigned int init_udelay = UDELAY_10MS_DEFAULT; +static unsigned int init_udelay = INT_MAX; static int __init cpu_init_udelay(char *str) { @@ -522,13 +522,16 @@ early_param("cpu_init_udelay", cpu_init_udelay); static void __init smp_quirk_init_udelay(void) { /* if cmdline changed it from default, leave it alone */ - if (init_udelay != UDELAY_10MS_DEFAULT) + if (init_udelay != INT_MAX) return; /* if modern processor, use no delay */ if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) || ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) init_udelay = 0; + + /* else, use legacy delay */ + init_udelay = UDELAY_10MS_DEFAULT; } /* @@ -657,7 +660,9 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) /* * Give the other CPU some time to accept the IPI. */ - if (init_udelay) + if (init_udelay == 0) + udelay(10); + else udelay(300); pr_debug("Startup point 1\n"); @@ -668,7 +673,9 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) /* * Give the other CPU some time to accept the IPI. */ - if (init_udelay) + if (init_udelay == 0) + udelay(10); + else udelay(200); if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index b372a7557c16..9da95b9daf8d 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2418,7 +2418,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) u64 val, cr0, cr4; u32 base3; u16 selector; - int i; + int i, r; for (i = 0; i < 16; i++) *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8); @@ -2460,13 +2460,17 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase) dt.address = GET_SMSTATE(u64, smbase, 0x7e68); ctxt->ops->set_gdt(ctxt, &dt); + r = rsm_enter_protected_mode(ctxt, cr0, cr4); + if (r != X86EMUL_CONTINUE) + return r; + for (i = 0; i < 6; i++) { - int r = rsm_load_seg_64(ctxt, smbase, i); + r = rsm_load_seg_64(ctxt, smbase, i); if (r != X86EMUL_CONTINUE) return r; } - return rsm_enter_protected_mode(ctxt, cr0, cr4); + return X86EMUL_CONTINUE; } static int em_rsm(struct x86_emulate_ctxt *ctxt) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 94b7d15db3fc..2f9ed1ff0632 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -514,7 +514,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); if (svm->vmcb->control.next_rip != 0) { - WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS)); + WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS)); svm->next_rip = svm->vmcb->control.next_rip; } @@ -866,64 +866,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm) set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0); } -#define MTRR_TYPE_UC_MINUS 7 -#define MTRR2PROTVAL_INVALID 0xff - -static u8 mtrr2protval[8]; - -static u8 fallback_mtrr_type(int mtrr) -{ - /* - * WT and WP aren't always available in the host PAT. Treat - * them as UC and UC- respectively. Everything else should be - * there. 
- */ - switch (mtrr) - { - case MTRR_TYPE_WRTHROUGH: - return MTRR_TYPE_UNCACHABLE; - case MTRR_TYPE_WRPROT: - return MTRR_TYPE_UC_MINUS; - default: - BUG(); - } -} - -static void build_mtrr2protval(void) -{ - int i; - u64 pat; - - for (i = 0; i < 8; i++) - mtrr2protval[i] = MTRR2PROTVAL_INVALID; - - /* Ignore the invalid MTRR types. */ - mtrr2protval[2] = 0; - mtrr2protval[3] = 0; - - /* - * Use host PAT value to figure out the mapping from guest MTRR - * values to nested page table PAT/PCD/PWT values. We do not - * want to change the host PAT value every time we enter the - * guest. - */ - rdmsrl(MSR_IA32_CR_PAT, pat); - for (i = 0; i < 8; i++) { - u8 mtrr = pat >> (8 * i); - - if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID) - mtrr2protval[mtrr] = __cm_idx2pte(i); - } - - for (i = 0; i < 8; i++) { - if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) { - u8 fallback = fallback_mtrr_type(i); - mtrr2protval[i] = mtrr2protval[fallback]; - BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID); - } - } -} - static __init int svm_hardware_setup(void) { int cpu; @@ -990,7 +932,6 @@ static __init int svm_hardware_setup(void) } else kvm_disable_tdp(); - build_mtrr2protval(); return 0; err: @@ -1145,43 +1086,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) return target_tsc - tsc; } -static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat) -{ - struct kvm_vcpu *vcpu = &svm->vcpu; - - /* Unlike Intel, AMD takes the guest's CR0.CD into account. - * - * AMD does not have IPAT. To emulate it for the case of guests - * with no assigned devices, just set everything to WB. If guests - * have assigned devices, however, we cannot force WB for RAM - * pages only, so use the guest PAT directly. - */ - if (!kvm_arch_has_assigned_device(vcpu->kvm)) - *g_pat = 0x0606060606060606; - else - *g_pat = vcpu->arch.pat; -} - -static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) -{ - u8 mtrr; - - /* - * 1. MMIO: trust guest MTRR, so same as item 3. - * 2. No passthrough: always map as WB, and force guest PAT to WB as well - * 3. Passthrough: can't guarantee the result, try to trust guest. - */ - if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm)) - return 0; - - if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED) && - kvm_read_cr0(vcpu) & X86_CR0_CD) - return _PAGE_NOCACHE; - - mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn); - return mtrr2protval[mtrr]; -} - static void init_vmcb(struct vcpu_svm *svm, bool init_event) { struct vmcb_control_area *control = &svm->vmcb->control; @@ -1278,7 +1182,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event) clr_cr_intercept(svm, INTERCEPT_CR3_READ); clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); save->g_pat = svm->vcpu.arch.pat; - svm_set_guest_pat(svm, &save->g_pat); save->cr3 = 0; save->cr4 = 0; } @@ -1673,10 +1576,13 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (!vcpu->fpu_active) cr0 |= X86_CR0_TS; - - /* These are emulated via page tables. 
*/ - cr0 &= ~(X86_CR0_CD | X86_CR0_NW); - + /* + * re-enable caching here because the QEMU bios + * does not do it - this results in some delay at + * reboot + */ + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) + cr0 &= ~(X86_CR0_CD | X86_CR0_NW); svm->vmcb->save.cr0 = cr0; mark_dirty(svm->vmcb, VMCB_CR); update_cr0_intercept(svm); @@ -3351,16 +3257,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_VM_IGNNE: vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); break; - case MSR_IA32_CR_PAT: - if (npt_enabled) { - if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data)) - return 1; - vcpu->arch.pat = data; - svm_set_guest_pat(svm, &svm->vmcb->save.g_pat); - mark_dirty(svm->vmcb, VMCB_NPT); - break; - } - /* fall through */ default: return kvm_set_msr_common(vcpu, msr); } @@ -4195,6 +4091,11 @@ static bool svm_has_high_real_mode_segbase(void) return true; } +static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) +{ + return 0; +} + static void svm_cpuid_update(struct kvm_vcpu *vcpu) { } diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 64076740251e..6a8bc64566ab 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4105,17 +4105,13 @@ static void seg_setup(int seg) static int alloc_apic_access_page(struct kvm *kvm) { struct page *page; - struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; mutex_lock(&kvm->slots_lock); if (kvm->arch.apic_access_page_done) goto out; - kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; - kvm_userspace_mem.flags = 0; - kvm_userspace_mem.guest_phys_addr = APIC_DEFAULT_PHYS_BASE; - kvm_userspace_mem.memory_size = PAGE_SIZE; - r = __x86_set_memory_region(kvm, &kvm_userspace_mem); + r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, + APIC_DEFAULT_PHYS_BASE, PAGE_SIZE); if (r) goto out; @@ -4140,17 +4136,12 @@ static int alloc_identity_pagetable(struct kvm *kvm) { /* Called with kvm->slots_lock held. */ - struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; BUG_ON(kvm->arch.ept_identity_pagetable_done); - kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; - kvm_userspace_mem.flags = 0; - kvm_userspace_mem.guest_phys_addr = - kvm->arch.ept_identity_map_addr; - kvm_userspace_mem.memory_size = PAGE_SIZE; - r = __x86_set_memory_region(kvm, &kvm_userspace_mem); + r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, + kvm->arch.ept_identity_map_addr, PAGE_SIZE); return r; } @@ -4949,14 +4940,9 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) { int ret; - struct kvm_userspace_memory_region tss_mem = { - .slot = TSS_PRIVATE_MEMSLOT, - .guest_phys_addr = addr, - .memory_size = PAGE_SIZE * 3, - .flags = 0, - }; - ret = x86_set_memory_region(kvm, &tss_mem); + ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr, + PAGE_SIZE * 3); if (ret) return ret; kvm->arch.tss_addr = addr; @@ -8617,17 +8603,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) u64 ipat = 0; /* For VT-d and EPT combination - * 1. MMIO: guest may want to apply WC, trust it. + * 1. MMIO: always map as UC * 2. EPT with VT-d: * a. VT-d without snooping control feature: can't guarantee the - * result, try to trust guest. So the same as item 1. + * result, try to trust guest. * b. VT-d with snooping control feature: snooping control feature of * VT-d engine can guarantee the cache correctness. 
Just set it * to WB to keep consistent with host. So the same as item 3. * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep * consistent with host MTRR */ - if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) { + if (is_mmio) { + cache = MTRR_TYPE_UNCACHABLE; + goto exit; + } + + if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { ipat = VMX_EPT_IPAT_BIT; cache = MTRR_TYPE_WRBACK; goto exit; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 991466bf8dee..9a9a19830321 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1708,8 +1708,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) vcpu->pvclock_set_guest_stopped_request = false; } - pvclock_flags |= PVCLOCK_COUNTS_FROM_ZERO; - /* If the host uses TSC clocksource, then it is stable */ if (use_master_clock) pvclock_flags |= PVCLOCK_TSC_STABLE_BIT; @@ -2007,8 +2005,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) &vcpu->requests); ka->boot_vcpu_runs_old_kvmclock = tmp; - - ka->kvmclock_offset = -get_kernel_ns(); } vcpu->arch.time = data; @@ -6457,6 +6453,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) return 1; } +static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) +{ + return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && + !vcpu->arch.apf.halted); +} + static int vcpu_run(struct kvm_vcpu *vcpu) { int r; @@ -6465,8 +6467,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu) vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); for (;;) { - if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && - !vcpu->arch.apf.halted) + if (kvm_vcpu_running(vcpu)) r = vcpu_enter_guest(vcpu); else r = vcpu_block(kvm, vcpu); @@ -7478,34 +7479,66 @@ void kvm_arch_sync_events(struct kvm *kvm) kvm_free_pit(kvm); } -int __x86_set_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem) +int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) { int i, r; + unsigned long hva; + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *slot, old; /* Called with kvm->slots_lock held. */ - BUG_ON(mem->slot >= KVM_MEM_SLOTS_NUM); + if (WARN_ON(id >= KVM_MEM_SLOTS_NUM)) + return -EINVAL; + slot = id_to_memslot(slots, id); + if (size) { + if (WARN_ON(slot->npages)) + return -EEXIST; + + /* + * MAP_SHARED to prevent internal slot pages from being moved + * by fork()/COW. 
+ */ + hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, 0); + if (IS_ERR((void *)hva)) + return PTR_ERR((void *)hva); + } else { + if (!slot->npages) + return 0; + + hva = 0; + } + + old = *slot; for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { - struct kvm_userspace_memory_region m = *mem; + struct kvm_userspace_memory_region m; - m.slot |= i << 16; + m.slot = id | (i << 16); + m.flags = 0; + m.guest_phys_addr = gpa; + m.userspace_addr = hva; + m.memory_size = size; r = __kvm_set_memory_region(kvm, &m); if (r < 0) return r; } + if (!size) { + r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); + WARN_ON(r < 0); + } + return 0; } EXPORT_SYMBOL_GPL(__x86_set_memory_region); -int x86_set_memory_region(struct kvm *kvm, - const struct kvm_userspace_memory_region *mem) +int x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) { int r; mutex_lock(&kvm->slots_lock); - r = __x86_set_memory_region(kvm, mem); + r = __x86_set_memory_region(kvm, id, gpa, size); mutex_unlock(&kvm->slots_lock); return r; @@ -7520,16 +7553,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm) * unless the the memory map has changed due to process exit * or fd copying. */ - struct kvm_userspace_memory_region mem; - memset(&mem, 0, sizeof(mem)); - mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; - x86_set_memory_region(kvm, &mem); - - mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; - x86_set_memory_region(kvm, &mem); - - mem.slot = TSS_PRIVATE_MEMSLOT; - x86_set_memory_region(kvm, &mem); + x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0); + x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT, 0, 0); + x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0); } kvm_iommu_unmap_guest(kvm); kfree(kvm->arch.vpic); @@ -7632,27 +7658,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { - /* - * Only private memory slots need to be mapped here since - * KVM_SET_MEMORY_REGION ioctl is no longer supported. - */ - if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) { - unsigned long userspace_addr; - - /* - * MAP_SHARED to prevent internal slot pages from being moved - * by fork()/COW. 
- */ - userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE, - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, 0); - - if (IS_ERR((void *)userspace_addr)) - return PTR_ERR((void *)userspace_addr); - - memslot->userspace_addr = userspace_addr; - } - return 0; } @@ -7714,17 +7719,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, { int nr_mmu_pages = 0; - if (change == KVM_MR_DELETE && old->id >= KVM_USER_MEM_SLOTS) { - int ret; - - ret = vm_munmap(old->userspace_addr, - old->npages * PAGE_SIZE); - if (ret < 0) - printk(KERN_WARNING - "kvm_vm_ioctl_set_memory_region: " - "failed to munmap memory\n"); - } - if (!kvm->arch.n_requested_mmu_pages) nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm); @@ -7773,19 +7767,36 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, kvm_mmu_invalidate_zap_all_pages(kvm); } +static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) +{ + if (!list_empty_careful(&vcpu->async_pf.done)) + return true; + + if (kvm_apic_has_events(vcpu)) + return true; + + if (vcpu->arch.pv.pv_unhalted) + return true; + + if (atomic_read(&vcpu->arch.nmi_queued)) + return true; + + if (test_bit(KVM_REQ_SMI, &vcpu->requests)) + return true; + + if (kvm_arch_interrupt_allowed(vcpu) && + kvm_cpu_has_interrupt(vcpu)) + return true; + + return false; +} + int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) kvm_x86_ops->check_nested_events(vcpu, false); - return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && - !vcpu->arch.apf.halted) - || !list_empty_careful(&vcpu->async_pf.done) - || kvm_apic_has_events(vcpu) - || vcpu->arch.pv.pv_unhalted - || atomic_read(&vcpu->arch.nmi_queued) || - (kvm_arch_interrupt_allowed(vcpu) && - kvm_cpu_has_interrupt(vcpu)); + return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); } int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 30564e2752d3..df48430c279b 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1132,7 +1132,7 @@ void mark_rodata_ro(void) * has been zapped already via cleanup_highmem(). */ all_end = roundup((unsigned long)_brk_end, PMD_SIZE); - set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); + set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT); rodata_test(); diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 1db84c0758b7..6a28ded74211 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -704,6 +704,70 @@ out: return ret; } +/* + * Iterate the EFI memory map in reverse order because the regions + * will be mapped top-down. The end result is the same as if we had + * mapped things forward, but doesn't require us to change the + * existing implementation of efi_map_region(). + */ +static inline void *efi_map_next_entry_reverse(void *entry) +{ + /* Initial call */ + if (!entry) + return memmap.map_end - memmap.desc_size; + + entry -= memmap.desc_size; + if (entry < memmap.map) + return NULL; + + return entry; +} + +/* + * efi_map_next_entry - Return the next EFI memory map descriptor + * @entry: Previous EFI memory map descriptor + * + * This is a helper function to iterate over the EFI memory map, which + * we do in different orders depending on the current configuration. + * + * To begin traversing the memory map @entry must be %NULL. + * + * Returns %NULL when we reach the end of the memory map. 
+ */ +static void *efi_map_next_entry(void *entry) +{ + if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) { + /* + * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE + * config table feature requires us to map all entries + * in the same order as they appear in the EFI memory + * map. That is to say, entry N must have a lower + * virtual address than entry N+1. This is because the + * firmware toolchain leaves relative references in + * the code/data sections, which are split and become + * separate EFI memory regions. Mapping things + * out-of-order leads to the firmware accessing + * unmapped addresses. + * + * Since we need to map things this way whether or not + * the kernel actually makes use of + * EFI_PROPERTIES_TABLE, let's just switch to this + * scheme by default for 64-bit. + */ + return efi_map_next_entry_reverse(entry); + } + + /* Initial call */ + if (!entry) + return memmap.map; + + entry += memmap.desc_size; + if (entry >= memmap.map_end) + return NULL; + + return entry; +} + /* * Map the efi memory ranges of the runtime services and update new_mmap with * virtual addresses. @@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift) unsigned long left = 0; efi_memory_desc_t *md; - for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { + p = NULL; + while ((p = efi_map_next_entry(p))) { md = p; if (!(md->attribute & EFI_MEMORY_RUNTIME)) { #ifdef CONFIG_X86_64 diff --git a/arch/x86/um/ldt.c b/arch/x86/um/ldt.c index 9701a4fd7bf2..836a1eb5df43 100644 --- a/arch/x86/um/ldt.c +++ b/arch/x86/um/ldt.c @@ -12,7 +12,10 @@ #include #include -extern int modify_ldt(int func, void *ptr, unsigned long bytecount); +static inline int modify_ldt (int func, void *ptr, unsigned long bytecount) +{ + return syscall(__NR_modify_ldt, func, ptr, bytecount); +} static long write_ldt_entry(struct mm_id *mm_idp, int func, struct user_desc *desc, void **addr, int done) diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 30d12afe52ed..993b7a71386d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -33,6 +33,10 @@ #include #include +#ifdef CONFIG_KEXEC_CORE +#include +#endif + #include #include #include @@ -1077,6 +1081,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) /* Fast syscall setup is all done in hypercalls, so these are all ignored. Stub them out here to stop Xen console noise. 
*/ + break; default: if (!pmu_msr_write(msr, low, high, &ret)) @@ -1807,6 +1812,21 @@ static struct notifier_block xen_hvm_cpu_notifier = { .notifier_call = xen_hvm_cpu_notify, }; +#ifdef CONFIG_KEXEC_CORE +static void xen_hvm_shutdown(void) +{ + native_machine_shutdown(); + if (kexec_in_progress) + xen_reboot(SHUTDOWN_soft_reset); +} + +static void xen_hvm_crash_shutdown(struct pt_regs *regs) +{ + native_machine_crash_shutdown(regs); + xen_reboot(SHUTDOWN_soft_reset); +} +#endif + static void __init xen_hvm_guest_init(void) { if (xen_pv_domain()) @@ -1826,6 +1846,10 @@ static void __init xen_hvm_guest_init(void) x86_init.irqs.intr_init = xen_init_IRQ; xen_hvm_init_time_ops(); xen_hvm_init_mmu_ops(); +#ifdef CONFIG_KEXEC_CORE + machine_ops.shutdown = xen_hvm_shutdown; + machine_ops.crash_shutdown = xen_hvm_crash_shutdown; +#endif } #endif diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index bfc08b13044b..660b3cfef234 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -112,6 +112,15 @@ static unsigned long *p2m_identity; static pte_t *p2m_missing_pte; static pte_t *p2m_identity_pte; +/* + * Hint at last populated PFN. + * + * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack + * can avoid scanning the whole P2M (which may be sized to account for + * hotplugged memory). + */ +static unsigned long xen_p2m_last_pfn; + static inline unsigned p2m_top_index(unsigned long pfn) { BUG_ON(pfn >= MAX_P2M_PFN); @@ -270,7 +279,7 @@ void xen_setup_mfn_list_list(void) else HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = virt_to_mfn(p2m_top_mfn); - HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn; + HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; HYPERVISOR_shared_info->arch.p2m_generation = 0; HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr; HYPERVISOR_shared_info->arch.p2m_cr3 = @@ -406,6 +415,8 @@ void __init xen_vmalloc_p2m_tree(void) static struct vm_struct vm; unsigned long p2m_limit; + xen_p2m_last_pfn = xen_max_p2m_pfn; + p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; vm.flags = VM_ALLOC; vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), @@ -608,6 +619,12 @@ static bool alloc_p2m(unsigned long pfn) free_p2m_page(p2m); } + /* Expanded the p2m? */ + if (pfn > xen_p2m_last_pfn) { + xen_p2m_last_pfn = pfn; + HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; + } + return true; } diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index f5ef6746d47a..1c30e4ab1022 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -548,7 +548,7 @@ static unsigned long __init xen_get_max_pages(void) { unsigned long max_pages, limit; domid_t domid = DOMID_SELF; - int ret; + long ret; limit = xen_get_pages_limit(); max_pages = limit; @@ -798,7 +798,7 @@ char * __init xen_memory_setup(void) xen_ignore_unusable(); /* Make sure the Xen-supplied memory map is well-ordered. 
*/ - sanitize_e820_map(xen_e820_map, xen_e820_map_entries, + sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map), &xen_e820_map_entries); max_pages = xen_get_max_pages(); diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index 63c223dff5f1..b56855a1382a 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -28,4 +28,5 @@ generic-y += statfs.h generic-y += termios.h generic-y += topology.h generic-y += trace_clock.h +generic-y += word-at-a-time.h generic-y += xor.h diff --git a/block/blk-core.c b/block/blk-core.c index 2eb722d48773..18e92a6645e2 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -576,7 +576,7 @@ void blk_cleanup_queue(struct request_queue *q) q->queue_lock = &q->__queue_lock; spin_unlock_irq(lock); - bdi_destroy(&q->backing_dev_info); + bdi_unregister(&q->backing_dev_info); /* @q is and will stay empty, shutdown and put */ blk_put_queue(q); diff --git a/block/blk-lib.c b/block/blk-lib.c index bd40292e5009..9ebf65379556 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c @@ -26,13 +26,6 @@ static void bio_batch_end_io(struct bio *bio) bio_put(bio); } -/* - * Ensure that max discard sectors doesn't overflow bi_size and hopefully - * it is of the proper granularity as long as the granularity is a power - * of two. - */ -#define MAX_BIO_SECTORS ((1U << 31) >> 9) - /** * blkdev_issue_discard - queue a discard * @bdev: blockdev to issue discard for @@ -50,6 +43,8 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, DECLARE_COMPLETION_ONSTACK(wait); struct request_queue *q = bdev_get_queue(bdev); int type = REQ_WRITE | REQ_DISCARD; + unsigned int granularity; + int alignment; struct bio_batch bb; struct bio *bio; int ret = 0; @@ -61,6 +56,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, if (!blk_queue_discard(q)) return -EOPNOTSUPP; + /* Zero-sector (unknown) and one-sector granularities are the same. */ + granularity = max(q->limits.discard_granularity >> 9, 1U); + alignment = (bdev_discard_alignment(bdev) >> 9) % granularity; + if (flags & BLKDEV_DISCARD_SECURE) { if (!blk_queue_secdiscard(q)) return -EOPNOTSUPP; @@ -74,7 +73,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, blk_start_plug(&plug); while (nr_sects) { unsigned int req_sects; - sector_t end_sect; + sector_t end_sect, tmp; bio = bio_alloc(gfp_mask, 1); if (!bio) { @@ -82,8 +81,22 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, break; } - req_sects = min_t(sector_t, nr_sects, MAX_BIO_SECTORS); + /* Make sure bi_size doesn't overflow */ + req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9); + + /* + * If splitting a request, and the next starting sector would be + * misaligned, stop the discard at the previous aligned sector. 
+ */ end_sect = sector + req_sects; + tmp = end_sect; + if (req_sects < nr_sects && + sector_div(tmp, granularity) != alignment) { + end_sect = end_sect - alignment; + sector_div(end_sect, granularity); + end_sect = end_sect * granularity + alignment; + req_sects = end_sect - sector; + } bio->bi_iter.bi_sector = sector; bio->bi_end_io = bio_batch_end_io; diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index 1e28ddb656b8..8764c241e5bb 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c @@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu) return cpu; } -int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) +int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, + const struct cpumask *online_mask) { unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; cpumask_var_t cpus; @@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) cpumask_clear(cpus); nr_cpus = nr_uniq_cpus = 0; - for_each_online_cpu(i) { + for_each_cpu(i, online_mask) { nr_cpus++; first_sibling = get_first_sibling(i); if (!cpumask_test_cpu(first_sibling, cpus)) @@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues) queue = 0; for_each_possible_cpu(i) { - if (!cpu_online(i)) { + if (!cpumask_test_cpu(i, online_mask)) { map[i] = 0; continue; } @@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) if (!map) return NULL; - if (!blk_mq_update_queue_map(map, set->nr_hw_queues)) + if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask)) return map; kfree(map); diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c index 279c5d674edf..788fffd9b409 100644 --- a/block/blk-mq-sysfs.c +++ b/block/blk-mq-sysfs.c @@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) unsigned int i, first = 1; ssize_t ret = 0; - blk_mq_disable_hotplug(); - for_each_cpu(i, hctx->cpumask) { if (first) ret += sprintf(ret + page, "%u", i); @@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) first = 0; } - blk_mq_enable_hotplug(); - ret += sprintf(ret + page, "\n"); return ret; } @@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) struct blk_mq_ctx *ctx; int i; - if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + if (!hctx->nr_ctx) return; hctx_for_each_ctx(hctx, ctx, i) @@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) struct blk_mq_ctx *ctx; int i, ret; - if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP)) + if (!hctx->nr_ctx) return 0; ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num); @@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk) struct blk_mq_ctx *ctx; int i, j; + blk_mq_disable_hotplug(); + queue_for_each_hw_ctx(q, hctx, i) { blk_mq_unregister_hctx(hctx); @@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk) kobject_put(&q->mq_kobj); kobject_put(&disk_to_dev(disk)->kobj); + + q->mq_sysfs_init_done = false; + blk_mq_enable_hotplug(); } static void blk_mq_sysfs_init(struct request_queue *q) @@ -425,27 +426,30 @@ int blk_mq_register_disk(struct gendisk *disk) struct blk_mq_hw_ctx *hctx; int ret, i; + blk_mq_disable_hotplug(); + blk_mq_sysfs_init(q); ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); if (ret < 0) - return ret; + goto out; kobject_uevent(&q->mq_kobj, KOBJ_ADD); queue_for_each_hw_ctx(q, hctx, i) { - hctx->flags |= 
BLK_MQ_F_SYSFS_UP; ret = blk_mq_register_hctx(hctx); if (ret) break; } - if (ret) { + if (ret) blk_mq_unregister_disk(disk); - return ret; - } + else + q->mq_sysfs_init_done = true; +out: + blk_mq_enable_hotplug(); - return 0; + return ret; } EXPORT_SYMBOL_GPL(blk_mq_register_disk); @@ -454,6 +458,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i; + if (!q->mq_sysfs_init_done) + return; + queue_for_each_hw_ctx(q, hctx, i) blk_mq_unregister_hctx(hctx); } @@ -463,6 +470,9 @@ int blk_mq_sysfs_register(struct request_queue *q) struct blk_mq_hw_ctx *hctx; int i, ret = 0; + if (!q->mq_sysfs_init_done) + return ret; + queue_for_each_hw_ctx(q, hctx, i) { ret = blk_mq_register_hctx(hctx); if (ret) diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 9115c6d59948..ec2d11915142 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, } EXPORT_SYMBOL(blk_mq_all_tag_busy_iter); -void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, +void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, void *priv) { - struct blk_mq_tags *tags = hctx->tags; + struct blk_mq_hw_ctx *hctx; + int i; + + + queue_for_each_hw_ctx(q, hctx, i) { + struct blk_mq_tags *tags = hctx->tags; + + /* + * If not software queues are currently mapped to this + * hardware queue, there's nothing to check + */ + if (!blk_mq_hw_queue_mapped(hctx)) + continue; + + if (tags->nr_reserved_tags) + bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); + bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, + false); + } - if (tags->nr_reserved_tags) - bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true); - bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv, - false); } -EXPORT_SYMBOL(blk_mq_tag_busy_iter); static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt) { @@ -628,6 +641,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags) { bt_free(&tags->bitmap_tags); bt_free(&tags->breserved_tags); + free_cpumask_var(tags->cpumask); kfree(tags); } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 9eb2cf4f01cb..d468a79f2c4a 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); +void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, + void *priv); enum { BLK_MQ_TAG_CACHE_MIN = 1, diff --git a/block/blk-mq.c b/block/blk-mq.c index f2d67b4047a0..85f014327342 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -393,14 +393,16 @@ void __blk_mq_complete_request(struct request *rq) * Ends all I/O on a request. It does not handle partial completions. * The actual completion happens out-of-order, through a IPI handler. 
**/ -void blk_mq_complete_request(struct request *rq) +void blk_mq_complete_request(struct request *rq, int error) { struct request_queue *q = rq->q; if (unlikely(blk_should_fake_timeout(q))) return; - if (!blk_mark_rq_complete(rq)) + if (!blk_mark_rq_complete(rq)) { + rq->errors = error; __blk_mq_complete_request(rq); + } } EXPORT_SYMBOL(blk_mq_complete_request); @@ -616,10 +618,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, * If a request wasn't started before the queue was * marked dying, kill it here or it'll go unnoticed. */ - if (unlikely(blk_queue_dying(rq->q))) { - rq->errors = -EIO; - blk_mq_complete_request(rq); - } + if (unlikely(blk_queue_dying(rq->q))) + blk_mq_complete_request(rq, -EIO); return; } if (rq->cmd_flags & REQ_NO_TIMEOUT) @@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv) .next = 0, .next_set = 0, }; - struct blk_mq_hw_ctx *hctx; int i; - queue_for_each_hw_ctx(q, hctx, i) { - /* - * If not software queues are currently mapped to this - * hardware queue, there's nothing to check - */ - if (!blk_mq_hw_queue_mapped(hctx)) - continue; - - blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data); - } + blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); if (data.next_set) { data.next = blk_rq_timeout(round_jiffies_up(data.next)); mod_timer(&q->timeout, data.next); } else { + struct blk_mq_hw_ctx *hctx; + queue_for_each_hw_ctx(q, hctx, i) { /* the hctx may be unmapped, so check it here */ if (blk_mq_hw_queue_mapped(hctx)) @@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q, } } -static void blk_mq_map_swqueue(struct request_queue *q) +static void blk_mq_map_swqueue(struct request_queue *q, + const struct cpumask *online_mask) { unsigned int i; struct blk_mq_hw_ctx *hctx; struct blk_mq_ctx *ctx; struct blk_mq_tag_set *set = q->tag_set; + /* + * Avoid others reading imcomplete hctx->cpumask through sysfs + */ + mutex_lock(&q->sysfs_lock); + queue_for_each_hw_ctx(q, hctx, i) { cpumask_clear(hctx->cpumask); hctx->nr_ctx = 0; @@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q) */ queue_for_each_ctx(q, ctx, i) { /* If the cpu isn't online, the cpu is mapped to first hctx */ - if (!cpu_online(i)) + if (!cpumask_test_cpu(i, online_mask)) continue; hctx = q->mq_ops->map_queue(q, i); cpumask_set_cpu(i, hctx->cpumask); - cpumask_set_cpu(i, hctx->tags->cpumask); ctx->index_hw = hctx->nr_ctx; hctx->ctxs[hctx->nr_ctx++] = ctx; } + mutex_unlock(&q->sysfs_lock); + queue_for_each_hw_ctx(q, hctx, i) { struct blk_mq_ctxmap *map = &hctx->ctx_map; @@ -1851,6 +1850,14 @@ static void blk_mq_map_swqueue(struct request_queue *q) hctx->next_cpu = cpumask_first(hctx->cpumask); hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; } + + queue_for_each_ctx(q, ctx, i) { + if (!cpumask_test_cpu(i, online_mask)) + continue; + + hctx = q->mq_ops->map_queue(q, i); + cpumask_set_cpu(i, hctx->tags->cpumask); + } } static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set) @@ -1918,6 +1925,9 @@ void blk_mq_release(struct request_queue *q) kfree(hctx); } + kfree(q->mq_map); + q->mq_map = NULL; + kfree(q->queue_hw_ctx); /* ctx kobj stays in queue_ctx */ @@ -2027,13 +2037,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, if (blk_mq_init_hw_queues(q, set)) goto err_hctxs; + get_online_cpus(); mutex_lock(&all_q_mutex); - list_add_tail(&q->all_q_node, &all_q_list); - mutex_unlock(&all_q_mutex); + list_add_tail(&q->all_q_node, &all_q_list); blk_mq_add_queue_tag_set(set, q); + 
blk_mq_map_swqueue(q, cpu_online_mask); - blk_mq_map_swqueue(q); + mutex_unlock(&all_q_mutex); + put_online_cpus(); return q; @@ -2057,30 +2069,27 @@ void blk_mq_free_queue(struct request_queue *q) { struct blk_mq_tag_set *set = q->tag_set; + mutex_lock(&all_q_mutex); + list_del_init(&q->all_q_node); + mutex_unlock(&all_q_mutex); + blk_mq_del_queue_tag_set(q); blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); blk_mq_free_hw_queues(q, set); percpu_ref_exit(&q->mq_usage_counter); - - kfree(q->mq_map); - - q->mq_map = NULL; - - mutex_lock(&all_q_mutex); - list_del_init(&q->all_q_node); - mutex_unlock(&all_q_mutex); } /* Basically redo blk_mq_init_queue with queue frozen */ -static void blk_mq_queue_reinit(struct request_queue *q) +static void blk_mq_queue_reinit(struct request_queue *q, + const struct cpumask *online_mask) { WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); blk_mq_sysfs_unregister(q); - blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues); + blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask); /* * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe @@ -2088,7 +2097,7 @@ static void blk_mq_queue_reinit(struct request_queue *q) * involves free and re-allocate memory, worthy doing?) */ - blk_mq_map_swqueue(q); + blk_mq_map_swqueue(q, online_mask); blk_mq_sysfs_register(q); } @@ -2097,16 +2106,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, unsigned long action, void *hcpu) { struct request_queue *q; + int cpu = (unsigned long)hcpu; + /* + * New online cpumask which is going to be set in this hotplug event. + * Declare this cpumasks as global as cpu-hotplug operation is invoked + * one-by-one and dynamically allocating this could result in a failure. + */ + static struct cpumask online_new; /* - * Before new mappings are established, hotadded cpu might already - * start handling requests. This doesn't break anything as we map - * offline CPUs to first hardware queue. We will re-init the queue - * below to get optimal settings. + * Before hotadded cpu starts handling requests, new mappings must + * be established. Otherwise, these requests in hw queue might + * never be dispatched. + * + * For example, there is a single hw queue (hctx) and two CPU queues + * (ctx0 for CPU0, and ctx1 for CPU1). + * + * Now CPU1 is just onlined and a request is inserted into + * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is + * still zero. + * + * And then while running hw queue, flush_busy_ctxs() finds bit0 is + * set in pending bitmap and tries to retrieve requests in + * hctx->ctxs[0]->rq_list. But htx->ctxs[0] is a pointer to ctx0, + * so the request in ctx1->rq_list is ignored. 
*/ - if (action != CPU_DEAD && action != CPU_DEAD_FROZEN && - action != CPU_ONLINE && action != CPU_ONLINE_FROZEN) + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_DEAD: + case CPU_UP_CANCELED: + cpumask_copy(&online_new, cpu_online_mask); + break; + case CPU_UP_PREPARE: + cpumask_copy(&online_new, cpu_online_mask); + cpumask_set_cpu(cpu, &online_new); + break; + default: return NOTIFY_OK; + } mutex_lock(&all_q_mutex); @@ -2130,7 +2166,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, } list_for_each_entry(q, &all_q_list, all_q_node) - blk_mq_queue_reinit(q); + blk_mq_queue_reinit(q, &online_new); list_for_each_entry(q, &all_q_list, all_q_node) blk_mq_unfreeze_queue(q); @@ -2260,10 +2296,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set) int i; for (i = 0; i < set->nr_hw_queues; i++) { - if (set->tags[i]) { + if (set->tags[i]) blk_mq_free_rq_map(set, set->tags[i], i); - free_cpumask_var(set->tags[i]->cpumask); - } } kfree(set->tags); diff --git a/block/blk-mq.h b/block/blk-mq.h index 6a48c4c0d8a2..f4fea7964910 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -51,7 +51,8 @@ void blk_mq_disable_hotplug(void); * CPU -> queue mappings */ extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); -extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues); +extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, + const struct cpumask *online_mask); extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); /* diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 3e44a9da2a13..07b42f5ad797 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -540,6 +540,7 @@ static void blk_release_queue(struct kobject *kobj) struct request_queue *q = container_of(kobj, struct request_queue, kobj); + bdi_exit(&q->backing_dev_info); blkcg_exit_queue(q); if (q->elevator) { diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index b788f169cc98..b4ffc5be1a93 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c @@ -706,7 +706,7 @@ struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, err: if (err != -EAGAIN) break; - if (signal_pending(current)) { + if (fatal_signal_pending(current)) { err = -EINTR; break; } diff --git a/crypto/ahash.c b/crypto/ahash.c index 8acb886032ae..9c1dc8d6106a 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -544,7 +544,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg) struct crypto_alg *base = &alg->halg.base; if (alg->halg.digestsize > PAGE_SIZE / 8 || - alg->halg.statesize > PAGE_SIZE / 8) + alg->halg.statesize > PAGE_SIZE / 8 || + alg->halg.statesize == 0) return -EINVAL; base->cra_type = &crypto_ahash_type; diff --git a/crypto/algapi.c b/crypto/algapi.c index d130b41dbaea..59bf491fe3d8 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -345,7 +345,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval) crypto_alg_tested(larval->alg.cra_driver_name, 0); } - err = wait_for_completion_interruptible(&larval->completion); + err = wait_for_completion_killable(&larval->completion); WARN_ON(err); out: diff --git a/crypto/api.c b/crypto/api.c index afe4610afc4b..bbc147cb5dec 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -172,7 +172,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) struct crypto_larval *larval = (void *)alg; long timeout; - timeout = wait_for_completion_interruptible_timeout( + timeout = wait_for_completion_killable_timeout( &larval->completion, 60 * HZ); alg = larval->adult; @@ -445,7 +445,7 @@ 
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask) err: if (err != -EAGAIN) break; - if (signal_pending(current)) { + if (fatal_signal_pending(current)) { err = -EINTR; break; } @@ -562,7 +562,7 @@ void *crypto_alloc_tfm(const char *alg_name, err: if (err != -EAGAIN) break; - if (signal_pending(current)) { + if (fatal_signal_pending(current)) { err = -EINTR; break; } diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 6d88dd15c98d..197096632412 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -332,10 +332,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) srlen = cert->raw_serial_size; q = cert->raw_serial; } - if (srlen > 1 && *q == 0) { - srlen--; - q++; - } ret = -ENOMEM; desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index d94d99ffe8b9..237f3795cfaa 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -375,7 +375,7 @@ static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type, err = PTR_ERR(alg); if (err != -EAGAIN) break; - if (signal_pending(current)) { + if (fatal_signal_pending(current)) { err = -EINTR; break; } diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 09f37b516808..4dde37c3d8fc 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -61,6 +61,7 @@ ACPI_GLOBAL(struct acpi_table_header, acpi_gbl_original_dsdt_header); ACPI_INIT_GLOBAL(u32, acpi_gbl_dsdt_index, ACPI_INVALID_TABLE_INDEX); ACPI_INIT_GLOBAL(u32, acpi_gbl_facs_index, ACPI_INVALID_TABLE_INDEX); ACPI_INIT_GLOBAL(u32, acpi_gbl_xfacs_index, ACPI_INVALID_TABLE_INDEX); +ACPI_INIT_GLOBAL(u32, acpi_gbl_fadt_index, ACPI_INVALID_TABLE_INDEX); #if (!ACPI_REDUCED_HARDWARE) ACPI_GLOBAL(struct acpi_table_facs *, acpi_gbl_FACS); diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h index f7731f260c31..591ea95319e2 100644 --- a/drivers/acpi/acpica/actables.h +++ b/drivers/acpi/acpica/actables.h @@ -85,7 +85,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded); /* * tbfadt - FADT parse/convert/validate */ -void acpi_tb_parse_fadt(u32 table_index); +void acpi_tb_parse_fadt(void); void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length); @@ -138,8 +138,6 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id); */ acpi_status acpi_tb_initialize_facs(void); -u8 acpi_tb_tables_loaded(void); - void acpi_tb_print_table_header(acpi_physical_address address, struct acpi_table_header *header); diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index faad911d46b5..10ce48e16ebf 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -71,7 +71,7 @@ acpi_status acpi_enable(void) /* ACPI tables must be present */ - if (!acpi_tb_tables_loaded()) { + if (acpi_gbl_fadt_index == ACPI_INVALID_TABLE_INDEX) { return_ACPI_STATUS(AE_NO_ACPI_TABLES); } diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 455a0700db39..a6454f4a6fb3 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -298,7 +298,7 @@ acpi_tb_select_address(char *register_name, u32 address32, u64 address64) * * FUNCTION: acpi_tb_parse_fadt * - * PARAMETERS: table_index - Index for the FADT + * PARAMETERS: None * * RETURN: None * @@ -307,7 +307,7 @@ acpi_tb_select_address(char *register_name, u32 
address32, u64 address64) * ******************************************************************************/ -void acpi_tb_parse_fadt(u32 table_index) +void acpi_tb_parse_fadt(void) { u32 length; struct acpi_table_header *table; @@ -319,11 +319,11 @@ void acpi_tb_parse_fadt(u32 table_index) * Get a local copy of the FADT and convert it to a common format * Map entire FADT, assumed to be smaller than one page. */ - length = acpi_gbl_root_table_list.tables[table_index].length; + length = acpi_gbl_root_table_list.tables[acpi_gbl_fadt_index].length; table = - acpi_os_map_memory(acpi_gbl_root_table_list.tables[table_index]. - address, length); + acpi_os_map_memory(acpi_gbl_root_table_list. + tables[acpi_gbl_fadt_index].address, length); if (!table) { return; } diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 4337990127cc..d8ddef38c947 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -97,29 +97,6 @@ acpi_status acpi_tb_initialize_facs(void) } #endif /* !ACPI_REDUCED_HARDWARE */ -/******************************************************************************* - * - * FUNCTION: acpi_tb_tables_loaded - * - * PARAMETERS: None - * - * RETURN: TRUE if required ACPI tables are loaded - * - * DESCRIPTION: Determine if the minimum required ACPI tables are present - * (FADT, FACS, DSDT) - * - ******************************************************************************/ - -u8 acpi_tb_tables_loaded(void) -{ - - if (acpi_gbl_root_table_list.current_table_count >= 4) { - return (TRUE); - } - - return (FALSE); -} - /******************************************************************************* * * FUNCTION: acpi_tb_check_dsdt_header @@ -392,7 +369,8 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) ACPI_COMPARE_NAME(&acpi_gbl_root_table_list. 
tables[table_index].signature, ACPI_SIG_FADT)) { - acpi_tb_parse_fadt(table_index); + acpi_gbl_fadt_index = table_index; + acpi_tb_parse_fadt(); } next_table: diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 2614a839c60d..42c66b64c12c 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) goto err_exit; mutex_lock(&ec->mutex); + result = -ENODATA; list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { + result = 0; q->handler = acpi_ec_get_query_handler(handler); ec_dbg_evt("Query(0x%02x) scheduled", q->handler->query_bit); diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 6da0f9beab19..c9336751e5e3 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -372,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev) /* Interrupt Line values above 0xF are forbidden */ if (dev->irq > 0 && (dev->irq <= 0xF) && + acpi_isa_irq_available(dev->irq) && (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) { dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n", pin_name(dev->pin), dev->irq); diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index 3b4ea98e3ea0..7c8408b946ca 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -498,8 +498,7 @@ int __init acpi_irq_penalty_init(void) PIRQ_PENALTY_PCI_POSSIBLE; } } - /* Add a penalty for the SCI */ - acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING; + return 0; } @@ -553,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link) irq = link->irq.possible[i]; } } + if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) { + printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. " + "Try pci=noacpi or acpi=off\n", + acpi_device_name(link->device), + acpi_device_bid(link->device)); + return -ENODEV; + } /* Attempt to enable the link device at this IRQ. */ if (acpi_pci_link_set(link, irq)) { @@ -821,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active) } } +bool acpi_isa_irq_available(int irq) +{ + return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) || + acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS); +} + /* * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 950fff9ce453..a12ff9863d7e 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c @@ -187,7 +187,7 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, * global one. Requires architecture specific dev_get_cma_area() helper * function. 
*/ -struct page *dma_alloc_from_contiguous(struct device *dev, int count, +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int align) { if (align > CONFIG_CMA_ALIGNMENT) diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 2a4154a09e4d..85e17bacc834 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c @@ -77,13 +77,16 @@ static bool default_stop_ok(struct device *dev) dev_update_qos_constraint); if (constraint_ns > 0) { - constraint_ns -= td->start_latency_ns; + constraint_ns -= td->save_state_latency_ns + + td->stop_latency_ns + + td->start_latency_ns + + td->restore_state_latency_ns; if (constraint_ns == 0) return false; } td->effective_constraint_ns = constraint_ns; - td->cached_stop_ok = constraint_ns > td->stop_latency_ns || - constraint_ns == 0; + td->cached_stop_ok = constraint_ns >= 0; + /* * The children have been suspended already, so we don't need to take * their stop latencies into account here. @@ -126,18 +129,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) off_on_time_ns = genpd->power_off_latency_ns + genpd->power_on_latency_ns; - /* - * It doesn't make sense to remove power from the domain if saving - * the state of all devices in it and the power off/power on operations - * take too much time. - * - * All devices in this domain have been stopped already at this point. - */ - list_for_each_entry(pdd, &genpd->dev_list, list_node) { - if (pdd->dev->driver) - off_on_time_ns += - to_gpd_data(pdd)->td.save_state_latency_ns; - } min_off_time_ns = -1; /* @@ -193,7 +184,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) * constraint_ns cannot be negative here, because the device has * been suspended. */ - constraint_ns -= td->restore_state_latency_ns; if (constraint_ns <= off_on_time_ns) return false; diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 28cd75c535b0..7ae7cd990fbf 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -892,10 +892,17 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev) u32 microvolt[3] = {0}; int count, ret; - count = of_property_count_u32_elems(opp->np, "opp-microvolt"); - if (!count) + /* Missing property isn't a problem, but an invalid entry is */ + if (!of_find_property(opp->np, "opp-microvolt", NULL)) return 0; + count = of_property_count_u32_elems(opp->np, "opp-microvolt"); + if (count < 0) { + dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n", + __func__, count); + return count; + } + /* There can be one or three elements here */ if (count != 1 && count != 3) { dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n", @@ -1063,7 +1070,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add); * share a common logic which is isolated here. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. * * Locking: The internal device_opp and opp structures are RCU protected. @@ -1151,7 +1158,7 @@ unlock: * mutex locking or synchronize_rcu() blocking calls cannot be used. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. 
*/ int dev_pm_opp_enable(struct device *dev, unsigned long freq) @@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable); * mutex locking or synchronize_rcu() blocking calls cannot be used. * * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the - * copy operation, returns 0 if no modifcation was done OR modification was + * copy operation, returns 0 if no modification was done OR modification was * successful. */ int dev_pm_opp_disable(struct device *dev, unsigned long freq) diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index f42f2bac6466..4c55cfbad19e 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock); /* Calculate the length of a fixed format */ static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) { - snprintf(buf, buf_size, "%x", max_val); - return strlen(buf); + return snprintf(NULL, 0, "%x", max_val); } static ssize_t regmap_name_read_file(struct file *file, @@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file, /* If we're in the region the user is trying to read */ if (p >= *ppos) { /* ...but not beyond it */ - if (buf_pos >= count - 1 - tot_len) + if (buf_pos + tot_len + 1 >= count) break; /* Format the register */ diff --git a/drivers/block/loop.c b/drivers/block/loop.c index f9889b6bc02c..674f800a3b57 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd) { const bool write = cmd->rq->cmd_flags & REQ_WRITE; struct loop_device *lo = cmd->rq->q->queuedata; - int ret = -EIO; + int ret = 0; - if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) + if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { + ret = -EIO; goto failed; + } ret = do_req_filebacked(lo, cmd->rq); - failed: - if (ret) - cmd->rq->errors = -EIO; - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq, ret ? 
-EIO : 0); } static void loop_queue_write_work(struct work_struct *work) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 293495a75d3d..1b87623381e2 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -60,6 +60,7 @@ struct nbd_device { bool disconnect; /* a disconnect has been requested by user */ struct timer_list timeout_timer; + spinlock_t tasks_lock; struct task_struct *task_recv; struct task_struct *task_send; @@ -140,21 +141,23 @@ static void sock_shutdown(struct nbd_device *nbd) static void nbd_xmit_timeout(unsigned long arg) { struct nbd_device *nbd = (struct nbd_device *)arg; - struct task_struct *task; + unsigned long flags; if (list_empty(&nbd->queue_head)) return; nbd->disconnect = true; - task = READ_ONCE(nbd->task_recv); - if (task) - force_sig(SIGKILL, task); + spin_lock_irqsave(&nbd->tasks_lock, flags); + + if (nbd->task_recv) + force_sig(SIGKILL, nbd->task_recv); - task = READ_ONCE(nbd->task_send); - if (task) + if (nbd->task_send) force_sig(SIGKILL, nbd->task_send); + spin_unlock_irqrestore(&nbd->tasks_lock, flags); + dev_err(nbd_to_dev(nbd), "Connection timed out, killed receiver and sender, shutting down connection\n"); } @@ -403,17 +406,24 @@ static int nbd_thread_recv(struct nbd_device *nbd) { struct request *req; int ret; + unsigned long flags; BUG_ON(nbd->magic != NBD_MAGIC); sk_set_memalloc(nbd->sock->sk); + spin_lock_irqsave(&nbd->tasks_lock, flags); nbd->task_recv = current; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr); if (ret) { dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n"); + + spin_lock_irqsave(&nbd->tasks_lock, flags); nbd->task_recv = NULL; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); + return ret; } @@ -429,7 +439,9 @@ static int nbd_thread_recv(struct nbd_device *nbd) device_remove_file(disk_to_dev(nbd->disk), &pid_attr); + spin_lock_irqsave(&nbd->tasks_lock, flags); nbd->task_recv = NULL; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); if (signal_pending(current)) { siginfo_t info; @@ -534,8 +546,11 @@ static int nbd_thread_send(void *data) { struct nbd_device *nbd = data; struct request *req; + unsigned long flags; + spin_lock_irqsave(&nbd->tasks_lock, flags); nbd->task_send = current; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); set_user_nice(current, MIN_NICE); while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) { @@ -572,7 +587,15 @@ static int nbd_thread_send(void *data) nbd_handle_req(nbd, req); } + spin_lock_irqsave(&nbd->tasks_lock, flags); nbd->task_send = NULL; + spin_unlock_irqrestore(&nbd->tasks_lock, flags); + + /* Clear maybe pending signals */ + if (signal_pending(current)) { + siginfo_t info; + dequeue_signal_lock(current, ¤t->blocked, &info); + } return 0; } @@ -1052,6 +1075,7 @@ static int __init nbd_init(void) nbd_dev[i].magic = NBD_MAGIC; INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); spin_lock_init(&nbd_dev[i].queue_lock); + spin_lock_init(&nbd_dev[i].tasks_lock); INIT_LIST_HEAD(&nbd_dev[i].queue_head); mutex_init(&nbd_dev[i].tx_lock); init_timer(&nbd_dev[i].timeout_timer); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index a295b98c6bae..1c9e4fe5aa44 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd) case NULL_IRQ_SOFTIRQ: switch (queue_mode) { case NULL_Q_MQ: - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq, cmd->rq->errors); break; case NULL_Q_RQ: 
blk_complete_request(cmd->rq); diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index b97fc3fe0916..ccc0c1f93daa 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -603,31 +603,34 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, struct nvme_iod *iod = ctx; struct request *req = iod_get_private(iod); struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req); - u16 status = le16_to_cpup(&cqe->status) >> 1; + bool requeue = false; + int error = 0; if (unlikely(status)) { if (!(status & NVME_SC_DNR || blk_noretry_request(req)) && (jiffies - req->start_time) < req->timeout) { unsigned long flags; + requeue = true; blk_mq_requeue_request(req); spin_lock_irqsave(req->q->queue_lock, flags); if (!blk_queue_stopped(req->q)) blk_mq_kick_requeue_list(req->q); spin_unlock_irqrestore(req->q->queue_lock, flags); - return; + goto release_iod; } + if (req->cmd_type == REQ_TYPE_DRV_PRIV) { if (cmd_rq->ctx == CMD_CTX_CANCELLED) - req->errors = -EINTR; + error = -EINTR; else - req->errors = status; + error = status; } else { - req->errors = nvme_error_status(status); + error = nvme_error_status(status); } - } else - req->errors = 0; + } + if (req->cmd_type == REQ_TYPE_DRV_PRIV) { u32 result = le32_to_cpup(&cqe->result); req->special = (void *)(uintptr_t)result; @@ -636,8 +639,9 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, if (cmd_rq->aborted) dev_warn(nvmeq->dev->dev, "completing aborted command with status:%04x\n", - status); + error); +release_iod: if (iod->nents) { dma_unmap_sg(nvmeq->dev->dev, iod->sg, iod->nents, rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); @@ -650,7 +654,8 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, } nvme_free_iod(nvmeq->dev, iod); - blk_mq_complete_request(req); + if (likely(!requeue)) + blk_mq_complete_request(req, error); } /* length is in bytes. gfp flags indicates whether we may sleep. */ @@ -863,8 +868,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, if (ns && ns->ms && !blk_integrity_rq(req)) { if (!(ns->pi_type && ns->ms == 8) && req->cmd_type != REQ_TYPE_DRV_PRIV) { - req->errors = -EFAULT; - blk_mq_complete_request(req); + blk_mq_complete_request(req, -EFAULT); return BLK_MQ_RQ_QUEUE_OK; } } @@ -1806,7 +1810,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) length = (io.nblocks + 1) << ns->lba_shift; meta_len = (io.nblocks + 1) * ns->ms; - metadata = (void __user *)(unsigned long)io.metadata; + metadata = (void __user *)(uintptr_t)io.metadata; write = io.opcode & 1; if (ns->ext) { @@ -1846,7 +1850,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) c.rw.metadata = cpu_to_le64(meta_dma); status = __nvme_submit_sync_cmd(ns->queue, &c, NULL, - (void __user *)io.addr, length, NULL, 0); + (void __user *)(uintptr_t)io.addr, length, NULL, 0); unmap: if (meta) { if (status == NVME_SC_SUCCESS && !write) { @@ -1888,7 +1892,7 @@ static int nvme_user_cmd(struct nvme_dev *dev, struct nvme_ns *ns, timeout = msecs_to_jiffies(cmd.timeout_ms); status = __nvme_submit_sync_cmd(ns ? 
ns->queue : dev->admin_q, &c, - NULL, (void __user *)cmd.addr, cmd.data_len, + NULL, (void __user *)(uintptr_t)cmd.addr, cmd.data_len, &cmd.result, timeout); if (status >= 0) { if (put_user(cmd.result, &ucmd->result)) @@ -2439,6 +2443,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn) list_sort(NULL, &dev->namespaces, ns_cmp); } +static void nvme_set_irq_hints(struct nvme_dev *dev) +{ + struct nvme_queue *nvmeq; + int i; + + for (i = 0; i < dev->online_queues; i++) { + nvmeq = dev->queues[i]; + + if (!nvmeq->tags || !(*nvmeq->tags)) + continue; + + irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, + blk_mq_tags_cpumask(*nvmeq->tags)); + } +} + static void nvme_dev_scan(struct work_struct *work) { struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work); @@ -2450,6 +2470,7 @@ static void nvme_dev_scan(struct work_struct *work) return; nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn)); kfree(ctrl); + nvme_set_irq_hints(dev); } /* @@ -2953,22 +2974,6 @@ static const struct file_operations nvme_dev_fops = { .compat_ioctl = nvme_dev_ioctl, }; -static void nvme_set_irq_hints(struct nvme_dev *dev) -{ - struct nvme_queue *nvmeq; - int i; - - for (i = 0; i < dev->online_queues; i++) { - nvmeq = dev->queues[i]; - - if (!nvmeq->tags || !(*nvmeq->tags)) - continue; - - irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector, - blk_mq_tags_cpumask(*nvmeq->tags)); - } -} - static int nvme_dev_start(struct nvme_dev *dev) { int result; @@ -3010,8 +3015,6 @@ static int nvme_dev_start(struct nvme_dev *dev) if (result) goto free_tags; - nvme_set_irq_hints(dev); - dev->event_limit = 1; return result; @@ -3062,7 +3065,6 @@ static int nvme_dev_resume(struct nvme_dev *dev) } else { nvme_unfreeze_queues(dev); nvme_dev_add(dev); - nvme_set_irq_hints(dev); } return 0; } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index d93a0372b37b..6f26cf38c6f9 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -96,6 +96,8 @@ static int atomic_dec_return_safe(atomic_t *v) #define RBD_MINORS_PER_MAJOR 256 #define RBD_SINGLE_MAJOR_PART_SHIFT 4 +#define RBD_MAX_PARENT_CHAIN_LEN 16 + #define RBD_SNAP_DEV_NAME_PREFIX "snap_" #define RBD_MAX_SNAP_NAME_LEN \ (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1)) @@ -426,7 +428,7 @@ static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf, size_t count); static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf, size_t count); -static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping); +static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth); static void rbd_spec_put(struct rbd_spec *spec); static int rbd_dev_id_to_minor(int dev_id) @@ -1863,9 +1865,11 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, rbd_osd_read_callback(obj_request); break; case CEPH_OSD_OP_SETALLOCHINT: - rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE); + rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE || + osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL); /* fall through */ case CEPH_OSD_OP_WRITE: + case CEPH_OSD_OP_WRITEFULL: rbd_osd_write_callback(obj_request); break; case CEPH_OSD_OP_STAT: @@ -2401,7 +2405,10 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request, opcode = CEPH_OSD_OP_ZERO; } } else if (op_type == OBJ_OP_WRITE) { - opcode = CEPH_OSD_OP_WRITE; + if (!offset && length == object_size) + opcode = CEPH_OSD_OP_WRITEFULL; + else + opcode = CEPH_OSD_OP_WRITE; osd_req_op_alloc_hint_init(osd_request, num_ops, object_size, 
object_size); num_ops++; @@ -3760,6 +3767,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) /* set io sizes to object size */ segment_size = rbd_obj_bytes(&rbd_dev->header); blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); + q->limits.max_sectors = queue_max_hw_sectors(q); blk_queue_max_segments(q, segment_size / SECTOR_SIZE); blk_queue_max_segment_size(q, segment_size); blk_queue_io_min(q, segment_size); @@ -5125,44 +5133,51 @@ out_err: return ret; } -static int rbd_dev_probe_parent(struct rbd_device *rbd_dev) +/* + * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() -> + * rbd_dev_image_probe() recursion depth, which means it's also the + * length of the already discovered part of the parent chain. + */ +static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth) { struct rbd_device *parent = NULL; - struct rbd_spec *parent_spec; - struct rbd_client *rbdc; int ret; if (!rbd_dev->parent_spec) return 0; - /* - * We need to pass a reference to the client and the parent - * spec when creating the parent rbd_dev. Images related by - * parent/child relationships always share both. - */ - parent_spec = rbd_spec_get(rbd_dev->parent_spec); - rbdc = __rbd_get_client(rbd_dev->rbd_client); - ret = -ENOMEM; - parent = rbd_dev_create(rbdc, parent_spec, NULL); - if (!parent) + if (++depth > RBD_MAX_PARENT_CHAIN_LEN) { + pr_info("parent chain is too long (%d)\n", depth); + ret = -EINVAL; goto out_err; + } - ret = rbd_dev_image_probe(parent, false); + parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec, + NULL); + if (!parent) { + ret = -ENOMEM; + goto out_err; + } + + /* + * Images related by parent/child relationships always share + * rbd_client and spec/parent_spec, so bump their refcounts. + */ + __rbd_get_client(rbd_dev->rbd_client); + rbd_spec_get(rbd_dev->parent_spec); + + ret = rbd_dev_image_probe(parent, depth); if (ret < 0) goto out_err; + rbd_dev->parent = parent; atomic_set(&rbd_dev->parent_ref, 1); - return 0; + out_err: - if (parent) { - rbd_dev_unparent(rbd_dev); + rbd_dev_unparent(rbd_dev); + if (parent) rbd_dev_destroy(parent); - } else { - rbd_put_client(rbdc); - rbd_spec_put(parent_spec); - } - return ret; } @@ -5280,7 +5295,7 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev) * parent), initiate a watch on its header object before using that * object to get detailed information about the rbd image. */ -static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) +static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth) { int ret; @@ -5298,7 +5313,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) if (ret) goto err_out_format; - if (mapping) { + if (!depth) { ret = rbd_dev_header_watch_sync(rbd_dev); if (ret) { if (ret == -ENOENT) @@ -5319,7 +5334,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) * Otherwise this is a parent image, identified by pool, image * and snap ids - need to fill in names for those ids. */ - if (mapping) + if (!depth) ret = rbd_spec_fill_snap_id(rbd_dev); else ret = rbd_spec_fill_names(rbd_dev); @@ -5341,12 +5356,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) * Need to warn users if this image is the one being * mapped and has a parent. 
*/ - if (mapping && rbd_dev->parent_spec) + if (!depth && rbd_dev->parent_spec) rbd_warn(rbd_dev, "WARNING: kernel layering is EXPERIMENTAL!"); } - ret = rbd_dev_probe_parent(rbd_dev); + ret = rbd_dev_probe_parent(rbd_dev, depth); if (ret) goto err_out_probe; @@ -5357,7 +5372,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) err_out_probe: rbd_dev_unprobe(rbd_dev); err_out_watch: - if (mapping) + if (!depth) rbd_dev_header_unwatch_sync(rbd_dev); out_header_name: kfree(rbd_dev->header_name); @@ -5420,7 +5435,7 @@ static ssize_t do_rbd_add(struct bus_type *bus, spec = NULL; /* rbd_dev now owns this */ rbd_opts = NULL; /* rbd_dev now owns this */ - rc = rbd_dev_image_probe(rbd_dev, true); + rc = rbd_dev_image_probe(rbd_dev, 0); if (rc < 0) goto err_out_rbd_dev; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index e93899cc6f60..6ca35495a5be 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq) do { virtqueue_disable_cb(vq); while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { - blk_mq_complete_request(vbr->req); + blk_mq_complete_request(vbr->req, vbr->req->errors); req_done = true; } if (unlikely(virtqueue_is_broken(vq))) diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index deb3f001791f..767657565de6 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref, static int xen_blkif_disconnect(struct xen_blkif *blkif) { + struct pending_req *req, *n; + int i = 0, j; + if (blkif->xenblkd) { kthread_stop(blkif->xenblkd); wake_up(&blkif->shutdown_wq); @@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) /* Remove all persistent grants and the cache of ballooned pages. 
*/ xen_blkbk_free_caches(blkif); + /* Check that there is no request in use */ + list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { + list_del(&req->free_list); + + for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) + kfree(req->segments[j]); + + for (j = 0; j < MAX_INDIRECT_PAGES; j++) + kfree(req->indirect_pages[j]); + + kfree(req); + i++; + } + + WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); + blkif->nr_ring_pages = 0; + return 0; } static void xen_blkif_free(struct xen_blkif *blkif) { - struct pending_req *req, *n; - int i = 0, j; xen_blkif_disconnect(blkif); xen_vbd_free(&blkif->vbd); @@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif) BUG_ON(!list_empty(&blkif->free_pages)); BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts)); - /* Check that there is no request in use */ - list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) { - list_del(&req->free_list); - - for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) - kfree(req->segments[j]); - - for (j = 0; j < MAX_INDIRECT_PAGES; j++) - kfree(req->indirect_pages[j]); - - kfree(req); - i++; - } - - WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); - kmem_cache_free(xen_blkif_cachep, blkif); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 0823a96902f8..a69c02dadec0 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) RING_IDX i, rp; unsigned long flags; struct blkfront_info *info = (struct blkfront_info *)dev_id; + int error; spin_lock_irqsave(&info->io_lock, flags); @@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) continue; } - req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; + error = (bret->status == BLKIF_RSP_OKAY) ? 
0 : -EIO; switch (bret->operation) { case BLKIF_OP_DISCARD: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; printk(KERN_WARNING "blkfront: %s: %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; queue_flag_clear(QUEUE_FLAG_DISCARD, rq); queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq); } - blk_mq_complete_request(req); + blk_mq_complete_request(req, error); break; case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { printk(KERN_WARNING "blkfront: %s: %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; } if (unlikely(bret->status == BLKIF_RSP_ERROR && info->shadow[id].req.u.rw.nr_segments == 0)) { printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", info->gd->disk_name, op_name(bret->operation)); - req->errors = -EOPNOTSUPP; + error = -EOPNOTSUPP; } - if (unlikely(req->errors)) { - if (req->errors == -EOPNOTSUPP) - req->errors = 0; + if (unlikely(error)) { + if (error == -EOPNOTSUPP) + error = 0; info->feature_flush = 0; xlvbd_flush(info); } @@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " "request: %x\n", bret->status); - blk_mq_complete_request(req); + blk_mq_complete_request(req, error); break; default: BUG(); @@ -1955,7 +1956,8 @@ static void blkback_changed(struct xenbus_device *dev, break; /* Missed the backend's Closing state -- fallthrough */ case XenbusStateClosing: - blkfront_closing(info); + if (info) + blkfront_closing(info); break; } } diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 1a82f3a17681..0ebca8ba7bc4 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -36,7 +36,6 @@ config ARM_CCI400_PORT_CTRL config ARM_CCI500_PMU bool "ARM CCI500 PMU support" - default y depends on (ARM && CPU_V7) || ARM64 depends on PERF_EVENTS select ARM_CCI_PMU diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index 7d9879e166cf..7082c7268845 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -1184,11 +1184,12 @@ static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb, if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) break; target = cpumask_any_but(cpu_online_mask, cpu); - if (target < 0) + if (target >= nr_cpu_ids) break; perf_pmu_migrate_context(&dt->pmu, cpu, target); cpumask_set_cpu(target, &dt->cpu); - WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0); + if (ccn->irq) + WARN_ON(irq_set_affinity(ccn->irq, &dt->cpu) != 0); default: break; } diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index c0eaf0973bd2..779b6ff0c7ad 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -333,7 +333,8 @@ int clk_add_alias(const char *alias, const char *alias_dev_name, if (IS_ERR(r)) return PTR_ERR(r); - l = clkdev_create(r, alias, "%s", alias_dev_name); + l = clkdev_create(r, alias, alias_dev_name ? "%s" : NULL, + alias_dev_name); clk_put(r); return l ? 
0 : -ENODEV; diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c index 5837eb8a212f..85da8b983256 100644 --- a/drivers/clk/mvebu/clk-cpu.c +++ b/drivers/clk/mvebu/clk-cpu.c @@ -197,6 +197,7 @@ static void __init of_cpu_clk_setup(struct device_node *node) for_each_node_by_type(dn, "cpu") { struct clk_init_data init; struct clk *clk; + struct clk *parent_clk; char *clk_name = kzalloc(5, GFP_KERNEL); int cpu, err; @@ -208,8 +209,9 @@ static void __init of_cpu_clk_setup(struct device_node *node) goto bail_out; sprintf(clk_name, "cpu%d", cpu); + parent_clk = of_clk_get(node, 0); - cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0); + cpuclk[cpu].parent_name = __clk_get_name(parent_clk); cpuclk[cpu].clk_name = clk_name; cpuclk[cpu].cpu = cpu; cpuclk[cpu].reg_base = clock_complex_base; diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c index 7c1e1f58e2da..2fe37f708dc7 100644 --- a/drivers/clk/samsung/clk-cpu.c +++ b/drivers/clk/samsung/clk-cpu.c @@ -164,7 +164,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, * the values for DIV_COPY and DIV_HPM dividers need not be set. */ div0 = cfg_data->div0; - if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_HAS_DIV1) { div1 = cfg_data->div1; if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK) div1 = readl(base + E4210_DIV_CPU1) & @@ -185,7 +185,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1; WARN_ON(alt_div >= MAX_DIV); - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { /* * In Exynos4210, ATB clock parent is also mout_core. So * ATB clock also needs to be mantained at safe speed. 
@@ -206,7 +206,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, writel(div0, base + E4210_DIV_CPU0); wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL); - if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_HAS_DIV1) { writel(div1, base + E4210_DIV_CPU1); wait_until_divider_stable(base + E4210_DIV_STAT_CPU1, DIV_MASK_ALL); @@ -225,7 +225,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, unsigned long mux_reg; /* find out the divider values to use for clock data */ - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { while ((cfg_data->prate * 1000) != ndata->new_rate) { if (cfg_data->prate == 0) return -EINVAL; @@ -240,7 +240,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU); wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1); - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK); div_mask |= E4210_DIV0_ATB_MASK; } diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c index 676ee8f6d813..8831e1a05367 100644 --- a/drivers/clk/ti/clk-3xxx.c +++ b/drivers/clk/ti/clk-3xxx.c @@ -374,7 +374,6 @@ static struct ti_dt_clk omap3xxx_clks[] = { DT_CLK(NULL, "gpio2_ick", "gpio2_ick"), DT_CLK(NULL, "wdt3_ick", "wdt3_ick"), DT_CLK(NULL, "uart3_ick", "uart3_ick"), - DT_CLK(NULL, "uart4_ick", "uart4_ick"), DT_CLK(NULL, "gpt9_ick", "gpt9_ick"), DT_CLK(NULL, "gpt8_ick", "gpt8_ick"), DT_CLK(NULL, "gpt7_ick", "gpt7_ick"), @@ -519,6 +518,7 @@ static struct ti_dt_clk am35xx_clks[] = { static struct ti_dt_clk omap36xx_clks[] = { DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"), DT_CLK(NULL, "uart4_fck", "uart4_fck"), + DT_CLK(NULL, "uart4_ick", "uart4_ick"), { .node_name = NULL }, }; diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index 9b5b289e6334..a911d7de3377 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -18,7 +18,6 @@ #include "clock.h" -#define DRA7_DPLL_ABE_DEFFREQ 180633600 #define DRA7_DPLL_GMAC_DEFFREQ 1000000000 #define DRA7_DPLL_USB_DEFFREQ 960000000 @@ -313,27 +312,12 @@ static struct ti_dt_clk dra7xx_clks[] = { int __init dra7xx_dt_clk_init(void) { int rc; - struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck; + struct clk *dpll_ck, *hdcp_ck; ti_dt_clocks_register(dra7xx_clks); omap2_clk_disable_autoidle_all(); - abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux"); - sys_clkin2 = clk_get_sys(NULL, "sys_clkin2"); - dpll_ck = clk_get_sys(NULL, "dpll_abe_ck"); - - rc = clk_set_parent(abe_dpll_mux, sys_clkin2); - if (!rc) - rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ); - if (rc) - pr_err("%s: failed to configure ABE DPLL!\n", __func__); - - dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck"); - rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2); - if (rc) - pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__); - dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck"); rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ); if (rc) diff --git a/drivers/clk/ti/clkt_dflt.c b/drivers/clk/ti/clkt_dflt.c index 90d7d8a21c49..1ddc288fce4e 100644 --- a/drivers/clk/ti/clkt_dflt.c +++ b/drivers/clk/ti/clkt_dflt.c @@ -222,7 +222,7 @@ int omap2_dflt_clk_enable(struct clk_hw *hw) } } - if (unlikely(!clk->enable_reg)) { + if (unlikely(IS_ERR(clk->enable_reg))) { pr_err("%s: %s missing 
enable_reg\n", __func__, clk_hw_get_name(hw)); ret = -EINVAL; @@ -264,7 +264,7 @@ void omap2_dflt_clk_disable(struct clk_hw *hw) u32 v; clk = to_clk_hw_omap(hw); - if (!clk->enable_reg) { + if (IS_ERR(clk->enable_reg)) { /* * 'independent' here refers to a clock which is not * controlled by its parent. diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c index bb2c2b050964..d3c1742ded1a 100644 --- a/drivers/clocksource/rockchip_timer.c +++ b/drivers/clocksource/rockchip_timer.c @@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np) bc_timer.freq = clk_get_rate(timer_clk); irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { + if (!irq) { pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); return; } diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c index edacf3902e10..1cea08cf603e 100644 --- a/drivers/clocksource/timer-keystone.c +++ b/drivers/clocksource/timer-keystone.c @@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np) int irq, error; irq = irq_of_parse_and_map(np, 0); - if (irq == NO_IRQ) { + if (!irq) { pr_err("%s: failed to map interrupts\n", __func__); return; } diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 798277227de7..cec1ee2d2f74 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -149,6 +149,9 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) { struct acpi_cpufreq_data *data = policy->driver_data; + if (unlikely(!data)) + return -ENODEV; + return cpufreq_show_cpus(data->freqdomain_cpus, buf); } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index ef5ed9470de9..25c4c15103a0 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1436,8 +1436,10 @@ static void cpufreq_offline_finish(unsigned int cpu) * since this is a core component, and is essential for the * subsequent light-weight ->init() to succeed. 
*/ - if (cpufreq_driver->exit) + if (cpufreq_driver->exit) { cpufreq_driver->exit(policy); + policy->freq_table = NULL; + } } /** diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3af9dd7332e6..aa33b92b3e3e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -776,6 +776,11 @@ static inline void intel_pstate_sample(struct cpudata *cpu) local_irq_save(flags); rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); + if (cpu->prev_mperf == mperf) { + local_irq_restore(flags); + return; + } + tsc = rdtsc(); local_irq_restore(flags); diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 3927ed9fdbd5..ca848cc6a8fd 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -492,7 +492,7 @@ struct devfreq *devfreq_add_device(struct device *dev, if (err) { put_device(&devfreq->dev); mutex_unlock(&devfreq->lock); - goto err_dev; + goto err_out; } mutex_unlock(&devfreq->lock); @@ -518,7 +518,6 @@ struct devfreq *devfreq_add_device(struct device *dev, err_init: list_del(&devfreq->node); device_unregister(&devfreq->dev); -err_dev: kfree(devfreq); err_out: return ERR_PTR(err); @@ -795,8 +794,10 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, ret = PTR_ERR(governor); goto out; } - if (df->governor == governor) + if (df->governor == governor) { + ret = 0; goto out; + } if (df->governor) { ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index a165b4bfd330..dd24375b76dd 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan, return desc; } +void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) +{ + memset(&desc->lld, 0, sizeof(desc->lld)); + INIT_LIST_HEAD(&desc->descs_list); + desc->direction = DMA_TRANS_NONE; + desc->xfer_size = 0; + desc->active_xfer = false; +} + /* Call must be protected by lock. 
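
The new at_xdmac_init_used_desc() above scrubs every field of a descriptor pulled back off the free list, instead of clearing only active_xfer. A toy version of the same reset-on-reuse pattern, with invented names and a stand-in descriptor layout:

#include <stdio.h>
#include <string.h>

struct desc {
        unsigned long lld[4];        /* stand-in for the hardware linked-list words */
        size_t xfer_size;
        int direction;
        int active_xfer;
};

/* Reset everything the previous user may have left behind. */
static void desc_init_for_reuse(struct desc *d)
{
        memset(d->lld, 0, sizeof(d->lld));
        d->xfer_size = 0;
        d->direction = -1;        /* "no direction", analogous to DMA_TRANS_NONE */
        d->active_xfer = 0;
}

int main(void)
{
        struct desc d = { .lld = { 0xdeadbeef }, .xfer_size = 512, .active_xfer = 1 };

        desc_init_for_reuse(&d);
        printf("lld[0]=%lx xfer_size=%zu active=%d\n",
               d.lld[0], d.xfer_size, d.active_xfer);
        return 0;
}
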
*/ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) { @@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan) desc = list_first_entry(&atchan->free_descs_list, struct at_xdmac_desc, desc_node); list_del(&desc->desc_node); - desc->active_xfer = false; + at_xdmac_init_used_desc(desc); } return desc; @@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, if (xt->src_inc) { if (xt->src_sgl) - chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM; + chan_cc |= AT_XDMAC_CC_SAM_UBS_AM; else chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM; } if (xt->dst_inc) { if (xt->dst_sgl) - chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM; + chan_cc |= AT_XDMAC_CC_DAM_UBS_AM; else chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM; } diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 3ff284c8e3d5..09479d4be4db 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) mutex_lock(&dma_list_mutex); if (chan->client_count == 0) { + struct dma_device *device = chan->device; + + dma_cap_set(DMA_PRIVATE, device->cap_mask); + device->privatecnt++; err = dma_chan_get(chan); - if (err) + if (err) { pr_debug("%s: failed to get %s: (%d)\n", __func__, dma_chan_name(chan), err); + chan = NULL; + if (--device->privatecnt == 0) + dma_cap_clear(DMA_PRIVATE, device->cap_mask); + } } else chan = NULL; diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index cf1c87fa1edd..bedce038c6e2 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) INIT_LIST_HEAD(&dw->dma.channels); for (i = 0; i < nr_channels; i++) { struct dw_dma_chan *dwc = &dw->chan[i]; - int r = nr_channels - i - 1; dwc->chan.device = &dw->dma; dma_cookie_init(&dwc->chan); @@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) /* 7 is highest priority & 0 is lowest. 
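
In the dma_get_slave_channel() hunk above, DMA_PRIVATE and privatecnt are taken before dma_chan_get() and rolled back if it fails. A rough sketch of that acquire-then-undo-on-error shape, with invented names and state:

#include <stdio.h>

struct device_state {
        int privatecnt;
        int private_cap;
};

/* Pretend acquisition that can fail. */
static int chan_get(int should_fail)
{
        return should_fail ? -19 /* -ENODEV */ : 0;
}

static int get_slave_channel(struct device_state *dev, int should_fail)
{
        int err;

        /* Mark the device before trying to take the channel. */
        dev->private_cap = 1;
        dev->privatecnt++;

        err = chan_get(should_fail);
        if (err) {
                /* Roll back exactly what was taken above. */
                if (--dev->privatecnt == 0)
                        dev->private_cap = 0;
                return err;
        }
        return 0;
}

int main(void)
{
        struct device_state dev = { 0, 0 };

        get_slave_channel(&dev, 1);
        printf("after failed get: privatecnt=%d cap=%d\n",
               dev.privatecnt, dev.private_cap);
        return 0;
}
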
*/ if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) - dwc->priority = r; + dwc->priority = nr_channels - i - 1; else dwc->priority = i; @@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) /* Hardware configuration */ if (autocfg) { unsigned int dwc_params; + unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1; void __iomem *addr = chip->regs + r * sizeof(u32); dwc_params = dma_read_byaddr(addr, DWC_PARAMS); diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 18c14e1f1414..48d6d9e94f67 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c) struct idma64_desc *desc = idma64c->desc; struct idma64_hw_desc *hw; size_t bytes = desc->length; - u64 llp; - u32 ctlhi; + u64 llp = channel_readq(idma64c, LLP); + u32 ctlhi = channel_readl(idma64c, CTL_HI); unsigned int i = 0; - llp = channel_readq(idma64c, LLP); do { hw = &desc->hw[i]; - } while ((hw->llp != llp) && (++i < desc->ndesc)); + if (hw->llp == llp) + break; + bytes -= hw->len; + } while (++i < desc->ndesc); if (!i) return bytes; - do { - bytes -= desc->hw[--i].len; - } while (i); + /* The current chunk is not fully transfered yet */ + bytes += desc->hw[--i].len; - ctlhi = channel_readl(idma64c, CTL_HI); return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi); } diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 5cb61ce01036..fc4156afa070 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan) return; /* clear the channel mapping in DRCMR */ - reg = pxad_drcmr(chan->drcmr); - writel_relaxed(0, chan->phy->base + reg); + if (chan->drcmr <= DRCMR_CHLNUM) { + reg = pxad_drcmr(chan->drcmr); + writel_relaxed(0, chan->phy->base + reg); + } spin_lock_irqsave(&pdev->phy_lock, flags); for (i = 0; i < 32; i++) @@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned) "%s(); phy=%p(%d) misaligned=%d\n", __func__, phy, phy->idx, misaligned); - reg = pxad_drcmr(phy->vchan->drcmr); - writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); + if (phy->vchan->drcmr <= DRCMR_CHLNUM) { + reg = pxad_drcmr(phy->vchan->drcmr); + writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); + } dalgn = phy_readl_relaxed(phy, DALGN); if (misaligned) @@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd, struct dma_async_tx_descriptor *tx; struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc); + INIT_LIST_HEAD(&vd->node); tx = vchan_tx_prep(vc, vd, tx_flags); tx->tx_submit = pxad_tx_submit; dev_dbg(&chan->vc.chan.dev->device, @@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan, width = chan->cfg.src_addr_width; dev_addr = chan->cfg.src_addr; *dev_src = dev_addr; - *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC; + *dcmd |= PXA_DCMD_INCTRGADDR; + if (chan->drcmr <= DRCMR_CHLNUM) + *dcmd |= PXA_DCMD_FLOWSRC; } if (dir == DMA_MEM_TO_DEV) { maxburst = chan->cfg.dst_maxburst; width = chan->cfg.dst_addr_width; dev_addr = chan->cfg.dst_addr; *dev_dst = dev_addr; - *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG; + *dcmd |= PXA_DCMD_INCSRCADDR; + if (chan->drcmr <= DRCMR_CHLNUM) + *dcmd |= PXA_DCMD_FLOWTRG; } if (dir == DMA_MEM_TO_MEM) *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR | @@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan, else curr = phy_readl_relaxed(chan->phy, DTADR); + /* + * curr has to be actually read before 
checking descriptor + * completion, so that a curr inside a status updater + * descriptor implies the following test returns true, and + * preventing reordering of curr load and the test. + */ + rmb(); + if (is_desc_completed(vd)) + goto out; + for (i = 0; i < sw_desc->nb_desc - 1; i++) { hw_desc = sw_desc->hw_desc[i]; if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR) diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index a1a500d96ff2..1661d518224a 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract) static void sun4i_dma_free_contract(struct virt_dma_desc *vd) { struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd); - struct sun4i_dma_promise *promise; + struct sun4i_dma_promise *promise, *tmp; /* Free all the demands and completed demands */ - list_for_each_entry(promise, &contract->demands, list) + list_for_each_entry_safe(promise, tmp, &contract->demands, list) kfree(promise); - list_for_each_entry(promise, &contract->completed_demands, list) + list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list) kfree(promise); kfree(contract); diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index b23e8d52d126..8d57b1b12e41 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -59,7 +59,6 @@ #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF -#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1) #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) #define XGENE_DMA_RING_CMD_OFFSET 0x2C @@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt) return flyby_type[src_cnt]; } -static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring) -{ - u32 __iomem *cmd_base = ring->cmd_base; - u32 ring_state = ioread32(&cmd_base[1]); - - return XGENE_DMA_RING_DESC_CNT(ring_state); -} - static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, dma_addr_t *paddr) { @@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan, dma_pool_free(chan->desc_pool, desc, desc->tx.phys); } -static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, - struct xgene_dma_desc_sw *desc_sw) +static void xgene_chan_xfer_request(struct xgene_dma_chan *chan, + struct xgene_dma_desc_sw *desc_sw) { + struct xgene_dma_ring *ring = &chan->tx_ring; struct xgene_dma_desc_hw *desc_hw; - /* Check if can push more descriptor to hw for execution */ - if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2)) - return -EBUSY; - /* Get hw descriptor from DMA tx ring */ desc_hw = &ring->desc_hw[ring->head]; @@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); } + /* Increment the pending transaction count */ + chan->pending += ((desc_sw->flags & + XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); + /* Notify the hw that we have descriptor ready for execution */ iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? 
2 : 1, ring->cmd); - - return 0; } /** @@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) { struct xgene_dma_desc_sw *desc_sw, *_desc_sw; - int ret; /* * If the list of pending descriptors is empty, then we @@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) if (chan->pending >= chan->max_outstanding) return; - ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw); - if (ret) - return; + xgene_chan_xfer_request(chan, desc_sw); /* * Delete this element from ld pending queue and append it to * ld running queue */ list_move_tail(&desc_sw->node, &chan->ld_running); - - /* Increment the pending transaction count */ - chan->pending++; } } @@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) * Decrement the pending transaction count * as we have processed one */ - chan->pending--; + chan->pending -= ((desc_sw->flags & + XGENE_DMA_FLAG_64B_DESC) ? 2 : 1); /* * Delete this node from ld running queue and append it to @@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan, struct xgene_dma_ring *ring, enum xgene_dma_ring_cfgsize cfgsize) { + int ret; + /* Setup DMA ring descriptor variables */ ring->pdma = chan->pdma; ring->cfgsize = cfgsize; ring->num = chan->pdma->ring_num++; ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); - ring->size = xgene_dma_get_ring_size(chan, cfgsize); - if (ring->size <= 0) - return ring->size; + ret = xgene_dma_get_ring_size(chan, cfgsize); + if (ret <= 0) + return ret; + ring->size = ret; /* Allocate memory for DMA ring descriptor */ ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, @@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan) tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); /* Set the max outstanding request possible to this channel */ - chan->max_outstanding = rx_ring->slots; + chan->max_outstanding = tx_ring->slots; return ret; } diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c index 39915a6b7986..c017fcd8e07c 100644 --- a/drivers/dma/zx296702_dma.c +++ b/drivers/dma/zx296702_dma.c @@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec, struct dma_chan *chan; struct zx_dma_chan *c; - if (request > d->dma_requests) + if (request >= d->dma_requests) return NULL; chan = dma_get_any_slave_channel(&d->slave); diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index e29560e6b40b..950c87f5d279 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c @@ -13,6 +13,7 @@ */ #include +#include #include #include "efistub.h" @@ -305,6 +306,44 @@ fail: */ #define EFI_RT_VIRTUAL_BASE 0x40000000 +static int cmp_mem_desc(const void *l, const void *r) +{ + const efi_memory_desc_t *left = l, *right = r; + + return (left->phys_addr > right->phys_addr) ? 1 : -1; +} + +/* + * Returns whether region @left ends exactly where region @right starts, + * or false if either argument is NULL. 
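
cmp_mem_desc() above orders the EFI memory descriptors by ascending physical address so that adjacent EFI_MEMORY_RUNTIME regions end up next to each other before the map is walked; the kernel's sort() takes the same (base, count, element size, compare) arguments as C's qsort(), plus a swap callback that the patch passes as NULL. A standalone sketch of the same comparator shape, without the EFI types:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct region {
        uint64_t phys_addr;
        uint64_t num_pages;
};

/* Sort by ascending physical address, same idea as cmp_mem_desc(). */
static int cmp_region(const void *l, const void *r)
{
        const struct region *left = l, *right = r;

        return (left->phys_addr > right->phys_addr) ? 1 : -1;
}

int main(void)
{
        struct region map[] = {
                { 0x40000000, 16 }, { 0x10000000, 4 }, { 0x20000000, 8 },
        };
        size_t i, n = sizeof(map) / sizeof(map[0]);

        qsort(map, n, sizeof(map[0]), cmp_region);
        for (i = 0; i < n; i++)
                printf("0x%llx (%llu pages)\n",
                       (unsigned long long)map[i].phys_addr,
                       (unsigned long long)map[i].num_pages);
        return 0;
}
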
+ */ +static bool regions_are_adjacent(efi_memory_desc_t *left, + efi_memory_desc_t *right) +{ + u64 left_end; + + if (left == NULL || right == NULL) + return false; + + left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE; + + return left_end == right->phys_addr; +} + +/* + * Returns whether region @left and region @right have compatible memory type + * mapping attributes, and are both EFI_MEMORY_RUNTIME regions. + */ +static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left, + efi_memory_desc_t *right) +{ + static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT | + EFI_MEMORY_WC | EFI_MEMORY_UC | + EFI_MEMORY_RUNTIME; + + return ((left->attribute ^ right->attribute) & mem_type_mask) == 0; +} + /* * efi_get_virtmap() - create a virtual mapping for the EFI memory map * @@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size, int *count) { u64 efi_virt_base = EFI_RT_VIRTUAL_BASE; - efi_memory_desc_t *out = runtime_map; + efi_memory_desc_t *in, *prev = NULL, *out = runtime_map; int l; - for (l = 0; l < map_size; l += desc_size) { - efi_memory_desc_t *in = (void *)memory_map + l; + /* + * To work around potential issues with the Properties Table feature + * introduced in UEFI 2.5, which may split PE/COFF executable images + * in memory into several RuntimeServicesCode and RuntimeServicesData + * regions, we need to preserve the relative offsets between adjacent + * EFI_MEMORY_RUNTIME regions with the same memory type attributes. + * The easiest way to find adjacent regions is to sort the memory map + * before traversing it. + */ + sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL); + + for (l = 0; l < map_size; l += desc_size, prev = in) { u64 paddr, size; + in = (void *)memory_map + l; if (!(in->attribute & EFI_MEMORY_RUNTIME)) continue; + paddr = in->phys_addr; + size = in->num_pages * EFI_PAGE_SIZE; + /* * Make the mapping compatible with 64k pages: this allows * a 4k page size kernel to kexec a 64k page size kernel and * vice versa. */ - paddr = round_down(in->phys_addr, SZ_64K); - size = round_up(in->num_pages * EFI_PAGE_SIZE + - in->phys_addr - paddr, SZ_64K); - - /* - * Avoid wasting memory on PTEs by choosing a virtual base that - * is compatible with section mappings if this region has the - * appropriate size and physical alignment. (Sections are 2 MB - * on 4k granule kernels) - */ - if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) - efi_virt_base = round_up(efi_virt_base, SZ_2M); + if (!regions_are_adjacent(prev, in) || + !regions_have_compatible_memory_type_attrs(prev, in)) { + + paddr = round_down(in->phys_addr, SZ_64K); + size += in->phys_addr - paddr; + + /* + * Avoid wasting memory on PTEs by choosing a virtual + * base that is compatible with section mappings if this + * region has the appropriate size and physical + * alignment. 
(Sections are 2 MB on 4k granule kernels) + */ + if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M) + efi_virt_base = round_up(efi_virt_base, SZ_2M); + else + efi_virt_base = round_up(efi_virt_base, SZ_64K); + } in->virt_addr = efi_virt_base + in->phys_addr - paddr; efi_virt_base += size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 6647fb26ef25..0d13e6368b96 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1654,6 +1654,7 @@ struct amdgpu_pm { u8 fan_max_rpm; /* dpm */ bool dpm_enabled; + bool sysfs_initialized; struct amdgpu_dpm dpm; const struct firmware *fw; /* SMC firmware */ uint32_t fw_version; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 77f1d7c6ea3a..9416e0f5c1db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -672,8 +672,12 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) /* disp clock */ adev->clock.default_dispclk = le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); - if (adev->clock.default_dispclk == 0) - adev->clock.default_dispclk = 54000; /* 540 Mhz */ + /* set a reasonable default for DP */ + if (adev->clock.default_dispclk < 53900) { + DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", + adev->clock.default_dispclk / 100); + adev->clock.default_dispclk = 60000; + } adev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); adev->clock.current_dispclk = adev->clock.default_dispclk; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 1c3fc99c5465..8e995148f56e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -208,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device, return ret; } -static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd, - cgs_handle_t *handle) -{ - CGS_FUNC_ADEV; - int r; - uint32_t dma_handle; - struct drm_gem_object *obj; - struct amdgpu_bo *bo; - struct drm_device *dev = adev->ddev; - struct drm_file *file_priv = NULL, *priv; - - mutex_lock(&dev->struct_mutex); - list_for_each_entry(priv, &dev->filelist, lhead) { - rcu_read_lock(); - if (priv->pid == get_pid(task_pid(current))) - file_priv = priv; - rcu_read_unlock(); - if (file_priv) - break; - } - mutex_unlock(&dev->struct_mutex); - r = dev->driver->prime_fd_to_handle(dev, - file_priv, dmabuf_fd, - &dma_handle); - spin_lock(&file_priv->table_lock); - - /* Check if we currently have a reference on the object */ - obj = idr_find(&file_priv->object_idr, dma_handle); - if (obj == NULL) { - spin_unlock(&file_priv->table_lock); - return -EINVAL; - } - spin_unlock(&file_priv->table_lock); - bo = gem_to_amdgpu_bo(obj); - *handle = (cgs_handle_t)bo; - return 0; -} - static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) { struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; @@ -810,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = { }; static const struct cgs_os_ops amdgpu_cgs_os_ops = { - amdgpu_cgs_import_gpu_mem, amdgpu_cgs_add_irq_source, amdgpu_cgs_irq_get, amdgpu_cgs_irq_put diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 749420f1ea6f..fd16652aa277 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -156,7 +156,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, 
void *data) uint64_t *chunk_array_user; uint64_t *chunk_array; struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - unsigned size, i; + unsigned size; + int i; int ret; if (cs->in.num_chunks == 0) @@ -176,7 +177,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) /* get chunks */ INIT_LIST_HEAD(&p->validated); - chunk_array_user = (uint64_t __user *)(cs->in.chunks); + chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks); if (copy_from_user(chunk_array, chunk_array_user, sizeof(uint64_t)*cs->in.num_chunks)) { ret = -EFAULT; @@ -196,7 +197,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) struct drm_amdgpu_cs_chunk user_chunk; uint32_t __user *cdata; - chunk_ptr = (void __user *)chunk_array[i]; + chunk_ptr = (void __user *)(unsigned long)chunk_array[i]; if (copy_from_user(&user_chunk, chunk_ptr, sizeof(struct drm_amdgpu_cs_chunk))) { ret = -EFAULT; @@ -207,7 +208,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) p->chunks[i].length_dw = user_chunk.length_dw; size = p->chunks[i].length_dw; - cdata = (void __user *)user_chunk.chunk_data; + cdata = (void __user *)(unsigned long)user_chunk.chunk_data; p->chunks[i].user_ptr = cdata; p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index e3d70772b531..6c9e0902a414 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -85,8 +85,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work) /* We borrow the event spin lock for protecting flip_status */ spin_lock_irqsave(&crtc->dev->event_lock, flags); - /* set the proper interrupt */ - amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id); /* do the flip (mmio) */ adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); /* set the flip status */ @@ -186,10 +184,6 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, goto cleanup; } - fence_get(work->excl); - for (i = 0; i < work->shared_count; ++i) - fence_get(work->shared[i]); - amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags); amdgpu_bo_unreserve(new_rbo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index adb48353f2e1..b190c2a83680 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -242,11 +242,11 @@ static struct pci_device_id pciidlist[] = { {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, #endif /* topaz */ - {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, - {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, + {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, /* tonga */ {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 
8a122b1b7786..96290d9cddca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -402,3 +402,19 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) return true; return false; } + +void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev) +{ + struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev; + struct drm_fb_helper *fb_helper; + int ret; + + if (!afbdev) + return; + + fb_helper = &afbdev->helper; + + ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); + if (ret) + DRM_DEBUG("failed to restore crtc mode\n"); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 8c735f544b66..5d11e798230c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -485,7 +485,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file * Outdated mess for old drm with Xorg being in charge (void function now). */ /** - * amdgpu_driver_firstopen_kms - drm callback for last close + * amdgpu_driver_lastclose_kms - drm callback for last close * * @dev: drm dev pointer * @@ -493,6 +493,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file */ void amdgpu_driver_lastclose_kms(struct drm_device *dev) { + struct amdgpu_device *adev = dev->dev_private; + + amdgpu_fbdev_restore_mode(adev); vga_switcheroo_process_delayed_switch(); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 64efe5b52e65..7bd470d9ac30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -567,6 +567,7 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev); void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state); int amdgpu_fbdev_total_size(struct amdgpu_device *adev); bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj); +void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev); void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index efed11509f4a..22a8c7d3a3ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -294,10 +294,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct amdgpu_device *adev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; - /* Skip limit attributes if DPM is not enabled */ + /* Skip attributes if DPM is not enabled */ if (!adev->pm.dpm_enabled && (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || - attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) + attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || + attr == &sensor_dev_attr_pwm1.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) return 0; /* Skip fan attributes if fan is not present */ @@ -691,6 +695,9 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) { int ret; + if (adev->pm.sysfs_initialized) + return 0; + if (adev->pm.funcs->get_temperature == NULL) return 0; adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, @@ -719,6 +726,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) return ret; } + adev->pm.sysfs_initialized = true; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 
1e14531353e0..53d551f2d839 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -455,8 +455,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, return -ENOMEM; r = amdgpu_ib_get(ring, NULL, ndw * 4, ib); - if (r) + if (r) { + kfree(ib); return r; + } ib->length_dw = 0; /* walk over the address space and update the page directory */ diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index cd6edc40c9cd..1e0bba29e167 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action) amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - amdgpu_atombios_encoder_setup_dig_transmitter(encoder, - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level); if (ext_encoder) amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 82e8d0730517..a1a35a5df8e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -6185,6 +6185,11 @@ static int ci_dpm_late_init(void *handle) if (!amdgpu_dpm) return 0; + /* init the sysfs and debugfs files late */ + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + return ret; + ret = ci_set_temperature_range(adev); if (ret) return ret; @@ -6232,9 +6237,6 @@ static int ci_dpm_sw_init(void *handle) adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - goto dpm_failed; mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 4b6ce74753cd..484710cfdf82 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1567,6 +1567,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) int ret, i; u16 tmp16; + if (pci_is_root_bus(adev->pdev->bus)) + return; + if (amdgpu_pcie_gen2 == 0) return; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 44fa96ad4709..2e3373ed4c94 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -596,6 +596,12 @@ static int cz_dpm_late_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (amdgpu_dpm) { + int ret; + /* init the sysfs and debugfs files late */ + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + return ret; + /* powerdown unused blocks for now */ cz_dpm_powergate_uvd(adev, true); cz_dpm_powergate_vce(adev, true); @@ -632,10 +638,6 @@ static int cz_dpm_sw_init(void *handle) if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - goto dpm_init_failed; - mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index e4d101b1252a..d4c82b625727 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -255,6 +255,24 @@ static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) return 
RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); } +static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev) +{ + unsigned i; + + /* Enable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_get(adev, &adev->pageflip_irq, i); +} + +static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev) +{ + unsigned i; + + /* Disable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_put(adev, &adev->pageflip_irq, i); +} + /** * dce_v10_0_page_flip - pageflip callback. * @@ -2663,9 +2681,10 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v10_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v10_0_vga_enable(crtc, false); - /* Make sure VBLANK interrupt is still enabled */ + /* Make sure VBLANK and PFLIP interrupts are still enabled */ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v10_0_crtc_load_lut(crtc); break; @@ -3025,6 +3044,8 @@ static int dce_v10_0_hw_init(void *handle) dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v10_0_pageflip_interrupt_init(adev); + return 0; } @@ -3039,6 +3060,8 @@ static int dce_v10_0_hw_fini(void *handle) dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v10_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3050,6 +3073,8 @@ static int dce_v10_0_suspend(void *handle) dce_v10_0_hpd_fini(adev); + dce_v10_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3075,6 +3100,8 @@ static int dce_v10_0_resume(void *handle) /* initialize hpd */ dce_v10_0_hpd_init(adev); + dce_v10_0_pageflip_interrupt_init(adev); + return 0; } @@ -3369,7 +3396,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 6411e8244671..7e1cf5e4eebf 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -233,6 +233,24 @@ static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); } +static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev) +{ + unsigned i; + + /* Enable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_get(adev, &adev->pageflip_irq, i); +} + +static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev) +{ + unsigned i; + + /* Disable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_put(adev, &adev->pageflip_irq, i); +} + /** * dce_v11_0_page_flip - pageflip callback. 
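
dce_v10_0_pageflip_interrupt_init()/_fini() above arm the page-flip interrupt once per CRTC at hardware init and release it again at fini/suspend, rather than taking and dropping it around every individual flip; amdgpu_irq_get()/_put() behave roughly as a reference count on the interrupt source. A toy model of that counting, with hypothetical names:

#include <stdio.h>

#define NUM_CRTC 3

static int pflip_refcount[NUM_CRTC];

static void hw_enable(int crtc, int on)
{
        printf("crtc %d: pageflip irq %s\n", crtc, on ? "enabled" : "disabled");
}

/* Enable the source on the 0 -> 1 transition only. */
static void irq_get(int crtc)
{
        if (pflip_refcount[crtc]++ == 0)
                hw_enable(crtc, 1);
}

/* Disable it again on the 1 -> 0 transition. */
static void irq_put(int crtc)
{
        if (--pflip_refcount[crtc] == 0)
                hw_enable(crtc, 0);
}

int main(void)
{
        int i;

        for (i = 0; i < NUM_CRTC; i++)        /* hw_init: keep flips armed */
                irq_get(i);
        for (i = 0; i < NUM_CRTC; i++)        /* hw_fini/suspend: release them */
                irq_put(i);
        return 0;
}
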
* @@ -2640,9 +2658,10 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v11_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v11_0_vga_enable(crtc, false); - /* Make sure VBLANK interrupt is still enabled */ + /* Make sure VBLANK and PFLIP interrupts are still enabled */ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v11_0_crtc_load_lut(crtc); break; @@ -2888,7 +2907,7 @@ static int dce_v11_0_early_init(void *handle) switch (adev->asic_type) { case CHIP_CARRIZO: - adev->mode_info.num_crtc = 4; + adev->mode_info.num_crtc = 3; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 9; break; @@ -3000,6 +3019,8 @@ static int dce_v11_0_hw_init(void *handle) dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v11_0_pageflip_interrupt_init(adev); + return 0; } @@ -3014,6 +3035,8 @@ static int dce_v11_0_hw_fini(void *handle) dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v11_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3025,6 +3048,8 @@ static int dce_v11_0_suspend(void *handle) dce_v11_0_hpd_fini(adev); + dce_v11_0_pageflip_interrupt_fini(adev); + return 0; } @@ -3051,6 +3076,8 @@ static int dce_v11_0_resume(void *handle) /* initialize hpd */ dce_v11_0_hpd_init(adev); + dce_v11_0_pageflip_interrupt_init(adev); + return 0; } @@ -3345,7 +3372,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index c86911c2ea2a..34b9c2a9d8d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -204,6 +204,24 @@ static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); } +static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev) +{ + unsigned i; + + /* Enable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_get(adev, &adev->pageflip_irq, i); +} + +static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev) +{ + unsigned i; + + /* Disable pflip interrupts */ + for (i = 0; i < adev->mode_info.num_crtc; i++) + amdgpu_irq_put(adev, &adev->pageflip_irq, i); +} + /** * dce_v8_0_page_flip - pageflip callback. 
* @@ -2575,9 +2593,10 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v8_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v8_0_vga_enable(crtc, false); - /* Make sure VBLANK interrupt is still enabled */ + /* Make sure VBLANK and PFLIP interrupts are still enabled */ type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); amdgpu_irq_update(adev, &adev->crtc_irq, type); + amdgpu_irq_update(adev, &adev->pageflip_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v8_0_crtc_load_lut(crtc); break; @@ -2933,6 +2952,8 @@ static int dce_v8_0_hw_init(void *handle) dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v8_0_pageflip_interrupt_init(adev); + return 0; } @@ -2947,6 +2968,8 @@ static int dce_v8_0_hw_fini(void *handle) dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); } + dce_v8_0_pageflip_interrupt_fini(adev); + return 0; } @@ -2958,6 +2981,8 @@ static int dce_v8_0_suspend(void *handle) dce_v8_0_hpd_fini(adev); + dce_v8_0_pageflip_interrupt_fini(adev); + return 0; } @@ -2981,6 +3006,8 @@ static int dce_v8_0_resume(void *handle) /* initialize hpd */ dce_v8_0_hpd_init(adev); + dce_v8_0_pageflip_interrupt_init(adev); + return 0; } @@ -3376,7 +3403,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev, spin_unlock_irqrestore(&adev->ddev->event_lock, flags); drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 774528ab8704..fab5471d25d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9a07742620d0..7bc9e9fcf3d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); + /* reset addr and status */ + WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); + + if (!addr && !status) + return 0; + dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", entry->src_id, entry->src_data); dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", @@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", status); gmc_v8_0_vm_decode_fault(adev, 
status, addr, mc_client); - /* reset addr and status */ - WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 94ec04a9c4d5..7e9154c7f1db 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2995,6 +2995,15 @@ static int kv_dpm_late_init(void *handle) { /* powerdown unused blocks for now */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret; + + if (!amdgpu_dpm) + return 0; + + /* init the sysfs and debugfs files late */ + ret = amdgpu_pm_sysfs_init(adev); + if (ret) + return ret; kv_dpm_powergate_acp(adev, true); kv_dpm_powergate_samu(adev, true); @@ -3038,9 +3047,6 @@ static int kv_dpm_sw_init(void *handle) adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - ret = amdgpu_pm_sysfs_init(adev); - if (ret) - goto dpm_failed; mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index b55ceb14fdcd..0bac8702e934 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1005,6 +1005,9 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev) u32 mask; int ret; + if (pci_is_root_bus(adev->pdev->bus)) + return; + if (amdgpu_pcie_gen2 == 0) return; diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h index 488642f08267..3b47ae313e36 100644 --- a/drivers/gpu/drm/amd/include/cgs_linux.h +++ b/drivers/gpu/drm/amd/include/cgs_linux.h @@ -26,19 +26,6 @@ #include "cgs_common.h" -/** - * cgs_import_gpu_mem() - Import dmabuf handle - * @cgs_device: opaque device handle - * @dmabuf_fd: DMABuf file descriptor - * @handle: memory handle (output) - * - * Must be called in the process context that dmabuf_fd belongs to. 
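
The ci_dpm/cz_dpm/kv_dpm hunks above move amdgpu_pm_sysfs_init() into the late_init callbacks, and amdgpu_pm.c pairs that with the new pm.sysfs_initialized flag so a repeated call becomes a no-op. A minimal sketch of that initialize-at-most-once guard, with invented names:

#include <stdio.h>
#include <stdbool.h>

struct pm_state {
        bool sysfs_initialized;
};

static int register_files(void)
{
        printf("registering sysfs/debugfs files\n");
        return 0;
}

static int pm_sysfs_init(struct pm_state *pm)
{
        int ret;

        if (pm->sysfs_initialized)
                return 0;        /* already done by an earlier caller */

        ret = register_files();
        if (ret)
                return ret;

        pm->sysfs_initialized = true;
        return 0;
}

int main(void)
{
        struct pm_state pm = { .sysfs_initialized = false };

        pm_sysfs_init(&pm);        /* does the work */
        pm_sysfs_init(&pm);        /* silently skipped */
        return 0;
}
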
- * - * Return: 0 on success, -errno otherwise - */ -typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd, - cgs_handle_t *handle); - /** * cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources * @private_data: private data provided to cgs_add_irq_source @@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type); typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type); struct cgs_os_ops { - cgs_import_gpu_mem_t import_gpu_mem; - /* IRQ handling */ cgs_add_irq_source_t add_irq_source; cgs_irq_get_t irq_get; cgs_irq_put_t irq_put; }; -#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \ - CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle) #define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \ CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \ private_data) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index e23df5fd3836..809959d56d78 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb); +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port); @@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) struct drm_dp_mst_port *port, *tmp; bool wake_tx = false; - cancel_work_sync(&mstb->mgr->work); - /* * destroy all ports - don't need lock * as there are no more references to the mst branch @@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref) { struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); struct drm_dp_mst_topology_mgr *mgr = port->mgr; + if (!port->input) { port->vcpi.num_slots = 0; kfree(port->cached_edid); - /* we can't destroy the connector here, as - we might be holding the mode_config.mutex - from an EDID retrieval */ + /* + * The only time we don't have a connector + * on an output port is if the connector init + * fails. 
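
drm_dp_destroy_port() above queues connectors onto destroy_connector_list for a work item instead of destroying them inline, because, per the original comment, the caller may already hold mode_config.mutex from an EDID retrieval. A single-threaded sketch of that defer-and-drain shape; the locking and workqueue plumbing are omitted and the names are placeholders:

#include <stdio.h>
#include <stdlib.h>

struct connector {
        int id;
        struct connector *next;        /* link on the deferred-destroy list */
};

static struct connector *destroy_list;

/* Called from a context that cannot take the heavy locks itself. */
static void destroy_port(struct connector *c)
{
        c->next = destroy_list;
        destroy_list = c;
        /* the real code schedules a work item here */
}

/* Later, from the work item, with the right locks available. */
static void destroy_connector_work(void)
{
        while (destroy_list) {
                struct connector *c = destroy_list;

                destroy_list = c->next;
                printf("destroying connector %d\n", c->id);
                free(c);
        }
}

int main(void)
{
        int i;

        for (i = 0; i < 2; i++) {
                struct connector *c = malloc(sizeof(*c));

                if (!c)
                        continue;
                c->id = i;
                destroy_port(c);
        }
        destroy_connector_work();
        return 0;
}
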
+ */ if (port->connector) { + /* we can't destroy the connector here, as + * we might be holding the mode_config.mutex + * from an EDID retrieval */ + mutex_lock(&mgr->destroy_connector_lock); list_add(&port->next, &mgr->destroy_connector_list); mutex_unlock(&mgr->destroy_connector_lock); schedule_work(&mgr->destroy_connector_work); return; } + /* no need to clean up vcpi + * as if we have no connector we never setup a vcpi */ drm_dp_port_teardown_pdt(port, port->pdt); - - if (!port->input && port->vcpi.vcpi > 0) - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); } kfree(port); - - (*mgr->cbs->hotplug)(mgr); } static void drm_dp_put_port(struct drm_dp_mst_port *port) @@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, } } -static void build_mst_prop_path(struct drm_dp_mst_port *port, - struct drm_dp_mst_branch *mstb, +static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, + int pnum, char *proppath, size_t proppath_size) { @@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port, snprintf(temp, sizeof(temp), "-%d", port_num); strlcat(proppath, temp, proppath_size); } - snprintf(temp, sizeof(temp), "-%d", port->port_num); + snprintf(temp, sizeof(temp), "-%d", pnum); strlcat(proppath, temp, proppath_size); } @@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, drm_dp_port_teardown_pdt(port, old_pdt); ret = drm_dp_port_setup_pdt(port); - if (ret == true) { + if (ret == true) drm_dp_send_link_address(mstb->mgr, port->mstb); - port->mstb->link_address_sent = true; - } } if (created && !port->input) { char proppath[255]; - build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); - if (port->port_num >= 8) { + build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); + port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); + if (!port->connector) { + /* remove it from the port list */ + mutex_lock(&mstb->mgr->lock); + list_del(&port->next); + mutex_unlock(&mstb->mgr->lock); + /* drop port list reference */ + drm_dp_put_port(port); + goto out; + } + if (port->port_num >= DP_MST_LOGICAL_PORT_0) { port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); + drm_mode_connector_set_tile_property(port->connector); } + (*mstb->mgr->cbs->register_connector)(port->connector); } +out: /* put reference to this port */ drm_dp_put_port(port); } @@ -1182,17 +1194,18 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_ list_for_each_entry(port, &mstb->ports, next) { if (port->port_num == port_num) { - if (!port->mstb) { + mstb = port->mstb; + if (!mstb) { DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); - return NULL; + goto out; } - mstb = port->mstb; break; } } } kref_get(&mstb->kref); +out: mutex_unlock(&mgr->lock); return mstb; } @@ -1202,10 +1215,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m { struct drm_dp_mst_port *port; struct drm_dp_mst_branch *mstb_child; - if (!mstb->link_address_sent) { + if (!mstb->link_address_sent) drm_dp_send_link_address(mgr, mstb); - mstb->link_address_sent = true; - } + list_for_each_entry(port, &mstb->ports, next) { if (port->input) continue; @@ -1458,8 +1470,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, mutex_unlock(&mgr->qlock); } -static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct 
drm_dp_mst_branch *mstb) +static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) { int len; struct drm_dp_sideband_msg_tx *txmsg; @@ -1467,11 +1479,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) - return -ENOMEM; + return; txmsg->dst = mstb; len = build_link_address(txmsg); + mstb->link_address_sent = true; drm_dp_queue_down_tx(mgr, txmsg); ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); @@ -1499,11 +1512,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, } (*mgr->cbs->hotplug)(mgr); } - } else + } else { + mstb->link_address_sent = false; DRM_DEBUG_KMS("link address failed %d\n", ret); + } kfree(txmsg); - return 0; } static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, @@ -1978,6 +1992,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); + flush_work(&mgr->work); + flush_work(&mgr->destroy_connector_work); } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); @@ -2263,10 +2279,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ if (port->cached_edid) edid = drm_edid_duplicate(port->cached_edid); - else + else { edid = drm_get_edid(connector, &port->aux.ddc); - - drm_mode_connector_set_tile_property(connector); + drm_mode_connector_set_tile_property(connector); + } drm_dp_put_port(port); return edid; } @@ -2671,7 +2687,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) { struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); struct drm_dp_mst_port *port; - + bool send_hotplug = false; /* * Not a regular list traverse as we have to drop the destroy * connector lock before destroying the connector, to avoid AB->BA @@ -2694,7 +2710,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) if (!port->input && port->vcpi.vcpi > 0) drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); kfree(port); + send_hotplug = true; } + if (send_hotplug) + (*mgr->cbs->hotplug)(mgr); } /** @@ -2747,6 +2766,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); */ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) { + flush_work(&mgr->work); flush_work(&mgr->destroy_connector_work); mutex_lock(&mgr->payload_lock); kfree(mgr->payloads); @@ -2782,12 +2802,13 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs if (msgs[num - 1].flags & I2C_M_RD) reading = true; - if (!reading) { + if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) { DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); ret = -EIO; goto out; } + memset(&msg, 0, sizeof(msg)); msg.req_type = DP_REMOTE_I2C_READ; msg.u.i2c_read.num_transactions = num - 1; msg.u.i2c_read.port_number = port->port_num; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 418d299f3b12..ca08c472311b 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper) struct drm_crtc *crtc = mode_set->crtc; int ret; - if (crtc->funcs->cursor_set) { + if (crtc->funcs->cursor_set2) { + ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); + if (ret) + error = true; + } else if (crtc->funcs->cursor_set) { ret = 
crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0); if (ret) error = true; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index d734780b31c0..a18164f2f6d2 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) } #define DRM_OUTPUT_POLL_PERIOD (10*HZ) -static void __drm_kms_helper_poll_enable(struct drm_device *dev) +/** + * drm_kms_helper_poll_enable_locked - re-enable output polling. + * @dev: drm_device + * + * This function re-enables the output polling work without + * locking the mode_config mutex. + * + * This is like drm_kms_helper_poll_enable() however it is to be + * called from a context where the mode_config mutex is locked + * already. + */ +void drm_kms_helper_poll_enable_locked(struct drm_device *dev) { bool poll = false; struct drm_connector *connector; @@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev) if (poll) schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); } +EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked); + static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector, uint32_t maxX, uint32_t maxY, bool merge_type_bits) @@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect /* Re-enable polling in case the global poll config changed. */ if (drm_kms_helper_poll != dev->mode_config.poll_running) - __drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); dev->mode_config.poll_running = drm_kms_helper_poll; @@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable); void drm_kms_helper_poll_enable(struct drm_device *dev) { mutex_lock(&dev->mode_config.mutex); - __drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); mutex_unlock(&dev->mode_config.mutex); } EXPORT_SYMBOL(drm_kms_helper_poll_enable); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 0f6cd33b531f..684bd4a13843 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -235,18 +235,12 @@ static ssize_t dpms_show(struct device *device, char *buf) { struct drm_connector *connector = to_drm_connector(device); - struct drm_device *dev = connector->dev; - uint64_t dpms_status; - int ret; + int dpms; - ret = drm_object_property_get_value(&connector->base, - dev->mode_config.dpms_property, - &dpms_status); - if (ret) - return 0; + dpms = READ_ONCE(connector->dpms); return snprintf(buf, PAGE_SIZE, "%s\n", - drm_get_dpms_name((int)dpms_status)); + drm_get_dpms_name(dpms)); } static ssize_t enabled_show(struct device *device, diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index cbdb78ef3bac..e6cbaca821a4 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -37,7 +37,6 @@ * DECON stands for Display and Enhancement controller. */ -#define DECON_DEFAULT_FRAMERATE 60 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 #define WINDOWS_NR 2 @@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx, return (clkdiv < 0x100) ? 
clkdiv : 0xff; } -static bool decon_mode_fixup(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - if (adjusted_mode->vrefresh == 0) - adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE; - - return true; -} - static void decon_commit(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; @@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc) static const struct exynos_drm_crtc_ops decon_crtc_ops = { .enable = decon_enable, .disable = decon_disable, - .mode_fixup = decon_mode_fixup, .commit = decon_commit, .enable_vblank = decon_enable_vblank, .disable_vblank = decon_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index d66ade0efac8..124fb9a56f02 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int exynos_dp_suspend(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_disable(&dp->encoder); - return 0; -} - -static int exynos_dp_resume(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_enable(&dp->encoder); - return 0; -} -#endif - -static const struct dev_pm_ops exynos_dp_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume) -}; - static const struct of_device_id exynos_dp_match[] = { { .compatible = "samsung,exynos5-dp" }, {}, @@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = { .driver = { .name = "exynos-dp", .owner = THIS_MODULE, - .pm = &exynos_dp_pm_ops, .of_match_table = exynos_dp_match, }, }; diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index c68a6a2a9b57..7f55ba6771c6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c @@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register); int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) { @@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister); int exynos_drm_device_subdrv_probe(struct drm_device *dev) { @@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe); int exynos_drm_device_subdrv_remove(struct drm_device *dev) { @@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev) return 0; } -EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove); int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) { @@ -111,7 +107,6 @@ err: } return ret; } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open); void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) { @@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) subdrv->close(dev, subdrv->dev, file); } } -EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close); diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 0872aa2f450f..ed28823d3b35 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc) exynos_crtc->ops->disable(exynos_crtc); } -static bool 
-exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); - - if (exynos_crtc->ops->mode_fixup) - return exynos_crtc->ops->mode_fixup(exynos_crtc, mode, - adjusted_mode); - - return true; -} - static void exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) { @@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { .enable = exynos_drm_crtc_enable, .disable = exynos_drm_crtc_disable, - .mode_fixup = exynos_drm_crtc_mode_fixup, .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, .atomic_begin = exynos_crtc_atomic_begin, .atomic_flush = exynos_crtc_atomic_flush, diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 831d2e4cacf9..ae9e6b2d3758 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, return 0; } +#ifdef CONFIG_PM_SLEEP static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state) { struct drm_connector *connector; @@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev) return 0; } +#endif static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index b7ba21dfb696..6c717ba672db 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -82,7 +82,6 @@ struct exynos_drm_plane { * * @enable: enable the device * @disable: disable the device - * @mode_fixup: fix mode data before applying it * @commit: set current hw specific display mode to hw. * @enable_vblank: specific driver callback for enabling vblank interrupt. * @disable_vblank: specific driver callback for disabling vblank interrupt. 
@@ -103,9 +102,6 @@ struct exynos_drm_crtc; struct exynos_drm_crtc_ops { void (*enable)(struct exynos_drm_crtc *crtc); void (*disable)(struct exynos_drm_crtc *crtc); - bool (*mode_fixup)(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); void (*commit)(struct exynos_drm_crtc *crtc); int (*enable_vblank)(struct exynos_drm_crtc *crtc); void (*disable_vblank)(struct exynos_drm_crtc *crtc); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 2a652359af64..dd3a5e6d58c8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = { .set_addr = fimc_dst_set_addr, }; -static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) -{ - DRM_DEBUG_KMS("enable[%d]\n", enable); - - if (enable) { - clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); - clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = false; - } else { - clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); - clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = true; - } - - return 0; -} - static irqreturn_t fimc_irq_handler(int irq, void *dev_id) { struct fimc_context *ctx = dev_id; @@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) +{ + DRM_DEBUG_KMS("enable[%d]\n", enable); + + if (enable) { + clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); + clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); + ctx->suspended = false; + } else { + clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); + clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); + ctx->suspended = true; + } + + return 0; +} + #ifdef CONFIG_PM_SLEEP static int fimc_suspend(struct device *dev) { @@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static int fimc_runtime_suspend(struct device *dev) { struct fimc_context *ctx = get_fimc_context(dev); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 750a9e6b9e8d..3d1aba67758b 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -41,7 +41,6 @@ * CPU Interface. */ -#define FIMD_DEFAULT_FRAMERATE 60 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 /* position control register for hardware window 0, 2 ~ 4.*/ @@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx, return (clkdiv < 0x100) ? clkdiv : 0xff; } -static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - if (adjusted_mode->vrefresh == 0) - adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE; - - return true; -} - static void fimd_commit(struct exynos_drm_crtc *crtc) { struct fimd_context *ctx = crtc->ctx; @@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable) return; val = enable ? 
DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; - writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); + writel(val, ctx->regs + DP_MIE_CLKCON); } static const struct exynos_drm_crtc_ops fimd_crtc_ops = { .enable = fimd_enable, .disable = fimd_disable, - .mode_fixup = fimd_mode_fixup, .commit = fimd_commit, .enable_vblank = fimd_enable_vblank, .disable_vblank = fimd_disable_vblank, diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3734c34aed16..c17efdb238a6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data, return 0; } -EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl); int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) @@ -1230,7 +1229,6 @@ err: g2d_put_cmdlist(g2d, node); return ret; } -EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl); int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, struct drm_file *file) @@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data, out: return 0; } -EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index f12fbc36b120..407afedb6003 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj) nr_pages = obj->size >> PAGE_SHIFT; if (!is_drm_iommu_supported(dev)) { - dma_addr_t start_addr; - unsigned int i = 0; - obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *)); if (!obj->pages) { DRM_ERROR("failed to allocate pages.\n"); return -ENOMEM; } + } - obj->cookie = dma_alloc_attrs(dev->dev, - obj->size, - &obj->dma_addr, GFP_KERNEL, - &obj->dma_attrs); - if (!obj->cookie) { - DRM_ERROR("failed to allocate buffer.\n"); + obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr, + GFP_KERNEL, &obj->dma_attrs); + if (!obj->cookie) { + DRM_ERROR("failed to allocate buffer.\n"); + if (obj->pages) drm_free_large(obj->pages); - return -ENOMEM; - } + return -ENOMEM; + } + + if (obj->pages) { + dma_addr_t start_addr; + unsigned int i = 0; start_addr = obj->dma_addr; while (i < nr_pages) { - obj->pages[i] = phys_to_page(start_addr); + obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev, + start_addr)); start_addr += PAGE_SIZE; i++; } } else { - obj->pages = dma_alloc_attrs(dev->dev, obj->size, - &obj->dma_addr, GFP_KERNEL, - &obj->dma_attrs); - if (!obj->pages) { - DRM_ERROR("failed to allocate buffer.\n"); - return -ENOMEM; - } + obj->pages = obj->cookie; } DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", @@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj) DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n", (unsigned long)obj->dma_addr, obj->size); - if (!is_drm_iommu_supported(dev)) { - dma_free_attrs(dev->dev, obj->size, obj->cookie, - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); - drm_free_large(obj->pages); - } else - dma_free_attrs(dev->dev, obj->size, obj->pages, - (dma_addr_t)obj->dma_addr, &obj->dma_attrs); + dma_free_attrs(dev->dev, obj->size, obj->cookie, + (dma_addr_t)obj->dma_addr, &obj->dma_attrs); - obj->dma_addr = (dma_addr_t)NULL; + if (!is_drm_iommu_supported(dev)) + drm_free_large(obj->pages); } static int exynos_drm_gem_handle_create(struct drm_gem_object 
*obj, @@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj) * once dmabuf's refcount becomes 0. */ if (obj->import_attach) - goto out; - - exynos_drm_free_buf(exynos_gem_obj); - -out: - drm_gem_free_mmap_offset(obj); + drm_prime_gem_destroy(obj, exynos_gem_obj->sgt); + else + exynos_drm_free_buf(exynos_gem_obj); /* release file pointer to gem object. */ drm_gem_object_release(obj); kfree(exynos_gem_obj); - exynos_gem_obj = NULL; } unsigned long exynos_drm_gem_get_size(struct drm_device *dev, @@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev, return exynos_gem_obj->size; } - -struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, +static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, unsigned long size) { struct exynos_drm_gem_obj *exynos_gem_obj; @@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, return ERR_PTR(ret); } + ret = drm_gem_create_mmap_offset(obj); + if (ret < 0) { + drm_gem_object_release(obj); + kfree(exynos_gem_obj); + return ERR_PTR(ret); + } + DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); return exynos_gem_obj; @@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev, drm_gem_object_unreference_unlocked(obj); } -int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, +static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, struct vm_area_struct *vma) { struct drm_device *drm_dev = exynos_gem_obj->base.dev; @@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj, int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) -{ struct exynos_drm_gem_obj *exynos_gem_obj; +{ + struct exynos_drm_gem_obj *exynos_gem_obj; struct drm_exynos_gem_info *args = data; struct drm_gem_object *obj; @@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args) { struct exynos_drm_gem_obj *exynos_gem_obj; + unsigned int flags; int ret; /* @@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, args->pitch = args->width * ((args->bpp + 7) / 8); args->size = args->pitch * args->height; - if (is_drm_iommu_supported(dev)) { - exynos_gem_obj = exynos_drm_gem_create(dev, - EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC, - args->size); - } else { - exynos_gem_obj = exynos_drm_gem_create(dev, - EXYNOS_BO_CONTIG | EXYNOS_BO_WC, - args->size); - } + if (is_drm_iommu_supported(dev)) + flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC; + else + flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC; + exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size); if (IS_ERR(exynos_gem_obj)) { dev_warn(dev->dev, "FB allocation failed.\n"); return PTR_ERR(exynos_gem_obj); @@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv, goto unlock; } - ret = drm_gem_create_mmap_offset(obj); - if (ret) - goto out; - *offset = drm_vma_node_offset_addr(&obj->vma_node); DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); -out: drm_gem_object_unreference(obj); unlock: mutex_unlock(&dev->struct_mutex); @@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) err_close_vm: drm_gem_vm_close(vma); - drm_gem_free_mmap_offset(obj); return ret; } @@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev, if (ret < 0) goto err_free_large; + exynos_gem_obj->sgt = sgt; + if (sgt->nents == 1) { 
/* always physically continuous memory if sgt->nents is 1. */ exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index cd62f8410d1e..b62d1007c0e0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -39,6 +39,7 @@ * - this address could be physical address without IOMMU and * device address with IOMMU. * @pages: Array of backing pages. + * @sgt: Imported sg_table. * * P.S. this object would be transferred to user as kms_bo.handle so * user can access the buffer through kms_bo.handle. @@ -52,6 +53,7 @@ struct exynos_drm_gem_obj { dma_addr_t dma_addr; struct dma_attrs dma_attrs; struct page **pages; + struct sg_table *sgt; }; struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); @@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); /* destroy a buffer with gem object */ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj); -/* create a private gem object and initialize it. */ -struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, - unsigned long size); - /* create a new buffer with gem object */ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev, unsigned int flags, diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 425e70625388..2f5c118f4c8e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM static int rotator_clk_crtl(struct rot_context *rot, bool enable) { if (enable) { @@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev) } #endif -#ifdef CONFIG_PM static int rotator_runtime_suspend(struct device *dev) { struct rot_context *rot = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index f6ecbda2c604..674341708033 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -143,7 +143,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, } /** - * i915_gem_shrink - Shrink buffer object caches completely + * i915_gem_shrink_all - Shrink buffer object caches completely * @dev_priv: i915 device * * This is a simple wraper around i915_gem_shrink() to aggressively shrink all diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 8fd431bcdfd3..a96b9006a51e 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -804,7 +804,10 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { * Also note, that the object created here is not currently a "first class" * object, in that several ioctls are banned. These are the CPU access * ioctls: mmap(), pwrite and pread. In practice, you are expected to use - * direct access via your pointer rather than use those ioctls. + * direct access via your pointer rather than use those ioctls. Another + * restriction is that we do not allow userptr surfaces to be pinned to the + * hardware and so we reject any attempt to create a framebuffer out of a + * userptr. * * If you think this is a good interface to use to pass GPU memory between * drivers, please use dma-buf instead. 
In fact, wherever possible use diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index cf418be7d30a..b2270d576979 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1724,6 +1724,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); } + /* + * Apparently we need to have VGA mode enabled prior to changing + * the P1/P2 dividers. Otherwise the DPLL will keep using the old + * dividers, even though the register value does change. + */ + I915_WRITE(reg, 0); + + I915_WRITE(reg, dpll); + /* Wait for the clocks to stabilize. */ POSTING_READ(reg); udelay(150); @@ -14107,6 +14116,11 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct drm_i915_gem_object *obj = intel_fb->obj; + if (obj->userptr.mm) { + DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); + return -EINVAL; + } + return drm_gem_handle_create(file, &obj->base, handle); } @@ -14897,9 +14911,19 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) /* restore vblank interrupts to correct state */ drm_crtc_vblank_reset(&crtc->base); if (crtc->active) { + struct intel_plane *plane; + drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); update_scanline_offset(crtc); drm_crtc_vblank_on(&crtc->base); + + /* Disable everything but the primary plane */ + for_each_intel_plane_on_crtc(dev, crtc, plane) { + if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) + continue; + + plane->disable_plane(&plane->base, &crtc->base); + } } /* We need to sanitize the plane -> pipe mapping first because this will @@ -15067,38 +15091,25 @@ void i915_redisable_vga(struct drm_device *dev) i915_redisable_vga_power_on(dev); } -static bool primary_get_hw_state(struct intel_crtc *crtc) +static bool primary_get_hw_state(struct intel_plane *plane) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE); + return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE; } -static void readout_plane_state(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) +/* FIXME read out full plane state for all planes */ +static void readout_plane_state(struct intel_crtc *crtc) { - struct intel_plane *p; - struct intel_plane_state *plane_state; - bool active = crtc_state->base.active; - - for_each_intel_plane(crtc->base.dev, p) { - if (crtc->pipe != p->pipe) - continue; - - plane_state = to_intel_plane_state(p->base.state); + struct drm_plane *primary = crtc->base.primary; + struct intel_plane_state *plane_state = + to_intel_plane_state(primary->state); - if (p->base.type == DRM_PLANE_TYPE_PRIMARY) { - plane_state->visible = primary_get_hw_state(crtc); - if (plane_state->visible) - crtc->base.state->plane_mask |= - 1 << drm_plane_index(&p->base); - } else { - if (active) - p->disable_plane(&p->base, &crtc->base); + plane_state->visible = + primary_get_hw_state(to_intel_plane(primary)); - plane_state->visible = false; - } - } + if (plane_state->visible) + crtc->base.state->plane_mask |= 1 << drm_plane_index(primary); } static void intel_modeset_readout_hw_state(struct drm_device *dev) @@ -15121,34 +15132,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->base.state->active = crtc->active; crtc->base.enabled = crtc->active; - 
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); - if (crtc->base.state->active) { - intel_mode_from_pipe_config(&crtc->base.mode, crtc->config); - intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config); - WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); - - /* - * The initial mode needs to be set in order to keep - * the atomic core happy. It wants a valid mode if the - * crtc's enabled, so we do the above call. - * - * At this point some state updated by the connectors - * in their ->detect() callback has not run yet, so - * no recalculation can be done yet. - * - * Even if we could do a recalculation and modeset - * right now it would cause a double modeset if - * fbdev or userspace chooses a different initial mode. - * - * If that happens, someone indicated they wanted a - * mode change, which means it's safe to do a full - * recalculation. - */ - crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; - } - - crtc->base.hwmode = crtc->config->base.adjusted_mode; - readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state)); + readout_plane_state(crtc); DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", crtc->base.base.id, @@ -15207,6 +15191,36 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) connector->base.name, connector->base.encoder ? "enabled" : "disabled"); } + + for_each_intel_crtc(dev, crtc) { + crtc->base.hwmode = crtc->config->base.adjusted_mode; + + memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); + if (crtc->base.state->active) { + intel_mode_from_pipe_config(&crtc->base.mode, crtc->config); + intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config); + WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); + + /* + * The initial mode needs to be set in order to keep + * the atomic core happy. It wants a valid mode if the + * crtc's enabled, so we do the above call. + * + * At this point some state updated by the connectors + * in their ->detect() callback has not run yet, so + * no recalculation can be done yet. + * + * Even if we could do a recalculation and modeset + * right now it would cause a double modeset if + * fbdev or userspace chooses a different initial mode. + * + * If that happens, someone indicated they wanted a + * mode change, which means it's safe to do a full + * recalculation. 
+ */ + crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; + } + } } /* Scan out the current hw modeset state, diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 3e4be5a3becd..6ade06888432 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); drm_mode_connector_set_path_property(connector, pathprop); + return connector; +} + +static void intel_dp_register_mst_connector(struct drm_connector *connector) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_device *dev = connector->dev; drm_modeset_lock_all(dev); intel_connector_add_to_fbdev(intel_connector); drm_modeset_unlock_all(dev); drm_connector_register(&intel_connector->base); - return connector; } static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, @@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) static struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = intel_dp_add_mst_connector, + .register_connector = intel_dp_register_mst_connector, .destroy_connector = intel_dp_destroy_mst_connector, .hotplug = intel_dp_mst_hotplug, }; diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 53c0173a39fe..b17785719598 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) /* Enable polling and queue hotplug re-enabling. */ if (hpd_disabled) { - drm_kms_helper_poll_enable(dev); + drm_kms_helper_poll_enable_locked(dev); mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 72e0edd7bbde..29dd4488dc49 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); read_pointer = ring->next_context_status_buffer; - write_pointer = status_pointer & 0x07; + write_pointer = status_pointer & GEN8_CSB_PTR_MASK; if (read_pointer > write_pointer) - write_pointer += 6; + write_pointer += GEN8_CSB_ENTRIES; spin_lock(&ring->execlist_lock); while (read_pointer < write_pointer) { read_pointer++; status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + - (read_pointer % 6) * 8); + (read_pointer % GEN8_CSB_ENTRIES) * 8); status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + - (read_pointer % 6) * 8 + 4); + (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4); if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) continue; @@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring) spin_unlock(&ring->execlist_lock); WARN(submit_contexts > 2, "More than two context complete events?\n"); - ring->next_context_status_buffer = write_pointer % 6; + ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), - _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8)); + _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8, + ((u32)ring->next_context_status_buffer & + GEN8_CSB_PTR_MASK) << 8)); } static int execlists_context_queue(struct drm_i915_gem_request *request) @@ -1422,6 
+1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; + u8 next_context_status_buffer_hw; I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); @@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); POSTING_READ(RING_MODE_GEN7(ring)); - ring->next_context_status_buffer = 0; + + /* + * Instead of resetting the Context Status Buffer (CSB) read pointer to + * zero, we need to read the write pointer from hardware and use its + * value because "this register is power context save restored". + * Effectively, these states have been observed: + * + * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) | + * BDW | CSB regs not reset | CSB regs reset | + * CHT | CSB regs not reset | CSB regs not reset | + */ + next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring)) + & GEN8_CSB_PTR_MASK); + + /* + * When the CSB registers are reset (also after power-up / gpu reset), + * CSB write pointer is set to all 1's, which is not valid, use '5' in + * this special case, so the first element read is CSB[0]. + */ + if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK) + next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1); + + ring->next_context_status_buffer = next_context_status_buffer_hw; DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); @@ -1634,6 +1659,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_FLUSH_ENABLE; } if (invalidate_domains) { diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 64f89f9982a2..3c63bb32ad81 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -25,6 +25,8 @@ #define _INTEL_LRC_H_ #define GEN8_LR_CONTEXT_ALIGN 4096 +#define GEN8_CSB_ENTRIES 6 +#define GEN8_CSB_PTR_MASK 0x07 /* Execlists regs */ #define RING_ELSP(ring) ((ring)->mmio_base+0x230) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6e6b8db996ef..61b451fbd09e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_FLUSH_ENABLE; } if (invalidate_domains) { flags |= PIPE_CONTROL_TLB_INVALIDATE; @@ -418,6 +419,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_FLUSH_ENABLE; } if (invalidate_domains) { flags |= PIPE_CONTROL_TLB_INVALIDATE; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index af7fdb3bd663..7401cf90b0db 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, } if (power_well->data == SKL_DISP_PW_1) { - intel_prepare_ddi(dev); + if (!dev_priv->power_domains.initializing) + 
intel_prepare_ddi(dev); gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); } } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index cc6c228e11c8..e905c00acf1a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -469,9 +469,13 @@ nouveau_display_create(struct drm_device *dev) if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { dev->mode_config.max_width = 4096; dev->mode_config.max_height = 4096; - } else { + } else + if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) { dev->mode_config.max_width = 8192; dev->mode_config.max_height = 8192; + } else { + dev->mode_config.max_width = 16384; + dev->mode_config.max_height = 16384; } dev->mode_config.preferred_depth = 24; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 2791701685dc..59f27e774acb 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info) return 0; } +static int +nouveau_fbcon_open(struct fb_info *info, int user) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + int ret = pm_runtime_get_sync(drm->dev->dev); + if (ret < 0 && ret != -EACCES) + return ret; + return 0; +} + +static int +nouveau_fbcon_release(struct fb_info *info, int user) +{ + struct nouveau_fbdev *fbcon = info->par; + struct nouveau_drm *drm = nouveau_drm(fbcon->dev); + pm_runtime_put(drm->dev->dev); + return 0; +} + static struct fb_ops nouveau_fbcon_ops = { .owner = THIS_MODULE, + .fb_open = nouveau_fbcon_open, + .fb_release = nouveau_fbcon_release, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = nouveau_fbcon_fillrect, @@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = { static struct fb_ops nouveau_fbcon_sw_ops = { .owner = THIS_MODULE, + .fb_open = nouveau_fbcon_open, + .fb_release = nouveau_fbcon_release, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = drm_fb_helper_cfb_fillrect, diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 2c9981512d27..41be584147b9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -227,11 +227,12 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem, struct nouveau_bo *nvbo = nouveau_gem_object(gem); struct nvkm_vma *vma; - if (nvbo->bo.mem.mem_type == TTM_PL_TT) + if (is_power_of_2(nvbo->valid_domains)) + rep->domain = nvbo->valid_domains; + else if (nvbo->bo.mem.mem_type == TTM_PL_TT) rep->domain = NOUVEAU_GEM_DOMAIN_GART; else rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; - rep->offset = nvbo->bo.offset; if (cli->vm) { vma = nouveau_bo_vma_find(nvbo, cli->vm); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c index 65af31441e9c..a7d69ce7abc1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c @@ -267,6 +267,12 @@ init_i2c(struct nvbios_init *init, int index) index = NVKM_I2C_BUS_PRI; if (init->outp && init->outp->i2c_upper_default) index = NVKM_I2C_BUS_SEC; + } else + if (index == 0x80) { + index = NVKM_I2C_BUS_PRI; + } else + if (index == 0x81) { + index = NVKM_I2C_BUS_SEC; } bus = nvkm_i2c_bus_find(i2c, index); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h 
b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h index e0ec2a6b7b79..212800ecdce9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h @@ -8,7 +8,10 @@ struct nvbios_source { void *(*init)(struct nvkm_bios *, const char *); void (*fini)(void *); u32 (*read)(void *, u32 offset, u32 length, struct nvkm_bios *); + u32 (*size)(void *); bool rw; + bool ignore_checksum; + bool no_pcir; }; int nvbios_extend(struct nvkm_bios *, u32 length); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c index 792f017525f6..b2557e87afdd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c @@ -45,7 +45,7 @@ shadow_fetch(struct nvkm_bios *bios, struct shadow *mthd, u32 upto) u32 read = mthd->func->read(data, start, limit - start, bios); bios->size = start + read; } - return bios->size >= limit; + return bios->size >= upto; } static int @@ -55,14 +55,22 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) struct nvbios_image image; int score = 1; - if (!shadow_fetch(bios, mthd, offset + 0x1000)) { - nvkm_debug(subdev, "%08x: header fetch failed\n", offset); - return 0; - } + if (mthd->func->no_pcir) { + image.base = 0; + image.type = 0; + image.size = mthd->func->size(mthd->data); + image.last = 1; + } else { + if (!shadow_fetch(bios, mthd, offset + 0x1000)) { + nvkm_debug(subdev, "%08x: header fetch failed\n", + offset); + return 0; + } - if (!nvbios_image(bios, idx, &image)) { - nvkm_debug(subdev, "image %d invalid\n", idx); - return 0; + if (!nvbios_image(bios, idx, &image)) { + nvkm_debug(subdev, "image %d invalid\n", idx); + return 0; + } } nvkm_debug(subdev, "%08x: type %02x, %d bytes\n", image.base, image.type, image.size); @@ -74,7 +82,8 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd) switch (image.type) { case 0x00: - if (nvbios_checksum(&bios->data[image.base], image.size)) { + if (!mthd->func->ignore_checksum && + nvbios_checksum(&bios->data[image.base], image.size)) { nvkm_debug(subdev, "%08x: checksum failed\n", image.base); if (mthd->func->rw) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c index bd60d7dd09f5..4bf486b57101 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c @@ -21,6 +21,7 @@ * */ #include "priv.h" + #include #if defined(__powerpc__) @@ -33,17 +34,26 @@ static u32 of_read(void *data, u32 offset, u32 length, struct nvkm_bios *bios) { struct priv *priv = data; - if (offset + length <= priv->size) { + if (offset < priv->size) { + length = min_t(u32, length, priv->size - offset); memcpy_fromio(bios->data + offset, priv->data + offset, length); return length; } return 0; } +static u32 +of_size(void *data) +{ + struct priv *priv = data; + return priv->size; +} + static void * of_init(struct nvkm_bios *bios, const char *name) { - struct pci_dev *pdev = bios->subdev.device->func->pci(bios->subdev.device)->pdev; + struct nvkm_device *device = bios->subdev.device; + struct pci_dev *pdev = device->func->pci(device)->pdev; struct device_node *dn; struct priv *priv; if (!(dn = pci_device_to_OF_node(pdev))) @@ -62,7 +72,10 @@ nvbios_of = { .init = of_init, .fini = (void(*)(void *))kfree, .read = of_read, + .size = of_size, .rw = false, + .ignore_checksum = true, + .no_pcir = true, }; #else const struct 
nvbios_source diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c index 814cb51cc873..385a90f91ed6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.c @@ -35,6 +35,8 @@ static const struct nvkm_device_agp_quirk nvkm_device_agp_quirks[] = { /* VIA Apollo PRO133x / GeForce FX 5600 Ultra - fdo#20341 */ { PCI_VENDOR_ID_VIA, 0x0691, PCI_VENDOR_ID_NVIDIA, 0x0311, 2 }, + /* SiS 761 does not support AGP cards, use PCI mode */ + { PCI_VENDOR_ID_SI, 0x0761, PCI_ANY_ID, PCI_ANY_ID, 0 }, {}, }; @@ -137,8 +139,10 @@ nvkm_agp_ctor(struct nvkm_pci *pci) while (quirk->hostbridge_vendor) { if (info.device->vendor == quirk->hostbridge_vendor && info.device->device == quirk->hostbridge_device && - pci->pdev->vendor == quirk->chip_vendor && - pci->pdev->device == quirk->chip_device) { + (quirk->chip_vendor == (u16)PCI_ANY_ID || + pci->pdev->vendor == quirk->chip_vendor) && + (quirk->chip_device == (u16)PCI_ANY_ID || + pci->pdev->device == quirk->chip_device)) { nvkm_info(subdev, "forcing default agp mode to %dX, " "use NvAGP= to override\n", quirk->mode); diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index dd845f82cc24..183aea1abebc 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -242,6 +242,10 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, bo->is_primary = true; ret = qxl_bo_reserve(bo, false); + if (ret) + return ret; + ret = qxl_bo_pin(bo, bo->type, NULL); + qxl_bo_unreserve(bo); if (ret) return ret; @@ -257,7 +261,11 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc, } drm_vblank_put(dev, qcrtc->index); - qxl_bo_unreserve(bo); + ret = qxl_bo_reserve(bo, false); + if (!ret) { + qxl_bo_unpin(bo); + qxl_bo_unreserve(bo); + } return 0; } @@ -618,7 +626,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, adjusted_mode->hdisplay, adjusted_mode->vdisplay); - if (qcrtc->index == 0) + if (bo->is_primary == false) recreate_primary = true; if (bo->surf.stride * bo->surf.height > qdev->vram_size) { diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 41c422fee31a..c4a552637c93 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -144,14 +144,17 @@ static void qxl_dirty_update(struct qxl_fbdev *qfbdev, spin_lock_irqsave(&qfbdev->dirty.lock, flags); - if (qfbdev->dirty.y1 < y) - y = qfbdev->dirty.y1; - if (qfbdev->dirty.y2 > y2) - y2 = qfbdev->dirty.y2; - if (qfbdev->dirty.x1 < x) - x = qfbdev->dirty.x1; - if (qfbdev->dirty.x2 > x2) - x2 = qfbdev->dirty.x2; + if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) && + (qfbdev->dirty.x2 - qfbdev->dirty.x1)) { + if (qfbdev->dirty.y1 < y) + y = qfbdev->dirty.y1; + if (qfbdev->dirty.y2 > y2) + y2 = qfbdev->dirty.y2; + if (qfbdev->dirty.x1 < x) + x = qfbdev->dirty.x1; + if (qfbdev->dirty.x2 > x2) + x2 = qfbdev->dirty.x2; + } qfbdev->dirty.x1 = x; qfbdev->dirty.x2 = x2; diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index b66ec331c17c..4efa8e261baf 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -307,7 +307,7 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); if (idr_ret < 0) return idr_ret; - bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); + bo = to_qxl_bo(entry->tv.bo); (*release)->release_offset = create_rel->release_offset + 64; @@ -316,8 +316,6 @@ int 
qxl_alloc_surface_release_reserved(struct qxl_device *qdev, info = qxl_release_map(qdev, *release); info->id = idr_ret; qxl_release_unmap(qdev, *release, info); - - qxl_bo_unref(&bo); return 0; } diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index c3872598b85a..65adb9c72377 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode) } else atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { - args.ucAction = ATOM_LCD_BLON; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); } break; case DRM_MODE_DPMS_STANDBY: @@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0); } if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) - atombios_dig_transmitter_setup(encoder, - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); + atombios_set_backlight_level(radeon_encoder, dig->backlight_level); if (ext_encoder) atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); break; diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index f03b7eb15233..b6cbd816537e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1658,6 +1658,7 @@ struct radeon_pm { u8 fan_max_rpm; /* dpm */ bool dpm_enabled; + bool sysfs_initialized; struct radeon_dpm dpm; }; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index d2e9e9efc159..6743174acdbc 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1633,18 +1633,8 @@ int radeon_modeset_init(struct radeon_device *rdev) radeon_fbdev_init(rdev); drm_kms_helper_poll_init(rdev->ddev); - if (rdev->pm.dpm_enabled) { - /* do dpm late init */ - ret = radeon_pm_late_init(rdev); - if (ret) { - rdev->pm.dpm_enabled = false; - DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); - } - /* set the dpm state for PX since there won't be - * a modeset to call this. 
- */ - radeon_pm_compute_clocks(rdev); - } + /* do pm late init */ + ret = radeon_pm_late_init(rdev); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 5e09c061847f..744f5c49c664 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol { struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr); struct drm_device *dev = master->base.dev; - struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector; struct drm_connector *connector; @@ -284,14 +283,22 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master); drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); + drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0); drm_mode_connector_set_path_property(connector, pathprop); + return connector; +} + +static void radeon_dp_register_mst_connector(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; + drm_modeset_lock_all(dev); radeon_fb_add_connector(rdev, connector); drm_modeset_unlock_all(dev); drm_connector_register(connector); - return connector; } static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, @@ -324,6 +331,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) struct drm_dp_mst_topology_cbs mst_cbs = { .add_connector = radeon_dp_add_mst_connector, + .register_connector = radeon_dp_register_mst_connector, .destroy_connector = radeon_dp_destroy_mst_connector, .hotplug = radeon_dp_mst_hotplug, }; diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 7214858ffcea..26da2f4d7b4f 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -48,40 +48,10 @@ struct radeon_fbdev { struct radeon_device *rdev; }; -/** - * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev. - * - * @info: fbdev info - * - * This function hides the cursor on all CRTCs used by fbdev. 
- */ -static int radeon_fb_helper_set_par(struct fb_info *info) -{ - int ret; - - ret = drm_fb_helper_set_par(info); - - /* XXX: with universal plane support fbdev will automatically disable - * all non-primary planes (including the cursor) - */ - if (ret == 0) { - struct drm_fb_helper *fb_helper = info->par; - int i; - - for (i = 0; i < fb_helper->crtc_count; i++) { - struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc; - - radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0); - } - } - - return ret; -} - static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, - .fb_set_par = radeon_fb_helper_set_par, + .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = drm_fb_helper_cfb_fillrect, .fb_copyarea = drm_fb_helper_cfb_copyarea, .fb_imageblit = drm_fb_helper_cfb_imageblit, @@ -427,3 +397,19 @@ void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector { drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector); } + +void radeon_fbdev_restore_mode(struct radeon_device *rdev) +{ + struct radeon_fbdev *rfbdev = rdev->mode_info.rfbdev; + struct drm_fb_helper *fb_helper; + int ret; + + if (!rfbdev) + return; + + fb_helper = &rfbdev->helper; + + ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); + if (ret) + DRM_DEBUG("failed to restore crtc mode\n"); +} diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 4a119c255ba9..0e932bf932c1 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -598,7 +598,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file * Outdated mess for old drm with Xorg being in charge (void function now). */ /** - * radeon_driver_firstopen_kms - drm callback for last close + * radeon_driver_lastclose_kms - drm callback for last close * * @dev: drm dev pointer * @@ -606,6 +606,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file */ void radeon_driver_lastclose_kms(struct drm_device *dev) { + struct radeon_device *rdev = dev->dev_private; + + radeon_fbdev_restore_mode(rdev); vga_switcheroo_process_delayed_switch(); } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index aecc3e3dec0c..457b026a0972 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -980,6 +980,7 @@ int radeon_fbdev_init(struct radeon_device *rdev); void radeon_fbdev_fini(struct radeon_device *rdev); void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state); bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj); +void radeon_fbdev_restore_mode(struct radeon_device *rdev); void radeon_fb_output_poll_changed(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 05751f3f8444..5feee3b4c557 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -717,10 +717,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct radeon_device *rdev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; - /* Skip limit attributes if DPM is not enabled */ + /* Skip attributes if DPM is not enabled */ if (rdev->pm.pm_method != PM_METHOD_DPM && (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || - attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) + attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || + attr == 
&sensor_dev_attr_pwm1.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) return 0; /* Skip fan attributes if fan is not present */ @@ -1326,14 +1330,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev) INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); if (rdev->pm.num_power_states > 1) { - /* where's the best place to put these? */ - ret = device_create_file(rdev->dev, &dev_attr_power_profile); - if (ret) - DRM_ERROR("failed to create device file for power profile\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_method); - if (ret) - DRM_ERROR("failed to create device file for power method\n"); - if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for PM!\n"); } @@ -1391,20 +1387,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev) goto dpm_failed; rdev->pm.dpm_enabled = true; - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); - if (ret) - DRM_ERROR("failed to create device file for dpm state\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); - if (ret) - DRM_ERROR("failed to create device file for dpm state\n"); - /* XXX: these are noops for dpm but are here for backwards compat */ - ret = device_create_file(rdev->dev, &dev_attr_power_profile); - if (ret) - DRM_ERROR("failed to create device file for power profile\n"); - ret = device_create_file(rdev->dev, &dev_attr_power_method); - if (ret) - DRM_ERROR("failed to create device file for power method\n"); - if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for dpm!\n"); } @@ -1545,9 +1527,51 @@ int radeon_pm_late_init(struct radeon_device *rdev) int ret = 0; if (rdev->pm.pm_method == PM_METHOD_DPM) { - mutex_lock(&rdev->pm.mutex); - ret = radeon_dpm_late_enable(rdev); - mutex_unlock(&rdev->pm.mutex); + if (rdev->pm.dpm_enabled) { + if (!rdev->pm.sysfs_initialized) { + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); + if (ret) + DRM_ERROR("failed to create device file for dpm state\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); + if (ret) + DRM_ERROR("failed to create device file for dpm state\n"); + /* XXX: these are noops for dpm but are here for backwards compat */ + ret = device_create_file(rdev->dev, &dev_attr_power_profile); + if (ret) + DRM_ERROR("failed to create device file for power profile\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_method); + if (ret) + DRM_ERROR("failed to create device file for power method\n"); + if (!ret) + rdev->pm.sysfs_initialized = true; + } + + mutex_lock(&rdev->pm.mutex); + ret = radeon_dpm_late_enable(rdev); + mutex_unlock(&rdev->pm.mutex); + if (ret) { + rdev->pm.dpm_enabled = false; + DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); + } else { + /* set the dpm state for PX since there won't be + * a modeset to call this. + */ + radeon_pm_compute_clocks(rdev); + } + } + } else { + if ((rdev->pm.num_power_states > 1) && + (!rdev->pm.sysfs_initialized)) { + /* where's the best place to put these? 
*/ + ret = device_create_file(rdev->dev, &dev_attr_power_profile); + if (ret) + DRM_ERROR("failed to create device file for power profile\n"); + ret = device_create_file(rdev->dev, &dev_attr_power_method); + if (ret) + DRM_ERROR("failed to create device file for power method\n"); + if (!ret) + rdev->pm.sysfs_initialized = true; + } } return ret; } diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index e9115d3f67b0..e72bf46042e0 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2928,6 +2928,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = { { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, { PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 }, { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 }, + { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 }, { 0, 0, 0, 0 }, }; diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c index db8b49101a8b..512263919282 100644 --- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c @@ -34,8 +34,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; - seq_printf(m, "fence %ld %lld\n", - atomic64_read(&vgdev->fence_drv.last_seq), + seq_printf(m, "fence %llu %lld\n", + (u64)atomic64_read(&vgdev->fence_drv.last_seq), vgdev->fence_drv.sync_seq); return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index 1da632631dac..67097c9ce9c1 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c @@ -61,7 +61,7 @@ static void virtio_timeline_value_str(struct fence *f, char *str, int size) { struct virtio_gpu_fence *fence = to_virtio_fence(f); - snprintf(str, size, "%lu", atomic64_read(&fence->drv->last_seq)); + snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq)); } static const struct fence_ops virtio_fence_ops = { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 5ae8f921da2a..6377e8151000 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -415,16 +415,16 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man, * * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has * command buffers left that are not submitted to hardware, Make sure - * IRQ handling is turned on. Otherwise, make sure it's turned off. This - * function may return -EAGAIN to indicate it should be rerun due to - * possibly missed IRQs if IRQs has just been turned on. + * IRQ handling is turned on. Otherwise, make sure it's turned off. */ -static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) +static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) { - int notempty = 0; + int notempty; struct vmw_cmdbuf_context *ctx; int i; +retry: + notempty = 0; for_each_cmdbuf_ctx(man, i, ctx) vmw_cmdbuf_ctx_process(man, ctx, ¬empty); @@ -440,10 +440,8 @@ static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) man->irq_on = true; /* Rerun in case we just missed an irq. 
*/ - return -EAGAIN; + goto retry; } - - return 0; } /** @@ -468,8 +466,7 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man, header->cb_context = cb_context; list_add_tail(&header->list, &man->ctx[cb_context].submitted); - if (vmw_cmdbuf_man_process(man) == -EAGAIN) - vmw_cmdbuf_man_process(man); + vmw_cmdbuf_man_process(man); } /** @@ -488,8 +485,7 @@ static void vmw_cmdbuf_man_tasklet(unsigned long data) struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data; spin_lock(&man->lock); - if (vmw_cmdbuf_man_process(man) == -EAGAIN) - (void) vmw_cmdbuf_man_process(man); + vmw_cmdbuf_man_process(man); spin_unlock(&man->lock); } @@ -507,6 +503,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work) struct vmw_cmdbuf_man *man = container_of(work, struct vmw_cmdbuf_man, work); struct vmw_cmdbuf_header *entry, *next; + uint32_t dummy; bool restart = false; spin_lock_bh(&man->lock); @@ -523,6 +520,8 @@ static void vmw_cmdbuf_work_func(struct work_struct *work) if (restart && vmw_cmdbuf_startstop(man, true)) DRM_ERROR("Failed restarting command buffer context 0.\n"); + /* Send a new fence in case one was removed */ + vmw_fifo_send_fence(man->dev_priv, &dummy); } /** @@ -681,6 +680,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, 0, 0, DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT); + if (ret) { + vmw_cmdbuf_man_process(man); + ret = drm_mm_insert_node_generic(&man->mm, info->node, + info->page_size, 0, 0, + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); + } + spin_unlock_bh(&man->lock); info->done = !ret; @@ -1160,7 +1167,14 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT); man->has_pool = true; - man->default_size = default_size; + + /* + * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to + * prevent deadlocks from happening when vmw_cmdbuf_space_pool() + * needs to wait for space and we block on further command + * submissions to be able to free up space. + */ + man->default_size = VMW_CMDBUF_INLINE_SIZE; DRM_INFO("Using command buffers with %s pool.\n", (man->using_mob) ? 
"MOB" : "DMA"); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 64b50409fa07..03f63c749c02 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -657,7 +657,8 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) struct vmw_resource *res = &user_srf->srf.res; *p_base = NULL; - ttm_base_object_unref(&user_srf->backup_base); + if (user_srf->backup_base) + ttm_base_object_unref(&user_srf->backup_base); vmw_resource_unreference(&res); } diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c index 6cb89c0ebab6..1fd46859ed29 100644 --- a/drivers/hwmon/abx500.c +++ b/drivers/hwmon/abx500.c @@ -470,6 +470,7 @@ static const struct of_device_id abx500_temp_match[] = { { .compatible = "stericsson,abx500-temp" }, {}, }; +MODULE_DEVICE_TABLE(of, abx500_temp_match); #endif static struct platform_driver abx500_temp_driver = { diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index a3dae6d0082a..82de3deeb18a 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c @@ -539,6 +539,7 @@ static const struct of_device_id of_gpio_fan_match[] = { { .compatible = "gpio-fan", }, {}, }; +MODULE_DEVICE_TABLE(of, of_gpio_fan_match); #endif /* CONFIG_OF_GPIO */ static int gpio_fan_probe(struct platform_device *pdev) diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 2d9a712699ff..3e23003f78b0 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -323,6 +323,7 @@ static const struct of_device_id of_pwm_fan_match[] = { { .compatible = "pwm-fan", }, {}, }; +MODULE_DEVICE_TABLE(of, of_pwm_fan_match); static struct platform_driver pwm_fan_driver = { .probe = pwm_fan_probe, diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 3dd2de31a2f8..472b88285c75 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -51,6 +52,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) } #ifdef CONFIG_ACPI +/* + * The HCNT/LCNT information coming from ACPI should be the most accurate + * for given platform. However, some systems get it wrong. On such systems + * we get better results by calculating those based on the input clock. 
+ */ +static const struct dmi_system_id dw_i2c_no_acpi_params[] = { + { + .ident = "Dell Inspiron 7348", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"), + }, + }, + { } +}; + static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], u16 *hcnt, u16 *lcnt, u32 *sda_hold) { @@ -58,6 +75,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], acpi_handle handle = ACPI_HANDLE(&pdev->dev); union acpi_object *obj; + if (dmi_check_system(dw_i2c_no_acpi_params)) + return; + if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf))) return; @@ -253,12 +273,6 @@ static int dw_i2c_probe(struct platform_device *pdev) adap->dev.parent = &pdev->dev; adap->dev.of_node = pdev->dev.of_node; - r = i2c_add_numbered_adapter(adap); - if (r) { - dev_err(&pdev->dev, "failure adding adapter\n"); - return r; - } - if (dev->pm_runtime_disabled) { pm_runtime_forbid(&pdev->dev); } else { @@ -268,6 +282,13 @@ static int dw_i2c_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); } + r = i2c_add_numbered_adapter(adap); + if (r) { + dev_err(&pdev->dev, "failure adding adapter\n"); + pm_runtime_disable(&pdev->dev); + return r; + } + return 0; } diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 30059c1df2a3..5801227b97ab 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -669,8 +669,6 @@ mv64xxx_i2c_can_offload(struct mv64xxx_i2c_data *drv_data) struct i2c_msg *msgs = drv_data->msgs; int num = drv_data->num_msgs; - return false; - if (!drv_data->offload_enabled) return false; diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index e814a36d9b78..6f8b446be5b0 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -600,7 +600,7 @@ static int i2c_pnx_controller_suspend(struct device *dev) { struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev); - clk_disable(alg_data->clk); + clk_disable_unprepare(alg_data->clk); return 0; } @@ -609,7 +609,7 @@ static int i2c_pnx_controller_resume(struct device *dev) { struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev); - return clk_enable(alg_data->clk); + return clk_prepare_enable(alg_data->clk); } static SIMPLE_DEV_PM_OPS(i2c_pnx_pm, @@ -672,7 +672,7 @@ static int i2c_pnx_probe(struct platform_device *pdev) if (IS_ERR(alg_data->ioaddr)) return PTR_ERR(alg_data->ioaddr); - ret = clk_enable(alg_data->clk); + ret = clk_prepare_enable(alg_data->clk); if (ret) return ret; @@ -726,7 +726,7 @@ static int i2c_pnx_probe(struct platform_device *pdev) return 0; out_clock: - clk_disable(alg_data->clk); + clk_disable_unprepare(alg_data->clk); return ret; } @@ -735,7 +735,7 @@ static int i2c_pnx_remove(struct platform_device *pdev) struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); i2c_del_adapter(&alg_data->adapter); - clk_disable(alg_data->clk); + clk_disable_unprepare(alg_data->clk); return 0; } diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index d8361dada584..d8b5a8fee1e6 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -690,15 +690,16 @@ static int rcar_i2c_probe(struct platform_device *pdev) return ret; } + pm_runtime_enable(dev); + platform_set_drvdata(pdev, priv); + ret = i2c_add_numbered_adapter(adap); if (ret < 0) { dev_err(dev, "reg adap failed: %d\n", ret); + pm_runtime_disable(dev); return ret; } - pm_runtime_enable(dev); - platform_set_drvdata(pdev, priv); - 
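The designware and rcar probe() hunks above converge on the same ordering: drvdata and runtime PM are set up before i2c_add_numbered_adapter(), because registration can immediately trigger callbacks that depend on them, and pm_runtime_disable() is added to the error path so a failed probe does not leave runtime PM enabled. A minimal sketch of that shape, using hypothetical example_* names rather than code from this patch (adapter name, owner and algorithm setup are omitted):

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

struct example_i2c_dev {
	struct i2c_adapter adap;
};

static int example_i2c_probe(struct platform_device *pdev)
{
	struct example_i2c_dev *dev;
	int ret;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->adap.dev.parent = &pdev->dev;
	/* ... adapter name, owner and algorithm would be filled in here ... */

	/* Make drvdata and runtime PM valid before the adapter exists. */
	platform_set_drvdata(pdev, dev);
	pm_runtime_enable(&pdev->dev);

	ret = i2c_add_numbered_adapter(&dev->adap);
	if (ret) {
		/* Unwind runtime PM if registration fails. */
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	return 0;
}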
dev_info(dev, "probed\n"); return 0; diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 50bfd8cef5f2..5df819610d52 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1243,17 +1243,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) i2c->adap.nr = i2c->pdata->bus_num; i2c->adap.dev.of_node = pdev->dev.of_node; + platform_set_drvdata(pdev, i2c); + + pm_runtime_enable(&pdev->dev); + ret = i2c_add_numbered_adapter(&i2c->adap); if (ret < 0) { dev_err(&pdev->dev, "failed to add bus to i2c core\n"); + pm_runtime_disable(&pdev->dev); s3c24xx_i2c_deregister_cpufreq(i2c); clk_unprepare(i2c->clk); return ret; } - platform_set_drvdata(pdev, i2c); - - pm_runtime_enable(&pdev->dev); pm_runtime_enable(&i2c->adap.dev); dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev)); diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 5f89f1e3c2f2..a59c3111f7fb 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -694,12 +694,12 @@ static int i2c_device_probe(struct device *dev) goto err_clear_wakeup_irq; status = dev_pm_domain_attach(&client->dev, true); - if (status != -EPROBE_DEFER) { - status = driver->probe(client, i2c_match_id(driver->id_table, - client)); - if (status) - goto err_detach_pm_domain; - } + if (status == -EPROBE_DEFER) + goto err_clear_wakeup_irq; + + status = driver->probe(client, i2c_match_id(driver->id_table, client)); + if (status) + goto err_detach_pm_domain; return 0; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3a3738fe016b..cd4510a63375 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -620,7 +620,7 @@ static struct cpuidle_state skl_cstates[] = { .name = "C6-SKL", .desc = "MWAIT 0x20", .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 75, + .exit_latency = 85, .target_residency = 200, .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, @@ -636,10 +636,18 @@ static struct cpuidle_state skl_cstates[] = { .name = "C8-SKL", .desc = "MWAIT 0x40", .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, - .exit_latency = 174, + .exit_latency = 200, .target_residency = 800, .enter = &intel_idle, .enter_freeze = intel_idle_freeze, }, + { + .name = "C9-SKL", + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 480, + .target_residency = 5000, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze, }, { .name = "C10-SKL", .desc = "MWAIT 0x60", diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index ff30f8806880..fb9311110424 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -149,8 +149,6 @@ #define ST_ACCEL_4_BDU_MASK 0x40 #define ST_ACCEL_4_DRDY_IRQ_ADDR 0x21 #define ST_ACCEL_4_DRDY_IRQ_INT1_MASK 0x04 -#define ST_ACCEL_4_IG1_EN_ADDR 0x21 -#define ST_ACCEL_4_IG1_EN_MASK 0x08 #define ST_ACCEL_4_MULTIREAD_BIT true /* CUSTOM VALUES FOR SENSOR 5 */ @@ -489,10 +487,6 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = { .drdy_irq = { .addr = ST_ACCEL_4_DRDY_IRQ_ADDR, .mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK, - .ig1 = { - .en_addr = ST_ACCEL_4_IG1_EN_ADDR, - .en_mask = ST_ACCEL_4_IG1_EN_MASK, - }, }, .multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT, .bootime = 2, /* guess */ diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c index ebe415f10640..0c74869a540a 100644 --- a/drivers/iio/adc/twl4030-madc.c +++ 
b/drivers/iio/adc/twl4030-madc.c @@ -45,13 +45,18 @@ #include #include #include +#include #include +#define TWL4030_USB_SEL_MADC_MCPC (1<<3) +#define TWL4030_USB_CARKIT_ANA_CTRL 0xBB + /** * struct twl4030_madc_data - a container for madc info * @dev: Pointer to device structure for madc * @lock: Mutex protecting this data structure + * @regulator: Pointer to bias regulator for madc * @requests: Array of request struct corresponding to SW1, SW2 and RT * @use_second_irq: IRQ selection (main or co-processor) * @imr: Interrupt mask register of MADC @@ -60,6 +65,7 @@ struct twl4030_madc_data { struct device *dev; struct mutex lock; /* mutex protecting this data structure */ + struct regulator *usb3v1; struct twl4030_madc_request requests[TWL4030_MADC_NUM_METHODS]; bool use_second_irq; u8 imr; @@ -841,6 +847,32 @@ static int twl4030_madc_probe(struct platform_device *pdev) } twl4030_madc = madc; + /* Configure MADC[3:6] */ + ret = twl_i2c_read_u8(TWL_MODULE_USB, &regval, + TWL4030_USB_CARKIT_ANA_CTRL); + if (ret) { + dev_err(&pdev->dev, "unable to read reg CARKIT_ANA_CTRL 0x%X\n", + TWL4030_USB_CARKIT_ANA_CTRL); + goto err_i2c; + } + regval |= TWL4030_USB_SEL_MADC_MCPC; + ret = twl_i2c_write_u8(TWL_MODULE_USB, regval, + TWL4030_USB_CARKIT_ANA_CTRL); + if (ret) { + dev_err(&pdev->dev, "unable to write reg CARKIT_ANA_CTRL 0x%X\n", + TWL4030_USB_CARKIT_ANA_CTRL); + goto err_i2c; + } + + /* Enable 3v1 bias regulator for MADC[3:6] */ + madc->usb3v1 = devm_regulator_get(madc->dev, "vusb3v1"); + if (IS_ERR(madc->usb3v1)) + return -ENODEV; + + ret = regulator_enable(madc->usb3v1); + if (ret) + dev_err(madc->dev, "could not enable 3v1 bias regulator\n"); + ret = iio_device_register(iio_dev); if (ret) { dev_err(&pdev->dev, "could not register iio device\n"); @@ -866,6 +898,8 @@ static int twl4030_madc_remove(struct platform_device *pdev) twl4030_madc_set_current_generator(madc, 0, 0); twl4030_madc_set_power(madc, 0); + regulator_disable(madc->usb3v1); + return 0; } diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 8f66c67ff0df..87471ef37198 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -508,12 +508,12 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port, memset(&gid_attr, 0, sizeof(gid_attr)); gid_attr.ndev = ndev; + mutex_lock(&table->lock); ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT); /* Coudn't find default GID location */ WARN_ON(ix < 0); - mutex_lock(&table->lock); if (!__ib_cache_gid_get(ib_dev, port, ix, &current_gid, &current_gid_attr) && mode == IB_CACHE_GID_DEFAULT_MODE_SET && diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index ea4db9c1d44f..4f918b929eca 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -835,6 +835,11 @@ retest: case IB_CM_SIDR_REQ_RCVD: spin_unlock_irq(&cm_id_priv->lock); cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT); + spin_lock_irq(&cm.lock); + if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) + rb_erase(&cm_id_priv->sidr_id_node, + &cm.remote_sidr_table); + spin_unlock_irq(&cm.lock); break; case IB_CM_REQ_SENT: case IB_CM_MRA_REQ_RCVD: @@ -3172,7 +3177,10 @@ int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, spin_unlock_irqrestore(&cm_id_priv->lock, flags); spin_lock_irqsave(&cm.lock, flags); - rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); + if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) { + rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table); + RB_CLEAR_NODE(&cm_id_priv->sidr_id_node); + }
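The two cm.c hunks above make removal of sidr_id_node idempotent: whichever of the destroy path or ib_send_cm_sidr_rep() gets there first does the rb_erase(), and RB_CLEAR_NODE() ensures the other side sees the node as already unlinked. A self-contained sketch of the underlying rbtree idiom, with hypothetical example_* names (not code from this patch):

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_entry {
	struct rb_node node;
};

static struct rb_root example_tree = RB_ROOT;
static DEFINE_SPINLOCK(example_lock);

static struct example_entry *example_alloc(void)
{
	struct example_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (e)
		RB_CLEAR_NODE(&e->node);	/* start out "not in any tree" */
	return e;
}

/* Safe to call from several paths; only the first caller really erases. */
static void example_unlink(struct example_entry *e)
{
	spin_lock(&example_lock);
	if (!RB_EMPTY_NODE(&e->node)) {
		rb_erase(&e->node, &example_tree);
		RB_CLEAR_NODE(&e->node);
	}
	spin_unlock(&example_lock);
}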
spin_unlock_irqrestore(&cm.lock, flags); return 0; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index b1ab13f3e182..36b12d560e17 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1067,14 +1067,14 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event, sizeof(req->local_gid)); req->has_gid = true; req->service_id = req_param->primary_path->service_id; - req->pkey = req_param->bth_pkey; + req->pkey = be16_to_cpu(req_param->primary_path->pkey); break; case IB_CM_SIDR_REQ_RECEIVED: req->device = sidr_param->listen_id->device; req->port = sidr_param->port; req->has_gid = false; req->service_id = sidr_param->service_id; - req->pkey = sidr_param->bth_pkey; + req->pkey = sidr_param->pkey; break; default: return -EINVAL; @@ -1232,14 +1232,32 @@ static bool cma_match_private_data(struct rdma_id_private *id_priv, return true; } +static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num) +{ + enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num); + enum rdma_transport_type transport = + rdma_node_get_transport(device->node_type); + + return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB; +} + +static bool cma_protocol_roce(const struct rdma_cm_id *id) +{ + struct ib_device *device = id->device; + const int port_num = id->port_num ?: rdma_start_port(device); + + return cma_protocol_roce_dev_port(device, port_num); +} + static bool cma_match_net_dev(const struct rdma_id_private *id_priv, const struct net_device *net_dev) { const struct rdma_addr *addr = &id_priv->id.route.addr; if (!net_dev) - /* This request is an AF_IB request */ - return addr->src_addr.ss_family == AF_IB; + /* This request is an AF_IB request or a RoCE request */ + return addr->src_addr.ss_family == AF_IB || + cma_protocol_roce(&id_priv->id); return !addr->dev_addr.bound_dev_if || (net_eq(dev_net(net_dev), &init_net) && @@ -1294,6 +1312,10 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { /* Assuming the protocol is AF_IB */ *net_dev = NULL; + } else if (cma_protocol_roce_dev_port(req.device, req.port)) { + /* TODO find the net dev matching the request parameters + * through the RoCE GID table */ + *net_dev = NULL; } else { return ERR_CAST(*net_dev); } @@ -1302,7 +1324,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id), cma_port_from_service_id(req.service_id)); id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); - if (IS_ERR(id_priv)) { + if (IS_ERR(id_priv) && *net_dev) { dev_put(*net_dev); *net_dev = NULL; } @@ -1593,11 +1615,16 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, if (ret) goto err; } else { - /* An AF_IB connection */ - WARN_ON_ONCE(ss_family != AF_IB); - - cma_translate_ib((struct sockaddr_ib *)cma_src_addr(id_priv), - &rt->addr.dev_addr); + if (!cma_protocol_roce(listen_id) && + cma_any_addr(cma_src_addr(id_priv))) { + rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; + rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); + ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); + } else if (!cma_any_addr(cma_src_addr(id_priv))) { + ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); + if (ret) + goto err; + } } rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); @@ -1635,13 +1662,12 @@ static struct rdma_id_private 
*cma_new_udp_id(struct rdma_cm_id *listen_id, if (ret) goto err; } else { - /* An AF_IB connection */ - WARN_ON_ONCE(ss_family != AF_IB); - - if (!cma_any_addr(cma_src_addr(id_priv))) - cma_translate_ib((struct sockaddr_ib *) - cma_src_addr(id_priv), - &id->route.addr.dev_addr); + if (!cma_any_addr(cma_src_addr(id_priv))) { + ret = cma_translate_addr(cma_src_addr(id_priv), + &id->route.addr.dev_addr); + if (ret) + goto err; + } } id_priv->state = RDMA_CM_CONNECT; diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c index 6b24cba1e474..178f98482e13 100644 --- a/drivers/infiniband/core/roce_gid_mgmt.c +++ b/drivers/infiniband/core/roce_gid_mgmt.c @@ -250,25 +250,44 @@ static void enum_netdev_ipv4_ips(struct ib_device *ib_dev, u8 port, struct net_device *ndev) { struct in_device *in_dev; + struct sin_list { + struct list_head list; + struct sockaddr_in ip; + }; + struct sin_list *sin_iter; + struct sin_list *sin_temp; + LIST_HEAD(sin_list); if (ndev->reg_state >= NETREG_UNREGISTERING) return; - in_dev = in_dev_get(ndev); - if (!in_dev) + rcu_read_lock(); + in_dev = __in_dev_get_rcu(ndev); + if (!in_dev) { + rcu_read_unlock(); return; + } for_ifa(in_dev) { - struct sockaddr_in ip; + struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC); - ip.sin_family = AF_INET; - ip.sin_addr.s_addr = ifa->ifa_address; - update_gid_ip(GID_ADD, ib_dev, port, ndev, - (struct sockaddr *)&ip); + if (!entry) { + pr_warn("roce_gid_mgmt: couldn't allocate entry for IPv4 update\n"); + continue; + } + entry->ip.sin_family = AF_INET; + entry->ip.sin_addr.s_addr = ifa->ifa_address; + list_add_tail(&entry->list, &sin_list); } endfor_ifa(in_dev); + rcu_read_unlock(); - in_dev_put(in_dev); + list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) { + update_gid_ip(GID_ADD, ib_dev, port, ndev, + (struct sockaddr *)&sin_iter->ip); + list_del(&sin_iter->list); + kfree(sin_iter); + } } static void enum_netdev_ipv6_ips(struct ib_device *ib_dev, diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index a53fc9b01c69..30467d10df91 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1624,11 +1624,16 @@ static int ucma_open(struct inode *inode, struct file *filp) if (!file) return -ENOMEM; + file->close_wq = create_singlethread_workqueue("ucma_close_id"); + if (!file->close_wq) { + kfree(file); + return -ENOMEM; + } + INIT_LIST_HEAD(&file->event_list); INIT_LIST_HEAD(&file->ctx_list); init_waitqueue_head(&file->poll_wait); mutex_init(&file->mut); - file->close_wq = create_singlethread_workqueue("ucma_close_id"); filp->private_data = file; file->filp = filp; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 41d6911e244e..f1ccd40beae9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -245,7 +245,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; if (MLX5_CAP_GEN(mdev, apm)) props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; - props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; if (MLX5_CAP_GEN(mdev, xrc)) props->device_cap_flags |= IB_DEVICE_XRC; props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; @@ -795,53 +794,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm return 0; } -static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) -{ - struct mlx5_create_mkey_mbox_in *in; - struct mlx5_mkey_seg *seg; - struct mlx5_core_mr mr; - 
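Looking back at the roce_gid_mgmt.c hunk above: the IPv4 address walk now happens under rcu_read_lock(), where sleeping is not allowed, so each address is copied into a small, atomically allocated list entry and the (potentially sleeping) GID update runs only after the RCU section ends. A self-contained sketch of that collect-then-process pattern, with hypothetical names that are not taken from this patch:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_addr {
	struct list_head list;
	u32 addr;
};

static void example_collect_and_update(void)
{
	struct example_addr *a, *tmp;
	LIST_HEAD(pending);

	rcu_read_lock();
	/* Walk some RCU-protected structure; only GFP_ATOMIC allocations here. */
	a = kzalloc(sizeof(*a), GFP_ATOMIC);
	if (a) {
		a->addr = 0;	/* copy whatever the later update needs */
		list_add_tail(&a->list, &pending);
	}
	rcu_read_unlock();

	/* Now sleeping is fine again: do the expensive updates and clean up. */
	list_for_each_entry_safe(a, tmp, &pending, list) {
		/* ... perform the blocking update for a->addr ... */
		list_del(&a->list);
		kfree(a);
	}
}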
int err; - - in = kzalloc(sizeof(*in), GFP_KERNEL); - if (!in) - return -ENOMEM; - - seg = &in->seg; - seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA; - seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); - seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - seg->start_addr = 0; - - err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in), - NULL, NULL, NULL); - if (err) { - mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); - goto err_in; - } - - kfree(in); - *key = mr.key; - - return 0; - -err_in: - kfree(in); - - return err; -} - -static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key) -{ - struct mlx5_core_mr mr; - int err; - - memset(&mr, 0, sizeof(mr)); - mr.key = key; - err = mlx5_core_destroy_mkey(dev->mdev, &mr); - if (err) - mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key); -} - static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) @@ -867,13 +819,6 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, kfree(pd); return ERR_PTR(-EFAULT); } - } else { - err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); - if (err) { - mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); - kfree(pd); - return ERR_PTR(err); - } } return &pd->ibpd; @@ -884,9 +829,6 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd) struct mlx5_ib_dev *mdev = to_mdev(pd->device); struct mlx5_ib_pd *mpd = to_mpd(pd); - if (!pd->uobject) - free_pa_mkey(mdev, mpd->pa_lkey); - mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); kfree(mpd); @@ -1245,18 +1187,10 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) struct ib_srq_init_attr attr; struct mlx5_ib_dev *dev; struct ib_cq_init_attr cq_attr = {.cqe = 1}; - u32 rsvd_lkey; int ret = 0; dev = container_of(devr, struct mlx5_ib_dev, devr); - ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey); - if (ret) { - pr_err("Failed to query special context %d\n", ret); - return ret; - } - dev->ib_dev.local_dma_lkey = rsvd_lkey; - devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); if (IS_ERR(devr->p0)) { ret = PTR_ERR(devr->p0); @@ -1418,6 +1352,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); dev->ib_dev.owner = THIS_MODULE; dev->ib_dev.node_type = RDMA_NODE_IB_CA; + dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); dev->ib_dev.phys_port_cnt = dev->num_ports; dev->ib_dev.num_comp_vectors = diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index bb8cda79e881..22123b79d550 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -103,7 +103,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte struct mlx5_ib_pd { struct ib_pd ibpd; u32 pdn; - u32 pa_lkey; }; /* Use macros here so that don't have to duplicate @@ -213,7 +212,6 @@ struct mlx5_ib_qp { int uuarn; int create_type; - u32 pa_lkey; /* Store signature errors */ bool signature_en; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index c745c6c5e10d..6f521a3418e8 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -925,8 +925,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); if (err) mlx5_ib_dbg(dev, "err %d\n", err); - else - qp->pa_lkey = to_mpd(pd)->pa_lkey; } if (err) @@ -2045,7 +2043,7 @@ static void set_frwr_pages(struct 
mlx5_wqe_data_seg *dseg, mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); dseg->addr = cpu_to_be64(mfrpl->map); dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); - dseg->lkey = cpu_to_be32(pd->pa_lkey); + dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); } static __be32 send_ieth(struct ib_send_wr *wr) diff --git a/drivers/infiniband/hw/usnic/usnic.h b/drivers/infiniband/hw/usnic/usnic.h index 5be13d8991bc..f903502d3883 100644 --- a/drivers/infiniband/hw/usnic/usnic.h +++ b/drivers/infiniband/hw/usnic/usnic.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_abi.h b/drivers/infiniband/hw/usnic/usnic_abi.h index 04a66229584e..7fe9502ce8d3 100644 --- a/drivers/infiniband/hw/usnic/usnic_abi.h +++ b/drivers/infiniband/hw/usnic/usnic_abi.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h index 393567266142..596e0ed49a8e 100644 --- a/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h +++ b/drivers/infiniband/hw/usnic/usnic_common_pkt_hdr.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_common_util.h b/drivers/infiniband/hw/usnic/usnic_common_util.h index 9d737ed5e55d..b54986de5f0c 100644 --- a/drivers/infiniband/hw/usnic/usnic_common_util.h +++ b/drivers/infiniband/hw/usnic/usnic_common_util.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c index 5d13860161a4..5e55b8bc6fe4 100644 --- a/drivers/infiniband/hw/usnic/usnic_debugfs.c +++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. 
* - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.h b/drivers/infiniband/hw/usnic/usnic_debugfs.h index 4087d24a88f6..98453e91daa6 100644 --- a/drivers/infiniband/hw/usnic/usnic_debugfs.h +++ b/drivers/infiniband/hw/usnic/usnic_debugfs.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c index e3c9bd9d3ba3..3c37dd59c04e 100644 --- a/drivers/infiniband/hw/usnic/usnic_fwd.c +++ b/drivers/infiniband/hw/usnic/usnic_fwd.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h index 93713a2230b3..3a8add9ddf46 100644 --- a/drivers/infiniband/hw/usnic/usnic_fwd.h +++ b/drivers/infiniband/hw/usnic/usnic_fwd.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib.h b/drivers/infiniband/hw/usnic/usnic_ib.h index e5a9297dd1bd..525bf272671e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib.h +++ b/drivers/infiniband/hw/usnic/usnic_ib.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 34c49b8105fe..0c15bd885035 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c index db3588df3546..85dc3f989ff7 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h index b0aafe8db0c3..b1458be1d402 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c index 27dc67c1689f..3412ea06116e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h index 0d09b493cd02..3d98e16cfeaf 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. 
* - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 7df43827cb29..f8e3211689a3 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h index 0bd04efa16f3..414eaa566bd9 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.h +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_log.h b/drivers/infiniband/hw/usnic/usnic_log.h index 75777a66c684..183fcb6a952f 100644 --- a/drivers/infiniband/hw/usnic/usnic_log.h +++ b/drivers/infiniband/hw/usnic/usnic_log.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c index ddef6f77a78c..de318389a301 100644 --- a/drivers/infiniband/hw/usnic/usnic_transport.c +++ b/drivers/infiniband/hw/usnic/usnic_transport.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_transport.h b/drivers/infiniband/hw/usnic/usnic_transport.h index 7e5dc6d9f462..9a7a2d9755c0 100644 --- a/drivers/infiniband/hw/usnic/usnic_transport.h +++ b/drivers/infiniband/hw/usnic/usnic_transport.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index cb2337f0532b..645a5f6e6c88 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -7,7 +7,7 @@ * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: + * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h index 70440996e8f2..45ca7c1613a7 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c index 3a4288e0fbac..42b4b4c4e452 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h index d4f752e258fd..c0b0b876ab90 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.c b/drivers/infiniband/hw/usnic/usnic_vnic.c index 656b88c39eda..66de93fb8ea9 100644 --- a/drivers/infiniband/hw/usnic/usnic_vnic.c +++ b/drivers/infiniband/hw/usnic/usnic_vnic.c @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/hw/usnic/usnic_vnic.h b/drivers/infiniband/hw/usnic/usnic_vnic.h index 14d931a8829d..a08423e478af 100644 --- a/drivers/infiniband/hw/usnic/usnic_vnic.h +++ b/drivers/infiniband/hw/usnic/usnic_vnic.h @@ -1,9 +1,24 @@ /* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index ca2873698d75..edc5b8565d6d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -80,7 +80,7 @@ enum { IPOIB_NUM_WC = 4, IPOIB_MAX_PATH_REC_QUEUE = 3, - IPOIB_MAX_MCAST_QUEUE = 3, + IPOIB_MAX_MCAST_QUEUE = 64, IPOIB_FLAG_OPER_UP = 0, IPOIB_FLAG_INITIALIZED = 1, @@ -495,6 +495,7 @@ void ipoib_dev_cleanup(struct net_device *dev); void ipoib_mcast_join_task(struct work_struct *work); void ipoib_mcast_carrier_on_task(struct work_struct *work); void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb); +void ipoib_mcast_free(struct ipoib_mcast *mc); void ipoib_mcast_restart_task(struct work_struct *work); int ipoib_mcast_start_thread(struct net_device *dev); @@ -548,6 +549,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter, int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid, int set_qkey); +int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast); +struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid); int ipoib_init_qp(struct net_device *dev); int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 36536ce5a3e2..babba05d7a0e 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) unsigned long dt; unsigned long flags; int i; + LIST_HEAD(remove_list); + struct ipoib_mcast *mcast, *tmcast; + struct net_device *dev = priv->dev; if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) return; @@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) lockdep_is_held(&priv->lock))) != NULL) { /* was the neigh idle for two GC periods */ if (time_after(neigh_obsolete, neigh->alive)) { + u8 *mgid = neigh->daddr + 4; + + /* Is this multicast ? 
*/ + if (*mgid == 0xff) { + mcast = __ipoib_mcast_find(dev, mgid); + + if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { + list_del(&mcast->list); + rb_erase(&mcast->rb_node, &priv->multicast_tree); + list_add_tail(&mcast->list, &remove_list); + } + } + rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, lockdep_is_held(&priv->lock))); @@ -1191,6 +1207,10 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) out_unlock: spin_unlock_irqrestore(&priv->lock, flags); + list_for_each_entry_safe(mcast, tmcast, &remove_list, list) { + ipoib_mcast_leave(dev, mcast); + ipoib_mcast_free(mcast); + } } static void ipoib_reap_neigh(struct work_struct *work) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 09a1748f9d13..d750a86042f3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -106,7 +106,7 @@ static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv, queue_delayed_work(priv->wq, &priv->mcast_task, 0); } -static void ipoib_mcast_free(struct ipoib_mcast *mcast) +void ipoib_mcast_free(struct ipoib_mcast *mcast) { struct net_device *dev = mcast->dev; int tx_dropped = 0; @@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev, return mcast; } -static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) +struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct rb_node *n = priv->multicast_tree.rb_node; @@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) rec.hop_limit = priv->broadcast->mcmember.hop_limit; /* - * Historically Linux IPoIB has never properly supported SEND - * ONLY join. It emulated it by not providing all the required - * attributes, which is enough to prevent group creation and - * detect if there are full members or not. A major problem - * with supporting SEND ONLY is detecting when the group is - * auto-destroyed as IPoIB will cache the MLID.. + * Send-only IB Multicast joins do not work at the core + * IB layer yet, so we can't use them here. However, + * we are emulating an Ethernet multicast send, which + * does not require a multicast subscription and will + * still send properly. The most appropriate thing to + * do is to create the group if it doesn't exist as that + * most closely emulates the behavior, from a user space + * application perspecitive, of Ethernet multicast + * operation. For now, we do a full join, maybe later + * when the core IB layers support send only joins we + * will use them. 
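(Aside on the neighbour-reaping hunk above, not part of the patch itself: the reaper decides whether an idle neighbour refers to a send-only multicast group purely from the IPoIB hardware-address layout, in which a 4-byte QPN field is followed by the 16-byte GID and multicast GIDs begin with 0xff. A minimal sketch of that test, using a hypothetical helper name:

	#include <linux/types.h>

	/* Illustrative only: mirrors the "*mgid == 0xff" check added to __ipoib_reap_neigh(). */
	static bool ipoib_daddr_is_mcast(const u8 *daddr)
	{
		const u8 *mgid = daddr + 4;	/* skip the QPN bytes, point at the GID */

		return mgid[0] == 0xff;		/* multicast GIDs use the 0xff prefix */
	}

Neighbours that match and whose group carries IPOIB_MCAST_FLAG_SENDONLY are moved onto remove_list and released via ipoib_mcast_leave()/ipoib_mcast_free() once the lock is dropped.)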
*/ -#if 1 - if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) - comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; -#else +#if 0 if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) rec.join_state = 4; #endif @@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev) return 0; } -static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) +int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret = 0; diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 1ace5d83a4d7..f58ff96b6cbb 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS; module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024"); +bool iser_always_reg = true; +module_param_named(always_register, iser_always_reg, bool, S_IRUGO); +MODULE_PARM_DESC(always_register, + "Always register memory, even for continuous memory regions (default:true)"); + bool iser_pi_enable = false; module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 86f6583485ef..a5edd6ede692 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -611,6 +611,7 @@ extern int iser_debug_level; extern bool iser_pi_enable; extern int iser_pi_guard; extern unsigned int iser_max_sectors; +extern bool iser_always_reg; int iser_assign_reg_ops(struct iser_device *device); diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 2493cc748db8..4c46d67d37a1 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -803,11 +803,12 @@ static int iser_reg_prot_sg(struct iscsi_iser_task *task, struct iser_data_buf *mem, struct iser_fr_desc *desc, + bool use_dma_key, struct iser_mem_reg *reg) { struct iser_device *device = task->iser_conn->ib_conn.device; - if (mem->dma_nents == 1) + if (use_dma_key) return iser_reg_dma(device, mem, reg); return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg); @@ -817,11 +818,12 @@ static int iser_reg_data_sg(struct iscsi_iser_task *task, struct iser_data_buf *mem, struct iser_fr_desc *desc, + bool use_dma_key, struct iser_mem_reg *reg) { struct iser_device *device = task->iser_conn->ib_conn.device; - if (mem->dma_nents == 1) + if (use_dma_key) return iser_reg_dma(device, mem, reg); return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg); @@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, struct iser_mem_reg *reg = &task->rdma_reg[dir]; struct iser_mem_reg *data_reg; struct iser_fr_desc *desc = NULL; + bool use_dma_key; int err; err = iser_handle_unaligned_buf(task, mem, dir); if (unlikely(err)) return err; - if (mem->dma_nents != 1 || - scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) { + use_dma_key = (mem->dma_nents == 1 && !iser_always_reg && + scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL); + + if (!use_dma_key) { desc = device->reg_ops->reg_desc_get(ib_conn); reg->mem_h = desc; } @@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, else data_reg = &task->desc.data_reg; - 
err = iser_reg_data_sg(task, mem, desc, data_reg); + err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg); if (unlikely(err)) goto err_reg; @@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task, if (unlikely(err)) goto err_reg; - err = iser_reg_prot_sg(task, mem, desc, prot_reg); + err = iser_reg_prot_sg(task, mem, desc, + use_dma_key, prot_reg); if (unlikely(err)) goto err_reg; } diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index ae70cc1463ac..85132d867bc8 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device) (unsigned long)comp); } - device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE | - IB_ACCESS_REMOTE_READ); - if (IS_ERR(device->mr)) - goto dma_mr_err; + if (!iser_always_reg) { + int access = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_REMOTE_READ; + + device->mr = ib_get_dma_mr(device->pd, access); + if (IS_ERR(device->mr)) + goto dma_mr_err; + } INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device, iser_event_handler); @@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device) return 0; handler_err: - ib_dereg_mr(device->mr); + if (device->mr) + ib_dereg_mr(device->mr); dma_mr_err: for (i = 0; i < device->comps_used; i++) tasklet_kill(&device->comps[i].tasklet); @@ -173,7 +178,6 @@ comps_err: static void iser_free_device_ib_res(struct iser_device *device) { int i; - BUG_ON(device->mr == NULL); for (i = 0; i < device->comps_used; i++) { struct iser_comp *comp = &device->comps[i]; @@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device) } (void)ib_unregister_event_handler(&device->event_handler); - (void)ib_dereg_mr(device->mr); + if (device->mr) + (void)ib_dereg_mr(device->mr); ib_dealloc_pd(device->pd); kfree(device->comps); diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 56eb471b5576..4215b5382092 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY config JOYSTICK_ZHENHUA tristate "5-byte Zhenhua RC transmitter" select SERIO + select BITREVERSE help Say Y here if you have a Zhen Hua PPM-4CH transmitter which is supplied with a ready to fly micro electric indoor helicopters diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c index b76ac580703c..a8bc2fe170dd 100644 --- a/drivers/input/joystick/walkera0701.c +++ b/drivers/input/joystick/walkera0701.c @@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data) if (w->counter == 24) { /* full frame */ walkera0701_parse_frame(w); w->counter = NO_SYNC; - if (abs(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ + if (abs64(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */ w->counter = 0; } else { if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE) @@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data) } else w->counter = NO_SYNC; } - } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) < + } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) < RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. 
*/ w->counter = 0; diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c index b052afec9a11..6639b2b8528a 100644 --- a/drivers/input/keyboard/omap4-keypad.c +++ b/drivers/input/keyboard/omap4-keypad.c @@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev) error = omap4_keypad_parse_dt(&pdev->dev, keypad_data); if (error) - return error; + goto err_free_keypad; res = request_mem_region(res->start, resource_size(res), pdev->name); if (!res) { diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c index 867db8a91372..e317b75357a0 100644 --- a/drivers/input/misc/pm8941-pwrkey.c +++ b/drivers/input/misc/pm8941-pwrkey.c @@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb, default: reset_type = PON_PS_HOLD_TYPE_HARD_RESET; break; - }; + } error = regmap_update_bits(pwrkey->regmap, pwrkey->baseaddr + PON_PS_HOLD_RST_CTL, diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 345df9b03aed..5adbcedcb81c 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev, dev->id.product = user_dev->id.product; dev->id.version = user_dev->id.version; - for_each_set_bit(i, dev->absbit, ABS_CNT) { + for (i = 0; i < ABS_CNT; i++) { input_abs_set_max(dev, i, user_dev->absmax[i]); input_abs_set_min(dev, i, user_dev->absmin[i]); input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]); diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c index 5f191071d44a..e4eb048d1bf6 100644 --- a/drivers/input/mouse/cyapa_gen6.c +++ b/drivers/input/mouse/cyapa_gen6.c @@ -241,14 +241,10 @@ static int cyapa_gen6_read_sys_info(struct cyapa *cyapa) memcpy(&cyapa->product_id[13], &resp_data[62], 2); cyapa->product_id[15] = '\0'; + /* Get the number of Rx electrodes. */ rotat_align = resp_data[68]; - if (rotat_align) { - cyapa->electrodes_rx = cyapa->electrodes_y; - cyapa->electrodes_rx = cyapa->electrodes_y; - } else { - cyapa->electrodes_rx = cyapa->electrodes_x; - cyapa->electrodes_rx = cyapa->electrodes_y; - } + cyapa->electrodes_rx = + rotat_align ? 
cyapa->electrodes_y : cyapa->electrodes_x; cyapa->aligned_electrodes_rx = (cyapa->electrodes_rx + 3) & ~3u; if (!cyapa->electrodes_x || !cyapa->electrodes_y || diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index 73670f2aebfd..c0ec26118732 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h @@ -60,7 +60,7 @@ struct elan_transport_ops { int (*get_sm_version)(struct i2c_client *client, u8* ic_type, u8 *version); int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); - int (*get_product_id)(struct i2c_client *client, u8 *id); + int (*get_product_id)(struct i2c_client *client, u16 *id); int (*get_max)(struct i2c_client *client, unsigned int *max_x, unsigned int *max_y); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index fa945304b9a5..5e1665bbaa0b 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -40,7 +40,7 @@ #include "elan_i2c.h" #define DRIVER_NAME "elan_i2c" -#define ELAN_DRIVER_VERSION "1.6.0" +#define ELAN_DRIVER_VERSION "1.6.1" #define ETP_MAX_PRESSURE 255 #define ETP_FWIDTH_REDUCE 90 #define ETP_FINGER_WIDTH 15 @@ -76,7 +76,7 @@ struct elan_tp_data { unsigned int x_res; unsigned int y_res; - u8 product_id; + u16 product_id; u8 fw_version; u8 sm_version; u8 iap_version; @@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count, u16 *signature_address) { switch (iap_version) { + case 0x00: + case 0x06: case 0x08: *validpage_count = 512; break; + case 0x03: + case 0x07: case 0x09: + case 0x0A: + case 0x0B: + case 0x0C: *validpage_count = 768; break; case 0x0D: *validpage_count = 896; break; + case 0x0E: + *validpage_count = 640; + break; default: /* unknown ic type clear value */ *validpage_count = 0; @@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data) error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count, &data->fw_signature_address); - if (error) { - dev_err(&data->client->dev, - "unknown iap version %d\n", data->iap_version); - return error; - } + if (error) + dev_warn(&data->client->dev, + "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n", + data->iap_version, data->ic_type); return 0; } @@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev, const u8 *fw_signature; static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF}; + if (data->fw_validpage_count == 0) + return -EINVAL; + /* Look for a firmware with the product id appended. 
*/ fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id); if (!fw_name) { diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 683c840c9dd7..a679e56c44cd 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client, return 0; } -static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) +static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id) { int error; u8 val[3]; @@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id) return error; } - *id = val[0]; + *id = le16_to_cpup((__le16 *)val); return 0; } diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index ff36a366b2aa..cb6aecbc1dc2 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client, return 0; } -static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) +static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id) { int error; u8 val[3]; @@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id) return error; } - *id = val[1]; + *id = be16_to_cpup((__be16 *)val); return 0; } diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 994ae7886156..6025eb430c0a 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse) struct synaptics_data *priv = psmouse->private; priv->mode = 0; - - if (priv->absolute_mode) { + if (priv->absolute_mode) priv->mode |= SYN_BIT_ABSOLUTE_MODE; - if (SYN_CAP_EXTENDED(priv->capabilities)) - priv->mode |= SYN_BIT_W_MODE; - } - - if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture) + if (priv->disable_gesture) priv->mode |= SYN_BIT_DISABLE_GESTURE; - if (psmouse->rate >= 80) priv->mode |= SYN_BIT_HIGH_RATE; + if (SYN_CAP_EXTENDED(priv->capabilities)) + priv->mode |= SYN_BIT_W_MODE; if (synaptics_mode_cmd(psmouse, priv->mode)) return -1; diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index 75516996db20..316f2c897101 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c @@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) * time before the ACK arrives. */ if (ps2_sendbyte(ps2dev, command & 0xff, - command == PS2_CMD_RESET_BAT ? 1000 : 200)) - goto out; + command == PS2_CMD_RESET_BAT ? 1000 : 200)) { + serio_pause_rx(ps2dev->serio); + goto out_reset_flags; + } - for (i = 0; i < send; i++) - if (ps2_sendbyte(ps2dev, param[i], 200)) - goto out; + for (i = 0; i < send; i++) { + if (ps2_sendbyte(ps2dev, param[i], 200)) { + serio_pause_rx(ps2dev->serio); + goto out_reset_flags; + } + } /* * The reset command takes a long time to execute. 
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command) !(ps2dev->flags & PS2_FLAG_CMD), timeout); } + serio_pause_rx(ps2dev->serio); + if (param) for (i = 0; i < receive; i++) param[i] = ps2dev->cmdbuf[(receive - 1) - i]; if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) - goto out; + goto out_reset_flags; rc = 0; - out: - serio_pause_rx(ps2dev->serio); + out_reset_flags: ps2dev->flags = 0; serio_continue_rx(ps2dev->serio); diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c index 26b45936f9fd..1e8cd6f1fe9e 100644 --- a/drivers/input/serio/parkbd.c +++ b/drivers/input/serio/parkbd.c @@ -194,6 +194,7 @@ static int __init parkbd_init(void) parkbd_port = parkbd_allocate_serio(); if (!parkbd_port) { parport_release(parkbd_dev); + parport_unregister_device(parkbd_dev); return -ENOMEM; } diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 0f5f968592bd..04edc8f7122f 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -668,18 +668,22 @@ static int ads7846_no_filter(void *ads, int data_idx, int *val) static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m) { + int value; struct spi_transfer *t = list_entry(m->transfers.prev, struct spi_transfer, transfer_list); if (ts->model == 7845) { - return be16_to_cpup((__be16 *)&(((char*)t->rx_buf)[1])) >> 3; + value = be16_to_cpup((__be16 *)&(((char *)t->rx_buf)[1])); } else { /* * adjust: on-wire is a must-ignore bit, a BE12 value, then * padding; built from two 8 bit values written msb-first. */ - return be16_to_cpup((__be16 *)t->rx_buf) >> 3; + value = be16_to_cpup((__be16 *)t->rx_buf); } + + /* enforce ADC output is 12 bits width */ + return (value >> 3) & 0xfff; } static void ads7846_update_value(struct spi_message *m, int val) diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c index ff0b75813daa..8275267eac25 100644 --- a/drivers/input/touchscreen/imx6ul_tsc.c +++ b/drivers/input/touchscreen/imx6ul_tsc.c @@ -94,7 +94,7 @@ struct imx6ul_tsc { * TSC module need ADC to get the measure value. So * before config TSC, we should initialize ADC module. 
*/ -static void imx6ul_adc_init(struct imx6ul_tsc *tsc) +static int imx6ul_adc_init(struct imx6ul_tsc *tsc) { int adc_hc = 0; int adc_gc; @@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc) timeout = wait_for_completion_timeout (&tsc->completion, ADC_TIMEOUT); - if (timeout == 0) + if (timeout == 0) { dev_err(tsc->dev, "Timeout for adc calibration\n"); + return -ETIMEDOUT; + } adc_gs = readl(tsc->adc_regs + REG_ADC_GS); - if (adc_gs & ADC_CALF) + if (adc_gs & ADC_CALF) { dev_err(tsc->dev, "ADC calibration failed\n"); + return -EINVAL; + } /* TSC need the ADC work in hardware trigger */ adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG); adc_cfg |= ADC_HARDWARE_TRIGGER; writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG); + + return 0; } /* @@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc) writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL); } -static void imx6ul_tsc_init(struct imx6ul_tsc *tsc) +static int imx6ul_tsc_init(struct imx6ul_tsc *tsc) { - imx6ul_adc_init(tsc); + int err; + + err = imx6ul_adc_init(tsc); + if (err) + return err; imx6ul_tsc_channel_config(tsc); imx6ul_tsc_set(tsc); + + return 0; } static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc) @@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev) return err; } - imx6ul_tsc_init(tsc); - - return 0; + return imx6ul_tsc_init(tsc); } static void imx6ul_tsc_close(struct input_dev *input_dev) @@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) int tsc_irq; int adc_irq; - tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL); + tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL); if (!tsc) return -ENOMEM; @@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) if (!input_dev) return -ENOMEM; - input_dev->name = "iMX6UL TouchScreen Controller"; + input_dev->name = "iMX6UL Touchscreen Controller"; input_dev->id.bustype = BUS_HOST; input_dev->open = imx6ul_tsc_open; @@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev) } adc_irq = platform_get_irq(pdev, 1); - if (adc_irq <= 0) { + if (adc_irq < 0) { dev_err(&pdev->dev, "no adc irq resource?\n"); return adc_irq; } @@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev) goto out; } - imx6ul_tsc_init(tsc); + retval = imx6ul_tsc_init(tsc); } out: diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c index 7cce87650fc8..1fafc9f57af6 100644 --- a/drivers/input/touchscreen/mms114.c +++ b/drivers/input/touchscreen/mms114.c @@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev) if (of_property_read_u32(np, "x-size", &pdata->x_size)) { dev_err(dev, "failed to get x-size property\n"); return NULL; - }; + } if (of_property_read_u32(np, "y-size", &pdata->y_size)) { dev_err(dev, "failed to get y-size property\n"); return NULL; - }; + } of_property_read_u32(np, "contact-threshold", &pdata->contact_threshold); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 4664c2a96c67..cbe6a890a93a 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -23,8 +23,7 @@ config IOMMU_IO_PGTABLE config IOMMU_IO_PGTABLE_LPAE bool "ARMv7/v8 Long Descriptor Format" select IOMMU_IO_PGTABLE - # SWIOTLB guarantees a dma_to_phys() implementation - depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB) + depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST) help Enable support for the ARM long descriptor pagetable format. 
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page @@ -43,7 +42,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST endmenu config IOMMU_IOVA - bool + tristate config OF_IOMMU def_bool y diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index f82060e778a2..532e2a211fe1 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1974,8 +1974,8 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) static void clear_dte_entry(u16 devid) { /* remove entry from the device table seen by the hardware */ - amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; - amd_iommu_dev_table[devid].data[1] = 0; + amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV; + amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK; amd_iommu_apply_erratum_63(devid); } @@ -2006,6 +2006,15 @@ static void do_detach(struct iommu_dev_data *dev_data) { struct amd_iommu *iommu; + /* + * First check if the device is still attached. It might already + * be detached from its domain because the generic + * iommu_detach_group code detached it and we try again here in + * our alias handling. + */ + if (!dev_data->domain) + return; + iommu = amd_iommu_rlookup_table[dev_data->devid]; /* decrease reference counters */ diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 5ef347a13cb5..1b066e7d144d 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1256,6 +1256,9 @@ static int iommu_init_pci(struct amd_iommu *iommu) if (!iommu->dev) return -ENODEV; + /* Prevent binding other PCI device drivers to IOMMU devices */ + iommu->dev->match_driver = false; + pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, &iommu->cap); pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index f65908841be0..c9b64722f623 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -295,6 +295,7 @@ #define IOMMU_PTE_IR (1ULL << 61) #define IOMMU_PTE_IW (1ULL << 62) +#define DTE_FLAG_MASK (0x3ffULL << 32) #define DTE_FLAG_IOTLB (0x01UL << 32) #define DTE_FLAG_GV (0x01ULL << 55) #define DTE_GLX_SHIFT (56) diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 1131664b918b..d21d4edf7236 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -516,6 +516,13 @@ static void do_fault(struct work_struct *work) goto out; } + if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) { + /* handle_mm_fault would BUG_ON() */ + up_read(&mm->mmap_sem); + handle_fault_error(fault); + goto out; + } + ret = handle_mm_fault(mm, vma, address, write); if (ret & VM_FAULT_ERROR) { /* failed to service fault */ diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index dafaf59dc3b8..286e890e7d64 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -56,6 +56,7 @@ #define IDR0_TTF_SHIFT 2 #define IDR0_TTF_MASK 0x3 #define IDR0_TTF_AARCH64 (2 << IDR0_TTF_SHIFT) +#define IDR0_TTF_AARCH32_64 (3 << IDR0_TTF_SHIFT) #define IDR0_S1P (1 << 1) #define IDR0_S2P (1 << 0) @@ -342,7 +343,8 @@ #define CMDQ_TLBI_0_VMID_SHIFT 32 #define CMDQ_TLBI_0_ASID_SHIFT 48 #define CMDQ_TLBI_1_LEAF (1UL << 0) -#define CMDQ_TLBI_1_ADDR_MASK ~0xfffUL +#define CMDQ_TLBI_1_VA_MASK ~0xfffUL +#define CMDQ_TLBI_1_IPA_MASK 0xfffffffff000UL #define CMDQ_PRI_0_SSID_SHIFT 12 #define CMDQ_PRI_0_SSID_MASK 0xfffffUL @@ -770,11 +772,13 @@ static int 
arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) break; case CMDQ_OP_TLBI_NH_VA: cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; - /* Fallthrough */ + cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0; + cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; + break; case CMDQ_OP_TLBI_S2_IPA: cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT; cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0; - cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_ADDR_MASK; + cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK; break; case CMDQ_OP_TLBI_NH_ASID: cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; @@ -2460,7 +2464,13 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu) } /* We only support the AArch64 table format at present */ - if ((reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) < IDR0_TTF_AARCH64) { + switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) { + case IDR0_TTF_AARCH32_64: + smmu->ias = 40; + /* Fallthrough */ + case IDR0_TTF_AARCH64: + break; + default: dev_err(smmu->dev, "AArch64 table format not supported!\n"); return -ENXIO; } @@ -2541,8 +2551,7 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu) dev_warn(smmu->dev, "failed to set DMA mask for table walker\n"); - if (!smmu->ias) - smmu->ias = smmu->oas; + smmu->ias = max(smmu->ias, smmu->oas); dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", smmu->ias, smmu->oas, smmu->features); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2d7349a3ee14..d65cf42399e8 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2115,15 +2115,19 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, return -ENOMEM; /* It is large page*/ if (largepage_lvl > 1) { + unsigned long nr_superpages, end_pfn; + pteval |= DMA_PTE_LARGE_PAGE; lvl_pages = lvl_to_nr_pages(largepage_lvl); + + nr_superpages = sg_res / lvl_pages; + end_pfn = iov_pfn + nr_superpages * lvl_pages - 1; + /* * Ensure that old small page tables are - * removed to make room for superpage, - * if they exist. + * removed to make room for superpage(s). 
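(To make the bounds in this superpage hunk concrete, here is a small stand-alone sketch with hypothetical example values for the 2MiB-superpage-over-4KiB-pages case; it is not code from the patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned long iov_pfn = 0x10000;	/* hypothetical start of the IOVA range */
		unsigned long lvl_pages = 512;		/* 4KiB pages covered by one 2MiB superpage */
		unsigned long sg_res = 1536;		/* small pages remaining in this sg entry */

		unsigned long nr_superpages = sg_res / lvl_pages;			/* 3 */
		unsigned long end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;	/* iov_pfn + 1535 */

		/* The fix frees the old small-page tables for this whole span,
		 * not just the first lvl_pages worth as before. */
		printf("free PFNs 0x%lx..0x%lx (%lu superpages)\n",
		       iov_pfn, end_pfn, nr_superpages);
		return 0;
	})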
*/ - dma_pte_free_pagetable(domain, iov_pfn, - iov_pfn + lvl_pages - 1); + dma_pte_free_pagetable(domain, iov_pfn, end_pfn); } else { pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; } @@ -2301,6 +2305,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, if (ret) { spin_unlock_irqrestore(&device_domain_lock, flags); + free_devinfo_mem(info); return NULL; } @@ -3215,6 +3220,8 @@ static struct iova *intel_alloc_iova(struct device *dev, /* Restrict dma_mask to the width that the iommu can handle */ dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); + /* Ensure we reserve the whole size-aligned region */ + nrpages = __roundup_pow_of_two(nrpages); if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { /* @@ -3711,7 +3718,7 @@ static inline int iommu_devinfo_cache_init(void) static int __init iommu_init_mempool(void) { int ret; - ret = iommu_iova_cache_init(); + ret = iova_cache_get(); if (ret) return ret; @@ -3725,7 +3732,7 @@ static int __init iommu_init_mempool(void) kmem_cache_destroy(iommu_domain_cache); domain_error: - iommu_iova_cache_destroy(); + iova_cache_put(); return -ENOMEM; } @@ -3734,7 +3741,7 @@ static void __init iommu_exit_mempool(void) { kmem_cache_destroy(iommu_devinfo_cache); kmem_cache_destroy(iommu_domain_cache); - iommu_iova_cache_destroy(); + iova_cache_put(); } static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 73c07482f487..7df97777662d 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -202,9 +202,9 @@ typedef u64 arm_lpae_iopte; static bool selftest_running = false; -static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages) +static dma_addr_t __arm_lpae_dma_addr(void *pages) { - return phys_to_dma(dev, virt_to_phys(pages)); + return (dma_addr_t)virt_to_phys(pages); } static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, @@ -223,10 +223,10 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, goto out_free; /* * We depend on the IOMMU being able to work with any physical - * address directly, so if the DMA layer suggests it can't by - * giving us back some translation, that bodes very badly... + * address directly, so if the DMA layer suggests otherwise by + * translating or truncating them, that bodes very badly... 
*/ - if (dma != __arm_lpae_dma_addr(dev, pages)) + if (dma != virt_to_phys(pages)) goto out_unmap; } @@ -243,10 +243,8 @@ out_free: static void __arm_lpae_free_pages(void *pages, size_t size, struct io_pgtable_cfg *cfg) { - struct device *dev = cfg->iommu_dev; - if (!selftest_running) - dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages), + dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), size, DMA_TO_DEVICE); free_pages_exact(pages, size); } @@ -254,12 +252,11 @@ static void __arm_lpae_free_pages(void *pages, size_t size, static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, struct io_pgtable_cfg *cfg) { - struct device *dev = cfg->iommu_dev; - *ptep = pte; if (!selftest_running) - dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep), + dma_sync_single_for_device(cfg->iommu_dev, + __arm_lpae_dma_addr(ptep), sizeof(pte), DMA_TO_DEVICE); } @@ -629,6 +626,11 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) return NULL; + if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) { + dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n"); + return NULL; + } + data = kmalloc(sizeof(*data), GFP_KERNEL); if (!data) return NULL; diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index b7c3d923f3e1..fa0adef32bd6 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -18,42 +18,9 @@ */ #include +#include #include -static struct kmem_cache *iommu_iova_cache; - -int iommu_iova_cache_init(void) -{ - int ret = 0; - - iommu_iova_cache = kmem_cache_create("iommu_iova", - sizeof(struct iova), - 0, - SLAB_HWCACHE_ALIGN, - NULL); - if (!iommu_iova_cache) { - pr_err("Couldn't create iova cache\n"); - ret = -ENOMEM; - } - - return ret; -} - -void iommu_iova_cache_destroy(void) -{ - kmem_cache_destroy(iommu_iova_cache); -} - -struct iova *alloc_iova_mem(void) -{ - return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); -} - -void free_iova_mem(struct iova *iova) -{ - kmem_cache_free(iommu_iova_cache, iova); -} - void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn, unsigned long pfn_32bit) @@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, iovad->start_pfn = start_pfn; iovad->dma_32bit_pfn = pfn_32bit; } +EXPORT_SYMBOL_GPL(init_iova_domain); static struct rb_node * __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) @@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) } } -/* Computes the padding size required, to make the - * the start address naturally aligned on its size +/* + * Computes the padding size required, to make the start address + * naturally aligned on the power-of-two order of its size */ -static int -iova_get_pad_size(int size, unsigned int limit_pfn) +static unsigned int +iova_get_pad_size(unsigned int size, unsigned int limit_pfn) { - unsigned int pad_size = 0; - unsigned int order = ilog2(size); - - if (order) - pad_size = (limit_pfn + 1) % (1 << order); - - return pad_size; + return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1); } static int __alloc_and_insert_iova_range(struct iova_domain *iovad, @@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova) rb_insert_color(&iova->node, root); } +static struct kmem_cache *iova_cache; +static unsigned int iova_cache_users; +static DEFINE_MUTEX(iova_cache_mutex); + +struct iova *alloc_iova_mem(void) +{ + return kmem_cache_alloc(iova_cache, 
GFP_ATOMIC); +} +EXPORT_SYMBOL(alloc_iova_mem); + +void free_iova_mem(struct iova *iova) +{ + kmem_cache_free(iova_cache, iova); +} +EXPORT_SYMBOL(free_iova_mem); + +int iova_cache_get(void) +{ + mutex_lock(&iova_cache_mutex); + if (!iova_cache_users) { + iova_cache = kmem_cache_create( + "iommu_iova", sizeof(struct iova), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!iova_cache) { + mutex_unlock(&iova_cache_mutex); + printk(KERN_ERR "Couldn't create iova cache\n"); + return -ENOMEM; + } + } + + iova_cache_users++; + mutex_unlock(&iova_cache_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(iova_cache_get); + +void iova_cache_put(void) +{ + mutex_lock(&iova_cache_mutex); + if (WARN_ON(!iova_cache_users)) { + mutex_unlock(&iova_cache_mutex); + return; + } + iova_cache_users--; + if (!iova_cache_users) + kmem_cache_destroy(iova_cache); + mutex_unlock(&iova_cache_mutex); +} +EXPORT_SYMBOL_GPL(iova_cache_put); + /** * alloc_iova - allocates an iova * @iovad: - iova domain in question @@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, if (!new_iova) return NULL; - /* If size aligned is set then round the size to - * to next power of two. - */ - if (size_aligned) - size = __roundup_pow_of_two(size); - ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, new_iova, size_aligned); @@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, return new_iova; } +EXPORT_SYMBOL_GPL(alloc_iova); /** * find_iova - find's an iova for a given pfn @@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return NULL; } +EXPORT_SYMBOL_GPL(find_iova); /** * __free_iova - frees the given iova @@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova) spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); free_iova_mem(iova); } +EXPORT_SYMBOL_GPL(__free_iova); /** * free_iova - finds and frees the iova for a given pfn @@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn) __free_iova(iovad, iova); } +EXPORT_SYMBOL_GPL(free_iova); /** * put_iova_domain - destroys the iova doamin @@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad) } spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); } +EXPORT_SYMBOL_GPL(put_iova_domain); static int __is_range_overlap(struct rb_node *node, @@ -467,6 +480,7 @@ finish: spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return iova; } +EXPORT_SYMBOL_GPL(reserve_iova); /** * copy_reserved_iova - copies the reserved between domains @@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) } spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); } +EXPORT_SYMBOL_GPL(copy_reserved_iova); struct iova * split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, @@ -534,3 +549,6 @@ error: free_iova_mem(prev); return NULL; } + +MODULE_AUTHOR("Anil S Keshavamurthy "); +MODULE_LICENSE("GPL"); diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c index cf351c637464..a7c8c9ffbafd 100644 --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c @@ -62,7 +62,7 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) dev_alias->dev_id = alias; if (pdev != dev_alias->pdev) - dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); + dev_alias->count += its_pci_msi_vec_count(pdev); return 0; } diff --git a/drivers/irqchip/irq-gic-v3-its.c 
b/drivers/irqchip/irq-gic-v3-its.c index ac7ae2b3cb83..25ceae9f7348 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -719,6 +719,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) out: spin_unlock(&lpi_lock); + if (!bitmap) + *base = *nr_ids = 0; + return bitmap; } diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index af2f16bb8a94..aeaa061f0dbf 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -320,6 +320,14 @@ static void gic_handle_shared_int(bool chained) intrmask[i] = gic_read(intrmask_reg); pending_reg += gic_reg_step; intrmask_reg += gic_reg_step; + + if (!config_enabled(CONFIG_64BIT) || mips_cm_is64) + continue; + + pending[i] |= (u64)gic_read(pending_reg) << 32; + intrmask[i] |= (u64)gic_read(intrmask_reg) << 32; + pending_reg += gic_reg_step; + intrmask_reg += gic_reg_step; } bitmap_and(pending, pending, intrmask, gic_shared_intrs); @@ -426,7 +434,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, spin_lock_irqsave(&gic_lock, flags); /* Re-route this IRQ */ - gic_map_to_vpe(irq, cpumask_first(&tmp)); + gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); /* Update the pcpu_masks */ for (i = 0; i < NR_CPUS; i++) @@ -599,7 +607,7 @@ static __init void gic_ipi_init_one(unsigned int intr, int cpu, GIC_SHARED_TO_HWIRQ(intr)); int i; - gic_map_to_vpe(intr, cpu); + gic_map_to_vpe(intr, mips_cm_vp_id(cpu)); for (i = 0; i < NR_CPUS; i++) clear_bit(intr, pcpu_masks[i].pcpu_mask); set_bit(intr, pcpu_masks[cpu].pcpu_mask); diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c index de36237d7c6b..051645498b53 100644 --- a/drivers/mcb/mcb-pci.c +++ b/drivers/mcb/mcb-pci.c @@ -74,7 +74,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = -ENOTSUPP; dev_err(&pdev->dev, "IO mapped PCI devices are not supported\n"); - goto out_release; + goto out_iounmap; } pci_set_drvdata(pdev, priv); @@ -89,7 +89,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base); if (ret < 0) - goto out_iounmap; + goto out_mcb_bus; num_cells = ret; dev_dbg(&pdev->dev, "Found %d cells\n", num_cells); @@ -98,6 +98,8 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; +out_mcb_bus: + mcb_release_bus(priv->bus); out_iounmap: iounmap(priv->base); out_release: diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index e51de52eeb94..48b5890c28e3 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks, if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) ret = bitmap_storage_alloc(&store, chunks, !bitmap->mddev->bitmap_info.external, - bitmap->cluster_slot); + mddev_is_clustered(bitmap->mddev) + ? 
bitmap->cluster_slot : 0); if (ret) goto err; diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index 20cc36b01b77..0a17d1b91a81 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -634,10 +634,10 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, disk_super = dm_block_data(sblock); + disk_super->flags = cpu_to_le32(cmd->flags); if (mutator) update_flags(disk_super, mutator); - disk_super->flags = cpu_to_le32(cmd->flags); disk_super->mapping_root = cpu_to_le64(cmd->root); disk_super->hint_root = cpu_to_le64(cmd->hint_root); disk_super->discard_root = cpu_to_le64(cmd->discard_root); diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c index 240c9f0e85e7..8a096456579b 100644 --- a/drivers/md/dm-cache-policy-cleaner.c +++ b/drivers/md/dm-cache-policy-cleaner.c @@ -436,7 +436,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size, static struct dm_cache_policy_type wb_policy_type = { .name = "cleaner", .version = {1, 0, 0}, - .hint_size = 0, + .hint_size = 4, .owner = THIS_MODULE, .create = wb_create }; diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index ebaa4f803eec..192bb8beeb6b 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -203,7 +203,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, return -EINVAL; } - tmp_store = kmalloc(sizeof(*tmp_store), GFP_KERNEL); + tmp_store = kzalloc(sizeof(*tmp_store), GFP_KERNEL); if (!tmp_store) { ti->error = "Exception store allocation failed"; return -ENOMEM; @@ -215,7 +215,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, else if (persistent == 'N') type = get_type("N"); else { - ti->error = "Persistent flag is not P or N"; + ti->error = "Exception store type is not P or N"; r = -EINVAL; goto bad_type; } @@ -233,7 +233,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, if (r) goto bad; - r = type->ctr(tmp_store, 0, NULL); + r = type->ctr(tmp_store, (strlen(argv[0]) > 1 ? &argv[0][1] : NULL)); if (r) { ti->error = "Exception store type constructor failed"; goto bad; diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index 0b2536247cf5..fae34e7a0b1e 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h @@ -42,8 +42,7 @@ struct dm_exception_store_type { const char *name; struct module *module; - int (*ctr) (struct dm_exception_store *store, - unsigned argc, char **argv); + int (*ctr) (struct dm_exception_store *store, char *options); /* * Destroys this object when you've finished with it. 
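(Aside on the constructor hook changed just above: it now takes a single options string instead of an argc/argv pair. dm_exception_store_create(), earlier in this patch, strips the leading store-type character and passes the remainder, so a "PO" table line hands "O" to the type's ctr while plain "P" hands NULL, and persistent_ctr() below accepts only 'O'. A small userspace sketch of that parsing, with a hypothetical helper name and simplified error handling; the real ctr fails with -EINVAL for anything else:

	#include <ctype.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical helper mirroring the option check in persistent_ctr(). */
	static bool store_supports_overflow(const char *options)
	{
		return options && toupper((unsigned char)options[0]) == 'O';
	}

	int main(void)
	{
		printf("\"P\"  -> overflow supported: %d\n", store_supports_overflow(NULL));
		printf("\"PO\" -> overflow supported: %d\n", store_supports_overflow("O"));
		return 0;
	})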
@@ -123,6 +122,8 @@ struct dm_exception_store { unsigned chunk_shift; void *context; + + bool userspace_supports_overflow; }; /* diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 97e165183e79..a0901214aef5 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -329,8 +329,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) */ if (min_region_size > (1 << 13)) { /* If not a power of 2, make it the next power of 2 */ - if (min_region_size & (min_region_size - 1)) - region_size = 1 << fls(region_size); + region_size = roundup_pow_of_two(min_region_size); DMINFO("Choosing default region size of %lu sectors", region_size); } else { diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index bf71583296f7..117a05e40090 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -7,6 +7,7 @@ #include "dm-exception-store.h" +#include #include #include #include @@ -843,10 +844,10 @@ static void persistent_drop_snapshot(struct dm_exception_store *store) DMWARN("write header failed"); } -static int persistent_ctr(struct dm_exception_store *store, - unsigned argc, char **argv) +static int persistent_ctr(struct dm_exception_store *store, char *options) { struct pstore *ps; + int r; /* allocate the pstore */ ps = kzalloc(sizeof(*ps), GFP_KERNEL); @@ -868,14 +869,32 @@ static int persistent_ctr(struct dm_exception_store *store, ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0); if (!ps->metadata_wq) { - kfree(ps); DMERR("couldn't start header metadata update thread"); - return -ENOMEM; + r = -ENOMEM; + goto err_workqueue; + } + + if (options) { + char overflow = toupper(options[0]); + if (overflow == 'O') + store->userspace_supports_overflow = true; + else { + DMERR("Unsupported persistent store option: %s", options); + r = -EINVAL; + goto err_options; + } } store->context = ps; return 0; + +err_options: + destroy_workqueue(ps->metadata_wq); +err_workqueue: + kfree(ps); + + return r; } static unsigned persistent_status(struct dm_exception_store *store, @@ -888,7 +907,8 @@ static unsigned persistent_status(struct dm_exception_store *store, case STATUSTYPE_INFO: break; case STATUSTYPE_TABLE: - DMEMIT(" P %llu", (unsigned long long)store->chunk_size); + DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P", + (unsigned long long)store->chunk_size); } return sz; diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c index 1ce9a2586e41..9b7c8c8049d6 100644 --- a/drivers/md/dm-snap-transient.c +++ b/drivers/md/dm-snap-transient.c @@ -70,8 +70,7 @@ static void transient_usage(struct dm_exception_store *store, *metadata_sectors = 0; } -static int transient_ctr(struct dm_exception_store *store, - unsigned argc, char **argv) +static int transient_ctr(struct dm_exception_store *store, char *options) { struct transient_c *tc; diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index c0bcd6516dfe..c06b74e91cd6 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1098,7 +1098,7 @@ static void stop_merge(struct dm_snapshot *s) } /* - * Construct a snapshot mapping:

+ * Construct a snapshot mapping: */ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) { @@ -1302,6 +1302,7 @@ static void __handover_exceptions(struct dm_snapshot *snap_src, u.store_swap = snap_dest->store; snap_dest->store = snap_src->store; + snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow; snap_src->store = u.store_swap; snap_dest->store->snap = snap_dest; @@ -1739,8 +1740,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) pe = __find_pending_exception(s, pe, chunk); if (!pe) { - s->snapshot_overflowed = 1; - DMERR("Snapshot overflowed: Unable to allocate exception."); + if (s->store->userspace_supports_overflow) { + s->snapshot_overflowed = 1; + DMERR("Snapshot overflowed: Unable to allocate exception."); + } else + __invalidate_snapshot(s, -ENOMEM); r = -EIO; goto out_unlock; } @@ -2365,7 +2369,7 @@ static struct target_type origin_target = { static struct target_type snapshot_target = { .name = "snapshot", - .version = {1, 14, 0}, + .version = {1, 15, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, @@ -2379,7 +2383,7 @@ static struct target_type snapshot_target = { static struct target_type merge_target = { .name = dm_snapshot_merge_target_name, - .version = {1, 3, 0}, + .version = {1, 4, 0}, .module = THIS_MODULE, .ctr = snapshot_ctr, .dtr = snapshot_dtr, diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 6fcbfb063366..3897b90bd462 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -3201,7 +3201,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) metadata_low_callback, pool); if (r) - goto out_free_pt; + goto out_flags_changed; pt->callbacks.congested_fn = pool_is_congested; dm_table_add_target_callbacks(ti->table, &pt->callbacks); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6264781dc69a..1b5c6047e4f1 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1001,6 +1001,7 @@ static void end_clone_bio(struct bio *clone) struct dm_rq_target_io *tio = info->tio; struct bio *bio = info->orig; unsigned int nr_bytes = info->orig->bi_iter.bi_size; + int error = clone->bi_error; bio_put(clone); @@ -1011,13 +1012,13 @@ static void end_clone_bio(struct bio *clone) * the remainder. */ return; - else if (bio->bi_error) { + else if (error) { /* * Don't notice the error to the upper layer yet. * The error handling decision is made by the target driver, * when the request is completed. */ - tio->error = bio->bi_error; + tio->error = error; return; } @@ -2837,8 +2838,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait) might_sleep(); - map = dm_get_live_table(md, &srcu_idx); - spin_lock(&_minor_lock); idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); set_bit(DMF_FREEING, &md->flags); @@ -2852,14 +2851,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait) * do not race with internal suspend. 
*/ mutex_lock(&md->suspend_lock); + map = dm_get_live_table(md, &srcu_idx); if (!dm_suspended_md(md)) { dm_table_presuspend_targets(map); dm_table_postsuspend_targets(map); } - mutex_unlock(&md->suspend_lock); - /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ dm_put_live_table(md, srcu_idx); + mutex_unlock(&md->suspend_lock); /* * Rare, but there may be I/O requests still going to complete, diff --git a/drivers/md/md.c b/drivers/md/md.c index 4f5ecbe94ccb..c702de18207a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) * which will now never happen */ wake_up_process(mddev->sync_thread->tsk); + if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags)) + return -EBUSY; mddev_unlock(mddev); wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); + wait_event(mddev->sb_wait, + !test_bit(MD_CHANGE_PENDING, &mddev->flags)); mddev_lock_nointr(mddev); mutex_lock(&mddev->open_mutex); @@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev) md_reap_sync_thread(mddev); clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + clear_bit(MD_CHANGE_PENDING, &mddev->flags); goto unlock; } diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d222522c52e0..d132f06afdd1 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev) return 0; out_free_conf: - if (conf->pool) - mempool_destroy(conf->pool); + mempool_destroy(conf->pool); kfree(conf->multipaths); kfree(conf); mddev->private = NULL; diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index 421a36c593e3..2e4c4cb79e4d 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c @@ -301,11 +301,16 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, { int s; uint32_t max_entries = le32_to_cpu(left->header.max_entries); - unsigned target = (nr_left + nr_center + nr_right) / 3; - BUG_ON(target > max_entries); + unsigned total = nr_left + nr_center + nr_right; + unsigned target_right = total / 3; + unsigned remainder = (target_right * 3) != total; + unsigned target_left = target_right + remainder; + + BUG_ON(target_left > max_entries); + BUG_ON(target_right > max_entries); if (nr_left < nr_right) { - s = nr_left - target; + s = nr_left - target_left; if (s < 0 && nr_center < -s) { /* not enough in central node */ @@ -316,10 +321,10 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, } else shift(left, center, s); - shift(center, right, target - nr_right); + shift(center, right, target_right - nr_right); } else { - s = target - nr_right; + s = target_right - nr_right; if (s > 0 && nr_center < s) { /* not enough in central node */ shift(center, right, nr_center); @@ -329,7 +334,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent, } else shift(center, right, s); - shift(left, center, nr_left - target); + shift(left, center, nr_left - target_left); } *key_ptr(parent, c->index) = center->keys[0]; diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c index b6cec258cc21..0e09aef43998 100644 --- a/drivers/md/persistent-data/dm-btree.c +++ b/drivers/md/persistent-data/dm-btree.c @@ -523,7 +523,7 @@ static int btree_split_beneath(struct 
shadow_spine *s, uint64_t key) r = new_block(s->info, &right); if (r < 0) { - /* FIXME: put left */ + unlock_block(s->info, left); return r; } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 63e619b2f44e..f8e5db0cb5aa 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev) struct md_rdev *rdev; bool discard_supported = false; - rdev_for_each(rdev, mddev) { - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); - if (blk_queue_discard(bdev_get_queue(rdev->bdev))) - discard_supported = true; - } blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); @@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev) blk_queue_io_opt(mddev->queue, (mddev->chunk_sectors << 9) * mddev->raid_disks); + rdev_for_each(rdev, mddev) { + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); + if (blk_queue_discard(bdev_get_queue(rdev->bdev))) + discard_supported = true; + } if (!discard_supported) queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); else diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4517f06c41ba..d9d031ede4bf 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) } if (bio && bio_data_dir(bio) == WRITE) { - if (bio->bi_iter.bi_sector >= - conf->mddev->curr_resync_completed) { + if (bio->bi_iter.bi_sector >= conf->next_resync) { if (conf->start_next_window == MaxSector) conf->start_next_window = conf->next_resync + @@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf) conf->r1buf_pool = NULL; spin_lock_irq(&conf->resync_lock); - conf->next_resync = 0; + conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE; conf->start_next_window = MaxSector; conf->current_window_requests += conf->next_window_requests; @@ -2196,7 +2195,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) bio_trim(wbio, sector - r1_bio->sector, sectors); wbio->bi_iter.bi_sector += rdev->data_offset; wbio->bi_bdev = rdev->bdev; - if (submit_bio_wait(WRITE, wbio) == 0) + if (submit_bio_wait(WRITE, wbio) < 0) /* failure! 
*/ ok = rdev_set_badblocks(rdev, sector, sectors, 0) @@ -2259,15 +2258,16 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio) rdev_dec_pending(conf->mirrors[m].rdev, conf->mddev); } - if (test_bit(R1BIO_WriteError, &r1_bio->state)) - close_write(r1_bio); if (fail) { spin_lock_irq(&conf->device_lock); list_add(&r1_bio->retry_list, &conf->bio_end_io_list); spin_unlock_irq(&conf->device_lock); md_wakeup_thread(conf->mddev->thread); - } else + } else { + if (test_bit(R1BIO_WriteError, &r1_bio->state)) + close_write(r1_bio); raid_end_bio_io(r1_bio); + } } static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) @@ -2383,9 +2383,13 @@ static void raid1d(struct md_thread *thread) } spin_unlock_irqrestore(&conf->device_lock, flags); while (!list_empty(&tmp)) { - r1_bio = list_first_entry(&conf->bio_end_io_list, - struct r1bio, retry_list); + r1_bio = list_first_entry(&tmp, struct r1bio, + retry_list); list_del(&r1_bio->retry_list); + if (mddev->degraded) + set_bit(R1BIO_Degraded, &r1_bio->state); + if (test_bit(R1BIO_WriteError, &r1_bio->state)) + close_write(r1_bio); raid_end_bio_io(r1_bio); } } @@ -2843,8 +2847,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) abort: if (conf) { - if (conf->r1bio_pool) - mempool_destroy(conf->r1bio_pool); + mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf->poolinfo); @@ -2946,8 +2949,7 @@ static void raid1_free(struct mddev *mddev, void *priv) { struct r1conf *conf = priv; - if (conf->r1bio_pool) - mempool_destroy(conf->r1bio_pool); + mempool_destroy(conf->r1bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf->poolinfo); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0fc33eb88855..96f365968306 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -39,6 +39,7 @@ * far_copies (stored in second byte of layout) * far_offset (stored in bit 16 of layout ) * use_far_sets (stored in bit 17 of layout ) + * use_far_sets_bugfixed (stored in bit 18 of layout ) * * The data to be stored is divided into chunks using chunksize. Each device * is divided into far_copies sections. In each section, chunks are laid out @@ -1497,6 +1498,8 @@ static void status(struct seq_file *seq, struct mddev *mddev) seq_printf(seq, " %d offset-copies", conf->geo.far_copies); else seq_printf(seq, " %d far-copies", conf->geo.far_copies); + if (conf->geo.far_set_size != conf->geo.raid_disks) + seq_printf(seq, " %d devices per set", conf->geo.far_set_size); } seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks, conf->geo.raid_disks - mddev->degraded); @@ -2467,7 +2470,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i) choose_data_offset(r10_bio, rdev) + (sector - r10_bio->sector)); wbio->bi_bdev = rdev->bdev; - if (submit_bio_wait(WRITE, wbio) == 0) + if (submit_bio_wait(WRITE, wbio) < 0) /* Failure! 
*/ ok = rdev_set_badblocks(rdev, sector, sectors, 0) @@ -2654,16 +2657,17 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) rdev_dec_pending(rdev, conf->mddev); } } - if (test_bit(R10BIO_WriteError, - &r10_bio->state)) - close_write(r10_bio); if (fail) { spin_lock_irq(&conf->device_lock); list_add(&r10_bio->retry_list, &conf->bio_end_io_list); spin_unlock_irq(&conf->device_lock); md_wakeup_thread(conf->mddev->thread); - } else + } else { + if (test_bit(R10BIO_WriteError, + &r10_bio->state)) + close_write(r10_bio); raid_end_bio_io(r10_bio); + } } } @@ -2688,9 +2692,15 @@ static void raid10d(struct md_thread *thread) } spin_unlock_irqrestore(&conf->device_lock, flags); while (!list_empty(&tmp)) { - r10_bio = list_first_entry(&conf->bio_end_io_list, - struct r10bio, retry_list); + r10_bio = list_first_entry(&tmp, struct r10bio, + retry_list); list_del(&r10_bio->retry_list); + if (mddev->degraded) + set_bit(R10BIO_Degraded, &r10_bio->state); + + if (test_bit(R10BIO_WriteError, + &r10_bio->state)) + close_write(r10_bio); raid_end_bio_io(r10_bio); } } @@ -3387,7 +3397,7 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) disks = mddev->raid_disks + mddev->delta_disks; break; } - if (layout >> 18) + if (layout >> 19) return -1; if (chunk < (PAGE_SIZE >> 9) || !is_power_of_2(chunk)) @@ -3399,7 +3409,22 @@ static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new) geo->near_copies = nc; geo->far_copies = fc; geo->far_offset = fo; - geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks; + switch (layout >> 17) { + case 0: /* original layout. simple but not always optimal */ + geo->far_set_size = disks; + break; + case 1: /* "improved" layout which was buggy. Hopefully no-one is + * actually using this, but leave code here just in case.*/ + geo->far_set_size = disks/fc; + WARN(geo->far_set_size < fc, + "This RAID10 layout does not provide data safety - please backup and create new array\n"); + break; + case 2: /* "improved" layout fixed to match documentation */ + geo->far_set_size = fc * nc; + break; + default: /* Not a valid layout */ + return -1; + } geo->chunk_mask = chunk - 1; geo->chunk_shift = ffz(~chunk); return nc*fc; @@ -3486,8 +3511,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n", mdname(mddev)); if (conf) { - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); kfree(conf->mirrors); safe_put_page(conf->tmppage); kfree(conf); @@ -3682,8 +3706,7 @@ static int run(struct mddev *mddev) out_free_conf: md_unregister_thread(&mddev->thread); - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf); @@ -3696,8 +3719,7 @@ static void raid10_free(struct mddev *mddev, void *priv) { struct r10conf *conf = priv; - if (conf->r10bio_pool) - mempool_destroy(conf->r10bio_pool); + mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf->mirrors_old); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 15ef2c641b2b..49bb8d3ff9be 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf) drop_one_stripe(conf)) ; - if (conf->slab_cache) - kmem_cache_destroy(conf->slab_cache); + kmem_cache_destroy(conf->slab_cache); conf->slab_cache = NULL; } @@ -3150,6 +3149,8 @@ 
handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, spin_unlock_irq(&sh->stripe_lock); if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) wake_up(&conf->wait_for_overlap); + if (bi) + s->to_read--; while (bi && bi->bi_iter.bi_sector < sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = @@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, */ clear_bit(R5_LOCKED, &sh->dev[i].flags); } + s->to_write = 0; + s->written = 0; if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) if (atomic_dec_and_test(&conf->pending_full_writes)) @@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, */ return 0; - for (i = 0; i < s->failed; i++) { + for (i = 0; i < s->failed && i < 2; i++) { if (fdev[i]->towrite && !test_bit(R5_UPTODATE, &fdev[i]->flags) && !test_bit(R5_OVERWRITE, &fdev[i]->flags)) @@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, sh->sector < sh->raid_conf->mddev->recovery_cp) /* reconstruct-write isn't being forced */ return 0; - for (i = 0; i < s->failed; i++) { + for (i = 0; i < s->failed && i < 2; i++) { if (s->failed_num[i] != sh->pd_idx && s->failed_num[i] != sh->qd_idx && !test_bit(R5_UPTODATE, &fdev[i]->flags) && diff --git a/drivers/media/dvb-frontends/horus3a.h b/drivers/media/dvb-frontends/horus3a.h index b055319d532e..c1e2d1834b78 100644 --- a/drivers/media/dvb-frontends/horus3a.h +++ b/drivers/media/dvb-frontends/horus3a.h @@ -46,8 +46,8 @@ extern struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe, const struct horus3a_config *config, struct i2c_adapter *i2c); #else -static inline struct dvb_frontend *horus3a_attach( - const struct cxd2820r_config *config, +static inline struct dvb_frontend *horus3a_attach(struct dvb_frontend *fe, + const struct horus3a_config *config, struct i2c_adapter *i2c) { printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); diff --git a/drivers/media/dvb-frontends/lnbh25.h b/drivers/media/dvb-frontends/lnbh25.h index 69f30e21f6b3..1f329ef05acc 100644 --- a/drivers/media/dvb-frontends/lnbh25.h +++ b/drivers/media/dvb-frontends/lnbh25.h @@ -43,7 +43,7 @@ struct dvb_frontend *lnbh25_attach( struct lnbh25_config *cfg, struct i2c_adapter *i2c); #else -static inline dvb_frontend *lnbh25_attach( +static inline struct dvb_frontend *lnbh25_attach( struct dvb_frontend *fe, struct lnbh25_config *cfg, struct i2c_adapter *i2c) diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index ff31e7a01ca9..feeeb70d841e 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@ -18,6 +18,27 @@ static struct dvb_frontend_ops m88ds3103_ops; +/* write single register with mask */ +static int m88ds3103_update_bits(struct m88ds3103_dev *dev, + u8 reg, u8 mask, u8 val) +{ + int ret; + u8 tmp; + + /* no need for read if whole reg is written */ + if (mask != 0xff) { + ret = regmap_bulk_read(dev->regmap, reg, &tmp, 1); + if (ret) + return ret; + + val &= mask; + tmp &= ~mask; + val |= tmp; + } + + return regmap_bulk_write(dev->regmap, reg, &val, 1); +} + /* write reg val table using reg addr auto increment */ static int m88ds3103_wr_reg_val_tab(struct m88ds3103_dev *dev, const struct m88ds3103_reg_val *tab, int tab_len) @@ -394,10 +415,10 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) u8tmp2 = 0x00; /* 0b00 */ break; } - ret = regmap_update_bits(dev->regmap, 0x22, 0xc0, u8tmp1 << 6); + ret = 
m88ds3103_update_bits(dev, 0x22, 0xc0, u8tmp1 << 6); if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x24, 0xc0, u8tmp2 << 6); + ret = m88ds3103_update_bits(dev, 0x24, 0xc0, u8tmp2 << 6); if (ret) goto err; } @@ -455,13 +476,13 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) if (ret) goto err; } - ret = regmap_update_bits(dev->regmap, 0x9d, 0x08, 0x08); + ret = m88ds3103_update_bits(dev, 0x9d, 0x08, 0x08); if (ret) goto err; ret = regmap_write(dev->regmap, 0xf1, 0x01); if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x30, 0x80, 0x80); + ret = m88ds3103_update_bits(dev, 0x30, 0x80, 0x80); if (ret) goto err; } @@ -498,7 +519,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) switch (dev->cfg->ts_mode) { case M88DS3103_TS_SERIAL: case M88DS3103_TS_SERIAL_D7: - ret = regmap_update_bits(dev->regmap, 0x29, 0x20, u8tmp1); + ret = m88ds3103_update_bits(dev, 0x29, 0x20, u8tmp1); if (ret) goto err; u8tmp1 = 0; @@ -567,11 +588,11 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe) if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x4d, 0x02, dev->cfg->spec_inv << 1); + ret = m88ds3103_update_bits(dev, 0x4d, 0x02, dev->cfg->spec_inv << 1); if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x30, 0x10, dev->cfg->agc_inv << 4); + ret = m88ds3103_update_bits(dev, 0x30, 0x10, dev->cfg->agc_inv << 4); if (ret) goto err; @@ -625,13 +646,13 @@ static int m88ds3103_init(struct dvb_frontend *fe) dev->warm = false; /* wake up device from sleep */ - ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x01); + ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x01); if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x00); + ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x00); if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x00); + ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x00); if (ret) goto err; @@ -749,18 +770,18 @@ static int m88ds3103_sleep(struct dvb_frontend *fe) utmp = 0x29; else utmp = 0x27; - ret = regmap_update_bits(dev->regmap, utmp, 0x01, 0x00); + ret = m88ds3103_update_bits(dev, utmp, 0x01, 0x00); if (ret) goto err; /* sleep */ - ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x00); + ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x00); if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x01); + ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x01); if (ret) goto err; - ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x10); + ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x10); if (ret) goto err; @@ -992,12 +1013,12 @@ static int m88ds3103_set_tone(struct dvb_frontend *fe, } utmp = tone << 7 | dev->cfg->envelope_mode << 5; - ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); + ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp); if (ret) goto err; utmp = 1 << 2; - ret = regmap_update_bits(dev->regmap, 0xa1, reg_a1_mask, utmp); + ret = m88ds3103_update_bits(dev, 0xa1, reg_a1_mask, utmp); if (ret) goto err; @@ -1047,7 +1068,7 @@ static int m88ds3103_set_voltage(struct dvb_frontend *fe, voltage_dis ^= dev->cfg->lnb_en_pol; utmp = voltage_dis << 1 | voltage_sel << 0; - ret = regmap_update_bits(dev->regmap, 0xa2, 0x03, utmp); + ret = m88ds3103_update_bits(dev, 0xa2, 0x03, utmp); if (ret) goto err; @@ -1080,7 +1101,7 @@ static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe, } utmp = dev->cfg->envelope_mode << 5; - ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); + ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp); if (ret) goto err; @@ -1115,12 
+1136,12 @@ static int m88ds3103_diseqc_send_master_cmd(struct dvb_frontend *fe, } else { dev_dbg(&client->dev, "diseqc tx timeout\n"); - ret = regmap_update_bits(dev->regmap, 0xa1, 0xc0, 0x40); + ret = m88ds3103_update_bits(dev, 0xa1, 0xc0, 0x40); if (ret) goto err; } - ret = regmap_update_bits(dev->regmap, 0xa2, 0xc0, 0x80); + ret = m88ds3103_update_bits(dev, 0xa2, 0xc0, 0x80); if (ret) goto err; @@ -1152,7 +1173,7 @@ static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe, } utmp = dev->cfg->envelope_mode << 5; - ret = regmap_update_bits(dev->regmap, 0xa2, 0xe0, utmp); + ret = m88ds3103_update_bits(dev, 0xa2, 0xe0, utmp); if (ret) goto err; @@ -1194,12 +1215,12 @@ static int m88ds3103_diseqc_send_burst(struct dvb_frontend *fe, } else { dev_dbg(&client->dev, "diseqc tx timeout\n"); - ret = regmap_update_bits(dev->regmap, 0xa1, 0xc0, 0x40); + ret = m88ds3103_update_bits(dev, 0xa1, 0xc0, 0x40); if (ret) goto err; } - ret = regmap_update_bits(dev->regmap, 0xa2, 0xc0, 0x80); + ret = m88ds3103_update_bits(dev, 0xa2, 0xc0, 0x80); if (ret) goto err; @@ -1435,13 +1456,13 @@ static int m88ds3103_probe(struct i2c_client *client, goto err_kfree; /* sleep */ - ret = regmap_update_bits(dev->regmap, 0x08, 0x01, 0x00); + ret = m88ds3103_update_bits(dev, 0x08, 0x01, 0x00); if (ret) goto err_kfree; - ret = regmap_update_bits(dev->regmap, 0x04, 0x01, 0x01); + ret = m88ds3103_update_bits(dev, 0x04, 0x01, 0x01); if (ret) goto err_kfree; - ret = regmap_update_bits(dev->regmap, 0x23, 0x10, 0x10); + ret = m88ds3103_update_bits(dev, 0x23, 0x10, 0x10); if (ret) goto err_kfree; diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c index 81788c5a44d8..821a8f481507 100644 --- a/drivers/media/dvb-frontends/si2168.c +++ b/drivers/media/dvb-frontends/si2168.c @@ -502,6 +502,10 @@ static int si2168_init(struct dvb_frontend *fe) /* firmware is in the new format */ for (remaining = fw->size; remaining > 0; remaining -= 17) { len = fw->data[fw->size - remaining]; + if (len > SI2168_ARGLEN) { + ret = -EINVAL; + break; + } memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len); cmd.wlen = len; cmd.rlen = 1; diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c index f55b3276f28d..56773f3893d4 100644 --- a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c +++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c @@ -80,11 +80,9 @@ irqreturn_t netup_spi_interrupt(struct netup_spi *spi) u16 reg; unsigned long flags; - if (!spi) { - dev_dbg(&spi->master->dev, - "%s(): SPI not initialized\n", __func__); + if (!spi) return IRQ_NONE; - } + spin_lock_irqsave(&spi->lock, flags); reg = readw(&spi->regs->control_stat); if (!(reg & NETUP_SPI_CTRL_IRQ)) { @@ -234,11 +232,9 @@ void netup_spi_release(struct netup_unidvb_dev *ndev) unsigned long flags; struct netup_spi *spi = ndev->spi; - if (!spi) { - dev_dbg(&spi->master->dev, - "%s(): SPI not initialized\n", __func__); + if (!spi) return; - } + spin_lock_irqsave(&spi->lock, flags); reg = readw(&spi->regs->control_stat); writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat); diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c index 486aef50d99b..f922f2e827bc 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c @@ -1097,7 +1097,7 @@ static int load_slim_core_fw(const struct firmware *fw, void *context) Elf32_Ehdr *ehdr; Elf32_Phdr 
*phdr; u8 __iomem *dst; - int err, i; + int err = 0, i; if (!fw || !context) return -EINVAL; @@ -1106,7 +1106,7 @@ static int load_slim_core_fw(const struct firmware *fw, void *context) phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff); /* go through the available ELF segments */ - for (i = 0; i < ehdr->e_phnum && !err; i++, phdr++) { + for (i = 0; i < ehdr->e_phnum; i++, phdr++) { /* Only consider LOAD segments */ if (phdr->p_type != PT_LOAD) @@ -1192,7 +1192,6 @@ err: static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei) { - int ret; int err; dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA); @@ -1207,7 +1206,7 @@ static int load_c8sectpfe_fw_step1(struct c8sectpfei *fei) if (err) { dev_err(fei->dev, "request_firmware_nowait err: %d.\n", err); complete_all(&fei->fw_ack); - return ret; + return err; } return 0; diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c index 1c087cb76815..d0549fba711c 100644 --- a/drivers/media/rc/ir-hix5hd2.c +++ b/drivers/media/rc/ir-hix5hd2.c @@ -257,7 +257,7 @@ static int hix5hd2_ir_probe(struct platform_device *pdev) goto clkerr; if (devm_request_irq(dev, priv->irq, hix5hd2_ir_rx_interrupt, - IRQF_NO_SUSPEND, pdev->name, priv) < 0) { + 0, pdev->name, priv) < 0) { dev_err(dev, "IRQ %d register failed\n", priv->irq); ret = -EINVAL; goto regerr; diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c index 507382160e5e..ce157edd45fa 100644 --- a/drivers/media/tuners/si2157.c +++ b/drivers/media/tuners/si2157.c @@ -166,6 +166,10 @@ static int si2157_init(struct dvb_frontend *fe) for (remaining = fw->size; remaining > 0; remaining -= 17) { len = fw->data[fw->size - remaining]; + if (len > SI2157_ARGLEN) { + dev_err(&client->dev, "Bad firmware length\n"); + goto err_release_firmware; + } memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len); cmd.wlen = len; cmd.rlen = 1; diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index c3cac4c12fb3..197a4f2e54d2 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -34,6 +34,14 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req) unsigned int pipe; u8 requesttype; + mutex_lock(&d->usb_mutex); + + if (req->size > sizeof(dev->buf)) { + dev_err(&d->intf->dev, "too large message %u\n", req->size); + ret = -EINVAL; + goto err_mutex_unlock; + } + if (req->index & CMD_WR_FLAG) { /* write */ memcpy(dev->buf, req->data, req->size); @@ -50,14 +58,17 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req) dvb_usb_dbg_usb_control_msg(d->udev, 0, requesttype, req->value, req->index, dev->buf, req->size); if (ret < 0) - goto err; + goto err_mutex_unlock; /* read request, copy returned data to return buf */ if (requesttype == (USB_TYPE_VENDOR | USB_DIR_IN)) memcpy(req->data, dev->buf, req->size); + mutex_unlock(&d->usb_mutex); + return 0; -err: +err_mutex_unlock: + mutex_unlock(&d->usb_mutex); dev_dbg(&d->intf->dev, "failed=%d\n", ret); return ret; } diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h index 9f6115a2ee01..138062960a73 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h @@ -71,7 +71,7 @@ struct rtl28xxu_dev { - u8 buf[28]; + u8 buf[128]; u8 chip_id; u8 tuner; char *tuner_name; diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig index 82876a67f144..9beece00869b 100644 --- 
a/drivers/media/v4l2-core/Kconfig +++ b/drivers/media/v4l2-core/Kconfig @@ -47,7 +47,7 @@ config V4L2_MEM2MEM_DEV # Used by LED subsystem flash drivers config V4L2_FLASH_LED_CLASS tristate "V4L2 flash API for LED flash class devices" - depends on VIDEO_V4L2_SUBDEV_API + depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API depends on LEDS_CLASS_FLASH ---help--- Say Y here to enable V4L2 flash API support for LED flash diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index c6a644b22af4..6f3154613dc7 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -58,12 +58,18 @@ config OMAP_GPMC memory drives like NOR, NAND, OneNAND, SRAM. config OMAP_GPMC_DEBUG - bool + bool "Enable GPMC debug output and skip reset of GPMC during init" depends on OMAP_GPMC help Enables verbose debugging mostly to decode the bootloader provided - timings. Enable this during development to configure devices - connected to the GPMC bus. + timings. To preserve the bootloader provided timings, the reset + of GPMC is skipped during init. Enable this during development to + configure devices connected to the GPMC bus. + + NOTE: In addition to matching the register setup with the bootloader + you also need to match the GPMC FCLK frequency used by the + bootloader or else the GPMC timings won't be identical with the + bootloader timings. config MVEBU_DEVBUS bool "Marvell EBU Device Bus Controller" diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 32ac049f2bc4..6515dfc2b805 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -696,7 +696,6 @@ int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t, int div; u32 l; - gpmc_cs_show_timings(cs, "before gpmc_cs_set_timings"); div = gpmc_calc_divider(t->sync_clk); if (div < 0) return div; @@ -1988,6 +1987,7 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, if (ret < 0) goto err; + gpmc_cs_show_timings(cs, "before gpmc_cs_program_settings"); ret = gpmc_cs_program_settings(cs, &gpmc_s); if (ret < 0) goto err; diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h index f28cb28a62f8..2c7f8d7c0595 100644 --- a/drivers/mfd/intel-lpss.h +++ b/drivers/mfd/intel-lpss.h @@ -42,6 +42,8 @@ int intel_lpss_resume(struct device *dev); .thaw = intel_lpss_resume, \ .poweroff = intel_lpss_suspend, \ .restore = intel_lpss_resume, +#else +#define INTEL_LPSS_SLEEP_PM_OPS #endif #define INTEL_LPSS_RUNTIME_PM_OPS \ diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c index c52162ea3d0a..586098f1b233 100644 --- a/drivers/mfd/max77843.c +++ b/drivers/mfd/max77843.c @@ -80,7 +80,7 @@ static int max77843_chg_init(struct max77693_dev *max77843) if (!max77843->i2c_chg) { dev_err(&max77843->i2c->dev, "Cannot allocate I2C device for Charger\n"); - return PTR_ERR(max77843->i2c_chg); + return -ENODEV; } i2c_set_clientdata(max77843->i2c_chg, max77843); diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index 8eec887c8f70..6d7c188fb65c 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1209,7 +1209,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) * after the host receives the enum_resp * message clients may be added or removed */ - if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS && + if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS || dev->hbm_state >= MEI_HBM_STOPPED) { dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n", dev->dev_state, dev->hbm_state); diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 
b78cf5d403a3..7fc9174d4619 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -2263,15 +2263,12 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test) /* * eMMC hardware reset. */ -static int mmc_test_hw_reset(struct mmc_test_card *test) +static int mmc_test_reset(struct mmc_test_card *test) { struct mmc_card *card = test->card; struct mmc_host *host = card->host; int err; - if (!mmc_card_mmc(card) || !mmc_can_reset(card)) - return RESULT_UNSUP_CARD; - err = mmc_hw_reset(host); if (!err) return RESULT_OK; @@ -2605,8 +2602,8 @@ static const struct mmc_test_case mmc_test_cases[] = { }, { - .name = "eMMC hardware reset", - .run = mmc_test_hw_reset, + .name = "Reset test", + .run = mmc_test_reset, }, }; diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 0520064dc33b..a3eb20bdcd97 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -134,9 +134,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) int err = cmd->error; /* Flag re-tuning needed on CRC errors */ - if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || + if ((cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) && + (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) || (mrq->data && mrq->data->error == -EILSEQ) || - (mrq->stop && mrq->stop->error == -EILSEQ)) + (mrq->stop && mrq->stop->error == -EILSEQ))) mmc_retune_needed(host); if (err && cmd->retries && mmc_host_is_spi(host)) { diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index abd933b7029b..5466f25f0281 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -457,7 +457,7 @@ int mmc_of_parse(struct mmc_host *host) 0, &cd_gpio_invert); if (!ret) dev_info(host->parent, "Got CD GPIO\n"); - else if (ret != -ENOENT) + else if (ret != -ENOENT && ret != -ENOSYS) return ret; /* @@ -481,7 +481,7 @@ int mmc_of_parse(struct mmc_host *host) ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert); if (!ret) dev_info(host->parent, "Got WP GPIO\n"); - else if (ret != -ENOENT) + else if (ret != -ENOENT && ret != -ENOSYS) return ret; if (of_property_read_bool(np, "disable-wp")) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index e726903170a8..f6cd995dbe92 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1924,7 +1924,6 @@ EXPORT_SYMBOL(mmc_can_reset); static int mmc_reset(struct mmc_host *host) { struct mmc_card *card = host->card; - u32 status; if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) return -EOPNOTSUPP; @@ -1937,12 +1936,6 @@ static int mmc_reset(struct mmc_host *host) host->ops->hw_reset(host); - /* If the reset has happened, then a status command will fail */ - if (!mmc_send_status(card, &status)) { - mmc_host_clk_release(host); - return -ENOSYS; - } - /* Set initial state and call mmc_set_ios */ mmc_set_initial_state(host); mmc_host_clk_release(host); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 781e4db31767..7fb0753abe30 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -182,6 +182,7 @@ struct omap_hsmmc_host { struct clk *fclk; struct clk *dbclk; struct regulator *pbias; + bool pbias_enabled; void __iomem *base; int vqmmc_enabled; resource_size_t mapbase; @@ -328,20 +329,22 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on, return ret; } - if (!regulator_is_enabled(host->pbias)) { + if (host->pbias_enabled == 0) { ret = 
regulator_enable(host->pbias); if (ret) { dev_err(host->dev, "pbias reg enable fail\n"); return ret; } + host->pbias_enabled = 1; } } else { - if (regulator_is_enabled(host->pbias)) { + if (host->pbias_enabled == 1) { ret = regulator_disable(host->pbias); if (ret) { dev_err(host->dev, "pbias reg disable fail\n"); return ret; } + host->pbias_enabled = 0; } } @@ -475,7 +478,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc"); if (IS_ERR(mmc->supply.vmmc)) { ret = PTR_ERR(mmc->supply.vmmc); - if (ret != -ENODEV) + if ((ret != -ENODEV) && host->dev->of_node) return ret; dev_dbg(host->dev, "unable to get vmmc regulator %ld\n", PTR_ERR(mmc->supply.vmmc)); @@ -490,7 +493,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux"); if (IS_ERR(mmc->supply.vqmmc)) { ret = PTR_ERR(mmc->supply.vqmmc); - if (ret != -ENODEV) + if ((ret != -ENODEV) && host->dev->of_node) return ret; dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n", PTR_ERR(mmc->supply.vqmmc)); @@ -500,7 +503,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) host->pbias = devm_regulator_get_optional(host->dev, "pbias"); if (IS_ERR(host->pbias)) { ret = PTR_ERR(host->pbias); - if (ret != -ENODEV) + if ((ret != -ENODEV) && host->dev->of_node) return ret; dev_dbg(host->dev, "unable to get pbias regulator %ld\n", PTR_ERR(host->pbias)); @@ -2053,6 +2056,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev) host->base = base + pdata->reg_offset; host->power_mode = MMC_POWER_OFF; host->next_data.cookie = 1; + host->pbias_enabled = 0; host->vqmmc_enabled = 0; ret = omap_hsmmc_gpio_init(mmc, host, pdata); diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 1420f29628c7..8cadd74e8407 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -454,12 +455,8 @@ static int pxamci_get_ro(struct mmc_host *mmc) { struct pxamci_host *host = mmc_priv(mmc); - if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { - if (host->pdata->gpio_card_ro_invert) - return !gpio_get_value(host->pdata->gpio_card_ro); - else - return gpio_get_value(host->pdata->gpio_card_ro); - } + if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) + return mmc_gpio_get_ro(mmc); if (host->pdata && host->pdata->get_ro) return !!host->pdata->get_ro(mmc_dev(mmc)); /* @@ -551,6 +548,7 @@ static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable) static const struct mmc_host_ops pxamci_ops = { .request = pxamci_request, + .get_cd = mmc_gpio_get_cd, .get_ro = pxamci_get_ro, .set_ios = pxamci_set_ios, .enable_sdio_irq = pxamci_enable_sdio_irq, @@ -790,37 +788,31 @@ static int pxamci_probe(struct platform_device *pdev) gpio_power = host->pdata->gpio_power; } if (gpio_is_valid(gpio_power)) { - ret = gpio_request(gpio_power, "mmc card power"); + ret = devm_gpio_request(&pdev->dev, gpio_power, + "mmc card power"); if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power); + dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", + gpio_power); goto out; } gpio_direction_output(gpio_power, host->pdata->gpio_power_invert); } - if (gpio_is_valid(gpio_ro)) { - ret = gpio_request(gpio_ro, "mmc card read only"); - if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); - goto err_gpio_ro; - } - gpio_direction_input(gpio_ro); + 
if (gpio_is_valid(gpio_ro)) + ret = mmc_gpio_request_ro(mmc, gpio_ro); + if (ret) { + dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); + goto out; + } else { + mmc->caps |= host->pdata->gpio_card_ro_invert ? + MMC_CAP2_RO_ACTIVE_HIGH : 0; } - if (gpio_is_valid(gpio_cd)) { - ret = gpio_request(gpio_cd, "mmc card detect"); - if (ret) { - dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); - goto err_gpio_cd; - } - gpio_direction_input(gpio_cd); - ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - "mmc card detect", mmc); - if (ret) { - dev_err(&pdev->dev, "failed to request card detect IRQ\n"); - goto err_request_irq; - } + if (gpio_is_valid(gpio_cd)) + ret = mmc_gpio_request_cd(mmc, gpio_cd, 0); + if (ret) { + dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd); + goto out; } if (host->pdata && host->pdata->init) @@ -835,13 +827,7 @@ static int pxamci_probe(struct platform_device *pdev) return 0; -err_request_irq: - gpio_free(gpio_cd); -err_gpio_cd: - gpio_free(gpio_ro); -err_gpio_ro: - gpio_free(gpio_power); - out: +out: if (host) { if (host->dma_chan_rx) dma_release_channel(host->dma_chan_rx); @@ -873,14 +859,6 @@ static int pxamci_remove(struct platform_device *pdev) gpio_ro = host->pdata->gpio_card_ro; gpio_power = host->pdata->gpio_power; } - if (gpio_is_valid(gpio_cd)) { - free_irq(gpio_to_irq(gpio_cd), mmc); - gpio_free(gpio_cd); - } - if (gpio_is_valid(gpio_ro)) - gpio_free(gpio_ro); - if (gpio_is_valid(gpio_power)) - gpio_free(gpio_power); if (host->vcc) regulator_put(host->vcc); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index d1556643a41d..a0f05de5409f 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -43,6 +43,7 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = { static const struct sdhci_pltfm_data soc_data_sama5d2 = { .ops = &sdhci_at91_sama5d2_ops, + .quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST, }; static const struct of_device_id sdhci_at91_dt_match[] = { diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 946d37f94a31..f5edf9d3a18a 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -135,6 +135,7 @@ static int armada_38x_quirks(struct platform_device *pdev, struct sdhci_pxa *pxa = pltfm_host->priv; struct resource *res; + host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; host->quirks |= SDHCI_QUIRK_MISSING_CAPS; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "conf-sdio3"); @@ -290,6 +291,9 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) uhs == MMC_TIMING_UHS_DDR50) { reg_val &= ~SDIO3_CONF_CLK_INV; reg_val |= SDIO3_CONF_SD_FB_CLK; + } else if (uhs == MMC_TIMING_MMC_HS) { + reg_val &= ~SDIO3_CONF_CLK_INV; + reg_val &= ~SDIO3_CONF_SD_FB_CLK; } else { reg_val |= SDIO3_CONF_CLK_INV; reg_val &= ~SDIO3_CONF_SD_FB_CLK; @@ -398,7 +402,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { ret = armada_38x_quirks(pdev, host); if (ret < 0) - goto err_clk_get; + goto err_mbus_win; ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); if (ret < 0) goto err_mbus_win; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 64b7fdbd1a9c..fbc7efdddcb5 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1160,6 +1160,8 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) 
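The sdhci.c hunk below wires the new SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST quirk (set for sama5d2 in the sdhci-of-at91 hunk above and defined in the sdhci.h hunk further down) into sdhci_set_clock(): when the bit is set, the core pauses for about 1 ms after gating the internal clock before the divider is reprogrammed. A condensed sketch of the resulting flow follows; the function name is invented for illustration and the divider math is elided, so this is not the full driver code:

/* Sketch only: quirk-gated settle delay when the SD clock is reprogrammed. */
static void sdhci_set_clock_sketch(struct sdhci_host *host, unsigned int clock)
{
	host->mmc->actual_clock = 0;
	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);	/* stop the internal clock */

	if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
		mdelay(1);	/* controllers with the new quirk need a pause here */

	if (clock == 0)
		return;		/* clock stays gated */

	/* ... divider selection and clock re-enable are unchanged by the patch ... */
}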
host->mmc->actual_clock = 0; sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST) + mdelay(1); if (clock == 0) return; diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 7c02ff46c8ac..9d4aa31b683a 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -412,6 +412,11 @@ struct sdhci_host { #define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) /* Broken Clock divider zero in controller */ #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15) +/* + * When internal clock is disabled, a delay is needed before modifying the + * SD clock frequency or enabling back the internal clock. + */ +#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST (1<<16) int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index a7b7a6771598..b981b8552e43 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -210,6 +210,16 @@ #define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */ #define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ +#define SDXC_CLK_400K 0 +#define SDXC_CLK_25M 1 +#define SDXC_CLK_50M 2 +#define SDXC_CLK_50M_DDR 3 + +struct sunxi_mmc_clk_delay { + u32 output; + u32 sample; +}; + struct sunxi_idma_des { u32 config; u32 buf_size; @@ -229,6 +239,7 @@ struct sunxi_mmc_host { struct clk *clk_mmc; struct clk *clk_sample; struct clk *clk_output; + const struct sunxi_mmc_clk_delay *clk_delays; /* irq */ spinlock_t lock; @@ -654,25 +665,19 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, /* determine delays */ if (rate <= 400000) { - oclk_dly = 180; - sclk_dly = 42; + oclk_dly = host->clk_delays[SDXC_CLK_400K].output; + sclk_dly = host->clk_delays[SDXC_CLK_400K].sample; } else if (rate <= 25000000) { - oclk_dly = 180; - sclk_dly = 75; + oclk_dly = host->clk_delays[SDXC_CLK_25M].output; + sclk_dly = host->clk_delays[SDXC_CLK_25M].sample; } else if (rate <= 50000000) { if (ios->timing == MMC_TIMING_UHS_DDR50) { - oclk_dly = 60; - sclk_dly = 120; + oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output; + sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample; } else { - oclk_dly = 90; - sclk_dly = 150; + oclk_dly = host->clk_delays[SDXC_CLK_50M].output; + sclk_dly = host->clk_delays[SDXC_CLK_50M].sample; } - } else if (rate <= 100000000) { - oclk_dly = 6; - sclk_dly = 24; - } else if (rate <= 200000000) { - oclk_dly = 3; - sclk_dly = 12; } else { return -EINVAL; } @@ -871,6 +876,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) static const struct of_device_id sunxi_mmc_of_match[] = { { .compatible = "allwinner,sun4i-a10-mmc", }, { .compatible = "allwinner,sun5i-a13-mmc", }, + { .compatible = "allwinner,sun9i-a80-mmc", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); @@ -884,6 +890,20 @@ static struct mmc_host_ops sunxi_mmc_ops = { .hw_reset = sunxi_mmc_hw_reset, }; +static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = { + [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, + [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, + [SDXC_CLK_50M] = { .output = 90, .sample = 120 }, + [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 }, +}; + +static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = { + [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, + [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, + [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, + [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 }, +}; + 
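The delay tables just added, together with the sunxi_mmc_clk_set_rate() changes earlier in this patch and the probe-time selection in the next hunk, replace the old hard-coded output/sample delays with a per-SoC lookup keyed by a small clock-class index. A rough sketch of the combined lookup is shown below; the helper name is invented, and error handling plus the >50 MHz classes dropped by this patch are omitted:

/* Sketch: resolve output/sample phase delays from the per-SoC table
 * (host->clk_delays points at sunxi_mmc_clk_delays or sun9i_mmc_clk_delays,
 * chosen at probe time from the compatible string). */
static int sunxi_mmc_lookup_delays(struct sunxi_mmc_host *host,
				   struct mmc_ios *ios, unsigned long rate,
				   u32 *oclk_dly, u32 *sclk_dly)
{
	int idx;

	if (rate <= 400000)
		idx = SDXC_CLK_400K;
	else if (rate <= 25000000)
		idx = SDXC_CLK_25M;
	else if (rate <= 50000000)
		idx = (ios->timing == MMC_TIMING_UHS_DDR50) ?
		      SDXC_CLK_50M_DDR : SDXC_CLK_50M;
	else
		return -EINVAL;

	*oclk_dly = host->clk_delays[idx].output;
	*sclk_dly = host->clk_delays[idx].sample;
	return 0;
}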
static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, struct platform_device *pdev) { @@ -895,6 +915,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, else host->idma_des_size_bits = 16; + if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc")) + host->clk_delays = sun9i_mmc_clk_delays; + else + host->clk_delays = sunxi_mmc_clk_delays; + ret = mmc_regulator_get_supply(host->mmc); if (ret) { if (ret != -EPROBE_DEFER) diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 2426db88db36..f04445b992f5 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -879,7 +879,7 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom) oob_chunk_size); /* the last chunk */ - memcpy16_toio(&s[oob_chunk_size * sparebuf_size], + memcpy16_toio(&s[i * sparebuf_size], &d[i * oob_chunk_size], host->used_oobsize - i * oob_chunk_size); } diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index f97a58d6aae1..e7d333c162be 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -147,6 +147,10 @@ #define NFC_ECC_MODE GENMASK(15, 12) #define NFC_RANDOM_SEED GENMASK(30, 16) +/* NFC_USER_DATA helper macros */ +#define NFC_BUF_TO_USER_DATA(buf) ((buf)[0] | ((buf)[1] << 8) | \ + ((buf)[2] << 16) | ((buf)[3] << 24)) + #define NFC_DEFAULT_TIMEOUT_MS 1000 #define NFC_SRAM_SIZE 1024 @@ -646,15 +650,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize; /* Fill OOB data in */ - if (oob_required) { - tmp = 0xffffffff; - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp, - 4); - } else { - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, - chip->oob_poi + offset - mtd->writesize, - 4); - } + writel(NFC_BUF_TO_USER_DATA(chip->oob_poi + + layout->oobfree[i].offset), + nfc->regs + NFC_REG_USER_DATA_BASE); chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1); @@ -784,14 +782,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd, offset += ecc->size; /* Fill OOB data in */ - if (oob_required) { - tmp = 0xffffffff; - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp, - 4); - } else { - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob, - 4); - } + writel(NFC_BUF_TO_USER_DATA(oob), + nfc->regs + NFC_REG_USER_DATA_BASE); tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR | (1 << 30); @@ -1389,6 +1381,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc) node); nand_release(&chip->mtd); sunxi_nand_ecc_cleanup(&chip->nand.ecc); + list_del(&chip->node); } } diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 5bbd1f094f4e..1fc23e48fe8e 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi, goto bad; } + if (data_size > ubi->leb_size) { + ubi_err(ubi, "bad data_size"); + goto bad; + } + if (vol_type == UBI_VID_STATIC) { /* * Although from high-level point of view static volumes may diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 80bdd5b88bac..d85c19762160 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -649,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi, if (ubi->corr_peb_count) ubi_err(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); + return -ENOSPC; } ubi->rsvd_pebs += reserved_pebs; ubi->avail_pebs -= reserved_pebs; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 275d9fb6fe5c..eb4489f9082f 100644 --- 
a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) if (ubi->corr_peb_count) ubi_err(ubi, "%d PEBs are corrupted and not used", ubi->corr_peb_count); + err = -ENOSPC; goto out_free; } ubi->avail_pebs -= reserved_pebs; diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index e5fac368068a..131026fbc2d7 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -87,6 +87,7 @@ static const struct pci_device_id peak_pci_tbl[] = { {PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, + {PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,}, #ifdef CONFIG_CAN_PEAK_PCIEC {PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index f8baa897d1a0..1f7dd927cc5e 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2051,6 +2051,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA; else reg |= PORT_CONTROL_FRAME_MODE_DSA; + reg |= PORT_CONTROL_FORWARD_UNKNOWN | + PORT_CONTROL_FORWARD_UNKNOWN_MC; } if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 2c063b60db4b..96f485ab612e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -327,9 +327,13 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata) pdata->debugfs_xpcs_reg = 0; buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name); + if (!buf) + return; + pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL); if (!pdata->xgbe_debugfs) { netdev_err(pdata->netdev, "debugfs_create_dir failed\n"); + kfree(buf); return; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index aeb7ce64452e..be628bd9fb18 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3351,6 +3351,13 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) udp_rss_requested = 0; else return -EINVAL; + + if (CHIP_IS_E1x(bp) && udp_rss_requested) { + DP(BNX2X_MSG_ETHTOOL, + "57710, 57711 boards don't support RSS according to UDP 4-tuple\n"); + return -EINVAL; + } + if ((info->flow_type == UDP_V4_FLOW) && (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) { bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 3bc701e4c59e..1805541b4240 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1683,6 +1683,24 @@ static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR); } +static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) +{ + u32 int0_enable = 0; + + /* Monitor cable plug/unplugged event for internal PHY, external PHY + * and MoCA PHY + */ + if (priv->internal_phy) { + int0_enable |= UMAC_IRQ_LINK_EVENT; + } else if (priv->ext_phy) { + int0_enable |= 
UMAC_IRQ_LINK_EVENT; + } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { + if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + int0_enable |= UMAC_IRQ_LINK_EVENT; + } + bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); +} + static int init_umac(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; @@ -1723,15 +1741,8 @@ static int init_umac(struct bcmgenet_priv *priv) /* Enable Tx default queue 16 interrupts */ int0_enable |= UMAC_IRQ_TXDMA_DONE; - /* Monitor cable plug/unplugged event for internal PHY */ - if (priv->internal_phy) { - int0_enable |= UMAC_IRQ_LINK_EVENT; - } else if (priv->ext_phy) { - int0_enable |= UMAC_IRQ_LINK_EVENT; - } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { - if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) - int0_enable |= UMAC_IRQ_LINK_EVENT; - + /* Configure backpressure vectors for MoCA */ + if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { reg = bcmgenet_bp_mc_get(priv); reg |= BIT(priv->hw_params->bp_in_en_shift); @@ -2645,6 +2656,9 @@ static void bcmgenet_netif_start(struct net_device *dev) netif_tx_start_all_queues(dev); + /* Monitor link interrupts now */ + bcmgenet_link_intr_enable(priv); + phy_start(priv->phydev); } diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index b7a0f7879de2..9e59663a6ead 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar) } /* Flush FLI data fifo. */ -static u32 +static int bfa_flash_fifo_flush(void __iomem *pci_bar) { u32 i; @@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar) } /* Read flash status. */ -static u32 +static int bfa_flash_status_read(void __iomem *pci_bar) { union bfa_flash_dev_status_reg dev_status; - u32 status; + int status; u32 ret_status; int i; @@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar) } /* Start flash read operation. 
*/ -static u32 +static int bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, char *buf) { - u32 status; + int status; /* len must be mutiple of 4 and not exceeding fifo size */ if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) @@ -1703,7 +1703,8 @@ static enum bfa_status bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, u32 len) { - u32 n, status; + u32 n; + int status; u32 off, l, s, residue, fifo_sz; residue = len; diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 821540913343..d463563e1f70 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -592,6 +592,7 @@ struct be_adapter { int be_get_temp_freq; struct be_hwmon hwmon_info; u8 pf_number; + u8 pci_func_num; struct rss_info rss_info; /* Filters for packets that need to be sent to BMC */ u32 bmc_filt_mask; diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index eb323913cd39..1795c935ff02 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -851,8 +851,10 @@ static int be_cmd_notify_wait(struct be_adapter *adapter, return status; dest_wrb = be_cmd_copy(adapter, wrb); - if (!dest_wrb) - return -EBUSY; + if (!dest_wrb) { + status = -EBUSY; + goto unlock; + } if (use_mcc(adapter)) status = be_mcc_notify_wait(adapter); @@ -862,6 +864,7 @@ static int be_cmd_notify_wait(struct be_adapter *adapter, if (!status) memcpy(wrb, dest_wrb, sizeof(*wrb)); +unlock: be_cmd_unlock(adapter); return status; } @@ -1984,6 +1987,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) be_if_cap_flags(adapter)); } flags &= be_if_cap_flags(adapter); + if (!flags) + return -ENOTSUPP; return __be_cmd_rx_filter(adapter, flags, value); } @@ -2887,6 +2892,7 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter) if (!status) { attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); adapter->hba_port_num = attribs->hba_attribs.phy_port; + adapter->pci_func_num = attribs->pci_func_num; serial_num = attribs->hba_attribs.controller_serial_number; for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++) adapter->serial_num[i] = le32_to_cpu(serial_num[i]) & @@ -3709,7 +3715,6 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) status = -EINVAL; goto err; } - adapter->pf_number = desc->pf_num; be_copy_nic_desc(res, desc); } @@ -3721,7 +3726,10 @@ err: return status; } -/* Will use MBOX only if MCCQ has not been created */ +/* Will use MBOX only if MCCQ has not been created + * non-zero domain => a PF is querying this on behalf of a VF + * zero domain => a PF or a VF is querying this for itself + */ int be_cmd_get_profile_config(struct be_adapter *adapter, struct be_resources *res, u8 query, u8 domain) { @@ -3748,10 +3756,15 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, OPCODE_COMMON_GET_PROFILE_CONFIG, cmd.size, &wrb, &cmd); - req->hdr.domain = domain; if (!lancer_chip(adapter)) req->hdr.version = 1; req->type = ACTIVE_PROFILE_TYPE; + /* When a function is querying profile information relating to + * itself hdr.pf_number must be set to it's pci_func_num + 1 + */ + req->hdr.domain = domain; + if (domain == 0) + req->hdr.pf_num = adapter->pci_func_num + 1; /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the * descriptors with all bits set to "1" for the fields which can be @@ -3921,12 +3934,16 @@ static void be_fill_vf_res_template(struct be_adapter 
*adapter, vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS); } - - nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); } else { num_vf_qs = 1; } + if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) { + nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); + vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; + } + + nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); nic_vft->rq_count = cpu_to_le16(num_vf_qs); nic_vft->txq_count = cpu_to_le16(num_vf_qs); nic_vft->rssq_count = cpu_to_le16(num_vf_qs); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 7d178bdb112e..91155ea74f34 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -289,7 +289,9 @@ struct be_cmd_req_hdr { u32 timeout; /* dword 1 */ u32 request_length; /* dword 2 */ u8 version; /* dword 3 */ - u8 rsvd[3]; /* dword 3 */ + u8 rsvd1; /* dword 3 */ + u8 pf_num; /* dword 3 */ + u8 rsvd2; /* dword 3 */ }; #define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ @@ -1652,7 +1654,11 @@ struct mgmt_hba_attribs { struct mgmt_controller_attrib { struct mgmt_hba_attribs hba_attribs; - u32 rsvd0[10]; + u32 rsvd0[2]; + u16 rsvd1; + u8 pci_func_num; + u8 rsvd2; + u32 rsvd3[7]; } __packed; struct be_cmd_req_cntl_attribs { diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 7bf51a1a0a77..eb48a977f8da 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1123,11 +1123,12 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, struct sk_buff *skb, struct be_wrb_params *wrb_params) { - /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or - * less may cause a transmit stall on that port. So the work-around is - * to pad short packets (<= 32 bytes) to a 36-byte length. + /* Lancer, SH and BE3 in SRIOV mode have a bug wherein + * packets that are 32b or less may cause a transmit stall + * on that port. The workaround is to pad such packets + * (len <= 32 bytes) to a minimum length of 36b. */ - if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { + if (skb->len <= 32) { if (skb_put_padto(skb, 36)) return NULL; } @@ -4205,10 +4206,6 @@ static int be_get_config(struct be_adapter *adapter) int status, level; u16 profile_id; - status = be_cmd_get_cntl_attributes(adapter); - if (status) - return status; - status = be_cmd_query_fw_cfg(adapter); if (status) return status; @@ -4407,6 +4404,11 @@ static int be_setup(struct be_adapter *adapter) if (!lancer_chip(adapter)) be_cmd_req_native_mode(adapter); + /* Need to invoke this cmd first to get the PCI Function Number */ + status = be_cmd_get_cntl_attributes(adapter); + if (status) + return status; + if (!BE2_chip(adapter) && be_physfn(adapter)) be_alloc_sriov_res(adapter); @@ -4999,7 +5001,15 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter, return false; } - return (fhdr->asic_type_rev >= adapter->asic_rev); + /* In BE3 FW images the "asic_type_rev" field doesn't track the + * asic_rev of the chips it is compatible with. 
+ * When asic_type_rev is 0 the image is compatible only with + * pre-BE3-R chips (asic_rev < 0x10) + */ + if (BEx_chip(adapter) && fhdr->asic_type_rev == 0) + return adapter->asic_rev < 0x10; + else + return (fhdr->asic_type_rev >= adapter->asic_rev); } static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw) diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 3c40f6b99224..55c36230e176 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -198,17 +198,28 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus) #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) /* + * Return the TBIPA address, starting from the address + * of the mapped GFAR MDIO registers (struct gfar) * This is mildly evil, but so is our hardware for doing this. * Also, we have to cast back to struct gfar because of * definition weirdness done in gianfar.h. */ -static uint32_t __iomem *get_gfar_tbipa(void __iomem *p) +static uint32_t __iomem *get_gfar_tbipa_from_mdio(void __iomem *p) { struct gfar __iomem *enet_regs = p; return &enet_regs->tbipa; } +/* + * Return the TBIPA address, starting from the address + * of the mapped GFAR MII registers (gfar_mii_regs[] within struct gfar) + */ +static uint32_t __iomem *get_gfar_tbipa_from_mii(void __iomem *p) +{ + return get_gfar_tbipa_from_mdio(container_of(p, struct gfar, gfar_mii_regs)); +} + /* * Return the TBIPAR address for an eTSEC2 node */ @@ -220,11 +231,12 @@ static uint32_t __iomem *get_etsec_tbipa(void __iomem *p) #if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) /* - * Return the TBIPAR address for a QE MDIO node + * Return the TBIPAR address for a QE MDIO node, starting from the address + * of the mapped MII registers (struct fsl_pq_mii) */ static uint32_t __iomem *get_ucc_tbipa(void __iomem *p) { - struct fsl_pq_mdio __iomem *mdio = p; + struct fsl_pq_mdio __iomem *mdio = container_of(p, struct fsl_pq_mdio, mii); return &mdio->utbipar; } @@ -300,14 +312,14 @@ static const struct of_device_id fsl_pq_mdio_match[] = { .compatible = "fsl,gianfar-tbi", .data = &(struct fsl_pq_mdio_data) { .mii_offset = 0, - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mii, }, }, { .compatible = "fsl,gianfar-mdio", .data = &(struct fsl_pq_mdio_data) { .mii_offset = 0, - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mii, }, }, { @@ -315,7 +327,7 @@ static const struct of_device_id fsl_pq_mdio_match[] = { .compatible = "gianfar", .data = &(struct fsl_pq_mdio_data) { .mii_offset = offsetof(struct fsl_pq_mdio, mii), - .get_tbipa = get_gfar_tbipa, + .get_tbipa = get_gfar_tbipa_from_mdio, }, }, { @@ -445,6 +457,16 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) tbipa = data->get_tbipa(priv->map); + /* + * Add consistency check to make sure TBI is contained + * within the mapped range (not because we would get a + * segfault, rather to catch bugs in computing TBI + * address). Print error message but continue anyway. 
+ */ + if ((void *)tbipa > priv->map + resource_size(&res) - 4) + dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n", + ((void *)tbipa - priv->map) + 4); + iowrite32be(be32_to_cpup(prop), tbipa); } } diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index cc2d8b4b18e3..253f8ed0537a 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev) struct net_device *ndev; struct hip04_priv *priv; struct resource *res; - unsigned int irq; + int irq; int ret; ndev = alloc_etherdev(sizeof(struct hip04_priv)); diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 28df37420da9..ac02c675c59c 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr { u32 index; }; -#define EMAC_ETHTOOL_REGS_VER 0 -#define EMAC4_ETHTOOL_REGS_VER 1 -#define EMAC4SYNC_ETHTOOL_REGS_VER 2 +#define EMAC_ETHTOOL_REGS_VER 3 +#define EMAC4_ETHTOOL_REGS_VER 4 +#define EMAC4SYNC_ETHTOOL_REGS_VER 5 #endif /* __IBM_NEWEMAC_CORE_H */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 3e0d20037675..c0e943aecd13 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -386,7 +386,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; - hw->aq.asq.count = hw->aq.num_asq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); @@ -404,6 +403,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -445,7 +445,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; - hw->aq.arq.count = hw->aq.num_arq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); @@ -463,6 +462,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + /* set next_to_use to head */ ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); if (ntu == ntc) { @@ -1007,6 +1014,8 @@ clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) *pending = (ntc > ntu ? 
hw->aq.arq.count : 0) + (ntu - ntc); + +clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); if (i40e_is_nvm_update_op(&e->desc)) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 851c1a159be8..dd44fafd8798 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2672,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) rx_ctx.lrxqthresh = 2; rx_ctx.crcstrip = 1; rx_ctx.l2tsel = 1; - rx_ctx.showiv = 1; + /* this controls whether VLAN is stripped from inner headers */ + rx_ctx.showiv = 0; #ifdef I40E_FCOE rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE); #endif @@ -8388,6 +8389,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | NETIF_F_TSO; netdev->features = NETIF_F_SG | @@ -8395,6 +8397,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_SCTP_CSUM | NETIF_F_HIGHDMA | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index f08450b90774..a23ebfd5cd25 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -373,7 +373,6 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) hw->aq.asq.next_to_use = 0; hw->aq.asq.next_to_clean = 0; - hw->aq.asq.count = hw->aq.num_asq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_asq_ring(hw); @@ -391,6 +390,7 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.asq.count = hw->aq.num_asq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -432,7 +432,6 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) hw->aq.arq.next_to_use = 0; hw->aq.arq.next_to_clean = 0; - hw->aq.arq.count = hw->aq.num_arq_entries; /* allocate the ring memory */ ret_code = i40e_alloc_adminq_arq_ring(hw); @@ -450,6 +449,7 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw) goto init_adminq_free_rings; /* success! */ + hw->aq.arq.count = hw->aq.num_arq_entries; goto init_adminq_exit; init_adminq_free_rings: @@ -887,6 +887,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); + if (hw->aq.arq.count == 0) { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQRX: Admin queue not initialized.\n"); + ret_code = I40E_ERR_QUEUE_EMPTY; + goto clean_arq_element_err; + } + /* set next_to_use to head */ ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); if (ntu == ntc) { @@ -948,6 +955,8 @@ clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) *pending = (ntc > ntu ? 
hw->aq.arq.count : 0) + (ntu - ntc); + +clean_arq_element_err: mutex_unlock(&hw->aq.arq_mutex); return ret_code; diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 8e81e53c370e..c34488479365 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -1364,6 +1364,10 @@ int mlx4_test_interrupts(struct mlx4_dev *dev) * and performing a NOP command */ for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) { + /* Make sure request_irq was called */ + if (!priv->eq_table.eq[i].have_irq) + continue; + /* Temporary use polling for command completions */ mlx4_cmd_use_polling(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 006757f80988..cc3a9897574c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2669,14 +2669,11 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) if (msi_x) { int nreq = dev->caps.num_ports * num_online_cpus() + 1; - bool shared_ports = false; nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, nreq); - if (nreq > MAX_MSIX) { + if (nreq > MAX_MSIX) nreq = MAX_MSIX; - shared_ports = true; - } entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); if (!entries) @@ -2699,9 +2696,6 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, dev->caps.num_ports); - if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) - shared_ports = true; - for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { if (i == MLX4_EQ_ASYNC) continue; @@ -2709,7 +2703,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) priv->eq_table.eq[i].irq = entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; - if (shared_ports) { + if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, dev->caps.num_ports); /* We don't set affinity hint when there diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index bd9ea0d01aae..1d4e2e054647 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1184,10 +1184,11 @@ out: if (prot == MLX4_PROT_ETH) { /* manage the steering entry for promisc mode */ if (new_entry) - new_steering_entry(dev, port, steer, index, qp->qpn); + err = new_steering_entry(dev, port, steer, + index, qp->qpn); else - existing_steering_entry(dev, port, steer, - index, qp->qpn); + err = existing_steering_entry(dev, port, steer, + index, qp->qpn); } if (err && link && index != -1) { if (index < dev->caps.num_mgms) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c index e71563ce05d1..22d603f78273 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c @@ -598,6 +598,8 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) return; priv->vlan.filter_disabled = false; + if (priv->netdev->flags & IFF_PROMISC) + return; mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); } @@ -607,6 +609,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) return; priv->vlan.filter_disabled = true; + if (priv->netdev->flags & IFF_PROMISC) + return; mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); } @@ -717,8 +721,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled; bool disable_broadcast 
= ea->broadcast_enabled && !broadcast_enabled; - if (enable_promisc) + if (enable_promisc) { mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); + if (!priv->vlan.filter_disabled) + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); + } if (enable_allmulti) mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); if (enable_broadcast) @@ -730,8 +738,12 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); if (disable_allmulti) mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); - if (disable_promisc) + if (disable_promisc) { + if (!priv->vlan.filter_disabled) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, + 0); mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); + } ea->promisc_enabled = promisc_enabled; ea->allmulti_enabled = allmulti_enabled; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index aa0d5ffe92d8..9335e5ae18cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -200,25 +200,3 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev) return err; } - -int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey) -{ - struct mlx5_cmd_query_special_contexts_mbox_in in; - struct mlx5_cmd_query_special_contexts_mbox_out out; - int err; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); - err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); - if (err) - return err; - - if (out.hdr.status) - err = mlx5_cmd_status_to_err(&out.hdr); - - *rsvd_lkey = be32_to_cpu(out.resd_lkey); - - return err; -} -EXPORT_SYMBOL(mlx5_core_query_special_context); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 821caaab9bfb..3b9480fa3403 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -311,7 +311,7 @@ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, int err; memset(in, 0, sizeof(in)); - MLX5_SET(ptys_reg, in, local_port, local_port); + MLX5_SET(pvlc_reg, in, local_port, local_port); err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc, pvlc_size, MLX5_REG_PVLC, 0, 0); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index dbcaf5df8967..28c19cc1a17c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -374,26 +374,31 @@ static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, int err; int ret; + mlxsw_core->emad.trans_active = true; + err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info); if (err) { dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n", mlxsw_core->emad.tid); dev_kfree_skb(skb); - return err; + goto trans_inactive_out; } - mlxsw_core->emad.trans_active = true; ret = wait_event_timeout(mlxsw_core->emad.wait, !(mlxsw_core->emad.trans_active), msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS)); if (!ret) { dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n", mlxsw_core->emad.tid); - mlxsw_core->emad.trans_active = false; - return -EIO; + err = -EIO; + goto trans_inactive_out; } return 0; + +trans_inactive_out: + mlxsw_core->emad.trans_active = false; + return err; } static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core, diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index ffd55d030ce2..36fb1cec53c9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -187,6 +187,7 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift) { u16 max_index, be_index; u16 offset; /* byte offset inside the array */ + u8 in_byte_index; BUG_ON(index && !item->element_size); if (item->offset % sizeof(u32) != 0 || @@ -199,7 +200,8 @@ __mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift) max_index = (item->size.bytes << 3) / item->element_size - 1; be_index = max_index - index; offset = be_index * item->element_size >> 3; - *shift = index % (BITS_PER_BYTE / item->element_size) << 1; + in_byte_index = index % (BITS_PER_BYTE / item->element_size); + *shift = in_byte_index * item->element_size; return item->offset + offset; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 462cea31ecbb..cef866c37648 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1582,11 +1582,11 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, if (in_mbox) memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size); - mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32); - mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr); + mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr)); + mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr)); - mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32); - mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr); + mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr)); + mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr)); mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod); mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0); diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 3e52ee93438c..62cbbd1ada8d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1069,9 +1069,9 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) return 0; err_register_netdev: -err_port_admin_status_set: err_port_mac_learning_mode_set: err_port_stp_state_set: +err_port_admin_status_set: err_port_mtu_set: err_port_speed_set: err_port_swid_set: diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 66fd868152e5..b159ef8303cc 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -476,13 +476,12 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac) mac[5] = tmp >> 8; } -static void __lpc_eth_clock_enable(struct netdata_local *pldat, - bool enable) +static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable) { if (enable) - clk_enable(pldat->clk); + clk_prepare_enable(pldat->clk); else - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); } static void __lpc_params_setup(struct netdata_local *pldat) @@ -1494,7 +1493,7 @@ err_out_free_irq: err_out_iounmap: iounmap(pldat->net_base); err_out_disable_clocks: - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); clk_put(pldat->clk); err_out_free_dev: free_netdev(ndev); @@ -1519,7 +1518,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev) 
iounmap(pldat->net_base); mdiobus_unregister(pldat->mii_bus); mdiobus_free(pldat->mii_bus); - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); clk_put(pldat->clk); free_netdev(ndev); @@ -1540,7 +1539,7 @@ static int lpc_eth_drv_suspend(struct platform_device *pdev, if (netif_running(ndev)) { netif_device_detach(ndev); __lpc_eth_shutdown(pldat); - clk_disable(pldat->clk); + clk_disable_unprepare(pldat->clk); /* * Reset again now clock is disable to be sure diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 2b32e0c5a0b4..b4f21232019a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -6081,7 +6081,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) { void __iomem *ioaddr = tp->mmio_addr; struct pci_dev *pdev = tp->pci_dev; - u16 rg_saw_cnt; + int rg_saw_cnt; u32 data; static const struct ephy_info e_info_8168h_1[] = { { 0x1e, 0x0800, 0x0001 }, diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index a83263743665..2b7550c43f78 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -2134,10 +2134,11 @@ static int rhine_rx(struct net_device *dev, int limit) } skb_put(skb, pkt_len); - skb->protocol = eth_type_trans(skb, dev); rhine_rx_vlan_tag(skb, desc, data_size); + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); u64_stats_update_begin(&rp->rx_stats.syncp); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 8f5c02eed47d..cde29f8a37bf 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -870,14 +870,14 @@ static int geneve_newlink(struct net *net, struct net_device *dev, __be16 dst_port = htons(GENEVE_UDP_PORT); __u8 ttl = 0, tos = 0; bool metadata = false; - __be32 rem_addr; - __u32 vni; + __be32 rem_addr = 0; + __u32 vni = 0; - if (!data[IFLA_GENEVE_ID] || !data[IFLA_GENEVE_REMOTE]) - return -EINVAL; + if (data[IFLA_GENEVE_ID]) + vni = nla_get_u32(data[IFLA_GENEVE_ID]); - vni = nla_get_u32(data[IFLA_GENEVE_ID]); - rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); + if (data[IFLA_GENEVE_REMOTE]) + rem_addr = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); if (data[IFLA_GENEVE_TTL]) ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index c5ad98ace5d0..11e3975485c1 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -168,8 +168,6 @@ config MDIO_OCTEON busses. It is required by the Octeon and ThunderX ethernet device drivers. - If in doubt, say Y. 
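A note on the mlxsw/pci.c hunk a little further up: splitting a 64-bit DMA address with an open-coded `>> 32` is not well defined when the operand type is only 32 bits wide (as dma_addr_t can be on some configurations), which is presumably why the patch moves to upper_32_bits()/lower_32_bits(). The fragment below is a minimal userspace restatement of those helpers, not the driver code; the double 16-bit shift is how the split stays defined for any operand width:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Split a value into 32-bit halves without ever shifting by 32 in a
 * single step, so the expression stays defined even if the argument
 * is itself a 32-bit type.
 */
#define upper_32_bits(n)	((uint32_t)(((n) >> 16) >> 16))
#define lower_32_bits(n)	((uint32_t)((n) & 0xffffffffULL))

int main(void)
{
	uint64_t mapaddr = 0x0000000123456789ULL;

	/* The driver writes these halves into CIR_*_PARAM_HI and _LO. */
	printf("hi = 0x%08" PRIx32 ", lo = 0x%08" PRIx32 "\n",
	       upper_32_bits(mapaddr), lower_32_bits(mapaddr));
	return 0;
}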
- config MDIO_SUN4I tristate "Allwinner sun4i MDIO interface support" depends on ARCH_SUNXI diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 3837ae344f63..2ed75060da50 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev) if (po->pppoe_dev == dev && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { pppox_unbind_sock(sk); - sk->sk_state = PPPOX_ZOMBIE; sk->sk_state_change(sk); po->pppoe_dev = NULL; dev_put(dev); diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index fbb9325d1f6e..e66805eeffb4 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -164,6 +164,7 @@ config USB_NET_AX8817X * Aten UC210T * ASIX AX88172 * Billionton Systems, USB2AR + * Billionton Systems, GUSB2AM-1G-B * Buffalo LUA-U2-KTX * Corega FEther USB2-TX * D-Link DUB-E100 diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c index 75d6f26729a3..079069a060a6 100644 --- a/drivers/net/usb/asix_common.c +++ b/drivers/net/usb/asix_common.c @@ -91,8 +91,10 @@ int asix_rx_fixup_internal(struct usbnet *dev, struct sk_buff *skb, } rx->ax_skb = netdev_alloc_skb_ip_align(dev->net, rx->size); - if (!rx->ax_skb) + if (!rx->ax_skb) { + rx->size = 0; return 0; + } } if (rx->size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) { diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 1173a24feda3..5cabefc23494 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -958,6 +958,10 @@ static const struct usb_device_id products [] = { // Billionton Systems, USB2AR USB_DEVICE (0x08dd, 0x90ff), .driver_info = (unsigned long) &ax8817x_info, +}, { + // Billionton Systems, GUSB2AM-1G-B + USB_DEVICE(0x08dd, 0x0114), + .driver_info = (unsigned long) &ax88178_info, }, { // ATEN UC210T USB_DEVICE (0x0557, 0x2009), diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index bbac1d35ed4e..afdc65fd5bc5 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2745,11 +2745,10 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, struct vxlan_config conf; int err; - if (!data[IFLA_VXLAN_ID]) - return -EINVAL; - memset(&conf, 0, sizeof(conf)); - conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]); + + if (data[IFLA_VXLAN_ID]) + conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]); if (data[IFLA_VXLAN_GROUP]) { conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 23afcda2de96..678d72af4a9d 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -337,7 +337,7 @@ enum ath10k_hw_rate_cck { #define TARGET_10X_MAX_FRAG_ENTRIES 0 /* 10.2 parameters */ -#define TARGET_10_2_DMA_BURST_SIZE 1 +#define TARGET_10_2_DMA_BURST_SIZE 0 /* Target specific defines for WMI-TLV firmware */ #define TARGET_TLV_NUM_VDEVS 4 @@ -391,7 +391,7 @@ enum ath10k_hw_rate_cck { #define TARGET_10_4_TX_DBG_LOG_SIZE 1024 #define TARGET_10_4_NUM_WDS_ENTRIES 32 -#define TARGET_10_4_DMA_BURST_SIZE 1 +#define TARGET_10_4_DMA_BURST_SIZE 0 #define TARGET_10_4_MAC_AGGR_DELIM 0 #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 #define TARGET_10_4_VOW_CONFIG 0 diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 57f95f2dca5b..90eb75012e4f 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -880,6 +880,7 @@ static void 
ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->max_rate_tries = 10; hw->sta_data_size = sizeof(struct ath_node); hw->vif_data_size = sizeof(struct ath_vif); + hw->extra_tx_headroom = 4; hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1; hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1; diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 28490702124a..71d3e9adbf3c 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -120,6 +120,7 @@ MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even it if over #ifdef CONFIG_B43_BCMA static const struct bcma_device_id b43_bcma_tbl[] = { BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS), + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x15, BCMA_ANY_CLASS), BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS), BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS), BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1C, BCMA_ANY_CLASS), diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index ab45819c1fbb..e18629a16fb0 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -1020,7 +1020,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, u8 *pn = seq.ccmp.pn; ieee80211_get_key_rx_seq(key, i, &seq); - aes_sc->pn = cpu_to_le64( + aes_sc[i].pn = cpu_to_le64( (u64)pn[5] | ((u64)pn[4] << 8) | ((u64)pn[3] << 16) | diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 6951aba620eb..3fb327d5a911 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -348,6 +348,6 @@ const struct iwl_cfg iwl7265d_n_cfg = { }; MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); -MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); +MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 04264e417c1c..576187611e61 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c @@ -274,18 +274,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, break; case WLAN_CIPHER_SUITE_CCMP: if (sta) { - u8 *pn = seq.ccmp.pn; + u64 pn64; aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; - ieee80211_get_key_tx_seq(key, &seq); - aes_tx_sc->pn = cpu_to_le64((u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); + pn64 = atomic64_read(&key->tx_pn); + aes_tx_sc->pn = cpu_to_le64(pn64); } else { aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; } @@ -298,12 +293,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, u8 *pn = seq.ccmp.pn; ieee80211_get_key_rx_seq(key, i, &seq); - aes_sc->pn = cpu_to_le64((u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); + aes_sc[i].pn = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); } data->use_rsc_tsc = true; break; @@ -1453,15 +1448,15 @@ static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw, 
switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: - iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq); iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key); + atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn)); break; case WLAN_CIPHER_SUITE_TKIP: iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); + ieee80211_set_key_tx_seq(key, &seq); break; } - ieee80211_set_key_tx_seq(key, &seq); /* that's it for this key */ return; diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c index 4a0ce83315bd..5c7f7cc9ffcc 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c @@ -703,7 +703,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) * abort after reading the nvm in case RF Kill is on, we will complete * the init seq later when RF kill will switch to off */ - if (iwl_mvm_is_radio_killed(mvm)) { + if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "jump over all phy activities due to RF kill\n"); iwl_remove_notification(&mvm->notif_wait, &calib_wait); @@ -736,7 +736,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, MVM_UCODE_CALIB_TIMEOUT); - if (ret && iwl_mvm_is_radio_killed(mvm)) { + if (ret && iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); ret = 1; } diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index aa8c2b7f23c7..7c2944a72470 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -2388,6 +2388,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); RCU_INIT_POINTER(mvm->csa_vif, NULL); + mvmvif->csa_countdown = false; } if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index b95a07ec9e36..c754051a4cea 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -860,6 +860,11 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm) test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status); } +static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm) +{ + return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); +} + /* Must be called with rcu_read_lock() held and it can only be * released when mvmsta is not needed anymore. 
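Both iwlwifi hunks above pack a 48-bit CCMP packet number, stored most significant byte first in seq.ccmp.pn[0..5], into a 64-bit value; the bug actually being fixed is the aes_sc[i] indexing, since the old code overwrote element 0 on every loop iteration. A standalone restatement of just the packing arithmetic, for reference:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* pn[0] is the most significant byte of the 48-bit packet number and
 * pn[5] the least significant, matching the shifts in the rx-sequence
 * hunks above.
 */
static uint64_t ccmp_pn48(const uint8_t pn[6])
{
	return (uint64_t)pn[5] |
	       ((uint64_t)pn[4] << 8) |
	       ((uint64_t)pn[3] << 16) |
	       ((uint64_t)pn[2] << 24) |
	       ((uint64_t)pn[1] << 32) |
	       ((uint64_t)pn[0] << 40);
}

int main(void)
{
	const uint8_t pn[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };

	/* Expected output: pn = 0x010203040506 */
	printf("pn = 0x%012" PRIx64 "\n", ccmp_pn48(pn));
	return 0;
}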
*/ diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c index a37de3f410a0..f0cb092f980e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -590,6 +590,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, ieee80211_unregister_hw(mvm->hw); iwl_mvm_leds_exit(mvm); out_free: + flush_delayed_work(&mvm->fw_dump_wk); iwl_phy_db_free(mvm->phy_db); kfree(mvm->scan_cmd); if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name) diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index b0825c402c73..644b58bc5226 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -414,6 +414,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, /* 8000 Series */ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 5932306084fd..bf9afbf46c1b 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -1114,6 +1114,7 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0db0, 0x871c) }, { USB_DEVICE(0x0db0, 0x899a) }, /* Ovislink */ + { USB_DEVICE(0x1b75, 0x3070) }, { USB_DEVICE(0x1b75, 0x3071) }, { USB_DEVICE(0x1b75, 0x3072) }, { USB_DEVICE(0x1b75, 0xa200) }, diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h index d4567d12e07e..5da6703942d9 100644 --- a/drivers/net/wireless/rtlwifi/pci.h +++ b/drivers/net/wireless/rtlwifi/pci.h @@ -247,6 +247,8 @@ struct rtl_pci { /* MSI support */ bool msi_support; bool using_msi; + /* interrupt clear before set */ + bool int_clear; }; struct mp_adapter { diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c index b7f18e2155eb..6e9418ed90c2 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c @@ -2253,11 +2253,28 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci) } } +static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 tmp = rtl_read_dword(rtlpriv, REG_HISR); + + rtl_write_dword(rtlpriv, REG_HISR, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HISRE); + rtl_write_dword(rtlpriv, REG_HISRE, tmp); + + tmp = rtl_read_dword(rtlpriv, REG_HSISR); + rtl_write_dword(rtlpriv, REG_HSISR, tmp); +} + void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + if (!rtlpci->int_clear) + rtl8821ae_clear_interrupt(hw);/*clear it here first*/ + rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF); rtlpci->irq_enabled = true; diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c index a4988121e1ab..8ee141a55bc5 100644 --- a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c +++ 
b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c @@ -96,6 +96,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtl8821ae_bt_reg_init(hw); rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear; rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer(); rtlpriv->dm.dm_initialgain_enable = 1; @@ -167,6 +168,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support; + rtlpci->msi_support = rtlpriv->cfg->mod_params->int_clear; if (rtlpriv->cfg->mod_params->disable_watchdog) pr_info("watchdog disabled\n"); rtlpriv->psc.reg_fwctrl_lps = 3; @@ -308,6 +310,7 @@ static struct rtl_mod_params rtl8821ae_mod_params = { .swctrl_lps = false, .fwctrl_lps = true, .msi_support = true, + .int_clear = true, .debug = DBG_EMERG, .disable_watchdog = 0, }; @@ -437,6 +440,7 @@ module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444); module_param_named(msi, rtl8821ae_mod_params.msi_support, bool, 0444); module_param_named(disable_watchdog, rtl8821ae_mod_params.disable_watchdog, bool, 0444); +module_param_named(int_clear, rtl8821ae_mod_params.int_clear, bool, 0444); MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n"); MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); @@ -444,6 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); +MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 0)\n"); static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h index b90ca618b123..4544752a2ba8 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -2249,6 +2249,9 @@ struct rtl_mod_params { /* default 0: 1 means disable */ bool disable_watchdog; + + /* default 0: 1 means do not disable interrupts */ + bool int_clear; }; struct rtl_hal_usbint_cfg { diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 929a6e7e5ecf..56ebd8267386 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -788,6 +788,12 @@ static void connect(struct backend_info *be) /* Use the number of queues requested by the frontend */ be->vif->queues = vzalloc(requested_num_queues * sizeof(struct xenvif_queue)); + if (!be->vif->queues) { + xenbus_dev_fatal(dev, -ENOMEM, + "allocating queues"); + return; + } + be->vif->num_queues = requested_num_queues; be->vif->stalled_queues = requested_num_queues; diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index d3c6676b3c0c..6fd4e5a5ef4a 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -67,7 +67,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, int rc; /* Stop the user from reading */ - if (pos > nvmem->size) + if (pos >= nvmem->size) return 0; if (pos + count > nvmem->size) @@ -92,7 +92,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, int rc; /* Stop the user 
from writing */ - if (pos > nvmem->size) + if (pos >= nvmem->size) return 0; if (pos + count > nvmem->size) @@ -825,7 +825,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem, return rc; /* shift bits in-place */ - if (cell->bit_offset || cell->bit_offset) + if (cell->bit_offset || cell->nbits) nvmem_shift_read_buffer_in_place(cell, buf); *len = cell->bytes; @@ -938,7 +938,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len) rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes); /* free the tmp buffer */ - if (cell->bit_offset) + if (cell->bit_offset || cell->nbits) kfree(buf); if (IS_ERR_VALUE(rc)) diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c index 14777dd5212d..cfa3b85064dd 100644 --- a/drivers/nvmem/sunxi_sid.c +++ b/drivers/nvmem/sunxi_sid.c @@ -103,7 +103,7 @@ static int sunxi_sid_probe(struct platform_device *pdev) struct nvmem_device *nvmem; struct regmap *regmap; struct sunxi_sid *sid; - int i, size; + int ret, i, size; char *randomness; sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL); @@ -131,6 +131,11 @@ static int sunxi_sid_probe(struct platform_device *pdev) return PTR_ERR(nvmem); randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL); + if (!randomness) { + ret = -EINVAL; + goto err_unreg_nvmem; + } + for (i = 0; i < size; i++) randomness[i] = sunxi_sid_read_byte(sid, i); @@ -140,6 +145,10 @@ static int sunxi_sid_probe(struct platform_device *pdev) platform_set_drvdata(pdev, nvmem); return 0; + +err_unreg_nvmem: + nvmem_unregister(nvmem); + return ret; } static int sunxi_sid_remove(struct platform_device *pdev) diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index d4497141d083..4a7da3c3e035 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1243,6 +1243,10 @@ static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info) BUG_ON(!chip); if (!chip->irq_write_msi_msg) chip->irq_write_msi_msg = pci_msi_domain_write_msg; + if (!chip->irq_mask) + chip->irq_mask = pci_msi_mask_irq; + if (!chip->irq_unmask) + chip->irq_unmask = pci_msi_unmask_irq; } /** diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index dd652f2ae03d..108a3118ace7 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -299,9 +299,10 @@ static long local_pci_probe(void *_ddi) * Unbound PCI devices are always put in D0, regardless of * runtime PM status. During probe, the device is set to * active and the usage count is incremented. If the driver - * supports runtime PM, it should call pm_runtime_put_noidle() - * in its probe routine and pm_runtime_get_noresume() in its - * remove routine. + * supports runtime PM, it should call pm_runtime_put_noidle(), + * or any other runtime PM helper function decrementing the usage + * count, in its probe routine and pm_runtime_get_noresume() in + * its remove routine. 
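The nvmem/core.c hunks above tighten the read/write guards from `pos > nvmem->size` to `pos >= nvmem->size`, so an access starting exactly at the end of the device returns 0 up front instead of falling through and issuing a zero-length access. A tiny standalone model of that window check, with illustrative names rather than the driver's:

#include <stddef.h>
#include <stdio.h>

/* Clamp a (pos, count) window against a device of 'size' bytes:
 * at or past the end nothing is transferred, otherwise count is
 * shrunk so pos + count never exceeds size.
 */
static size_t clamp_window(size_t size, size_t pos, size_t count)
{
	if (pos >= size)	/* the patch changes '>' to '>=' here */
		return 0;
	if (pos + count > size)
		count = size - pos;
	return count;
}

int main(void)
{
	printf("%zu\n", clamp_window(256, 256, 16));	/* 0: '>=' bails out early */
	printf("%zu\n", clamp_window(256, 250, 16));	/* 6 */
	printf("%zu\n", clamp_window(256, 0, 16));	/* 16 */
	return 0;
}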
*/ pm_runtime_get_sync(dev); pci_dev->driver = pci_drv; diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 2365a32a595e..be3755c973e9 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -823,9 +823,15 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) } /* Now look up the logical CPU number */ - for_each_possible_cpu(cpu) - if (dn == of_cpu_device_node_get(cpu)) + for_each_possible_cpu(cpu) { + struct device_node *cpu_dn; + + cpu_dn = of_cpu_device_node_get(cpu); + of_node_put(cpu_dn); + + if (dn == cpu_dn) break; + } if (cpu >= nr_cpu_ids) { pr_warn("Failed to find logical CPU for %s\n", diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c index 0062027afb1e..77a2e054fdea 100644 --- a/drivers/phy/phy-berlin-sata.c +++ b/drivers/phy/phy-berlin-sata.c @@ -276,6 +276,7 @@ static const struct of_device_id phy_berlin_sata_of_match[] = { { .compatible = "marvell,berlin2q-sata-phy" }, { }, }; +MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match); static struct platform_driver phy_berlin_sata_driver = { .probe = phy_berlin_sata_probe, diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c index 49a1ed0cef56..107cb57c3513 100644 --- a/drivers/phy/phy-qcom-ufs.c +++ b/drivers/phy/phy-qcom-ufs.c @@ -432,6 +432,7 @@ out_disable_src: out: return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk); static int ufs_qcom_phy_disable_vreg(struct phy *phy, @@ -474,6 +475,7 @@ void ufs_qcom_phy_disable_ref_clk(struct phy *generic_phy) phy->is_ref_clk_enabled = false; } } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk); #define UFS_REF_CLK_EN (1 << 5) @@ -517,11 +519,13 @@ void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy) { ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true); } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk); void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy) { ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false); } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk); /* Turn ON M-PHY RMMI interface clocks */ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) @@ -550,6 +554,7 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) out: return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk); /* Turn OFF M-PHY RMMI interface clocks */ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) @@ -562,6 +567,7 @@ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) phy->is_iface_clk_enabled = false; } } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk); int ufs_qcom_phy_start_serdes(struct phy *generic_phy) { @@ -578,6 +584,7 @@ int ufs_qcom_phy_start_serdes(struct phy *generic_phy) return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes); int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) { @@ -595,6 +602,7 @@ int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable); void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, u8 major, u16 minor, u16 step) @@ -605,6 +613,7 @@ void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, ufs_qcom_phy->host_ctrl_rev_minor = minor; ufs_qcom_phy->host_ctrl_rev_step = step; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version); int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) { @@ -625,6 +634,7 @@ int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) return ret; } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy); int ufs_qcom_phy_remove(struct phy *generic_phy, 
struct ufs_qcom_phy *ufs_qcom_phy) @@ -662,6 +672,7 @@ int ufs_qcom_phy_is_pcs_ready(struct phy *generic_phy) return ufs_qcom_phy->phy_spec_ops-> is_physical_coding_sublayer_ready(ufs_qcom_phy); } +EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready); int ufs_qcom_phy_power_on(struct phy *generic_phy) { diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c index 5a5c073e72fe..91d6f342c565 100644 --- a/drivers/phy/phy-rockchip-usb.c +++ b/drivers/phy/phy-rockchip-usb.c @@ -98,6 +98,7 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) struct device_node *child; struct regmap *grf; unsigned int reg_offset; + int err; grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); if (IS_ERR(grf)) { @@ -129,6 +130,11 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) return PTR_ERR(rk_phy->phy); } phy_set_drvdata(rk_phy->phy, rk_phy); + + /* only power up usb phy when it use, so disable it when init*/ + err = rockchip_usb_phy_power(rk_phy, 1); + if (err) + return err; } phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c index faf635654312..293ed4381cc0 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx25.c +++ b/drivers/pinctrl/freescale/pinctrl-imx25.c @@ -26,7 +26,8 @@ #include "pinctrl-imx.h" enum imx25_pads { - MX25_PAD_RESERVE0 = 1, + MX25_PAD_RESERVE0 = 0, + MX25_PAD_RESERVE1 = 1, MX25_PAD_A10 = 2, MX25_PAD_A13 = 3, MX25_PAD_A14 = 4, @@ -169,6 +170,7 @@ enum imx25_pads { /* Pad names for the pinmux subsystem */ static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = { IMX_PINCTRL_PIN(MX25_PAD_RESERVE0), + IMX_PINCTRL_PIN(MX25_PAD_RESERVE1), IMX_PINCTRL_PIN(MX25_PAD_A10), IMX_PINCTRL_PIN(MX25_PAD_A13), IMX_PINCTRL_PIN(MX25_PAD_A14), diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c index 63676617bc59..f9a3f8f446f7 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c @@ -653,7 +653,7 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = { SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */ - SUNXI_FUNCTION(0x3, "uart3"), /* PWM1 */ + SUNXI_FUNCTION(0x3, "pwm"), /* PWM1 */ SUNXI_FUNCTION(0x5, "uart2"), /* CTS */ SUNXI_FUNCTION_IRQ(0x6, 13)), /* EINT13 */ }; diff --git a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c index 7e9dae54fcb2..2df8bbecebfc 100644 --- a/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c +++ b/drivers/pinctrl/uniphier/pinctrl-ph1-sld8.c @@ -22,49 +22,49 @@ #define DRIVER_NAME "ph1-sld8-pinctrl" static const struct pinctrl_pin_desc ph1_sld8_pins[] = { - UNIPHIER_PINCTRL_PIN(0, "PCA00", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(0, "PCA00", 0, 15, UNIPHIER_PIN_DRV_4_8, 15, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(1, "PCA01", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(1, "PCA01", 0, 16, UNIPHIER_PIN_DRV_4_8, 16, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(2, "PCA02", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(2, "PCA02", 0, 17, UNIPHIER_PIN_DRV_4_8, 17, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(3, "PCA03", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(3, "PCA03", 0, 18, UNIPHIER_PIN_DRV_4_8, 18, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(4, "PCA04", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(4, "PCA04", 0, 19, UNIPHIER_PIN_DRV_4_8, 19, 
UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(5, "PCA05", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(5, "PCA05", 0, 20, UNIPHIER_PIN_DRV_4_8, 20, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(6, "PCA06", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(6, "PCA06", 0, 21, UNIPHIER_PIN_DRV_4_8, 21, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(7, "PCA07", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(7, "PCA07", 0, 22, UNIPHIER_PIN_DRV_4_8, 22, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(8, "PCA08", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(8, "PCA08", 0, 23, UNIPHIER_PIN_DRV_4_8, 23, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(9, "PCA09", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(9, "PCA09", 0, 24, UNIPHIER_PIN_DRV_4_8, 24, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(10, "PCA10", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(10, "PCA10", 0, 25, UNIPHIER_PIN_DRV_4_8, 25, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(11, "PCA11", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(11, "PCA11", 0, 26, UNIPHIER_PIN_DRV_4_8, 26, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(12, "PCA12", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(12, "PCA12", 0, 27, UNIPHIER_PIN_DRV_4_8, 27, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(13, "PCA13", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(13, "PCA13", 0, 28, UNIPHIER_PIN_DRV_4_8, 28, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(14, "PCA14", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(14, "PCA14", 0, 29, UNIPHIER_PIN_DRV_4_8, 29, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(15, "XNFRE_GB", UNIPHIER_PIN_IECTRL_NONE, @@ -118,199 +118,199 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = { UNIPHIER_PINCTRL_PIN(31, "NFD7_GB", UNIPHIER_PIN_IECTRL_NONE, 36, UNIPHIER_PIN_DRV_8_12_16_20, 128, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(32, "SDCLK", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(32, "SDCLK", 8, 40, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(33, "SDCMD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(33, "SDCMD", 8, 44, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(34, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(34, "SDDAT0", 8, 48, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(35, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(35, "SDDAT1", 8, 52, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(36, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(36, "SDDAT2", 8, 56, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(37, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(37, "SDDAT3", 8, 60, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(38, "SDCD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(38, "SDCD", 8, -1, UNIPHIER_PIN_DRV_FIXED_4, 129, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(39, "SDWP", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(39, "SDWP", 8, -1, UNIPHIER_PIN_DRV_FIXED_4, 130, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(40, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(40, "SDVOLC", 9, -1, UNIPHIER_PIN_DRV_FIXED_4, 131, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(41, "USB0VBUS", 0, 37, UNIPHIER_PIN_DRV_4_8, 37, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(42, "USB0OD", UNIPHIER_PIN_IECTRL_NONE, + 
UNIPHIER_PINCTRL_PIN(42, "USB0OD", 0, 38, UNIPHIER_PIN_DRV_4_8, 38, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(43, "USB1VBUS", 0, 39, UNIPHIER_PIN_DRV_4_8, 39, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(44, "USB1OD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(44, "USB1OD", 0, 40, UNIPHIER_PIN_DRV_4_8, 40, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(45, "PCRESET", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(45, "PCRESET", 0, 41, UNIPHIER_PIN_DRV_4_8, 41, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(46, "PCREG", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(46, "PCREG", 0, 42, UNIPHIER_PIN_DRV_4_8, 42, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(47, "PCCE2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(47, "PCCE2", 0, 43, UNIPHIER_PIN_DRV_4_8, 43, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(48, "PCVS1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(48, "PCVS1", 0, 44, UNIPHIER_PIN_DRV_4_8, 44, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(49, "PCCD2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(49, "PCCD2", 0, 45, UNIPHIER_PIN_DRV_4_8, 45, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(50, "PCCD1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(50, "PCCD1", 0, 46, UNIPHIER_PIN_DRV_4_8, 46, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(51, "PCREADY", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(51, "PCREADY", 0, 47, UNIPHIER_PIN_DRV_4_8, 47, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(52, "PCDOE", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(52, "PCDOE", 0, 48, UNIPHIER_PIN_DRV_4_8, 48, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(53, "PCCE1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(53, "PCCE1", 0, 49, UNIPHIER_PIN_DRV_4_8, 49, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(54, "PCWE", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(54, "PCWE", 0, 50, UNIPHIER_PIN_DRV_4_8, 50, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(55, "PCOE", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(55, "PCOE", 0, 51, UNIPHIER_PIN_DRV_4_8, 51, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(56, "PCWAIT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(56, "PCWAIT", 0, 52, UNIPHIER_PIN_DRV_4_8, 52, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(57, "PCIOWR", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(57, "PCIOWR", 0, 53, UNIPHIER_PIN_DRV_4_8, 53, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(58, "PCIORD", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(58, "PCIORD", 0, 54, UNIPHIER_PIN_DRV_4_8, 54, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(59, "HS0DIN0", 0, 55, UNIPHIER_PIN_DRV_4_8, 55, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(60, "HS0DIN1", 0, 56, UNIPHIER_PIN_DRV_4_8, 56, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(61, "HS0DIN2", 0, 57, UNIPHIER_PIN_DRV_4_8, 57, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(62, "HS0DIN3", 0, 58, UNIPHIER_PIN_DRV_4_8, 58, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(63, "HS0DIN4", 0, 59, UNIPHIER_PIN_DRV_4_8, 59, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(64, "HS0DIN5", 0, 60, UNIPHIER_PIN_DRV_4_8, 60, 
UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(65, "HS0DIN6", 0, 61, UNIPHIER_PIN_DRV_4_8, 61, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(66, "HS0DIN7", 0, 62, UNIPHIER_PIN_DRV_4_8, 62, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(67, "HS0BCLKIN", 0, 63, UNIPHIER_PIN_DRV_4_8, 63, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(68, "HS0VALIN", 0, 64, UNIPHIER_PIN_DRV_4_8, 64, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(69, "HS0SYNCIN", 0, 65, UNIPHIER_PIN_DRV_4_8, 65, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(70, "HSDOUT0", 0, 66, UNIPHIER_PIN_DRV_4_8, 66, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(71, "HSDOUT1", 0, 67, UNIPHIER_PIN_DRV_4_8, 67, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(72, "HSDOUT2", 0, 68, UNIPHIER_PIN_DRV_4_8, 68, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(73, "HSDOUT3", 0, 69, UNIPHIER_PIN_DRV_4_8, 69, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(74, "HSDOUT4", 0, 70, UNIPHIER_PIN_DRV_4_8, 70, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(75, "HSDOUT5", 0, 71, UNIPHIER_PIN_DRV_4_8, 71, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(76, "HSDOUT6", 0, 72, UNIPHIER_PIN_DRV_4_8, 72, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(77, "HSDOUT7", 0, 73, UNIPHIER_PIN_DRV_4_8, 73, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(78, "HSBCLKOUT", 0, 74, UNIPHIER_PIN_DRV_4_8, 74, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(79, "HSVALOUT", 0, 75, UNIPHIER_PIN_DRV_4_8, 75, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(80, "HSSYNCOUT", 0, 76, UNIPHIER_PIN_DRV_4_8, 76, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(81, "HS1DIN0", 0, 77, UNIPHIER_PIN_DRV_4_8, 77, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(82, "HS1DIN1", 0, 78, UNIPHIER_PIN_DRV_4_8, 78, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(83, "HS1DIN2", 0, 79, UNIPHIER_PIN_DRV_4_8, 79, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(84, "HS1DIN3", 0, 80, UNIPHIER_PIN_DRV_4_8, 80, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(85, "HS1DIN4", 0, 81, UNIPHIER_PIN_DRV_4_8, 81, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(86, "HS1DIN5", 0, 82, UNIPHIER_PIN_DRV_4_8, 82, 
UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(87, "HS1DIN6", 0, 83, UNIPHIER_PIN_DRV_4_8, 83, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(88, "HS1DIN7", 0, 84, UNIPHIER_PIN_DRV_4_8, 84, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(89, "HS1BCLKIN", 0, 85, UNIPHIER_PIN_DRV_4_8, 85, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(90, "HS1VALIN", 0, 86, UNIPHIER_PIN_DRV_4_8, 86, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(91, "HS1SYNCIN", 0, 87, UNIPHIER_PIN_DRV_4_8, 87, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(92, "AGCI", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(92, "AGCI", 3, -1, UNIPHIER_PIN_DRV_FIXED_4, 132, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(93, "AGCR", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(93, "AGCR", 4, -1, UNIPHIER_PIN_DRV_FIXED_4, 133, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(94, "AGCBS", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(94, "AGCBS", 5, -1, UNIPHIER_PIN_DRV_FIXED_4, 134, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(95, "IECOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(95, "IECOUT", 0, 88, UNIPHIER_PIN_DRV_4_8, 88, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(96, "ASMCK", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(96, "ASMCK", 0, 89, UNIPHIER_PIN_DRV_4_8, 89, UNIPHIER_PIN_PULL_DOWN), UNIPHIER_PINCTRL_PIN(97, "ABCKO", UNIPHIER_PIN_IECTRL_NONE, @@ -325,31 +325,31 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = { UNIPHIER_PINCTRL_PIN(100, "ASDOUT1", UNIPHIER_PIN_IECTRL_NONE, 93, UNIPHIER_PIN_DRV_4_8, 93, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(101, "ARCOUT", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(101, "ARCOUT", 0, 94, UNIPHIER_PIN_DRV_4_8, 94, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(102, "SDA0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(102, "SDA0", 10, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(103, "SCL0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(103, "SCL0", 10, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(104, "SDA1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(104, "SDA1", 11, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(105, "SCL1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(105, "SCL1", 11, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(106, "DMDSDA0", 12, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(107, "DMDSCL0", 12, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(108, "DMDSDA1", 13, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(109, "DMDSCL1", 13, -1, UNIPHIER_PIN_DRV_FIXED_4, -1, UNIPHIER_PIN_PULL_NONE), UNIPHIER_PINCTRL_PIN(110, "SBO0", UNIPHIER_PIN_IECTRL_NONE, @@ -358,76 +358,76 @@ static const struct pinctrl_pin_desc ph1_sld8_pins[] = { UNIPHIER_PINCTRL_PIN(111, "SBI0", UNIPHIER_PIN_IECTRL_NONE, 
96, UNIPHIER_PIN_DRV_4_8, 96, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(112, "SBO1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(112, "SBO1", 0, 97, UNIPHIER_PIN_DRV_4_8, 97, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(113, "SBI1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(113, "SBI1", 0, 98, UNIPHIER_PIN_DRV_4_8, 98, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(114, "TXD1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(114, "TXD1", 0, 99, UNIPHIER_PIN_DRV_4_8, 99, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(115, "RXD1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(115, "RXD1", 0, 100, UNIPHIER_PIN_DRV_4_8, 100, UNIPHIER_PIN_PULL_UP), - UNIPHIER_PINCTRL_PIN(116, "HIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(116, "HIN", 1, -1, UNIPHIER_PIN_DRV_FIXED_5, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(117, "VIN", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(117, "VIN", 2, -1, UNIPHIER_PIN_DRV_FIXED_5, -1, UNIPHIER_PIN_PULL_NONE), - UNIPHIER_PINCTRL_PIN(118, "TCON0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(118, "TCON0", 0, 101, UNIPHIER_PIN_DRV_4_8, 101, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(119, "TCON1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(119, "TCON1", 0, 102, UNIPHIER_PIN_DRV_4_8, 102, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(120, "TCON2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(120, "TCON2", 0, 103, UNIPHIER_PIN_DRV_4_8, 103, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(121, "TCON3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(121, "TCON3", 0, 104, UNIPHIER_PIN_DRV_4_8, 104, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(122, "TCON4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(122, "TCON4", 0, 105, UNIPHIER_PIN_DRV_4_8, 105, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(123, "TCON5", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(123, "TCON5", 0, 106, UNIPHIER_PIN_DRV_4_8, 106, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(124, "TCON6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(124, "TCON6", 0, 107, UNIPHIER_PIN_DRV_4_8, 107, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(125, "TCON7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(125, "TCON7", 0, 108, UNIPHIER_PIN_DRV_4_8, 108, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(126, "TCON8", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(126, "TCON8", 0, 109, UNIPHIER_PIN_DRV_4_8, 109, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(127, "PWMA", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(127, "PWMA", 0, 110, UNIPHIER_PIN_DRV_4_8, 110, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(128, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(128, "XIRQ0", 0, 111, UNIPHIER_PIN_DRV_4_8, 111, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(129, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(129, "XIRQ1", 0, 112, UNIPHIER_PIN_DRV_4_8, 112, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(130, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(130, "XIRQ2", 0, 113, UNIPHIER_PIN_DRV_4_8, 113, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(131, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(131, "XIRQ3", 0, 114, UNIPHIER_PIN_DRV_4_8, 114, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(132, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(132, "XIRQ4", 0, 115, UNIPHIER_PIN_DRV_4_8, 115, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(133, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(133, "XIRQ5", 0, 116, UNIPHIER_PIN_DRV_4_8, 116, UNIPHIER_PIN_PULL_DOWN), - 
UNIPHIER_PINCTRL_PIN(134, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(134, "XIRQ6", 0, 117, UNIPHIER_PIN_DRV_4_8, 117, UNIPHIER_PIN_PULL_DOWN), - UNIPHIER_PINCTRL_PIN(135, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE, + UNIPHIER_PINCTRL_PIN(135, "XIRQ7", 0, 118, UNIPHIER_PIN_DRV_4_8, 118, UNIPHIER_PIN_PULL_DOWN), }; diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 01bf3476a791..a9567af7cec0 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -192,9 +192,9 @@ static const struct regulator_desc axp22x_regulators[] = { AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20, AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20, - AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), + AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)), AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50, - AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)), + AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)), /* secondary switchable output of DCDC1 */ AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100, AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)), diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 7849187d91ae..8a34f6acc801 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1403,6 +1403,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) return 0; } + /* Did the lookup explicitly defer for us? */ + if (ret == -EPROBE_DEFER) + return ret; + if (have_full_constraints()) { r = dummy_regulator_rdev; } else { diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index add419d6ff34..a56a7b243e91 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -212,6 +212,17 @@ static const struct file_operations twa_fops = { .llseek = noop_llseek, }; +/* + * The controllers use an inline buffer instead of a mapped SGL for small, + * single entry buffers. Note that we treat a zero-length transfer like + * a mapped SGL. 
+ */ +static bool twa_command_mapped(struct scsi_cmnd *cmd) +{ + return scsi_sg_count(cmd) != 1 || + scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH; +} + /* This function will complete an aen request from the isr */ static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id) { @@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance) } /* Now complete the io */ - scsi_dma_unmap(cmd); + if (twa_command_mapped(cmd)) + scsi_dma_unmap(cmd); cmd->scsi_done(cmd); tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); @@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev) struct scsi_cmnd *cmd = tw_dev->srb[i]; cmd->result = (DID_RESET << 16); - scsi_dma_unmap(cmd); + if (twa_command_mapped(cmd)) + scsi_dma_unmap(cmd); cmd->scsi_done(cmd); } } @@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); switch (retval) { case SCSI_MLQUEUE_HOST_BUSY: - scsi_dma_unmap(SCpnt); + if (twa_command_mapped(SCpnt)) + scsi_dma_unmap(SCpnt); twa_free_request_id(tw_dev, request_id); break; case 1: SCpnt->result = (DID_ERROR << 16); - scsi_dma_unmap(SCpnt); + if (twa_command_mapped(SCpnt)) + scsi_dma_unmap(SCpnt); done(SCpnt); tw_dev->state[request_id] = TW_S_COMPLETED; twa_free_request_id(tw_dev, request_id); @@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, /* Map sglist from scsi layer to cmd packet */ if (scsi_sg_count(srb)) { - if ((scsi_sg_count(srb) == 1) && - (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) { + if (!twa_command_mapped(srb)) { if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) scsi_sg_copy_to_buffer(srb, @@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re { struct scsi_cmnd *cmd = tw_dev->srb[request_id]; - if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH && + if (!twa_command_mapped(cmd) && (cmd->sc_data_direction == DMA_FROM_DEVICE || cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { if (scsi_sg_count(cmd) == 1) { diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 33c74d3436c9..6bffd91b973a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -976,13 +976,13 @@ static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) wake_up(&conn->ehwait); } -static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) +static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) { struct iscsi_nopout hdr; struct iscsi_task *task; if (!rhdr && conn->ping_task) - return; + return -EINVAL; memset(&hdr, 0, sizeof(struct iscsi_nopout)); hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; @@ -996,13 +996,16 @@ static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) hdr.ttt = RESERVED_ITT; task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); - if (!task) + if (!task) { iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); - else if (!rhdr) { + return -EIO; + } else if (!rhdr) { /* only track our nops */ conn->ping_task = task; conn->last_ping = jiffies; } + + return 0; } static int iscsi_nop_out_rsp(struct iscsi_task *task, @@ -2092,8 +2095,10 @@ static void iscsi_check_transport_timeouts(unsigned long data) if (time_before_eq(last_recv + recv_timeout, jiffies)) { /* send a ping to try to provoke some traffic */ ISCSI_DBG_CONN(conn, 
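
Aside on the 3w-9xxx change above: it centralizes the "is this command DMA-mapped?" decision in one predicate so the queueing, interrupt-completion and reset paths cannot disagree about whether to call scsi_dma_unmap(). A minimal standalone sketch of that rule, with MIN_SGL_LENGTH_SKETCH as a placeholder threshold rather than the driver's real TW_MIN_SGL_LENGTH value:

    #include <stdbool.h>
    #include <stddef.h>

    #define MIN_SGL_LENGTH_SKETCH 512   /* placeholder, not the driver's constant */

    /* Mirrors the predicate the patch introduces: a command uses a DMA-mapped
     * SG list unless it is a single, short SG entry; zero-length transfers
     * count as mapped. */
    static bool command_mapped(int sg_count, size_t bufflen)
    {
            return sg_count != 1 || bufflen >= MIN_SGL_LENGTH_SKETCH;
    }

    /* Completion-side rule enforced on every path: only unmap what was
     * actually mapped; inline-buffer commands have nothing to unmap. */
    static void complete_command(int sg_count, size_t bufflen)
    {
            if (command_mapped(sg_count, bufflen)) {
                    /* scsi_dma_unmap(cmd) would run here in the driver */
            }
            /* the command is handed back to the midlayer either way */
    }
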
"Sending nopout as ping\n"); - iscsi_send_nopout(conn, NULL); - next_timeout = conn->last_ping + (conn->ping_timeout * HZ); + if (iscsi_send_nopout(conn, NULL)) + next_timeout = jiffies + (1 * HZ); + else + next_timeout = conn->last_ping + (conn->ping_timeout * HZ); } else next_timeout = last_recv + recv_timeout; diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index edb044a7b56d..0a2168e69bbc 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -111,7 +111,7 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name) dh = __scsi_dh_lookup(name); if (!dh) { - request_module(name); + request_module("scsi_dh_%s", name); dh = __scsi_dh_lookup(name); } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index cbfc5990052b..126a48c6431e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req) static void scsi_mq_done(struct scsi_cmnd *cmd) { trace_scsi_dispatch_cmd_done(cmd); - blk_mq_complete_request(cmd->request); + blk_mq_complete_request(cmd->request, cmd->request->errors); } static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 3cf9faa6cc3f..a85d863d4a44 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -992,11 +992,12 @@ static int davinci_spi_probe(struct platform_device *pdev) goto free_master; } - dspi->irq = platform_get_irq(pdev, 0); - if (dspi->irq <= 0) { + ret = platform_get_irq(pdev, 0); + if (ret == 0) ret = -EINVAL; + if (ret < 0) goto free_master; - } + dspi->irq = ret; ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq, dummy_thread_fn, 0, dev_name(&pdev->dev), dspi); diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c index 23685e74917e..bd2c69f85949 100644 --- a/drivers/staging/iio/accel/sca3000_ring.c +++ b/drivers/staging/iio/accel/sca3000_ring.c @@ -116,7 +116,7 @@ static int sca3000_read_first_n_hw_rb(struct iio_buffer *r, if (ret) goto error_ret; - for (i = 0; i < num_read; i++) + for (i = 0; i < num_read / sizeof(u16); i++) *(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i); if (copy_to_user(buf, rx, num_read)) diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c index 3f7715c9968b..47fc00a3f63b 100644 --- a/drivers/staging/iio/adc/mxs-lradc.c +++ b/drivers/staging/iio/adc/mxs-lradc.c @@ -915,11 +915,12 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev, case IIO_CHAN_INFO_OFFSET: if (chan->type == IIO_TEMP) { /* The calculated value from the ADC is in Kelvin, we - * want Celsius for hwmon so the offset is - * -272.15 * scale + * want Celsius for hwmon so the offset is -273.15 + * The offset is applied before scaling so it is + * actually -213.15 * 4 / 1.012 = -1079.644268 */ - *val = -1075; - *val2 = 691699; + *val = -1079; + *val2 = 644268; return IIO_VAL_INT_PLUS_MICRO; } diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c index 769b61193d87..a9bc6e23fc25 100644 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ b/drivers/staging/lustre/lustre/llite/dir.c @@ -224,7 +224,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) prefetchw(&page->flags); ret = add_to_page_cache_lru(page, inode->i_mapping, offset, - GFP_KERNEL); + GFP_NOFS); if (ret == 0) { unlock_page(page); } else { diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c index 
4299cf45f947..5e1f16c36b49 100644 --- a/drivers/staging/speakup/fakekey.c +++ b/drivers/staging/speakup/fakekey.c @@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void) __this_cpu_write(reporting_keystroke, true); input_report_key(virt_keyboard, KEY_DOWN, PRESSED); input_report_key(virt_keyboard, KEY_DOWN, RELEASED); + input_sync(virt_keyboard); __this_cpu_write(reporting_keystroke, false); /* reenable preemption */ diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 7ff96270c933..e570ff084add 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -144,6 +144,16 @@ static void estimate_pid_constants(struct thermal_zone_device *tz, switch_on_temp = 0; temperature_threshold = control_temp - switch_on_temp; + /* + * estimate_pid_constants() tries to find appropriate default + * values for thermal zones that don't provide them. If a + * system integrator has configured a thermal zone with two + * passive trip points at the same temperature, that person + * hasn't put any effort to set up the thermal zone properly + * so just give up. + */ + if (!temperature_threshold) + return; if (!tz->tzp->k_po || force) tz->tzp->k_po = int_to_frac(sustainable_power) / diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 0bae8cc6c23a..ca920b0ecf8f 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -932,7 +932,7 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data, if (data->soc == SOC_ARCH_EXYNOS5260) emul_con = EXYNOS5260_EMUL_CON; - if (data->soc == SOC_ARCH_EXYNOS5433) + else if (data->soc == SOC_ARCH_EXYNOS5433) emul_con = EXYNOS5433_TMU_EMUL_CON; else if (data->soc == SOC_ARCH_EXYNOS7) emul_con = EXYNOS7_TMU_REG_EMUL_CON; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 20932cc9c8f7..b09023b07169 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -343,8 +343,7 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty) spin_lock_irqsave(&tty->ctrl_lock, flags); tty->ctrl_status |= TIOCPKT_FLUSHREAD; spin_unlock_irqrestore(&tty->ctrl_lock, flags); - if (waitqueue_active(&tty->link->read_wait)) - wake_up_interruptible(&tty->link->read_wait); + wake_up_interruptible(&tty->link->read_wait); } } @@ -1382,8 +1381,7 @@ handle_newline: put_tty_queue(c, ldata); smp_store_release(&ldata->canon_head, ldata->read_head); kill_fasync(&tty->fasync, SIGIO, POLL_IN); - if (waitqueue_active(&tty->read_wait)) - wake_up_interruptible_poll(&tty->read_wait, POLLIN); + wake_up_interruptible_poll(&tty->read_wait, POLLIN); return 0; } } @@ -1667,8 +1665,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); - if (waitqueue_active(&tty->read_wait)) - wake_up_interruptible_poll(&tty->read_wait, POLLIN); + wake_up_interruptible_poll(&tty->read_wait, POLLIN); } } @@ -1887,10 +1884,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) } /* The termios change make the tty ready for I/O */ - if (waitqueue_active(&tty->write_wait)) - wake_up_interruptible(&tty->write_wait); - if (waitqueue_active(&tty->read_wait)) - wake_up_interruptible(&tty->read_wait); + wake_up_interruptible(&tty->write_wait); + wake_up_interruptible(&tty->read_wait); } /** diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index 21d01a491405..e508939daea3 100644 --- 
a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -80,10 +80,6 @@ int serial8250_tx_dma(struct uart_8250_port *p) return 0; dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); - if (dma->tx_size < p->port.fifosize) { - ret = -EINVAL; - goto err; - } desc = dmaengine_prep_slave_single(dma->txchan, dma->tx_addr + xmit->tail, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index b1e0ba3e525b..0bbf34035d6a 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -261,6 +261,14 @@ configured less than Maximum supported fifo bytes */ UART_FCR7_64BYTE, .flags = UART_CAP_FIFO, }, + [PORT_RT2880] = { + .name = "Palmchip BK-3103", + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, + }, }; /* Uart divisor latch read */ diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 5ca5cf3e9359..538ea03bc101 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -2786,7 +2786,7 @@ static int atmel_serial_probe(struct platform_device *pdev) ret = atmel_init_gpios(port, &pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "Failed to initialize GPIOs."); - goto err; + goto err_clear_bit; } ret = atmel_init_port(port, pdev); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index fe3d41cc8416..d0388a071ba1 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1631,12 +1631,12 @@ imx_console_write(struct console *co, const char *s, unsigned int count) int locked = 1; int retval; - retval = clk_prepare_enable(sport->clk_per); + retval = clk_enable(sport->clk_per); if (retval) return; - retval = clk_prepare_enable(sport->clk_ipg); + retval = clk_enable(sport->clk_ipg); if (retval) { - clk_disable_unprepare(sport->clk_per); + clk_disable(sport->clk_per); return; } @@ -1675,8 +1675,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count) if (locked) spin_unlock_irqrestore(&sport->port.lock, flags); - clk_disable_unprepare(sport->clk_ipg); - clk_disable_unprepare(sport->clk_per); + clk_disable(sport->clk_ipg); + clk_disable(sport->clk_per); } /* @@ -1777,7 +1777,15 @@ imx_console_setup(struct console *co, char *options) retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); - clk_disable_unprepare(sport->clk_ipg); + clk_disable(sport->clk_ipg); + if (retval) { + clk_unprepare(sport->clk_ipg); + goto error_console; + } + + retval = clk_prepare(sport->clk_per); + if (retval) + clk_disable_unprepare(sport->clk_ipg); error_console: return retval; diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index 5a3fa8913880..a660ab181cca 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -242,7 +242,10 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld) atomic_inc(&buf->priority); mutex_lock(&buf->lock); - while ((next = buf->head->next) != NULL) { + /* paired w/ release in __tty_buffer_request_room; ensures there are + * no pending memory accesses to the freed buffer + */ + while ((next = smp_load_acquire(&buf->head->next)) != NULL) { tty_buffer_free(port, buf->head); buf->head = next; } @@ -290,7 +293,10 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size, if (n != NULL) { n->flags = flags; buf->tail = n; - b->commit = b->used; + /* paired w/ acquire in flush_to_ldisc(); ensures + * flush_to_ldisc() 
sees buffer data. + */ + smp_store_release(&b->commit, b->used); /* paired w/ acquire in flush_to_ldisc(); ensures the * latest commit value can be read before the head is * advanced to the next buffer @@ -393,7 +399,10 @@ void tty_schedule_flip(struct tty_port *port) { struct tty_bufhead *buf = &port->buf; - buf->tail->commit = buf->tail->used; + /* paired w/ acquire in flush_to_ldisc(); ensures + * flush_to_ldisc() sees buffer data. + */ + smp_store_release(&buf->tail->commit, buf->tail->used); schedule_work(&buf->work); } EXPORT_SYMBOL(tty_schedule_flip); @@ -467,7 +476,7 @@ static void flush_to_ldisc(struct work_struct *work) struct tty_struct *tty; struct tty_ldisc *disc; - tty = port->itty; + tty = READ_ONCE(port->itty); if (tty == NULL) return; @@ -491,7 +500,10 @@ static void flush_to_ldisc(struct work_struct *work) * is advancing to the next buffer */ next = smp_load_acquire(&head->next); - count = head->commit - head->read; + /* paired w/ release in __tty_buffer_request_room() or in + * tty_buffer_flush(); ensures we see the committed buffer data + */ + count = smp_load_acquire(&head->commit) - head->read; if (!count) { if (next == NULL) { check_other_closed(tty); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 02785d844354..2eefaa6e3e3a 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2128,8 +2128,24 @@ retry_open: if (!noctty && current->signal->leader && !current->signal->tty && - tty->session == NULL) - __proc_set_tty(tty); + tty->session == NULL) { + /* + * Don't let a process that only has write access to the tty + * obtain the privileges associated with having a tty as + * controlling terminal (being able to reopen it with full + * access through /dev/tty, being able to perform pushback). + * Many distributions set the group of all ttys to "tty" and + * grant write-only access to all terminals for setgid tty + * binaries, which should not imply full privileges on all ttys. + * + * This could theoretically break old code that performs open() + * on a write-only file descriptor. In that case, it might be + * necessary to also permit this if + * inode_permission(inode, MAY_READ) == 0. + */ + if (filp->f_mode & FMODE_READ) + __proc_set_tty(tty); + } spin_unlock_irq(¤t->sighand->siglock); read_unlock(&tasklist_lock); tty_unlock(tty); @@ -2418,7 +2434,7 @@ static int fionbio(struct file *file, int __user *p) * Takes ->siglock() when updating signal->tty */ -static int tiocsctty(struct tty_struct *tty, int arg) +static int tiocsctty(struct tty_struct *tty, struct file *file, int arg) { int ret = 0; @@ -2452,6 +2468,13 @@ static int tiocsctty(struct tty_struct *tty, int arg) goto unlock; } } + + /* See the comment in tty_open(). 
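
Aside on the tty_buffer.c hunks above: they pair smp_store_release() on the commit index with smp_load_acquire() in flush_to_ldisc(), so the consumer never reads buffer bytes published after the commit value it observed. A generic sketch of that release/acquire pairing using C11 atomics in place of the kernel primitives (structure and names here are illustrative only):

    #include <stdatomic.h>
    #include <stddef.h>

    struct buf {
            char data[256];
            atomic_size_t commit;   /* number of bytes valid in data[] */
    };

    static void producer(struct buf *b, size_t used)
    {
            /* ... fill b->data[0..used) ... */
            atomic_store_explicit(&b->commit, used, memory_order_release);
    }

    static size_t consumer(struct buf *b, size_t read)
    {
            size_t commit = atomic_load_explicit(&b->commit, memory_order_acquire);
            return commit - read;   /* data[read..commit) is now safe to read */
    }
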
*/ + if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) { + ret = -EPERM; + goto unlock; + } + proc_set_tty(tty); unlock: read_unlock(&tasklist_lock); @@ -2844,7 +2867,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) no_tty(); return 0; case TIOCSCTTY: - return tiocsctty(tty, arg); + return tiocsctty(tty, file, arg); case TIOCGPGRP: return tiocgpgrp(tty, real_tty, p); case TIOCSPGRP: @@ -3151,13 +3174,18 @@ struct class *tty_class; static int tty_cdev_add(struct tty_driver *driver, dev_t dev, unsigned int index, unsigned int count) { + int err; + /* init here, since reused cdevs cause crashes */ driver->cdevs[index] = cdev_alloc(); if (!driver->cdevs[index]) return -ENOMEM; - cdev_init(driver->cdevs[index], &tty_fops); + driver->cdevs[index]->ops = &tty_fops; driver->cdevs[index]->owner = driver->owner; - return cdev_add(driver->cdevs[index], dev, count); + err = cdev_add(driver->cdevs[index], dev, count); + if (err) + kobject_put(&driver->cdevs[index]->kobj); + return err; } /** diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index d85abfed84cc..f5a381945db2 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -54,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Logitech ConferenceCam CC3000e */ + { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT }, + + /* Logitech PTZ Pro Camera */ + { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Logitech Quickcam Fusion */ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -78,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = { /* Philips PSC805 audio device */ { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Plantronic Audio 655 DSP */ + { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* Plantronic Audio 648 USB */ + { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Artisman Watchdog Dongle */ { USB_DEVICE(0x04b4, 0x0526), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index d1b81539d632..d6199507f861 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -159,8 +159,10 @@ static int ep_bd_list_alloc(struct bdc_ep *ep) bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool, GFP_ATOMIC, &dma); - if (!bd_table->start_bd) + if (!bd_table->start_bd) { + kfree(bd_table); goto fail; + } bd_table->dma = dma; diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index c79d33676672..c47d3e480586 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -147,6 +147,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { xhci->quirks |= XHCI_SPURIOUS_REBOOT; + xhci->quirks |= XHCI_SPURIOUS_WAKEUP; } if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 43291f93afeb..97ffe3997273 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2191,6 +2191,10 @@ static int 
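
Aside on the bdc_ep hunk above: it is the usual partial-allocation error path, where a failure in the second allocation must free the container allocated just before it or that container leaks. A minimal sketch with malloc()/calloc() standing in for the driver's kzalloc and dma_pool_alloc calls:

    #include <stdlib.h>

    struct table { void *start; };

    static struct table *table_alloc(void)
    {
            struct table *t = calloc(1, sizeof(*t));
            if (!t)
                    return NULL;

            t->start = malloc(4096);        /* stand-in for dma_pool_alloc() */
            if (!t->start) {
                    free(t);                /* the fix: do not leak 't' */
                    return NULL;
            }
            return t;
    }
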
process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, } /* Fast path - was this the last TRB in the TD for this URB? */ } else if (event_trb == td->last_trb) { + if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX) + return finish_td(xhci, td, event_trb, event, ep, + status, false); + if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { td->urb->actual_length = td->urb->transfer_buffer_length - @@ -2242,6 +2246,12 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, td->urb->actual_length += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); + + if (trb_comp_code == COMP_SHORT_TX) { + xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n"); + td->urb_length_set = true; + return 0; + } } return finish_td(xhci, td, event_trb, event, ep, status, false); @@ -2274,6 +2284,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, u32 trb_comp_code; int ret = 0; int td_num = 0; + bool handling_skipped_tds = false; slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags)); xdev = xhci->devs[slot_id]; @@ -2410,6 +2421,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep->skip = true; xhci_dbg(xhci, "Miss service interval error, set skip flag\n"); goto cleanup; + case COMP_PING_ERR: + ep->skip = true; + xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n"); + goto cleanup; default: if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { status = 0; @@ -2546,13 +2561,18 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep, &status); cleanup: + + + handling_skipped_tds = ep->skip && + trb_comp_code != COMP_MISSED_INT && + trb_comp_code != COMP_PING_ERR; + /* - * Do not update event ring dequeue pointer if ep->skip is set. - * Will roll back to continue process missed tds. + * Do not update event ring dequeue pointer if we're in a loop + * processing missed tds. */ - if (trb_comp_code == COMP_MISSED_INT || !ep->skip) { + if (!handling_skipped_tds) inc_deq(xhci, xhci->event_ring); - } if (ret) { urb = td->urb; @@ -2587,7 +2607,7 @@ cleanup: * Process them as short transfer until reach the td pointed by * the event. */ - } while (ep->skip && trb_comp_code != COMP_MISSED_INT); + } while (handling_skipped_tds); return 0; } diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index 3ad5d19e4d04..23c794813e6a 100644 --- a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c @@ -472,7 +472,7 @@ static int chaoskey_rng_read(struct hwrng *rng, void *data, if (this_time > max) this_time = max; - memcpy(data, dev->buf, this_time); + memcpy(data, dev->buf + dev->used, this_time); dev->used += this_time; diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 7b98e1d9194c..d82fa36c3465 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -476,6 +476,11 @@ static const struct of_device_id usbhs_of_match[] = { .compatible = "renesas,usbhs-r8a7794", .data = (void *)USBHS_TYPE_RCAR_GEN2, }, + { + /* Gen3 is compatible with Gen2 */ + .compatible = "renesas,usbhs-r8a7795", + .data = (void *)USBHS_TYPE_RCAR_GEN2, + }, { }, }; MODULE_DEVICE_TABLE(of, usbhs_of_match); @@ -493,7 +498,7 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev) return NULL; dparam = &info->driver_param; - dparam->type = of_id ? (u32)of_id->data : 0; + dparam->type = of_id ? 
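
Aside on the chaoskey hunk above: it fixes a read cursor, so data handed to the RNG core comes from buf + used rather than from the start of the buffer on every call. A sketch of the pattern, with field names chosen for illustration rather than taken from the driver:

    #include <string.h>
    #include <stddef.h>

    struct pool {
            unsigned char buf[64];
            size_t valid;   /* bytes filled by the device */
            size_t used;    /* bytes already handed out */
    };

    static size_t pool_read(struct pool *p, void *out, size_t max)
    {
            size_t avail = p->valid - p->used;
            size_t n = avail < max ? avail : max;

            memcpy(out, p->buf + p->used, n);   /* offset by 'used', not 0 */
            p->used += n;
            return n;
    }
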
(uintptr_t)of_id->data : 0; if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp)) dparam->buswait_bwait = tmp; gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0, diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 1aaf89300621..92f394927f24 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -1093,6 +1093,7 @@ static void fbcon_init(struct vc_data *vc, int init) con_copy_unimap(vc, svc); ops = info->fbcon_par; + ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); p->con_rotate = initial_rotation; set_blitting_type(vc, info); diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c index 0e5fde1d3ffb..9f9a7bef1ff6 100644 --- a/drivers/video/fbdev/broadsheetfb.c +++ b/drivers/video/fbdev/broadsheetfb.c @@ -752,7 +752,7 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev, if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) { dev_err(dev, "Invalid waveform\n"); err = -EINVAL; - goto err_failed; + goto err_fw; } mutex_lock(&(par->io_lock)); @@ -762,13 +762,15 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev, mutex_unlock(&(par->io_lock)); if (err < 0) { dev_err(dev, "Failed to store broadsheet waveform\n"); - goto err_failed; + goto err_fw; } dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size); - return len; + err = len; +err_fw: + release_firmware(fw_entry); err_failed: return err; } diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index 7fa2e6f9e322..b335c1ae8625 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c @@ -1628,9 +1628,16 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state) static int fsl_diu_resume(struct platform_device *ofdev) { struct fsl_diu_data *data; + unsigned int i; data = dev_get_drvdata(&ofdev->dev); - enable_lcdc(data->fsl_diu_info); + + fsl_diu_enable_interrupts(data); + update_lcdc(data->fsl_diu_info); + for (i = 0; i < NUM_AOIS; i++) { + if (data->mfb[i].count) + fsl_diu_enable_panel(&data->fsl_diu_info[i]); + } return 0; } diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c index 9b8bebdf8f86..f9ec5c0484fa 100644 --- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c +++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c @@ -831,6 +831,7 @@ static struct of_device_id of_platform_mb862xx_tbl[] = { { .compatible = "fujitsu,coral", }, { /* end */ } }; +MODULE_DEVICE_TABLE(of, of_platform_mb862xx_tbl); static struct platform_driver of_platform_mb862xxfb_driver = { .driver = { diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c index a8ce920fa797..d811e6dcaef7 100644 --- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c +++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c @@ -294,7 +294,7 @@ static int dvic_probe_of(struct platform_device *pdev) adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); if (adapter_node) { - adapter = of_find_i2c_adapter_by_node(adapter_node); + adapter = of_get_i2c_adapter_by_node(adapter_node); if (adapter == NULL) { dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); omap_dss_put_device(ddata->in); diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c index 90cbc4c3406c..c581231c74a5 100644 --- 
a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c +++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c @@ -898,6 +898,7 @@ static const struct of_device_id acx565akm_of_match[] = { { .compatible = "omapdss,sony,acx565akm", }, {}, }; +MODULE_DEVICE_TABLE(of, acx565akm_of_match); static struct spi_driver acx565akm_driver = { .driver = { diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c index 7ed9a227f5ea..01b43e9ce941 100644 --- a/drivers/video/fbdev/tridentfb.c +++ b/drivers/video/fbdev/tridentfb.c @@ -226,7 +226,7 @@ static void blade_image_blit(struct tridentfb_par *par, const char *data, writemmr(par, DST1, point(x, y)); writemmr(par, DST2, point(x + w - 1, y + h - 1)); - memcpy(par->io_virt + 0x10000, data, 4 * size); + iowrite32_rep(par->io_virt + 0x10000, data, size); } static void blade_copy_rect(struct tridentfb_par *par, @@ -673,8 +673,14 @@ static int get_nativex(struct tridentfb_par *par) static inline void set_lwidth(struct tridentfb_par *par, int width) { write3X4(par, VGA_CRTC_OFFSET, width & 0xFF); - write3X4(par, AddColReg, - (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4)); + /* chips older than TGUI9660 have only 1 width bit in AddColReg */ + /* touching the other one breaks I2C/DDC */ + if (par->chip_id == TGUI9440 || par->chip_id == CYBER9320) + write3X4(par, AddColReg, + (read3X4(par, AddColReg) & 0xEF) | ((width & 0x100) >> 4)); + else + write3X4(par, AddColReg, + (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4)); } /* For resolutions smaller than FP resolution stretch */ diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c index 32d8275e4c88..8a1076beecd3 100644 --- a/drivers/video/of_display_timing.c +++ b/drivers/video/of_display_timing.c @@ -210,6 +210,7 @@ struct display_timings *of_get_display_timings(struct device_node *np) */ pr_err("%s: error in timing %d\n", of_node_full_name(np), disp->num_timings + 1); + kfree(dt); goto timingfail; } diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index c68edc16aa54..79e1aa1b0959 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -817,8 +817,9 @@ config ITCO_WDT tristate "Intel TCO Timer/Watchdog" depends on (X86 || IA64) && PCI select WATCHDOG_CORE + depends on I2C || I2C=n select LPC_ICH if !EXPERT - select I2C_I801 if !EXPERT + select I2C_I801 if !EXPERT && I2C ---help--- Hardware driver for the intel TCO timer based watchdog devices. These drivers are included in the Intel 82801 I/O Controller diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c index 66c3e656a616..8a5ce5b5a0b6 100644 --- a/drivers/watchdog/bcm2835_wdt.c +++ b/drivers/watchdog/bcm2835_wdt.c @@ -36,6 +36,13 @@ #define PM_RSTC_WRCFG_FULL_RESET 0x00000020 #define PM_RSTC_RESET 0x00000102 +/* + * The Raspberry Pi firmware uses the RSTS register to know which partiton + * to boot from. The partiton value is spread into bits 0, 2, 4, 6, 8, 10. + * Partiton 63 is a special partition used by the firmware to indicate halt. + */ +#define PM_RSTS_RASPBERRYPI_HALT 0x555 + #define SECS_TO_WDOG_TICKS(x) ((x) << 16) #define WDOG_TICKS_TO_SECS(x) ((x) >> 16) @@ -151,8 +158,7 @@ static void bcm2835_power_off(void) * hard reset. 
*/ val = readl_relaxed(wdt->base + PM_RSTS); - val &= PM_RSTC_WRCFG_CLR; - val |= PM_PASSWORD | PM_RSTS_HADWRH_SET; + val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT; writel_relaxed(val, wdt->base + PM_RSTS); /* Continue with normal reset mechanism */ diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c index cc1bdfc2ff71..006e2348022c 100644 --- a/drivers/watchdog/gef_wdt.c +++ b/drivers/watchdog/gef_wdt.c @@ -303,6 +303,7 @@ static const struct of_device_id gef_wdt_ids[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, gef_wdt_ids); static struct platform_driver gef_wdt_driver = { .driver = { diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c index 69013007dc47..098fa9c34d6d 100644 --- a/drivers/watchdog/mena21_wdt.c +++ b/drivers/watchdog/mena21_wdt.c @@ -253,6 +253,7 @@ static const struct of_device_id a21_wdt_ids[] = { { .compatible = "men,a021-wdt" }, { }, }; +MODULE_DEVICE_TABLE(of, a21_wdt_ids); static struct platform_driver a21_wdt_driver = { .probe = a21_wdt_probe, diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c index 2789da2c0515..60b0605bd7e6 100644 --- a/drivers/watchdog/moxart_wdt.c +++ b/drivers/watchdog/moxart_wdt.c @@ -168,6 +168,7 @@ static const struct of_device_id moxart_watchdog_match[] = { { .compatible = "moxa,moxart-watchdog" }, { }, }; +MODULE_DEVICE_TABLE(of, moxart_watchdog_match); static struct platform_driver moxart_wdt_driver = { .probe = moxart_wdt_probe, diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index ecbc63d3143e..9a2ec79e8cfb 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1828,7 +1828,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, int found = 0; struct extent_buffer *eb; struct btrfs_inode_extref *extref; - struct extent_buffer *leaf; u32 item_size; u32 cur_offset; unsigned long ptr; @@ -1856,9 +1855,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); btrfs_release_path(path); - leaf = path->nodes[0]; - item_size = btrfs_item_size_nr(leaf, slot); - ptr = btrfs_item_ptr_offset(leaf, slot); + item_size = btrfs_item_size_nr(eb, slot); + ptr = btrfs_item_ptr_offset(eb, slot); cur_offset = 0; while (cur_offset < item_size) { @@ -1872,7 +1870,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, if (ret) break; - cur_offset += btrfs_inode_extref_name_len(leaf, extref); + cur_offset += btrfs_inode_extref_name_len(eb, extref); cur_offset += sizeof(*extref); } btrfs_tree_read_unlock_blocking(eb); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 295795aebe0b..1e60d00d4ea7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2847,6 +2847,8 @@ int open_ctree(struct super_block *sb, !extent_buffer_uptodate(chunk_root->node)) { printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", sb->s_id); + if (!IS_ERR(chunk_root->node)) + free_extent_buffer(chunk_root->node); chunk_root->node = NULL; goto fail_tree_roots; } @@ -2885,6 +2887,8 @@ retry_root_backup: !extent_buffer_uptodate(tree_root->node)) { printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", sb->s_id); + if (!IS_ERR(tree_root->node)) + free_extent_buffer(tree_root->node); tree_root->node = NULL; goto recovery_tree_root; } diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 8d052209f473..2513a7f53334 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -112,11 +112,11 @@ static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh, u32 generation; if 
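
Aside on the bcm2835_wdt hunk above: it relies on the firmware convention that the boot-partition number is spread across the even RSTS bits 0, 2, 4, 6, 8 and 10, and that partition 63 means halt. Spreading 63 that way yields 0x555, the PM_RSTS_RASPBERRYPI_HALT value the patch writes. A small program confirming the encoding:

    #include <stdio.h>

    static unsigned int spread_partition(unsigned int part)
    {
            unsigned int val = 0;

            /* place partition bit n into RSTS bit 2*n */
            for (int bit = 0; bit < 6; bit++)
                    if (part & (1u << bit))
                            val |= 1u << (2 * bit);
            return val;
    }

    int main(void)
    {
            printf("0x%x\n", spread_partition(63));   /* prints 0x555 */
            return 0;
    }
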
(fh_type == FILEID_BTRFS_WITH_PARENT) { - if (fh_len != BTRFS_FID_SIZE_CONNECTABLE) + if (fh_len < BTRFS_FID_SIZE_CONNECTABLE) return NULL; root_objectid = fid->root_objectid; } else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) { - if (fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) + if (fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) return NULL; root_objectid = fid->parent_root_objectid; } else @@ -136,11 +136,11 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, u32 generation; if ((fh_type != FILEID_BTRFS_WITH_PARENT || - fh_len != BTRFS_FID_SIZE_CONNECTABLE) && + fh_len < BTRFS_FID_SIZE_CONNECTABLE) && (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT || - fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) && + fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) && (fh_type != FILEID_BTRFS_WITHOUT_PARENT || - fh_len != BTRFS_FID_SIZE_NON_CONNECTABLE)) + fh_len < BTRFS_FID_SIZE_NON_CONNECTABLE)) return NULL; objectid = fid->objectid; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9f9604201333..601d7d45d164 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2828,6 +2828,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head; int ret; int run_all = count == (unsigned long)-1; + bool can_flush_pending_bgs = trans->can_flush_pending_bgs; /* We'll clean this up in btrfs_cleanup_transaction */ if (trans->aborted) @@ -2844,6 +2845,7 @@ again: #ifdef SCRAMBLE_DELAYED_REFS delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); #endif + trans->can_flush_pending_bgs = false; ret = __btrfs_run_delayed_refs(trans, root, count); if (ret < 0) { btrfs_abort_transaction(trans, root, ret); @@ -2893,6 +2895,7 @@ again: } out: assert_qgroups_uptodate(trans); + trans->can_flush_pending_bgs = can_flush_pending_bgs; return 0; } @@ -4306,7 +4309,8 @@ out: * the block groups that were made dirty during the lifetime of the * transaction. 
*/ - if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { + if (trans->can_flush_pending_bgs && + trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { btrfs_create_pending_block_groups(trans, trans->root); btrfs_trans_release_chunk_metadata(trans); } @@ -9560,7 +9564,9 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, struct btrfs_block_group_item item; struct btrfs_key key; int ret = 0; + bool can_flush_pending_bgs = trans->can_flush_pending_bgs; + trans->can_flush_pending_bgs = false; list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { if (ret) goto next; @@ -9581,6 +9587,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans, next: list_del_init(&block_group->bg_list); } + trans->can_flush_pending_bgs = can_flush_pending_bgs; } int btrfs_make_block_group(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e2357e31609a..3915c9473e94 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3132,12 +3132,12 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, get_extent_t *get_extent, struct extent_map **em_cached, struct bio **bio, int mirror_num, - unsigned long *bio_flags, int rw) + unsigned long *bio_flags, int rw, + u64 *prev_em_start) { struct inode *inode; struct btrfs_ordered_extent *ordered; int index; - u64 prev_em_start = (u64)-1; inode = pages[0]->mapping->host; while (1) { @@ -3153,7 +3153,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, for (index = 0; index < nr_pages; index++) { __do_readpage(tree, pages[index], get_extent, em_cached, bio, - mirror_num, bio_flags, rw, &prev_em_start); + mirror_num, bio_flags, rw, prev_em_start); page_cache_release(pages[index]); } } @@ -3163,7 +3163,8 @@ static void __extent_readpages(struct extent_io_tree *tree, int nr_pages, get_extent_t *get_extent, struct extent_map **em_cached, struct bio **bio, int mirror_num, - unsigned long *bio_flags, int rw) + unsigned long *bio_flags, int rw, + u64 *prev_em_start) { u64 start = 0; u64 end = 0; @@ -3184,7 +3185,7 @@ static void __extent_readpages(struct extent_io_tree *tree, index - first_index, start, end, get_extent, em_cached, bio, mirror_num, bio_flags, - rw); + rw, prev_em_start); start = page_start; end = start + PAGE_CACHE_SIZE - 1; first_index = index; @@ -3195,7 +3196,8 @@ static void __extent_readpages(struct extent_io_tree *tree, __do_contiguous_readpages(tree, &pages[first_index], index - first_index, start, end, get_extent, em_cached, bio, - mirror_num, bio_flags, rw); + mirror_num, bio_flags, rw, + prev_em_start); } static int __extent_read_full_page(struct extent_io_tree *tree, @@ -4207,6 +4209,7 @@ int extent_readpages(struct extent_io_tree *tree, struct page *page; struct extent_map *em_cached = NULL; int nr = 0; + u64 prev_em_start = (u64)-1; for (page_idx = 0; page_idx < nr_pages; page_idx++) { page = list_entry(pages->prev, struct page, lru); @@ -4223,12 +4226,12 @@ int extent_readpages(struct extent_io_tree *tree, if (nr < ARRAY_SIZE(pagepool)) continue; __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, - &bio, 0, &bio_flags, READ); + &bio, 0, &bio_flags, READ, &prev_em_start); nr = 0; } if (nr) __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, - &bio, 0, &bio_flags, READ); + &bio, 0, &bio_flags, READ, &prev_em_start); if (em_cached) free_extent_map(em_cached); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index b823fac91c92..8c6f247ba81d 100644 --- a/fs/btrfs/file.c +++ 
b/fs/btrfs/file.c @@ -2584,7 +2584,7 @@ static long btrfs_fallocate(struct file *file, int mode, alloc_start); if (ret) goto out; - } else { + } else if (offset + len > inode->i_size) { /* * If we are fallocating from the end of the file onward we * need to zero out the end of the page if i_size lands in the diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 0adf5422fce9..8d20f3b1cab0 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -4639,6 +4639,11 @@ locked: bctl->flags |= BTRFS_BALANCE_TYPE_MASK; } + if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) { + ret = -EINVAL; + goto out_bctl; + } + do_balance: /* * Ownership of bctl and mutually_exclusive_operation_running @@ -4650,12 +4655,15 @@ do_balance: need_unlock = false; ret = btrfs_balance(bctl, bargs); + bctl = NULL; if (arg) { if (copy_to_user(arg, bargs, sizeof(*bargs))) ret = -EFAULT; } +out_bctl: + kfree(bctl); out_bargs: kfree(bargs); out_unlock: diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index aa72bfd28f7d..a739b825bdd3 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -1920,10 +1920,12 @@ static int did_overwrite_ref(struct send_ctx *sctx, /* * We know that it is or will be overwritten. Check this now. * The current inode being processed might have been the one that caused - * inode 'ino' to be orphanized, therefore ow_inode can actually be the - * same as sctx->send_progress. + * inode 'ino' to be orphanized, therefore check if ow_inode matches + * the current inode being processed. */ - if (ow_inode <= sctx->send_progress) + if ((ow_inode < sctx->send_progress) || + (ino != sctx->cur_ino && ow_inode == sctx->cur_ino && + gen == sctx->cur_inode_gen)) ret = 1; else ret = 0; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 74bc3338418b..a5b06442f0bf 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -557,6 +557,7 @@ again: h->delayed_ref_elem.seq = 0; h->type = type; h->allocating_chunk = false; + h->can_flush_pending_bgs = true; h->reloc_reserved = false; h->sync = false; INIT_LIST_HEAD(&h->qgroup_ref_list); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 87964bf8892d..a994bb097ee5 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -118,6 +118,7 @@ struct btrfs_trans_handle { short aborted; short adding_csums; bool allocating_chunk; + bool can_flush_pending_bgs; bool reloc_reserved; bool sync; unsigned int type; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 2ca784a14e84..595279a8b99f 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -376,6 +376,14 @@ struct map_lookup { #define BTRFS_BALANCE_ARGS_VRANGE (1ULL << 4) #define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5) +#define BTRFS_BALANCE_ARGS_MASK \ + (BTRFS_BALANCE_ARGS_PROFILES | \ + BTRFS_BALANCE_ARGS_USAGE | \ + BTRFS_BALANCE_ARGS_DEVID | \ + BTRFS_BALANCE_ARGS_DRANGE | \ + BTRFS_BALANCE_ARGS_VRANGE | \ + BTRFS_BALANCE_ARGS_LIMIT) + /* * Profile changing flags. 
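
Aside on the btrfs_ioctl_balance() hunk above: it adds the standard "reject unknown flag bits" guard built from the new BTRFS_BALANCE_ARGS_MASK, so older kernels fail cleanly instead of silently ignoring flags they do not understand. A sketch of that validation with placeholder mask values (the real masks are the ones defined in volumes.h above):

    #include <stdint.h>
    #include <errno.h>

    #define ARGS_MASK_SKETCH  0x3fULL   /* placeholder for BTRFS_BALANCE_ARGS_MASK */
    #define TYPE_MASK_SKETCH  0x7ULL    /* placeholder for BTRFS_BALANCE_TYPE_MASK */

    static int validate_balance_flags(uint64_t flags)
    {
            if (flags & ~(ARGS_MASK_SKETCH | TYPE_MASK_SKETCH))
                    return -EINVAL;     /* an unknown bit is set */
            return 0;
    }
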
When SOFT is set we won't relocate chunk if * it already has the target profile (even though it may be diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 27aea110e923..c3cc1609025f 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "2.07" +#define CIFS_VERSION "2.08" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/file.c b/fs/cifs/file.c index e2a6af1508af..62203c387db4 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3380,6 +3380,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, struct page *page, *tpage; unsigned int expected_index; int rc; + gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping); INIT_LIST_HEAD(tmplist); @@ -3392,7 +3393,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, */ __set_page_locked(page); rc = add_to_page_cache_locked(page, mapping, - page->index, GFP_KERNEL); + page->index, gfp); /* give up if we can't stick it in the cache */ if (rc) { @@ -3418,8 +3419,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, break; __set_page_locked(page); - if (add_to_page_cache_locked(page, mapping, page->index, - GFP_KERNEL)) { + if (add_to_page_cache_locked(page, mapping, page->index, gfp)) { __clear_page_locked(page); break; } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index f621b44cb800..6b66dd5d1540 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -2034,7 +2034,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, struct tcon_link *tlink = NULL; struct cifs_tcon *tcon = NULL; struct TCP_Server_Info *server; - struct cifs_io_parms io_parms; /* * To avoid spurious oplock breaks from server, in the case of @@ -2056,18 +2055,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, rc = -ENOSYS; cifsFileInfo_put(open_file); cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc); - if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { - unsigned int bytes_written; - - io_parms.netfid = open_file->fid.netfid; - io_parms.pid = open_file->pid; - io_parms.tcon = tcon; - io_parms.offset = 0; - io_parms.length = attrs->ia_size; - rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, - NULL, NULL, 1); - cifs_dbg(FYI, "Wrt seteof rc %d\n", rc); - } } else rc = -EINVAL; @@ -2093,28 +2080,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, else rc = -ENOSYS; cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc); - if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { - __u16 netfid; - int oplock = 0; - rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN, - GENERIC_WRITE, CREATE_NOT_DIR, &netfid, - &oplock, NULL, cifs_sb->local_nls, - cifs_remap(cifs_sb)); - if (rc == 0) { - unsigned int bytes_written; - - io_parms.netfid = netfid; - io_parms.pid = current->tgid; - io_parms.tcon = tcon; - io_parms.offset = 0; - io_parms.length = attrs->ia_size; - rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL, - NULL, 1); - cifs_dbg(FYI, "wrt seteof rc %d\n", rc); - CIFSSMBClose(xid, tcon, netfid); - } - } if (tlink) cifs_put_tlink(tlink); diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index ce83e2edbe0a..597a417ba94d 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -922,7 +922,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, if (tcon && tcon->bad_network_name) return -ENOENT; - if ((tcon->seal) && + if ((tcon && 
tcon->seal) && ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) { cifs_dbg(VFS, "encryption requested but no server support"); return -EOPNOTSUPP; diff --git a/fs/dax.c b/fs/dax.c index 7ae6df7ea1d2..a86d3cc2b389 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -285,6 +285,7 @@ static int copy_user_bh(struct page *to, struct buffer_head *bh, static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, struct vm_area_struct *vma, struct vm_fault *vmf) { + struct address_space *mapping = inode->i_mapping; sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9); unsigned long vaddr = (unsigned long)vmf->virtual_address; void __pmem *addr; @@ -292,6 +293,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, pgoff_t size; int error; + i_mmap_lock_read(mapping); + /* * Check truncate didn't happen while we were allocating a block. * If it did, this block may or may not be still allocated to the @@ -321,6 +324,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, error = vm_insert_mixed(vma, vaddr, pfn); out: + i_mmap_unlock_read(mapping); + return error; } @@ -382,17 +387,15 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, * from a read fault and we've raced with a truncate */ error = -EIO; - goto unlock; + goto unlock_page; } - } else { - i_mmap_lock_write(mapping); } error = get_block(inode, block, &bh, 0); if (!error && (bh.b_size < PAGE_SIZE)) error = -EIO; /* fs corruption? */ if (error) - goto unlock; + goto unlock_page; if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) { if (vmf->flags & FAULT_FLAG_WRITE) { @@ -403,9 +406,8 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, if (!error && (bh.b_size < PAGE_SIZE)) error = -EIO; if (error) - goto unlock; + goto unlock_page; } else { - i_mmap_unlock_write(mapping); return dax_load_hole(mapping, page, vmf); } } @@ -417,15 +419,17 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, else clear_user_highpage(new_page, vaddr); if (error) - goto unlock; + goto unlock_page; vmf->page = page; if (!page) { + i_mmap_lock_read(mapping); /* Check we didn't race with truncate */ size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; if (vmf->pgoff >= size) { + i_mmap_unlock_read(mapping); error = -EIO; - goto unlock; + goto out; } } return VM_FAULT_LOCKED; @@ -461,8 +465,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE)); } - if (!page) - i_mmap_unlock_write(mapping); out: if (error == -ENOMEM) return VM_FAULT_OOM | major; @@ -471,14 +473,11 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, return VM_FAULT_SIGBUS | major; return VM_FAULT_NOPAGE | major; - unlock: + unlock_page: if (page) { unlock_page(page); page_cache_release(page); - } else { - i_mmap_unlock_write(mapping); } - goto out; } EXPORT_SYMBOL(__dax_fault); @@ -556,10 +555,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, block = (sector_t)pgoff << (PAGE_SHIFT - blkbits); bh.b_size = PMD_SIZE; - i_mmap_lock_write(mapping); length = get_block(inode, block, &bh, write); if (length) return VM_FAULT_SIGBUS; + i_mmap_lock_read(mapping); /* * If the filesystem isn't willing to tell us the length of a hole, @@ -569,24 +568,14 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) goto fallback; - if (buffer_unwritten(&bh) || buffer_new(&bh)) { - int i; - for (i = 0; i 
< PTRS_PER_PMD; i++) - clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE); - wmb_pmem(); - count_vm_event(PGMAJFAULT); - mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); - result |= VM_FAULT_MAJOR; - } - /* * If we allocated new storage, make sure no process has any * zero pages covering this hole */ if (buffer_new(&bh)) { - i_mmap_unlock_write(mapping); + i_mmap_unlock_read(mapping); unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0); - i_mmap_lock_write(mapping); + i_mmap_lock_read(mapping); } /* @@ -633,15 +622,25 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address, if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR)) goto fallback; + if (buffer_unwritten(&bh) || buffer_new(&bh)) { + int i; + for (i = 0; i < PTRS_PER_PMD; i++) + clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE); + wmb_pmem(); + count_vm_event(PGMAJFAULT); + mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); + result |= VM_FAULT_MAJOR; + } + result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write); } out: + i_mmap_unlock_read(mapping); + if (buffer_unwritten(&bh)) complete_unwritten(&bh, !(result & VM_FAULT_ERROR)); - i_mmap_unlock_write(mapping); - return result; fallback: diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 47728da7702c..b46e9fc64196 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -63,7 +63,7 @@ config EXT4_FS If unsure, say N. config EXT4_USE_FOR_EXT2 - bool "Use ext4 for ext2/ext3 file systems" + bool "Use ext4 for ext2 file systems" depends on EXT4_FS depends on EXT2_FS=n default y diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index e26803fb210d..560af0437704 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -165,8 +165,8 @@ int ext4_mpage_readpages(struct address_space *mapping, if (pages) { page = list_entry(pages->prev, struct page, lru); list_del(&page->lru); - if (add_to_page_cache_lru(page, mapping, - page->index, GFP_KERNEL)) + if (add_to_page_cache_lru(page, mapping, page->index, + GFP_KERNEL & mapping_gfp_mask(mapping))) goto next_page; } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 091a36444972..29e4599f6fc1 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -778,19 +778,24 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi, struct wb_writeback_work *base_work, bool skip_if_busy) { - int next_memcg_id = 0; - struct bdi_writeback *wb; - struct wb_iter iter; + struct bdi_writeback *last_wb = NULL; + struct bdi_writeback *wb = list_entry_rcu(&bdi->wb_list, + struct bdi_writeback, bdi_node); might_sleep(); restart: rcu_read_lock(); - bdi_for_each_wb(wb, bdi, &iter, next_memcg_id) { + list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) { DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done); struct wb_writeback_work fallback_work; struct wb_writeback_work *work; long nr_pages; + if (last_wb) { + wb_put(last_wb); + last_wb = NULL; + } + /* SYNC_ALL writes out I_DIRTY_TIME too */ if (!wb_has_dirty_io(wb) && (base_work->sync_mode == WB_SYNC_NONE || @@ -819,12 +824,22 @@ restart: wb_queue_work(wb, work); - next_memcg_id = wb->memcg_css->id + 1; + /* + * Pin @wb so that it stays on @bdi->wb_list. This allows + * continuing iteration from @wb after dropping and + * regrabbing rcu read lock. 
+ */ + wb_get(wb); + last_wb = wb; + rcu_read_unlock(); wb_wait_for_completion(bdi, &fallback_work_done); goto restart; } rcu_read_unlock(); + + if (last_wb) + wb_put(last_wb); } #else /* CONFIG_CGROUP_WRITEBACK */ @@ -1857,12 +1872,11 @@ void wakeup_flusher_threads(long nr_pages, enum wb_reason reason) rcu_read_lock(); list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { struct bdi_writeback *wb; - struct wb_iter iter; if (!bdi_has_dirty_io(bdi)) continue; - bdi_for_each_wb(wb, bdi, &iter, 0) + list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) wb_start_writeback(wb, wb_split_bdi_pages(wb, nr_pages), false, reason); } @@ -1894,11 +1908,10 @@ static void wakeup_dirtytime_writeback(struct work_struct *w) rcu_read_lock(); list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { struct bdi_writeback *wb; - struct wb_iter iter; - bdi_for_each_wb(wb, bdi, &iter, 0) - if (!list_empty(&bdi->wb.b_dirty_time)) - wb_wakeup(&bdi->wb); + list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) + if (!list_empty(&wb->b_dirty_time)) + wb_wakeup(wb); } rcu_read_unlock(); schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); diff --git a/fs/mpage.c b/fs/mpage.c index 778a4ddef77a..a7c34274f207 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -139,7 +139,8 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) static struct bio * do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, sector_t *last_block_in_bio, struct buffer_head *map_bh, - unsigned long *first_logical_block, get_block_t get_block) + unsigned long *first_logical_block, get_block_t get_block, + gfp_t gfp) { struct inode *inode = page->mapping->host; const unsigned blkbits = inode->i_blkbits; @@ -277,8 +278,7 @@ alloc_new: goto out; } bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), - min_t(int, nr_pages, BIO_MAX_PAGES), - GFP_KERNEL); + min_t(int, nr_pages, BIO_MAX_PAGES), gfp); if (bio == NULL) goto confused; } @@ -361,6 +361,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, sector_t last_block_in_bio = 0; struct buffer_head map_bh; unsigned long first_logical_block = 0; + gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping); map_bh.b_state = 0; map_bh.b_size = 0; @@ -370,12 +371,13 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, prefetchw(&page->flags); list_del(&page->lru); if (!add_to_page_cache_lru(page, mapping, - page->index, GFP_KERNEL)) { + page->index, + gfp)) { bio = do_mpage_readpage(bio, page, nr_pages - page_idx, &last_block_in_bio, &map_bh, &first_logical_block, - get_block); + get_block, gfp); } page_cache_release(page); } @@ -395,11 +397,12 @@ int mpage_readpage(struct page *page, get_block_t get_block) sector_t last_block_in_bio = 0; struct buffer_head map_bh; unsigned long first_logical_block = 0; + gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(page->mapping); map_bh.b_state = 0; map_bh.b_size = 0; bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio, - &map_bh, &first_logical_block, get_block); + &map_bh, &first_logical_block, get_block, gfp); if (bio) mpage_bio_submit(READ, bio); return 0; diff --git a/fs/namei.c b/fs/namei.c index 726d211db484..33e9495a3129 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1558,8 +1558,6 @@ static int lookup_fast(struct nameidata *nd, negative = d_is_negative(dentry); if (read_seqcount_retry(&dentry->d_seq, seq)) return -ECHILD; - if (negative) - return -ENOENT; /* * This sequence count validates that the parent had no @@ -1580,6 +1578,12 @@ static int lookup_fast(struct nameidata *nd, 
goto unlazy; } } + /* + * Note: do negative dentry check after revalidation in + * case that drops it. + */ + if (negative) + return -ENOENT; path->mnt = mnt; path->dentry = dentry; if (likely(__follow_mount_rcu(nd, path, inode, seqp))) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f93b9cdb4934..5133bb18830e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1458,12 +1458,18 @@ nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state) if (delegation) delegation_flags = delegation->flags; rcu_read_unlock(); - if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) { + switch (data->o_arg.claim) { + default: + break; + case NFS4_OPEN_CLAIM_DELEGATE_CUR: + case NFS4_OPEN_CLAIM_DELEG_CUR_FH: pr_err_ratelimited("NFS: Broken NFSv4 server %s is " "returning a delegation for " "OPEN(CLAIM_DELEGATE_CUR)\n", clp->cl_hostname); - } else if ((delegation_flags & 1UL<inode, data->owner->so_cred, &data->o_res); @@ -1771,6 +1777,9 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, if (IS_ERR(opendata)) return PTR_ERR(opendata); nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); + write_seqlock(&state->seqlock); + nfs4_stateid_copy(&state->stateid, &state->open_stateid); + write_sequnlock(&state->seqlock); clear_bit(NFS_DELEGATED_STATE, &state->flags); switch (type & (FMODE_READ|FMODE_WRITE)) { case FMODE_READ|FMODE_WRITE: @@ -1863,6 +1872,8 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data) data->rpc_done = 0; data->rpc_status = 0; data->timestamp = jiffies; + if (data->is_recover) + nfs4_set_sequence_privileged(&data->c_arg.seq_args); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 5db324635e92..d854693a15b0 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1725,7 +1725,8 @@ restart: if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags)) continue; - atomic_inc(&sp->so_count); + if (!atomic_inc_not_zero(&sp->so_count)) + continue; spin_unlock(&clp->cl_lock); rcu_read_unlock(); diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index 28df12e525ba..671cf68fe56b 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -409,7 +409,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event, __entry->flags = flags; __entry->fmode = (__force unsigned int)ctx->mode; __entry->dev = ctx->dentry->d_sb->s_dev; - if (!IS_ERR(state)) + if (!IS_ERR_OR_NULL(state)) inode = state->inode; if (inode != NULL) { __entry->fileid = NFS_FILEID(inode); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 72624dc4a623..75ab7622e0cc 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -569,19 +569,17 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, if (!nfs_pageio_add_request(pgio, req)) { nfs_redirty_request(req); ret = pgio->pg_error; - } + } else + nfs_add_stats(page_file_mapping(page)->host, + NFSIOS_WRITEPAGES, 1); out: return ret; } static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) { - struct inode *inode = page_file_mapping(page)->host; int ret; - nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); - nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); - nfs_pageio_cond_complete(pgio, page_file_index(page)); ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); if (ret == -EAGAIN) { @@ -597,9 +595,11 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) { struct 
nfs_pageio_descriptor pgio; + struct inode *inode = page_file_mapping(page)->host; int err; - nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc), + nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); + nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false, &nfs_async_write_completion_ops); err = nfs_do_writepage(page, wbc, &pgio); nfs_pageio_complete(&pgio); @@ -1223,7 +1223,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino return 1; if (!flctx || (list_empty_careful(&flctx->flc_flock) && list_empty_careful(&flctx->flc_posix))) - return 0; + return 1; /* Check to see if there are whole file write locks */ ret = 0; diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index cdefaa331a07..c29d9421bd5e 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c @@ -56,14 +56,6 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, u32 device_generation = 0; int error; - /* - * We do not attempt to support I/O smaller than the fs block size, - * or not aligned to it. - */ - if (args->lg_minlength < block_size) { - dprintk("pnfsd: I/O too small\n"); - goto out_layoutunavailable; - } if (seg->offset & (block_size - 1)) { dprintk("pnfsd: I/O misaligned\n"); goto out_layoutunavailable; diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index ee5aa4daaea0..ce38b4ccc9ab 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -1658,12 +1658,13 @@ send_response: if (ret < 0) { mlog(ML_ERROR, "failed to dispatch assert master work\n"); response = DLM_MASTER_RESP_ERROR; + spin_unlock(&res->spinlock); dlm_lockres_put(res); } else { dispatched = 1; __dlm_lockres_grab_inflight_worker(dlm, res); + spin_unlock(&res->spinlock); } - spin_unlock(&res->spinlock); } else { if (res) dlm_lockres_put(res); diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 3d90ad7ff91f..58eaa5c0d387 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -1723,8 +1723,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, } else { dispatched = 1; __dlm_lockres_grab_inflight_worker(dlm, res); + spin_unlock(&res->spinlock); } - spin_unlock(&res->spinlock); } else { /* put.. 
incase we are not the master */ spin_unlock(&res->spinlock); diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index ba1323a94924..a586467f6ff6 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -70,6 +70,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) unsigned order; void *data; int ret; + gfp_t gfp = mapping_gfp_mask(inode->i_mapping); /* make various checks */ order = get_order(newsize); @@ -84,7 +85,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) /* allocate enough contiguous pages to be able to satisfy the * request */ - pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order); + pages = alloc_pages(gfp, order); if (!pages) return -ENOMEM; @@ -108,7 +109,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) struct page *page = pages + loop; ret = add_to_page_cache_lru(page, inode->i_mapping, loop, - GFP_KERNEL); + gfp); if (ret < 0) goto add_error; diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 96f3448b6eb4..fd65b3f1923c 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -652,11 +652,8 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode, { int err; - mutex_lock(&inode->i_mutex); err = security_inode_init_security(inode, dentry, qstr, &init_xattrs, 0); - mutex_unlock(&inode->i_mutex); - if (err) { struct ubifs_info *c = dentry->i_sb->s_fs_info; ubifs_err(c, "cannot initialize security for inode %lu, error %d", diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 94f9ea8abcae..011dde083f23 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -1,15 +1,10 @@ #ifndef _ASM_WORD_AT_A_TIME_H #define _ASM_WORD_AT_A_TIME_H -/* - * This says "generic", but it's actually big-endian only. - * Little-endian can use more efficient versions of these - * interfaces, see for example - * arch/x86/include/asm/word-at-a-time.h - * for those. - */ - #include +#include + +#ifdef __BIG_ENDIAN struct word_at_a_time { const unsigned long high_bits, low_bits; @@ -53,4 +48,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct #define zero_bytemask(mask) (~1ul << __fls(mask)) #endif +#else + +/* + * The optimal byte mask counting is probably going to be something + * that is architecture-specific. If you have a reliably fast + * bit count instruction, that might be better than the multiply + * and shift, for example. + */ +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +#ifdef CONFIG_64BIT + +/* + * Jan Achrenius on G+: microoptimized version of + * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" + * that works for the bytemasks without having to + * mask them first. 
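
The 64-bit count_masked_bytes() being added here is plain arithmetic, so its effect can be checked in a standalone program. The following sketch repeats the has_zero/create_zero_mask/find_zero steps from this hunk on a sample word; it assumes a 64-bit little-endian host and is a demonstration only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONES  0x0101010101010101ULL	/* REPEAT_BYTE(0x01) */
#define HIGHS 0x8080808080808080ULL	/* REPEAT_BYTE(0x80) */

static uint64_t has_zero(uint64_t a)
{
	/* A byte ends up as 0x80 here only if it was 0x00. */
	return (a - ONES) & ~a & HIGHS;
}

static uint64_t create_zero_mask(uint64_t bits)
{
	bits = (bits - 1) & ~bits;
	return bits >> 7;		/* 0xff for every byte before the NUL */
}

static unsigned long find_zero(uint64_t mask)
{
	/* The multiply-and-shift from the hunk: yields the byte index. */
	return (mask * 0x0001020304050608ULL) >> 56;
}

int main(void)
{
	char buf[8] = { 'a', 'b', 'c', 0, 'W', 'X', 'Y', 'Z' };
	uint64_t word, bits;

	memcpy(&word, buf, sizeof(word));
	bits = has_zero(word);
	if (bits)
		printf("first zero byte at index %lu\n",
		       find_zero(create_zero_mask(bits)));	/* prints 3 */
	return 0;
}
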
+ */ +static inline long count_masked_bytes(unsigned long mask) +{ + return mask*0x0001020304050608ul >> 56; +} + +#else /* 32-bit case */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a = (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} + +#endif + +/* Return nonzero if it has a zero */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ + return count_masked_bytes(mask); +} + +#endif /* __BIG_ENDIAN */ + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h index 2a747a91fded..3febb4b9fce9 100644 --- a/include/drm/drm_crtc_helper.h +++ b/include/drm/drm_crtc_helper.h @@ -240,5 +240,6 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev); extern void drm_kms_helper_poll_disable(struct drm_device *dev); extern void drm_kms_helper_poll_enable(struct drm_device *dev); +extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev); #endif diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 499e9f625aef..0212d139a480 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -568,6 +568,10 @@ #define MODE_I2C_READ 4 #define MODE_I2C_STOP 8 +/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */ +#define DP_MST_PHYSICAL_PORT_0 0 +#define DP_MST_LOGICAL_PORT_0 8 + #define DP_LINK_STATUS_SIZE 6 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], int lane_count); diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 86d0b25ed054..5340099741ae 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -253,6 +253,7 @@ struct drm_dp_remote_dpcd_write { u8 *bytes; }; +#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4 struct drm_dp_remote_i2c_read { u8 num_transactions; u8 port_number; @@ -262,7 +263,7 @@ struct drm_dp_remote_i2c_read { u8 *bytes; u8 no_stop_bit; u8 i2c_transaction_delay; - } transactions[4]; + } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS]; u8 read_i2c_device_id; u8 num_bytes_read; }; @@ -374,6 +375,7 @@ struct drm_dp_mst_topology_mgr; struct drm_dp_mst_topology_cbs { /* create a connector for a port */ struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); + void (*register_connector)(struct drm_connector *connector); void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_connector *connector); void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7235c4851460..43856d19cf4d 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -217,6 +217,7 @@ struct pci_dev; int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); +bool acpi_isa_irq_available(int irq); void 
acpi_penalize_sci_irq(int irq, int trigger, int polarity); void acpi_pci_irq_disable (struct pci_dev *dev); diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index a23209b43842..1b4d69f68c33 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -116,6 +116,8 @@ struct bdi_writeback { struct list_head work_list; struct delayed_work dwork; /* work item used for writeback */ + struct list_head bdi_node; /* anchored at bdi->wb_list */ + #ifdef CONFIG_CGROUP_WRITEBACK struct percpu_ref refcnt; /* used only for !root wb's */ struct fprop_local_percpu memcg_completions; @@ -150,6 +152,7 @@ struct backing_dev_info { atomic_long_t tot_write_bandwidth; struct bdi_writeback wb; /* the root writeback info for this bdi */ + struct list_head wb_list; /* list of all wbs */ #ifdef CONFIG_CGROUP_WRITEBACK struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ struct rb_root cgwb_congested_tree; /* their congested states */ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index d5eb4ad1c534..c85f74946a8b 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -19,13 +19,17 @@ #include int __must_check bdi_init(struct backing_dev_info *bdi); -void bdi_destroy(struct backing_dev_info *bdi); +void bdi_exit(struct backing_dev_info *bdi); __printf(3, 4) int bdi_register(struct backing_dev_info *bdi, struct device *parent, const char *fmt, ...); int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); +void bdi_unregister(struct backing_dev_info *bdi); + int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); +void bdi_destroy(struct backing_dev_info *bdi); + void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, bool range_cyclic, enum wb_reason reason); void wb_start_background_writeback(struct bdi_writeback *wb); @@ -408,61 +412,6 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) rcu_read_unlock(); } -struct wb_iter { - int start_memcg_id; - struct radix_tree_iter tree_iter; - void **slot; -}; - -static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter, - struct backing_dev_info *bdi) -{ - struct radix_tree_iter *titer = &iter->tree_iter; - - WARN_ON_ONCE(!rcu_read_lock_held()); - - if (iter->start_memcg_id >= 0) { - iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id); - iter->start_memcg_id = -1; - } else { - iter->slot = radix_tree_next_slot(iter->slot, titer, 0); - } - - if (!iter->slot) - iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0); - if (iter->slot) - return *iter->slot; - return NULL; -} - -static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter, - struct backing_dev_info *bdi, - int start_memcg_id) -{ - iter->start_memcg_id = start_memcg_id; - - if (start_memcg_id) - return __wb_iter_next(iter, bdi); - else - return &bdi->wb; -} - -/** - * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order - * @wb_cur: cursor struct bdi_writeback pointer - * @bdi: bdi to walk wb's of - * @iter: pointer to struct wb_iter to be used as iteration buffer - * @start_memcg_id: memcg ID to start iteration from - * - * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending - * memcg ID order starting from @start_memcg_id. @iter is struct wb_iter - * to be used as temp storage during iteration. rcu_read_lock() must be - * held throughout iteration. 
- */ -#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id) \ - for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id); \ - (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi)) - #else /* CONFIG_CGROUP_WRITEBACK */ static inline bool inode_cgwb_enabled(struct inode *inode) @@ -522,14 +471,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg) { } -struct wb_iter { - int next_id; -}; - -#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id) \ - for ((iter)->next_id = (start_blkcg_id); \ - ({ (wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); ) - static inline int inode_congested(struct inode *inode, int cong_bits) { return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 0a5cc7a1109b..c02e669945e9 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -713,9 +713,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, if (!throtl) { blkg = blkg ?: q->root_blkg; - blkg_rwstat_add(&blkg->stat_bytes, bio->bi_flags, + blkg_rwstat_add(&blkg->stat_bytes, bio->bi_rw, bio->bi_iter.bi_size); - blkg_rwstat_add(&blkg->stat_ios, bio->bi_flags, 1); + blkg_rwstat_add(&blkg->stat_ios, bio->bi_rw, 1); } rcu_read_unlock(); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 37d1602c4f7a..5e7d43ab61c0 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -145,7 +145,6 @@ enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_TAG_SHARED = 1 << 1, BLK_MQ_F_SG_MERGE = 1 << 2, - BLK_MQ_F_SYSFS_UP = 1 << 3, BLK_MQ_F_DEFER_ISSUE = 1 << 4, BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, BLK_MQ_F_ALLOC_POLICY_BITS = 1, @@ -215,7 +214,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); void blk_mq_cancel_requeue_work(struct request_queue *q); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_abort_requeue_list(struct request_queue *q); -void blk_mq_complete_request(struct request *rq); +void blk_mq_complete_request(struct request *rq, int error); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx); @@ -224,8 +223,6 @@ void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); -void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, - void *priv); void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn, void *priv); void blk_mq_freeze_queue(struct request_queue *q); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 99da9ebc7377..19c2e947d4d1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -456,6 +456,8 @@ struct request_queue { struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; struct bio_set *bio_split; + + bool mq_sysfs_init_done; }; #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ diff --git a/include/linux/cma.h b/include/linux/cma.h index f7ef093ec49a..29f9e774ab76 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -26,6 +26,6 @@ extern int __init cma_declare_contiguous(phys_addr_t base, extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, struct cma **res_cma); -extern struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align); +extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int 
align); extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count); #endif diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index dfaa7b3e9ae9..8efb40e61d6e 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -237,12 +237,25 @@ #define KASAN_ABI_VERSION 3 #endif +#if GCC_VERSION >= 40902 +/* + * Tell the compiler that address safety instrumentation (KASAN) + * should not be applied to that function. + * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + */ +#define __no_sanitize_address __attribute__((no_sanitize_address)) +#endif + #endif /* gcc version >= 40000 specific checks */ #if !defined(__noclone) #define __noclone /* not needed */ #endif +#if !defined(__no_sanitize_address) +#define __no_sanitize_address +#endif + /* * A trick to suppress uninitialized variable warning without generating any * code diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c836eb2dc44d..3d7810341b57 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -198,19 +198,45 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); #include -static __always_inline void __read_once_size(const volatile void *p, void *res, int size) +#define __READ_ONCE_SIZE \ +({ \ + switch (size) { \ + case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ + case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ + case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ + case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ + default: \ + barrier(); \ + __builtin_memcpy((void *)res, (const void *)p, size); \ + barrier(); \ + } \ +}) + +static __always_inline +void __read_once_size(const volatile void *p, void *res, int size) { - switch (size) { - case 1: *(__u8 *)res = *(volatile __u8 *)p; break; - case 2: *(__u16 *)res = *(volatile __u16 *)p; break; - case 4: *(__u32 *)res = *(volatile __u32 *)p; break; - case 8: *(__u64 *)res = *(volatile __u64 *)p; break; - default: - barrier(); - __builtin_memcpy((void *)res, (const void *)p, size); - barrier(); - } + __READ_ONCE_SIZE; +} + +#ifdef CONFIG_KASAN +/* + * This function is not 'inline' because __no_sanitize_address confilcts + * with inlining. Attempt to inline it may cause a build failure. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * '__maybe_unused' allows us to avoid defined-but-not-used warnings. + */ +static __no_sanitize_address __maybe_unused +void __read_once_size_nocheck(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; +} +#else +static __always_inline +void __read_once_size_nocheck(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; } +#endif static __always_inline void __write_once_size(volatile void *p, void *res, int size) { @@ -248,8 +274,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * required ordering. */ -#define READ_ONCE(x) \ - ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) +#define __READ_ONCE(x, check) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u; \ + if (check) \ + __read_once_size(&(x), __u.__c, sizeof(x)); \ + else \ + __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) +#define READ_ONCE(x) __READ_ONCE(x, 1) + +/* + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need + * to hide memory access from KASAN. 
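
The refactoring above keeps one size-switched body (__READ_ONCE_SIZE) and adds a KASAN-invisible variant of it. A stripped-down userspace rendering of that size dispatch, for illustration only (not the kernel macro, and without the KASAN attribute):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Volatile loads for the common sizes, a memcpy fallback otherwise. */
static inline void read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(uint8_t *)res  = *(const volatile uint8_t *)p;  break;
	case 2: *(uint16_t *)res = *(const volatile uint16_t *)p; break;
	case 4: *(uint32_t *)res = *(const volatile uint32_t *)p; break;
	case 8: *(uint64_t *)res = *(const volatile uint64_t *)p; break;
	default:
		memcpy(res, (const void *)p, size);
	}
}

#define READ_ONCE_DEMO(x) \
	({ __typeof__(x) ___val; \
	   read_once_size(&(x), &___val, sizeof(x)); \
	   ___val; })

int main(void)
{
	uint32_t shared = 42;

	printf("%u\n", READ_ONCE_DEMO(shared));
	return 0;
}
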
+ */ +#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) #define WRITE_ONCE(x, val) \ ({ \ diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h index 569bbd039896..fec734df1524 100644 --- a/include/linux/dma-contiguous.h +++ b/include/linux/dma-contiguous.h @@ -111,7 +111,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size, return ret; } -struct page *dma_alloc_from_contiguous(struct device *dev, int count, +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order); bool dma_release_from_contiguous(struct device *dev, struct page *pages, int count); @@ -144,7 +144,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size, } static inline -struct page *dma_alloc_from_contiguous(struct device *dev, int count, +struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, unsigned int order) { return NULL; diff --git a/include/linux/iova.h b/include/linux/iova.h index 3920a19d8194..92f7177db2ce 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -68,8 +68,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) return iova >> iova_shift(iovad); } -int iommu_iova_cache_init(void); -void iommu_iova_cache_destroy(void); +int iova_cache_get(void); +void iova_cache_put(void); struct iova *alloc_iova_mem(void); void free_iova_mem(struct iova *iova); diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index d3ca79236fb0..f644fdb06dd6 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -161,6 +161,11 @@ enum { IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), }; +static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) +{ + return d->of_node; +} + #ifdef CONFIG_IRQ_DOMAIN struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, irq_hw_number_t hwirq_max, int direct_max, diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ad800e62cb7a..3e3318ddfc0e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -242,7 +242,6 @@ struct mem_cgroup { * percpu counter. 
*/ struct mem_cgroup_stat_cpu __percpu *stat; - spinlock_t pcp_counter_lock; #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) struct cg_proto tcp_mem; @@ -677,8 +676,9 @@ enum { struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg); struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); -void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, - unsigned long *pdirty, unsigned long *pwriteback); +void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, + unsigned long *pheadroom, unsigned long *pdirty, + unsigned long *pwriteback); #else /* CONFIG_CGROUP_WRITEBACK */ @@ -688,7 +688,8 @@ static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) } static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb, - unsigned long *pavail, + unsigned long *pfilepages, + unsigned long *pheadroom, unsigned long *pdirty, unsigned long *pwriteback) { diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8eb3b19af2a4..250b1ff8b48d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -402,17 +402,6 @@ struct mlx5_cmd_teardown_hca_mbox_out { u8 rsvd[8]; }; -struct mlx5_cmd_query_special_contexts_mbox_in { - struct mlx5_inbox_hdr hdr; - u8 rsvd[8]; -}; - -struct mlx5_cmd_query_special_contexts_mbox_out { - struct mlx5_outbox_hdr hdr; - __be32 dump_fill_mkey; - __be32 resd_lkey; -}; - struct mlx5_cmd_layout { u8 type; u8 rsvd0[3]; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 27b53f9a24ad..8b6d6f2154a4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -845,7 +845,6 @@ void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); -int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey); struct mlx5_profile { u64 mask; diff --git a/include/linux/mm.h b/include/linux/mm.h index 91c08f6f0dc9..80001de019ba 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -905,6 +905,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone, #endif } +#ifdef CONFIG_MEMCG +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return page->mem_cgroup; +} + +static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) +{ + page->mem_cgroup = memcg; +} +#else +static inline struct mem_cgroup *page_memcg(struct page *page) +{ + return NULL; +} + +static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg) +{ +} +#endif + /* * Some inline functions in vmstat.h depend on page_zone() */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index ff476515f716..581abf848566 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -230,12 +230,11 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, struct rcu_synchronize *rs_array); #define _wait_rcu_gp(checktiny, ...) 
\ -do { \ - call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ - const int __n = ARRAY_SIZE(__crcu_array); \ - struct rcu_synchronize __rs_array[__n]; \ - \ - __wait_rcu_gp(checktiny, __n, __crcu_array, __rs_array); \ +do { \ + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ + __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ + __crcu_array, __rs_array); \ } while (0) #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2b0a30a6e31c..4398411236f1 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2708,7 +2708,7 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_COMPLETE) skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); else if (skb->ip_summed == CHECKSUM_PARTIAL && - skb_checksum_start_offset(skb) <= len) + skb_checksum_start_offset(skb) < 0) skb->ip_summed = CHECKSUM_NONE; } diff --git a/include/linux/string.h b/include/linux/string.h index a8d90db9c4b0..9ef7795e65e4 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -25,6 +25,9 @@ extern char * strncpy(char *,const char *, __kernel_size_t); #ifndef __HAVE_ARCH_STRLCPY size_t strlcpy(char *, const char *, size_t); #endif +#ifndef __HAVE_ARCH_STRSCPY +ssize_t __must_check strscpy(char *, const char *, size_t); +#endif #ifndef __HAVE_ARCH_STRCAT extern char * strcat(char *, const char *); #endif diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 3dd5a781da99..bfb74723f151 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -157,7 +157,7 @@ struct renesas_usbhs_driver_param { */ int pio_dma_border; /* default is 64byte */ - u32 type; + uintptr_t type; u32 enable_gpio; /* diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 4a167b30a12f..b36d837c701e 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -63,7 +63,11 @@ struct unix_sock { #define UNIX_GC_MAYBE_CYCLE 1 struct socket_wq peer_wq; }; -#define unix_sk(__sk) ((struct unix_sock *)__sk) + +static inline struct unix_sock *unix_sk(const struct sock *sk) +{ + return (struct unix_sock *)sk; +} #define peer_wait peer_wq.wait diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 186f3a1e1b1f..fc1937698625 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -113,12 +113,12 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm); -static void inline inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo) +static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo) { __inet_twsk_schedule(tw, timeo, false); } -static void inline inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo) +static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo) { __inet_twsk_schedule(tw, timeo, true); } diff --git a/include/net/sock.h b/include/net/sock.h index 7aa78440559a..e23717013a4e 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -828,6 +828,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s if (sk_rcvqueues_full(sk, limit)) return -ENOBUFS; + /* + * If the skb was allocated from pfmemalloc reserves, only + * allow SOCK_MEMALLOC sockets to use it as this socket is + * helping free memory + */ + if 
(skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) + return -ENOMEM; + __sk_add_backlog(sk, skb); sk->sk_backlog.len += skb->truesize; return 0; diff --git a/include/sound/soc.h b/include/sound/soc.h index 884e728b09d9..26ede14597da 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -86,7 +86,7 @@ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ SNDRV_CTL_ELEM_ACCESS_READWRITE, \ .tlv.p = (tlv_array),\ - .info = snd_soc_info_volsw, \ + .info = snd_soc_info_volsw_sx, \ .get = snd_soc_get_volsw_sx,\ .put = snd_soc_put_volsw_sx, \ .private_value = (unsigned long)&(struct soc_mixer_control) \ @@ -156,7 +156,7 @@ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ SNDRV_CTL_ELEM_ACCESS_READWRITE, \ .tlv.p = (tlv_array), \ - .info = snd_soc_info_volsw, \ + .info = snd_soc_info_volsw_sx, \ .get = snd_soc_get_volsw_sx, \ .put = snd_soc_put_volsw_sx, \ .private_value = (unsigned long)&(struct soc_mixer_control) \ @@ -574,6 +574,8 @@ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); int snd_soc_info_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo); +int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo); #define snd_soc_info_bool_ext snd_ctl_boolean_mono_info int snd_soc_get_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol); diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h index 898be3a8db9a..6d8f8fba3341 100644 --- a/include/sound/wm8904.h +++ b/include/sound/wm8904.h @@ -119,7 +119,7 @@ #define WM8904_MIC_REGS 2 #define WM8904_GPIO_REGS 4 #define WM8904_DRC_REGS 4 -#define WM8904_EQ_REGS 25 +#define WM8904_EQ_REGS 24 /** * DRC configurations are specified with a label and a set of register diff --git a/include/uapi/asm-generic/signal.h b/include/uapi/asm-generic/signal.h index 9df61f1edb0f..3094618d382f 100644 --- a/include/uapi/asm-generic/signal.h +++ b/include/uapi/asm-generic/signal.h @@ -80,8 +80,10 @@ * SA_RESTORER 0x04000000 */ +#if !defined MINSIGSTKSZ || !defined SIGSTKSZ #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 +#endif #ifndef __ASSEMBLY__ typedef struct { diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 32e07d8cbaf4..036f73bc54cd 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -323,10 +323,10 @@ enum ovs_key_attr { OVS_KEY_ATTR_MPLS, /* array of struct ovs_key_mpls. * The implementation may restrict * the accepted length of the array. */ - OVS_KEY_ATTR_CT_STATE, /* u8 bitmask of OVS_CS_F_* */ + OVS_KEY_ATTR_CT_STATE, /* u32 bitmask of OVS_CS_F_* */ OVS_KEY_ATTR_CT_ZONE, /* u16 connection tracking zone. */ OVS_KEY_ATTR_CT_MARK, /* u32 connection tracking mark */ - OVS_KEY_ATTR_CT_LABEL, /* 16-octet connection tracking label */ + OVS_KEY_ATTR_CT_LABELS, /* 16-octet connection tracking label */ #ifdef __KERNEL__ OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */ @@ -439,9 +439,9 @@ struct ovs_key_nd { __u8 nd_tll[ETH_ALEN]; }; -#define OVS_CT_LABEL_LEN 16 -struct ovs_key_ct_label { - __u8 ct_label[OVS_CT_LABEL_LEN]; +#define OVS_CT_LABELS_LEN 16 +struct ovs_key_ct_labels { + __u8 ct_labels[OVS_CT_LABELS_LEN]; }; /* OVS_KEY_ATTR_CT_STATE flags */ @@ -449,9 +449,9 @@ struct ovs_key_ct_label { #define OVS_CS_F_ESTABLISHED 0x02 /* Part of an existing connection. */ #define OVS_CS_F_RELATED 0x04 /* Related to an established * connection. */ -#define OVS_CS_F_INVALID 0x20 /* Could not track connection. 
*/ -#define OVS_CS_F_REPLY_DIR 0x40 /* Flow is in the reply direction. */ -#define OVS_CS_F_TRACKED 0x80 /* Conntrack has occurred. */ +#define OVS_CS_F_REPLY_DIR 0x08 /* Flow is in the reply direction. */ +#define OVS_CS_F_INVALID 0x10 /* Could not track connection. */ +#define OVS_CS_F_TRACKED 0x20 /* Conntrack has occurred. */ /** * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands. @@ -618,22 +618,24 @@ struct ovs_action_hash { /** * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action. - * @OVS_CT_ATTR_FLAGS: u32 connection tracking flags. + * @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack + * table. This allows future packets for the same connection to be identified + * as 'established' or 'related'. * @OVS_CT_ATTR_ZONE: u16 connection tracking zone. * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the * mask, the corresponding bit in the value is copied to the connection * tracking mark field in the connection. - * @OVS_CT_ATTR_LABEL: %OVS_CT_LABEL_LEN value followed by %OVS_CT_LABEL_LEN + * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN * mask. For each bit set in the mask, the corresponding bit in the value is * copied to the connection tracking label field in the connection. * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. */ enum ovs_ct_attr { OVS_CT_ATTR_UNSPEC, - OVS_CT_ATTR_FLAGS, /* u8 bitmask of OVS_CT_F_*. */ + OVS_CT_ATTR_COMMIT, /* No argument, commits connection. */ OVS_CT_ATTR_ZONE, /* u16 zone id. */ OVS_CT_ATTR_MARK, /* mark to associate with this connection. */ - OVS_CT_ATTR_LABEL, /* label to associate with this connection. */ + OVS_CT_ATTR_LABELS, /* labels to associate with this connection. */ OVS_CT_ATTR_HELPER, /* netlink helper to assist detection of related connections. */ __OVS_CT_ATTR_MAX @@ -641,14 +643,6 @@ enum ovs_ct_attr { #define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1) -/* - * OVS_CT_ATTR_FLAGS flags - bitmask of %OVS_CT_F_* - * @OVS_CT_F_COMMIT: Commits the flow to the conntrack table. This allows - * future packets for the same connection to be identified as 'established' - * or 'related'. - */ -#define OVS_CT_F_COMMIT 0x01 - /** * enum ovs_action_attr - Action types. * @@ -705,7 +699,7 @@ enum ovs_action_attr { * data immediately followed by a mask. * The data must be zero for the unmasked * bits. */ - OVS_ACTION_ATTR_CT, /* One nested OVS_CT_ATTR_* . */ + OVS_ACTION_ATTR_CT, /* Nested OVS_CT_ATTR_* . */ __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted * from userspace. 
*/ diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 702024769c74..9d8f5d10c1e5 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -160,7 +160,7 @@ struct rtattr { /* Macros to handle rtattributes */ -#define RTA_ALIGNTO 4 +#define RTA_ALIGNTO 4U #define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) ) #define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \ (rta)->rta_len >= sizeof(struct rtattr) && \ diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index df0e09bb7dd5..9057d7af3ae1 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -11,8 +11,6 @@ #include -#include - #define UFFD_API ((__u64)0xAA) /* * After implementing the respective features it will become: diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h index 9ce083960a25..f18490985fc8 100644 --- a/include/xen/interface/sched.h +++ b/include/xen/interface/sched.h @@ -107,5 +107,13 @@ struct sched_watchdog { #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */ +/* + * Domain asked to perform 'soft reset' for it. The expected behavior is to + * reset internal Xen state for the domain returning it to the point where it + * was created but leaving the domain's memory contents and vCPU contexts + * intact. This will allow the domain to start over and set up all Xen specific + * interfaces again. + */ +#define SHUTDOWN_soft_reset 5 #endif /* __XEN_PUBLIC_SCHED_H__ */ diff --git a/ipc/msg.c b/ipc/msg.c index 66c4f567eb73..1471db9a7e61 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) return retval; } - /* ipc_addid() locks msq upon success. */ - id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); - if (id < 0) { - ipc_rcu_putref(msq, msg_rcu_free); - return id; - } - msq->q_stime = msq->q_rtime = 0; msq->q_ctime = get_seconds(); msq->q_cbytes = msq->q_qnum = 0; @@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) INIT_LIST_HEAD(&msq->q_receivers); INIT_LIST_HEAD(&msq->q_senders); + /* ipc_addid() locks msq upon success. 
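
The ipc/msg.c and ipc/shm.c hunks here move ipc_addid() after the field initialisation so the object is never reachable through the IDR while half-initialised. A minimal userspace sketch of the same publish-last discipline, with an atomic release store standing in for ipc_addid(); the names are illustrative.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct msg_queue_demo {
	long stime, rtime, ctime;
	unsigned long qbytes;
};

/* Readers find the object only through this pointer. */
static _Atomic(struct msg_queue_demo *) registry;

static void publish(struct msg_queue_demo *q)
{
	/* Fill in every field *before* the object becomes reachable... */
	q->stime = q->rtime = 0;
	q->ctime = 12345;
	q->qbytes = 16384;

	/* ...then publish it with release semantics, the analogue of the
	 * ipc_addid() call that now comes last in the hunk above. */
	atomic_store_explicit(&registry, q, memory_order_release);
}

int main(void)
{
	struct msg_queue_demo *q = calloc(1, sizeof(*q));
	struct msg_queue_demo *seen;

	if (!q)
		return 1;
	publish(q);

	seen = atomic_load_explicit(&registry, memory_order_acquire);
	printf("qbytes=%lu\n", seen->qbytes);
	return 0;
}
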
*/ + id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); + if (id < 0) { + ipc_rcu_putref(msq, msg_rcu_free); + return id; + } + ipc_unlock_object(&msq->q_perm); rcu_read_unlock(); diff --git a/ipc/shm.c b/ipc/shm.c index 222131e8e38f..41787276e141 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -551,12 +551,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) if (IS_ERR(file)) goto no_file; - id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); - if (id < 0) { - error = id; - goto no_id; - } - shp->shm_cprid = task_tgid_vnr(current); shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; @@ -565,6 +559,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) shp->shm_nattch = 0; shp->shm_file = file; shp->shm_creator = current; + + id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); + if (id < 0) { + error = id; + goto no_id; + } + list_add(&shp->shm_clist, ¤t->sysvshm.shm_clist); /* diff --git a/ipc/util.c b/ipc/util.c index be4230020a1f..0f401d94b7c6 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) rcu_read_lock(); spin_lock(&new->lock); + current_euid_egid(&euid, &egid); + new->cuid = new->uid = euid; + new->gid = new->cgid = egid; + id = idr_alloc(&ids->ipcs_idr, new, (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0, GFP_NOWAIT); @@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) ids->in_use++; - current_euid_egid(&euid, &egid); - new->cuid = new->uid = euid; - new->gid = new->cgid = egid; - if (next_id < 0) { new->seq = ids->seq++; if (ids->seq > IPCID_SEQ_MAX) diff --git a/kernel/events/core.c b/kernel/events/core.c index f548f69c4299..b11756f9b6dc 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1243,11 +1243,7 @@ static inline void perf_event__state_init(struct perf_event *event) PERF_EVENT_STATE_INACTIVE; } -/* - * Called at perf_event creation and when events are attached/detached from a - * group. - */ -static void perf_event__read_size(struct perf_event *event) +static void __perf_event_read_size(struct perf_event *event, int nr_siblings) { int entry = sizeof(u64); /* value */ int size = 0; @@ -1263,7 +1259,7 @@ static void perf_event__read_size(struct perf_event *event) entry += sizeof(u64); if (event->attr.read_format & PERF_FORMAT_GROUP) { - nr += event->group_leader->nr_siblings; + nr += nr_siblings; size += sizeof(u64); } @@ -1271,14 +1267,11 @@ static void perf_event__read_size(struct perf_event *event) event->read_size = size; } -static void perf_event__header_size(struct perf_event *event) +static void __perf_event_header_size(struct perf_event *event, u64 sample_type) { struct perf_sample_data *data; - u64 sample_type = event->attr.sample_type; u16 size = 0; - perf_event__read_size(event); - if (sample_type & PERF_SAMPLE_IP) size += sizeof(data->ip); @@ -1303,6 +1296,17 @@ static void perf_event__header_size(struct perf_event *event) event->header_size = size; } +/* + * Called at perf_event creation and when events are attached/detached from a + * group. 
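
__perf_event_read_size() sizes the record that read() returns for a perf event, now computed from an explicit sibling count. The sketch below redoes that arithmetic in userspace against the PERF_FORMAT_* layout from the uapi header; it only mirrors the calculation shown in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Values mirror include/uapi/linux/perf_event.h. */
#define PERF_FORMAT_TOTAL_TIME_ENABLED (1U << 0)
#define PERF_FORMAT_TOTAL_TIME_RUNNING (1U << 1)
#define PERF_FORMAT_ID                 (1U << 2)
#define PERF_FORMAT_GROUP              (1U << 3)

/* One u64 value (plus optional id) per counted event, plus the optional
 * enabled/running times, plus a count field for group reads. */
static int read_size(uint64_t read_format, int nr_siblings)
{
	int entry = sizeof(uint64_t);		/* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(uint64_t);	/* nr field */
	}
	return size + entry * nr;
}

int main(void)
{
	uint64_t fmt = PERF_FORMAT_GROUP | PERF_FORMAT_ID |
		       PERF_FORMAT_TOTAL_TIME_ENABLED;

	/* 8 (time_enabled) + 8 (nr) + 4 entries of 16 bytes = 80 */
	printf("read() buffer for a leader with 3 siblings: %d bytes\n",
	       read_size(fmt, 3));
	return 0;
}
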
+ */ +static void perf_event__header_size(struct perf_event *event) +{ + __perf_event_read_size(event, + event->group_leader->nr_siblings); + __perf_event_header_size(event, event->attr.sample_type); +} + static void perf_event__id_header_size(struct perf_event *event) { struct perf_sample_data *data; @@ -1330,6 +1334,27 @@ static void perf_event__id_header_size(struct perf_event *event) event->id_header_size = size; } +static bool perf_event_validate_size(struct perf_event *event) +{ + /* + * The values computed here will be over-written when we actually + * attach the event. + */ + __perf_event_read_size(event, event->group_leader->nr_siblings + 1); + __perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ); + perf_event__id_header_size(event); + + /* + * Sum the lot; should not exceed the 64k limit we have on records. + * Conservative limit to allow for callchains and other variable fields. + */ + if (event->read_size + event->header_size + + event->id_header_size + sizeof(struct perf_event_header) >= 16*1024) + return false; + + return true; +} + static void perf_group_attach(struct perf_event *event) { struct perf_event *group_leader = event->group_leader, *pos; @@ -8297,13 +8322,35 @@ SYSCALL_DEFINE5(perf_event_open, if (move_group) { gctx = group_leader->ctx; + mutex_lock_double(&gctx->mutex, &ctx->mutex); + } else { + mutex_lock(&ctx->mutex); + } + if (!perf_event_validate_size(event)) { + err = -E2BIG; + goto err_locked; + } + + /* + * Must be under the same ctx::mutex as perf_install_in_context(), + * because we need to serialize with concurrent event creation. + */ + if (!exclusive_event_installable(event, ctx)) { + /* exclusive and group stuff are assumed mutually exclusive */ + WARN_ON_ONCE(move_group); + + err = -EBUSY; + goto err_locked; + } + + WARN_ON_ONCE(ctx->parent_ctx); + + if (move_group) { /* * See perf_event_ctx_lock() for comments on the details * of swizzling perf_event::ctx. */ - mutex_lock_double(&gctx->mutex, &ctx->mutex); - perf_remove_from_context(group_leader, false); list_for_each_entry(sibling, &group_leader->sibling_list, @@ -8311,13 +8358,7 @@ SYSCALL_DEFINE5(perf_event_open, perf_remove_from_context(sibling, false); put_ctx(gctx); } - } else { - mutex_lock(&ctx->mutex); - } - WARN_ON_ONCE(ctx->parent_ctx); - - if (move_group) { /* * Wait for everybody to stop referencing the events through * the old lists, before installing it on new lists. @@ -8349,22 +8390,29 @@ SYSCALL_DEFINE5(perf_event_open, perf_event__state_init(group_leader); perf_install_in_context(ctx, group_leader, group_leader->cpu); get_ctx(ctx); - } - if (!exclusive_event_installable(event, ctx)) { - err = -EBUSY; - mutex_unlock(&ctx->mutex); - fput(event_file); - goto err_context; + /* + * Now that all events are installed in @ctx, nothing + * references @gctx anymore, so drop the last reference we have + * on it. + */ + put_ctx(gctx); } + /* + * Precalculate sample_data sizes; do while holding ctx::mutex such + * that we're serialized against further additions and before + * perf_install_in_context() which is the point the event is active and + * can use these values. 
+ */ + perf_event__header_size(event); + perf_event__id_header_size(event); + perf_install_in_context(ctx, event, event->cpu); perf_unpin_context(ctx); - if (move_group) { + if (move_group) mutex_unlock(&gctx->mutex); - put_ctx(gctx); - } mutex_unlock(&ctx->mutex); put_online_cpus(); @@ -8375,12 +8423,6 @@ SYSCALL_DEFINE5(perf_event_open, list_add_tail(&event->owner_entry, ¤t->perf_event_list); mutex_unlock(¤t->perf_event_mutex); - /* - * Precalculate sample_data sizes - */ - perf_event__header_size(event); - perf_event__id_header_size(event); - /* * Drop the reference on the group_event after placing the * new event on the sibling_list. This ensures destruction @@ -8391,6 +8433,12 @@ SYSCALL_DEFINE5(perf_event_open, fd_install(event_fd, event_file); return event_fd; +err_locked: + if (move_group) + mutex_unlock(&gctx->mutex); + mutex_unlock(&ctx->mutex); +/* err_file: */ + fput(event_file); err_context: perf_unpin_context(ctx); put_ctx(ctx); diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index de41a68fc038..e25a83b67cce 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -22,7 +22,6 @@ /** * handle_bad_irq - handle spurious and unhandled irqs - * @irq: the interrupt number * @desc: description of the interrupt * * Handles spurious and unhandled IRQ's. It also prints a debugmessage. @@ -35,6 +34,7 @@ void handle_bad_irq(struct irq_desc *desc) kstat_incr_irqs_this_cpu(desc); ack_bad_irq(irq); } +EXPORT_SYMBOL_GPL(handle_bad_irq); /* * Special, empty irq handler: diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index 7e6512b9dc1f..be9149f62eb8 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -228,11 +228,7 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info) { struct irq_chip *chip = info->chip; - BUG_ON(!chip); - if (!chip->irq_mask) - chip->irq_mask = pci_msi_mask_irq; - if (!chip->irq_unmask) - chip->irq_unmask = pci_msi_unmask_irq; + BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask); if (!chip->irq_set_affinity) chip->irq_set_affinity = msi_domain_set_affinity; } diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index e3a8c9577ba6..a50ddc9417ff 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "internals.h" @@ -323,18 +324,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action) void register_irq_proc(unsigned int irq, struct irq_desc *desc) { + static DEFINE_MUTEX(register_lock); char name [MAX_NAMELEN]; - if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) + if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip)) return; + /* + * irq directories are registered only when a handler is + * added, not when the descriptor is created, so multiple + * tasks might try to register at the same time. 
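
The register_irq_proc() change closes a check-then-create race by redoing the check under a function-local mutex. A userspace analogue of that pattern with pthreads, using mkdir() in place of proc_mkdir(); the names are hypothetical.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

static bool registered;			/* plays the role of desc->dir */

static void register_once(const char *dir)
{
	static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&register_lock);
	if (registered)
		goto out_unlock;	/* somebody else won the race */

	if (mkdir(dir, 0755) == 0 || errno == EEXIST)
		registered = true;
out_unlock:
	pthread_mutex_unlock(&register_lock);
}

int main(void)
{
	register_once("/tmp/irq-demo");
	register_once("/tmp/irq-demo");	/* second call is a no-op */
	printf("registered=%d\n", registered);
	return 0;
}
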
+ */ + mutex_lock(®ister_lock); + + if (desc->dir) + goto out_unlock; + memset(name, 0, MAX_NAMELEN); sprintf(name, "%d", irq); /* create /proc/irq/1234 */ desc->dir = proc_mkdir(name, root_irq_dir); if (!desc->dir) - return; + goto out_unlock; #ifdef CONFIG_SMP /* create /proc/irq//smp_affinity */ @@ -355,6 +367,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) proc_create_data("spurious", 0444, desc->dir, &irq_spurious_proc_fops, (void *)(long)irq); + +out_unlock: + mutex_unlock(®ister_lock); } void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) diff --git a/kernel/kmod.c b/kernel/kmod.c index da98d0593de2..0277d1216f80 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -327,9 +327,13 @@ static void call_usermodehelper_exec_work(struct work_struct *work) call_usermodehelper_exec_sync(sub_info); } else { pid_t pid; - + /* + * Use CLONE_PARENT to reparent it to kthreadd; we do not + * want to pollute current->children, and we need a parent + * that always ignores SIGCHLD to ensure auto-reaping. + */ pid = kernel_thread(call_usermodehelper_exec_async, sub_info, - SIGCHLD); + CLONE_PARENT | SIGCHLD); if (pid < 0) { sub_info->retval = pid; umh_complete(sub_info); diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 8acfbf773e06..4e49cc4c9952 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -3068,7 +3068,7 @@ static int __lock_is_held(struct lockdep_map *lock); static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, int trylock, int read, int check, int hardirqs_off, struct lockdep_map *nest_lock, unsigned long ip, - int references) + int references, int pin_count) { struct task_struct *curr = current; struct lock_class *class = NULL; @@ -3157,7 +3157,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, hlock->waittime_stamp = 0; hlock->holdtime_stamp = lockstat_clock(); #endif - hlock->pin_count = 0; + hlock->pin_count = pin_count; if (check && !mark_irqflags(curr, hlock)) return 0; @@ -3343,7 +3343,7 @@ found_it: hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->nest_lock, hlock->acquire_ip, - hlock->references)) + hlock->references, hlock->pin_count)) return 0; } @@ -3433,7 +3433,7 @@ found_it: hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->nest_lock, hlock->acquire_ip, - hlock->references)) + hlock->references, hlock->pin_count)) return 0; } @@ -3583,7 +3583,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, current->lockdep_recursion = 1; trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); __lock_acquire(lock, subclass, trylock, read, check, - irqs_disabled_flags(flags), nest_lock, ip, 0); + irqs_disabled_flags(flags), nest_lock, ip, 0, 0); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } diff --git a/kernel/module.c b/kernel/module.c index b86b7bf1be38..8f051a106676 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1063,11 +1063,15 @@ void symbol_put_addr(void *addr) if (core_kernel_text(a)) return; - /* module_text_address is safe here: we're supposed to have reference - * to module from symbol_get, so it can't go away. */ + /* + * Even though we hold a reference on the module; we still need to + * disable preemption in order to safely traverse the data structure. 
+ */ + preempt_disable(); modaddr = __module_text_address(a); BUG_ON(!modaddr); module_put(modaddr); + preempt_enable(); } EXPORT_SYMBOL_GPL(symbol_put_addr); diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 9f75f25cc5d9..775d36cc0050 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3868,6 +3868,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf) static void __init rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) { + static struct lock_class_key rcu_exp_sched_rdp_class; unsigned long flags; struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); struct rcu_node *rnp = rcu_get_root(rsp); @@ -3883,6 +3884,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) mutex_init(&rdp->exp_funnel_mutex); rcu_boot_init_nocb_percpu_data(rdp); raw_spin_unlock_irqrestore(&rnp->lock, flags); + if (rsp == &rcu_sched_state) + lockdep_set_class_and_name(&rdp->exp_funnel_mutex, + &rcu_exp_sched_rdp_class, + "rcu_data_exp_sched"); } /* diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 2f9c92884817..bcd214e4b4d6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2366,8 +2366,15 @@ void wake_up_new_task(struct task_struct *p) trace_sched_wakeup_new(p); check_preempt_curr(rq, p, WF_FORK); #ifdef CONFIG_SMP - if (p->sched_class->task_woken) + if (p->sched_class->task_woken) { + /* + * Nothing relies on rq->lock after this, so its fine to + * drop it. + */ + lockdep_unpin_lock(&rq->lock); p->sched_class->task_woken(rq, p); + lockdep_pin_lock(&rq->lock); + } #endif task_rq_unlock(rq, p, &flags); } @@ -2517,11 +2524,11 @@ static struct rq *finish_task_switch(struct task_struct *prev) * If a task dies, then it sets TASK_DEAD in tsk->state and calls * schedule one last time. The schedule call will never return, and * the scheduled task must drop that reference. - * The test for TASK_DEAD must occur while the runqueue locks are - * still held, otherwise prev could be scheduled on another cpu, die - * there before we look at prev->state, and then the reference would - * be dropped twice. - * Manfred Spraul + * + * We must observe prev->state before clearing prev->on_cpu (in + * finish_lock_switch), otherwise a concurrent wakeup can get prev + * running on another CPU and we could rave with its RUNNING -> DEAD + * transition, resulting in a double drop. */ prev_state = prev->state; vtime_task_switch(prev); @@ -4934,7 +4941,15 @@ void init_idle(struct task_struct *idle, int cpu) idle->state = TASK_RUNNING; idle->se.exec_start = sched_clock(); - do_set_cpus_allowed(idle, cpumask_of(cpu)); +#ifdef CONFIG_SMP + /* + * Its possible that init_idle() gets called multiple times on a task, + * in that case do_set_cpus_allowed() will not do the right thing. + * + * And since this is boot we can forgo the serialization. 
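
init_idle() pins the idle task to its CPU; the change only switches to the lock-free helper because nothing can race during boot. For comparison, the equivalent pinning operation from userspace via glibc's affinity API, offered as an analogy rather than the scheduler path:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Restrict the calling thread to CPU 0, much as init_idle() restricts
 * the idle task to its own CPU. */
int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	if (pthread_setaffinity_np(pthread_self(), sizeof(set), &set) != 0) {
		fprintf(stderr, "pthread_setaffinity_np failed\n");
		return 1;
	}
	printf("now restricted to CPU 0\n");
	return 0;
}
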
+ */ + set_cpus_allowed_common(idle, cpumask_of(cpu)); +#endif /* * We're having a chicken and egg problem, even though we are * holding rq->lock, the cpu isn't yet set to this cpu so the @@ -4951,7 +4966,7 @@ void init_idle(struct task_struct *idle, int cpu) rq->curr = rq->idle = idle; idle->on_rq = TASK_ON_RQ_QUEUED; -#if defined(CONFIG_SMP) +#ifdef CONFIG_SMP idle->on_cpu = 1; #endif raw_spin_unlock(&rq->lock); @@ -4966,7 +4981,7 @@ void init_idle(struct task_struct *idle, int cpu) idle->sched_class = &idle_sched_class; ftrace_graph_init_idle_task(idle, cpu); vtime_init_idle(idle, cpu); -#if defined(CONFIG_SMP) +#ifdef CONFIG_SMP sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); #endif } @@ -7230,9 +7245,6 @@ void __init sched_init_smp(void) alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); alloc_cpumask_var(&fallback_doms, GFP_KERNEL); - /* nohz_full won't take effect without isolating the cpus. */ - tick_nohz_full_add_cpus_to(cpu_isolated_map); - sched_init_numa(); /* diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index fc8f01083527..8b0a15e285f9 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -668,8 +668,15 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) * Queueing this task back might have overloaded rq, check if we need * to kick someone away. */ - if (has_pushable_dl_tasks(rq)) + if (has_pushable_dl_tasks(rq)) { + /* + * Nothing relies on rq->lock after this, so its safe to drop + * rq->lock. + */ + lockdep_unpin_lock(&rq->lock); push_dl_task(rq); + lockdep_pin_lock(&rq->lock); + } #endif unlock: @@ -1066,8 +1073,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags) int target = find_later_rq(p); if (target != -1 && - dl_time_before(p->dl.deadline, - cpu_rq(target)->dl.earliest_dl.curr)) + (dl_time_before(p->dl.deadline, + cpu_rq(target)->dl.earliest_dl.curr) || + (cpu_rq(target)->dl.dl_nr_running == 0))) cpu = target; } rcu_read_unlock(); @@ -1417,7 +1425,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) later_rq = cpu_rq(cpu); - if (!dl_time_before(task->dl.deadline, + if (later_rq->dl.dl_nr_running && + !dl_time_before(task->dl.deadline, later_rq->dl.earliest_dl.curr)) { /* * Target rq has tasks of equal or earlier deadline, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6e2e3483b1ec..9a5e60fe721a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -2363,7 +2363,7 @@ static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq) */ tg_weight = atomic_long_read(&tg->load_avg); tg_weight -= cfs_rq->tg_load_avg_contrib; - tg_weight += cfs_rq_load_avg(cfs_rq); + tg_weight += cfs_rq->load.weight; return tg_weight; } @@ -2373,7 +2373,7 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) long tg_weight, load, shares; tg_weight = calc_tg_weight(tg, cfs_rq); - load = cfs_rq_load_avg(cfs_rq); + load = cfs_rq->load.weight; shares = (tg->shares * load); if (tg_weight) @@ -2664,13 +2664,14 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq); /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) { - int decayed; struct sched_avg *sa = &cfs_rq->avg; + int decayed, removed = 0; if (atomic_long_read(&cfs_rq->removed_load_avg)) { long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); sa->load_avg = max_t(long, sa->load_avg - r, 0); sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0); + removed = 1; } 
if (atomic_long_read(&cfs_rq->removed_util_avg)) { @@ -2688,7 +2689,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) cfs_rq->load_last_update_time_copy = sa->last_update_time; #endif - return decayed; + return decayed || removed; } /* Update task and its cfs_rq load average */ diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 8f177c73ae19..4a2ef5a02fd3 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -57,9 +57,11 @@ static inline int cpu_idle_poll(void) rcu_idle_enter(); trace_cpu_idle_rcuidle(0, smp_processor_id()); local_irq_enable(); + stop_critical_timings(); while (!tif_need_resched() && (cpu_idle_force_poll || tick_check_broadcast_expired())) cpu_relax(); + start_critical_timings(); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); rcu_idle_exit(); return 1; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 68cda117574c..6d2a119c7ad9 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1078,9 +1078,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) * After ->on_cpu is cleared, the task can be moved to a different CPU. * We must ensure this doesn't happen until the switch is completely * finished. + * + * Pairs with the control dependency and rmb in try_to_wake_up(). */ - smp_wmb(); - prev->on_cpu = 0; + smp_store_release(&prev->on_cpu, 0); #endif #ifdef CONFIG_DEBUG_SPINLOCK /* this is a valid case when another task releases the spinlock */ diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 841b72f720e8..3a38775b50c2 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -217,7 +217,7 @@ static void clocksource_watchdog(unsigned long data) continue; /* Check the deviation from the watchdog clocksource. */ - if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) { + if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n", cs->name); pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n", diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 3739ac6aa473..44d2cc0436f4 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1251,7 +1251,7 @@ void __init timekeeping_init(void) set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec); tk_set_wall_to_mono(tk, tmp); - timekeeping_update(tk, TK_MIRROR); + timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); write_seqcount_end(&tk_core.seq); raw_spin_unlock_irqrestore(&timekeeper_lock, flags); diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index b746399ab59c..8abf1ba18085 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -85,9 +85,19 @@ check_stack(unsigned long ip, unsigned long *stack) if (!object_is_on_stack(stack)) return; + /* Can't do this from NMI context (can cause deadlocks) */ + if (in_nmi()) + return; + local_irq_save(flags); arch_spin_lock(&max_stack_lock); + /* + * RCU may not be watching, make it see us. + * The stack trace code uses rcu_sched. 
+ */ + rcu_irq_enter(); + /* In case another CPU set the tracer_frame on us */ if (unlikely(!frame_size)) this_size -= tracer_frame; @@ -169,6 +179,7 @@ check_stack(unsigned long ip, unsigned long *stack) } out: + rcu_irq_exit(); arch_spin_unlock(&max_stack_lock); local_irq_restore(flags); } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ca71582fcfab..bcb14cafe007 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1458,13 +1458,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, timer_stats_timer_set_start_info(&dwork->timer); dwork->wq = wq; + /* timer isn't guaranteed to run in this cpu, record earlier */ + if (cpu == WORK_CPU_UNBOUND) + cpu = raw_smp_processor_id(); dwork->cpu = cpu; timer->expires = jiffies + delay; - if (unlikely(cpu != WORK_CPU_UNBOUND)) - add_timer_on(timer, cpu); - else - add_timer(timer); + add_timer_on(timer, cpu); } /** diff --git a/lib/Kconfig b/lib/Kconfig index 2e491ac15622..f0df318104e7 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -220,6 +220,7 @@ config ZLIB_INFLATE config ZLIB_DEFLATE tristate + select BITREVERSE config LZO_COMPRESS tristate diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ab76b99adc85..1d1521c26302 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -197,6 +197,7 @@ config ENABLE_MUST_CHECK config FRAME_WARN int "Warn for stack frames larger than (needs gcc 4.4)" range 0 8192 + default 0 if KASAN default 1024 if !64BIT default 2048 if 64BIT help diff --git a/lib/fault-inject.c b/lib/fault-inject.c index f1cdeb024d17..6a823a53e357 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -44,7 +44,7 @@ static void fail_dump(struct fault_attr *attr) printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n" "name %pd, interval %lu, probability %lu, " "space %d, times %d\n", attr->dname, - attr->probability, attr->interval, + attr->interval, attr->probability, atomic_read(&attr->space), atomic_read(&attr->times)); if (attr->verbose > 1) diff --git a/lib/string.c b/lib/string.c index 13d1e84ddb80..84775ba873b9 100644 --- a/lib/string.c +++ b/lib/string.c @@ -27,6 +27,10 @@ #include #include +#include +#include +#include + #ifndef __HAVE_ARCH_STRNCASECMP /** * strncasecmp - Case insensitive, length-limited string comparison @@ -146,6 +150,91 @@ size_t strlcpy(char *dest, const char *src, size_t size) EXPORT_SYMBOL(strlcpy); #endif +#ifndef __HAVE_ARCH_STRSCPY +/** + * strscpy - Copy a C-string into a sized buffer + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @count: Size of destination buffer + * + * Copy the string, or as much of it as fits, into the dest buffer. + * The routine returns the number of characters copied (not including + * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough. + * The behavior is undefined if the string buffers overlap. + * The destination buffer is always NUL terminated, unless it's zero-sized. + * + * Preferred to strlcpy() since the API doesn't require reading memory + * from the src string beyond the specified "count" bytes, and since + * the return value is easier to error-check than strlcpy()'s. + * In addition, the implementation is robust to the string changing out + * from underneath it, unlike the current strlcpy() implementation. + * + * Preferred to strncpy() since it always returns a valid string, and + * doesn't unnecessarily force the tail of the destination buffer to be + * zeroed. 
If the zeroing is desired, it's likely cleaner to use strscpy() + * with an overflow test, then just memset() the tail of the dest buffer. + */ +ssize_t strscpy(char *dest, const char *src, size_t count) +{ + const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; + size_t max = count; + long res = 0; + + if (count == 0) + return -E2BIG; + +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + /* + * If src is unaligned, don't cross a page boundary, + * since we don't know if the next page is mapped. + */ + if ((long)src & (sizeof(long) - 1)) { + size_t limit = PAGE_SIZE - ((long)src & (PAGE_SIZE - 1)); + if (limit < max) + max = limit; + } +#else + /* If src or dest is unaligned, don't do word-at-a-time. */ + if (((long) dest | (long) src) & (sizeof(long) - 1)) + max = 0; +#endif + + while (max >= sizeof(unsigned long)) { + unsigned long c, data; + + c = *(unsigned long *)(src+res); + if (has_zero(c, &data, &constants)) { + data = prep_zero_mask(c, data, &constants); + data = create_zero_mask(data); + *(unsigned long *)(dest+res) = c & zero_bytemask(data); + return res + find_zero(data); + } + *(unsigned long *)(dest+res) = c; + res += sizeof(unsigned long); + count -= sizeof(unsigned long); + max -= sizeof(unsigned long); + } + + while (count) { + char c; + + c = src[res]; + dest[res] = c; + if (!c) + return res; + res++; + count--; + } + + /* Hit buffer length without finding a NUL; force NUL-termination. */ + if (res) + dest[res-1] = '\0'; + + return -E2BIG; +} +EXPORT_SYMBOL(strscpy); +#endif + #ifndef __HAVE_ARCH_STRCAT /** * strcat - Append one %NUL-terminated string to another diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 2df8ddcb0ca0..619984fc07ec 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -480,6 +480,10 @@ static void cgwb_release_workfn(struct work_struct *work) release_work); struct backing_dev_info *bdi = wb->bdi; + spin_lock_irq(&cgwb_lock); + list_del_rcu(&wb->bdi_node); + spin_unlock_irq(&cgwb_lock); + wb_shutdown(wb); css_put(wb->memcg_css); @@ -575,6 +579,7 @@ static int cgwb_create(struct backing_dev_info *bdi, ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb); if (!ret) { atomic_inc(&bdi->usage_cnt); + list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list); list_add(&wb->memcg_node, memcg_cgwb_list); list_add(&wb->blkcg_node, blkcg_cgwb_list); css_get(memcg_css); @@ -676,7 +681,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi) static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { struct radix_tree_iter iter; - struct bdi_writeback_congested *congested, *congested_n; + struct rb_node *rbn; void **slot; WARN_ON(test_bit(WB_registered, &bdi->wb.state)); @@ -686,9 +691,11 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi) radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) cgwb_kill(*slot); - rbtree_postorder_for_each_entry_safe(congested, congested_n, - &bdi->cgwb_congested_tree, rb_node) { - rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree); + while ((rbn = rb_first(&bdi->cgwb_congested_tree))) { + struct bdi_writeback_congested *congested = + rb_entry(rbn, struct bdi_writeback_congested, rb_node); + + rb_erase(rbn, &bdi->cgwb_congested_tree); congested->bdi = NULL; /* mark @congested unlinked */ } @@ -764,15 +771,22 @@ static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { } int bdi_init(struct backing_dev_info *bdi) { + int ret; + bdi->dev = NULL; bdi->min_ratio = 0; bdi->max_ratio = 100; bdi->max_prop_frac = FPROP_FRAC_BASE; INIT_LIST_HEAD(&bdi->bdi_list); + INIT_LIST_HEAD(&bdi->wb_list); 
init_waitqueue_head(&bdi->wb_waitq); - return cgwb_bdi_init(bdi); + ret = cgwb_bdi_init(bdi); + + list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list); + + return ret; } EXPORT_SYMBOL(bdi_init); @@ -823,7 +837,7 @@ static void bdi_remove_from_list(struct backing_dev_info *bdi) synchronize_rcu_expedited(); } -void bdi_destroy(struct backing_dev_info *bdi) +void bdi_unregister(struct backing_dev_info *bdi) { /* make sure nobody finds us on the bdi_list anymore */ bdi_remove_from_list(bdi); @@ -835,9 +849,19 @@ void bdi_destroy(struct backing_dev_info *bdi) device_unregister(bdi->dev); bdi->dev = NULL; } +} +void bdi_exit(struct backing_dev_info *bdi) +{ + WARN_ON_ONCE(bdi->dev); wb_exit(&bdi->wb); } + +void bdi_destroy(struct backing_dev_info *bdi) +{ + bdi_unregister(bdi); + bdi_exit(bdi); +} EXPORT_SYMBOL(bdi_destroy); /* diff --git a/mm/cma.c b/mm/cma.c index e7d1db533025..4eb56badf37e 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -361,7 +361,7 @@ err: * This function allocates part of contiguous memory on specific * contiguous memory area. */ -struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align) +struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align) { unsigned long mask, offset, pfn, start = 0; unsigned long bitmap_maxno, bitmap_no, bitmap_count; @@ -371,7 +371,7 @@ struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align) if (!cma || !cma->count) return NULL; - pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma, + pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma, count, align); if (!count) diff --git a/mm/dmapool.c b/mm/dmapool.c index 71a8998cd03a..312a716fa14c 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -394,7 +394,7 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) list_for_each_entry(page, &pool->page_list, page_list) { if (dma < page->dma) continue; - if (dma < (page->dma + pool->allocation)) + if ((dma - page->dma) < pool->allocation) return page; } return NULL; diff --git a/mm/filemap.c b/mm/filemap.c index 72940fb38666..327910c2400c 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2473,6 +2473,26 @@ ssize_t generic_perform_write(struct file *file, iov_iter_count(i)); again: + /* + * Bring in the user page that we will copy from _first_. + * Otherwise there's a nasty deadlock on copying from the + * same page as we're writing to, without it being marked + * up-to-date. + * + * Not only is this an optimisation, but it is also required + * to check that the address is actually valid, when atomic + * usercopies are used, below. + */ + if (unlikely(iov_iter_fault_in_readable(i, bytes))) { + status = -EFAULT; + break; + } + + if (fatal_signal_pending(current)) { + status = -EINTR; + break; + } + status = a_ops->write_begin(file, mapping, pos, bytes, flags, &page, &fsdata); if (unlikely(status < 0)) @@ -2480,17 +2500,8 @@ again: if (mapping_writably_mapped(mapping)) flush_dcache_page(page); - /* - * 'page' is now locked. If we are trying to copy from a - * mapping of 'page' in userspace, the copy might fault and - * would need PageUptodate() to complete. But, page can not be - * made Uptodate without acquiring the page lock, which we hold. - * Deadlock. Avoid with pagefault_disable(). Fix up below with - * iov_iter_fault_in_readable(). 
- */ - pagefault_disable(); + copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); - pagefault_enable(); flush_dcache_page(page); status = a_ops->write_end(file, mapping, pos, bytes, copied, @@ -2513,24 +2524,12 @@ again: */ bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, iov_iter_single_seg_count(i)); - /* - * This is the fallback to recover if the copy from - * userspace above faults. - */ - if (unlikely(iov_iter_fault_in_readable(i, bytes))) { - status = -EFAULT; - break; - } goto again; } pos += copied; written += copied; balance_dirty_pages_ratelimited(mapping); - if (fatal_signal_pending(current)) { - status = -EINTR; - break; - } } while (iov_iter_count(i)); return written ? written : status; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4b06b8db9df2..bbac913f96bc 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2206,7 +2206,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) { pte_t pteval = *_pte; - if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { + if (pte_none(pteval) || (pte_present(pteval) && + is_zero_pfn(pte_pfn(pteval)))) { if (!userfaultfd_armed(vma) && ++none_or_zero <= khugepaged_max_ptes_none) continue; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 999fb0aef8f1..9cc773483624 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3201,6 +3201,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, if (iter_vma == vma) continue; + /* + * Shared VMAs have their own reserves and do not affect + * MAP_PRIVATE accounting but it is possible that a shared + * VMA is using the same page so check and skip such VMAs. + */ + if (iter_vma->vm_flags & VM_MAYSHARE) + continue; + /* * Unmap the page from other VMAs without their own reserves. * They get marked to be SIGKILLed if they fault in these diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6ddaeba34e09..c57c4423c688 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -644,12 +644,14 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) } /* + * Return page count for single (non recursive) @memcg. + * * Implementation Note: reading percpu statistics for memcg. * * Both of vmstat[] and percpu_counter has threshold and do periodic * synchronization to implement "quick" read. There are trade-off between * reading cost and precision of value. Then, we may have a chance to implement - * a periodic synchronizion of counter in memcg's counter. + * a periodic synchronization of counter in memcg's counter. * * But this _read() function is used for user interface now. The user accounts * memory usage by memory cgroup and he _always_ requires exact value because @@ -659,17 +661,24 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz) * * If there are kernel internal actions which can make use of some not-exact * value, and reading all cpu value can be performance bottleneck in some - * common workload, threashold and synchonization as vmstat[] should be + * common workload, threshold and synchronization as vmstat[] should be * implemented. 
*/ -static long mem_cgroup_read_stat(struct mem_cgroup *memcg, - enum mem_cgroup_stat_index idx) +static unsigned long +mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) { long val = 0; int cpu; + /* Per-cpu values can be negative, use a signed accumulator */ for_each_possible_cpu(cpu) val += per_cpu(memcg->stat->count[idx], cpu); + /* + * Summing races with updates, so val may be negative. Avoid exposing + * transient negative values. + */ + if (val < 0) + val = 0; return val; } @@ -1254,7 +1263,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) continue; - pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i], + pr_cont(" %s:%luKB", mem_cgroup_stat_names[i], K(mem_cgroup_read_stat(iter, i))); } @@ -2819,14 +2828,11 @@ static unsigned long tree_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx) { struct mem_cgroup *iter; - long val = 0; + unsigned long val = 0; - /* Per-cpu values can be negative, use a signed accumulator */ for_each_mem_cgroup_tree(iter, memcg) val += mem_cgroup_read_stat(iter, idx); - if (val < 0) /* race ? */ - val = 0; return val; } @@ -3169,7 +3175,7 @@ static int memcg_stat_show(struct seq_file *m, void *v) for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) continue; - seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i], + seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i], mem_cgroup_read_stat(memcg, i) * PAGE_SIZE); } @@ -3194,13 +3200,13 @@ static int memcg_stat_show(struct seq_file *m, void *v) (u64)memsw * PAGE_SIZE); for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { - long long val = 0; + unsigned long long val = 0; if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account) continue; for_each_mem_cgroup_tree(mi, memcg) val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE; - seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val); + seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val); } for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) { @@ -3381,6 +3387,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, ret = page_counter_memparse(args, "-1", &threshold); if (ret) return ret; + threshold <<= PAGE_SHIFT; mutex_lock(&memcg->thresholds_lock); @@ -3734,44 +3741,43 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb) /** * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg * @wb: bdi_writeback in question - * @pavail: out parameter for number of available pages + * @pfilepages: out parameter for number of file pages + * @pheadroom: out parameter for number of allocatable pages according to memcg * @pdirty: out parameter for number of dirty pages * @pwriteback: out parameter for number of pages under writeback * - * Determine the numbers of available, dirty, and writeback pages in @wb's - * memcg. Dirty and writeback are self-explanatory. Available is a bit - * more involved. + * Determine the numbers of file, headroom, dirty, and writeback pages in + * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom + * is a bit more involved. * - * A memcg's headroom is "min(max, high) - used". The available memory is - * calculated as the lowest headroom of itself and the ancestors plus the - * number of pages already being used for file pages. Note that this - * doesn't consider the actual amount of available memory in the system. - * The caller should further cap *@pavail accordingly. 
+ * A memcg's headroom is "min(max, high) - used". In the hierarchy, the + * headroom is calculated as the lowest headroom of itself and the + * ancestors. Note that this doesn't consider the actual amount of + * available memory in the system. The caller should further cap + * *@pheadroom accordingly. */ -void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pavail, - unsigned long *pdirty, unsigned long *pwriteback) +void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, + unsigned long *pheadroom, unsigned long *pdirty, + unsigned long *pwriteback) { struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); struct mem_cgroup *parent; - unsigned long head_room = PAGE_COUNTER_MAX; - unsigned long file_pages; *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY); /* this should eventually include NR_UNSTABLE_NFS */ *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); + *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | + (1 << LRU_ACTIVE_FILE)); + *pheadroom = PAGE_COUNTER_MAX; - file_pages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) | - (1 << LRU_ACTIVE_FILE)); while ((parent = parent_mem_cgroup(memcg))) { unsigned long ceiling = min(memcg->memory.limit, memcg->high); unsigned long used = page_counter_read(&memcg->memory); - head_room = min(head_room, ceiling - min(ceiling, used)); + *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); memcg = parent; } - - *pavail = file_pages + head_room; } #else /* CONFIG_CGROUP_WRITEBACK */ @@ -4179,7 +4185,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void) if (memcg_wb_domain_init(memcg, GFP_KERNEL)) goto out_free_stat; - spin_lock_init(&memcg->pcp_counter_lock); return memcg; out_free_stat: diff --git a/mm/memory.c b/mm/memory.c index 9cb27470fee9..deb679c31f2a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2426,6 +2426,8 @@ void unmap_mapping_range(struct address_space *mapping, if (details.last_index < details.first_index) details.last_index = ULONG_MAX; + + /* DAX uses i_mmap_lock to serialise file truncate vs page fault */ i_mmap_lock_write(mapping); if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap))) unmap_mapping_range_tree(&mapping->i_mmap, &details); diff --git a/mm/migrate.c b/mm/migrate.c index 7452a00bbb50..842ecd7aaf7f 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -740,6 +740,15 @@ static int move_to_new_page(struct page *newpage, struct page *page, if (PageSwapBacked(page)) SetPageSwapBacked(newpage); + /* + * Indirectly called below, migrate_page_copy() copies PG_dirty and thus + * needs newpage's memcg set to transfer memcg dirty page accounting. + * So perform memcg migration in two steps: + * 1. set newpage->mem_cgroup (here) + * 2. 
clear page->mem_cgroup (below) + */ + set_page_memcg(newpage, page_memcg(page)); + mapping = page_mapping(page); if (!mapping) rc = migrate_page(mapping, newpage, page, mode); @@ -756,9 +765,10 @@ static int move_to_new_page(struct page *newpage, struct page *page, rc = fallback_migrate_page(mapping, newpage, page, mode); if (rc != MIGRATEPAGE_SUCCESS) { + set_page_memcg(newpage, NULL); newpage->mapping = NULL; } else { - mem_cgroup_migrate(page, newpage, false); + set_page_memcg(page, NULL); if (page_was_mapped) remove_migration_ptes(page, newpage); page->mapping = NULL; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 0a931cdd4f6b..2c90357c34ea 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -145,9 +145,6 @@ struct dirty_throttle_control { unsigned long pos_ratio; }; -#define DTC_INIT_COMMON(__wb) .wb = (__wb), \ - .wb_completions = &(__wb)->completions - /* * Length of period for aging writeout fractions of bdis. This is an * arbitrarily chosen number. The longer the period, the slower fractions will @@ -157,12 +154,16 @@ struct dirty_throttle_control { #ifdef CONFIG_CGROUP_WRITEBACK -#define GDTC_INIT(__wb) .dom = &global_wb_domain, \ - DTC_INIT_COMMON(__wb) +#define GDTC_INIT(__wb) .wb = (__wb), \ + .dom = &global_wb_domain, \ + .wb_completions = &(__wb)->completions + #define GDTC_INIT_NO_WB .dom = &global_wb_domain -#define MDTC_INIT(__wb, __gdtc) .dom = mem_cgroup_wb_domain(__wb), \ - .gdtc = __gdtc, \ - DTC_INIT_COMMON(__wb) + +#define MDTC_INIT(__wb, __gdtc) .wb = (__wb), \ + .dom = mem_cgroup_wb_domain(__wb), \ + .wb_completions = &(__wb)->memcg_completions, \ + .gdtc = __gdtc static bool mdtc_valid(struct dirty_throttle_control *dtc) { @@ -213,7 +214,8 @@ static void wb_min_max_ratio(struct bdi_writeback *wb, #else /* CONFIG_CGROUP_WRITEBACK */ -#define GDTC_INIT(__wb) DTC_INIT_COMMON(__wb) +#define GDTC_INIT(__wb) .wb = (__wb), \ + .wb_completions = &(__wb)->completions #define GDTC_INIT_NO_WB #define MDTC_INIT(__wb, __gdtc) @@ -682,13 +684,19 @@ static unsigned long hard_dirty_limit(struct wb_domain *dom, return max(thresh, dom->dirty_limit); } -/* memory available to a memcg domain is capped by system-wide clean memory */ -static void mdtc_cap_avail(struct dirty_throttle_control *mdtc) +/* + * Memory which can be further allocated to a memcg domain is capped by + * system-wide clean memory excluding the amount being used in the domain. + */ +static void mdtc_calc_avail(struct dirty_throttle_control *mdtc, + unsigned long filepages, unsigned long headroom) { struct dirty_throttle_control *gdtc = mdtc_gdtc(mdtc); - unsigned long clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); + unsigned long clean = filepages - min(filepages, mdtc->dirty); + unsigned long global_clean = gdtc->avail - min(gdtc->avail, gdtc->dirty); + unsigned long other_clean = global_clean - min(global_clean, clean); - mdtc->avail = min(mdtc->avail, clean); + mdtc->avail = filepages + min(headroom, other_clean); } /** @@ -1562,16 +1570,16 @@ static void balance_dirty_pages(struct address_space *mapping, } if (mdtc) { - unsigned long writeback; + unsigned long filepages, headroom, writeback; /* * If @wb belongs to !root memcg, repeat the same * basic calculations for the memcg domain. 
*/ - mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, - &writeback); - mdtc_cap_avail(mdtc); + mem_cgroup_wb_stats(wb, &filepages, &headroom, + &mdtc->dirty, &writeback); mdtc->dirty += writeback; + mdtc_calc_avail(mdtc, filepages, headroom); domain_dirty_limits(mdtc); @@ -1893,10 +1901,11 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb) return true; if (mdtc) { - unsigned long writeback; + unsigned long filepages, headroom, writeback; - mem_cgroup_wb_stats(wb, &mdtc->avail, &mdtc->dirty, &writeback); - mdtc_cap_avail(mdtc); + mem_cgroup_wb_stats(wb, &filepages, &headroom, &mdtc->dirty, + &writeback); + mdtc_calc_avail(mdtc, filepages, headroom); domain_dirty_limits(mdtc); /* ditto, ignore writeback */ if (mdtc->dirty > mdtc->bg_thresh) @@ -1956,7 +1965,6 @@ void laptop_mode_timer_fn(unsigned long data) int nr_pages = global_page_state(NR_FILE_DIRTY) + global_page_state(NR_UNSTABLE_NFS); struct bdi_writeback *wb; - struct wb_iter iter; /* * We want to write everything out, not just down to the dirty @@ -1965,10 +1973,12 @@ void laptop_mode_timer_fn(unsigned long data) if (!bdi_has_dirty_io(&q->backing_dev_info)) return; - bdi_for_each_wb(wb, &q->backing_dev_info, &iter, 0) + rcu_read_lock(); + list_for_each_entry_rcu(wb, &q->backing_dev_info.wb_list, bdi_node) if (wb_has_dirty_io(wb)) wb_start_writeback(wb, nr_pages, true, WB_REASON_LAPTOP_TIMER); + rcu_read_unlock(); } /* diff --git a/mm/readahead.c b/mm/readahead.c index 60cd846a9a44..24682f6f4cfd 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -89,8 +89,8 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages, while (!list_empty(pages)) { page = list_to_page(pages); list_del(&page->lru); - if (add_to_page_cache_lru(page, mapping, - page->index, GFP_KERNEL)) { + if (add_to_page_cache_lru(page, mapping, page->index, + GFP_KERNEL & mapping_gfp_mask(mapping))) { read_cache_pages_invalidate_page(mapping, page); continue; } @@ -127,8 +127,8 @@ static int read_pages(struct address_space *mapping, struct file *filp, for (page_idx = 0; page_idx < nr_pages; page_idx++) { struct page *page = list_to_page(pages); list_del(&page->lru); - if (!add_to_page_cache_lru(page, mapping, - page->index, GFP_KERNEL)) { + if (!add_to_page_cache_lru(page, mapping, page->index, + GFP_KERNEL & mapping_gfp_mask(mapping))) { mapping->a_ops->readpage(filp, page); } page_cache_release(page); diff --git a/mm/slab.c b/mm/slab.c index c77ebe6cc87c..4fcc5dd8d5a6 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) size += BYTES_PER_WORD; } #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) - if (size >= kmalloc_size(INDEX_NODE + 1) - && cachep->object_size > cache_line_size() - && ALIGN(size, cachep->align) < PAGE_SIZE) { + /* + * To activate debug pagealloc, off-slab management is necessary + * requirement. In early phase of initialization, small sized slab + * doesn't get initialized so it would not be possible. So, we need + * to check size >= 256. It guarantees that all necessary small + * sized slab is initialized in current slab initialization sequence. 
+ */ + if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) && + size >= 256 && cachep->object_size > cache_line_size() && + ALIGN(size, cachep->align) < PAGE_SIZE) { cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align); size = PAGE_SIZE; } diff --git a/mm/vmstat.c b/mm/vmstat.c index 4f5cd974e11a..fbf14485a049 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1363,15 +1363,16 @@ static cpumask_var_t cpu_stat_off; static void vmstat_update(struct work_struct *w) { - if (refresh_cpu_vm_stats()) + if (refresh_cpu_vm_stats()) { /* * Counters were updated so we expect more updates * to occur in the future. Keep on running the * update worker thread. */ - schedule_delayed_work(this_cpu_ptr(&vmstat_work), + schedule_delayed_work_on(smp_processor_id(), + this_cpu_ptr(&vmstat_work), round_jiffies_relative(sysctl_stat_interval)); - else { + } else { /* * We did not update any counters so the app may be in * a mode where it does not cause counter updates. diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index b4548c739a64..2dda439c8cb8 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -91,10 +91,50 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn) * autoconnect action, remove them completely. If they are, just unmark * them as waiting for connection, by clearing explicit_connect field. */ - if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT) + params->explicit_connect = false; + + list_del_init(¶ms->action); + + switch (params->auto_connect) { + case HCI_AUTO_CONN_EXPLICIT: hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type); - else - params->explicit_connect = false; + /* return instead of break to avoid duplicate scan update */ + return; + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + list_add(¶ms->action, &conn->hdev->pend_le_conns); + break; + case HCI_AUTO_CONN_REPORT: + list_add(¶ms->action, &conn->hdev->pend_le_reports); + break; + default: + break; + } + + hci_update_background_scan(conn->hdev); +} + +static void hci_conn_cleanup(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags)) + hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type); + + hci_chan_list_flush(conn); + + hci_conn_hash_del(hdev, conn); + + if (hdev->notify) + hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); + + hci_conn_del_sysfs(conn); + + debugfs_remove_recursive(conn->debugfs); + + hci_dev_put(hdev); + + hci_conn_put(conn); } /* This function requires the caller holds hdev->lock */ @@ -102,8 +142,13 @@ static void hci_connect_le_scan_remove(struct hci_conn *conn) { hci_connect_le_scan_cleanup(conn); - hci_conn_hash_del(conn->hdev, conn); - hci_update_background_scan(conn->hdev); + /* We can't call hci_conn_del here since that would deadlock + * with trying to call cancel_delayed_work_sync(&conn->disc_work). + * Instead, call just hci_conn_cleanup() which contains the bare + * minimum cleanup operations needed for a connection in this + * state. 
+ */ + hci_conn_cleanup(conn); } static void hci_acl_create_connection(struct hci_conn *conn) @@ -581,27 +626,17 @@ int hci_conn_del(struct hci_conn *conn) } } - hci_chan_list_flush(conn); - if (conn->amp_mgr) amp_mgr_put(conn->amp_mgr); - hci_conn_hash_del(hdev, conn); - if (hdev->notify) - hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); - skb_queue_purge(&conn->data_q); - hci_conn_del_sysfs(conn); - - debugfs_remove_recursive(conn->debugfs); - - if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags)) - hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type); - - hci_dev_put(hdev); - - hci_conn_put(conn); + /* Remove the connection from the list and cleanup its remaining + * state. This is a separate function since for some cases like + * BT_CONNECT_SCAN we *only* want the cleanup part without the + * rest of hci_conn_del. + */ + hci_conn_cleanup(conn); return 0; } @@ -973,15 +1008,23 @@ static int hci_explicit_conn_params_set(struct hci_request *req, if (is_connected(hdev, addr, addr_type)) return -EISCONN; - params = hci_conn_params_add(hdev, addr, addr_type); - if (!params) - return -EIO; + params = hci_conn_params_lookup(hdev, addr, addr_type); + if (!params) { + params = hci_conn_params_add(hdev, addr, addr_type); + if (!params) + return -ENOMEM; - /* If we created new params, or existing params were marked as disabled, - * mark them to be used just once to connect. - */ - if (params->auto_connect == HCI_AUTO_CONN_DISABLED) { + /* If we created new params, mark them to be deleted in + * hci_connect_le_scan_cleanup. It's different case than + * existing disabled params, those will stay after cleanup. + */ params->auto_connect = HCI_AUTO_CONN_EXPLICIT; + } + + /* We're trying to connect, so make sure params are at pend_le_conns */ + if (params->auto_connect == HCI_AUTO_CONN_DISABLED || + params->auto_connect == HCI_AUTO_CONN_REPORT || + params->auto_connect == HCI_AUTO_CONN_EXPLICIT) { list_del_init(¶ms->action); list_add(¶ms->action, &hdev->pend_le_conns); } diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index adcbc74c2432..e837539452fb 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2861,13 +2861,6 @@ struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev, return param; } - list_for_each_entry(param, &hdev->pend_le_reports, action) { - if (bacmp(¶m->addr, addr) == 0 && - param->addr_type == addr_type && - param->explicit_connect) - return param; - } - return NULL; } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 186041866315..bc31099d3b5b 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -55,7 +55,12 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) wake_up_bit(&hdev->flags, HCI_INQUIRY); hci_dev_lock(hdev); - hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + /* Set discovery state to stopped if we're not doing LE active + * scanning. + */ + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || + hdev->le_scan_type != LE_SCAN_ACTIVE) + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); hci_dev_unlock(hdev); hci_conn_check_pending(hdev); @@ -4648,8 +4653,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, /* If we're not connectable only connect devices that we have in * our pend_le_conns list. 
*/ - params = hci_explicit_connect_lookup(hdev, addr, addr_type); - + params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, + addr_type); if (!params) return NULL; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index ccaf5a436d8f..c4fe2fee753f 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -3545,6 +3545,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, auth_type); } else { u8 addr_type; + struct hci_conn_params *p; /* Convert from L2CAP channel address type to HCI address type */ @@ -3562,7 +3563,10 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, * If connection parameters already exist, then they * will be kept and this function does nothing. */ - hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type); + p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type); + + if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT) + p->auto_connect = HCI_AUTO_CONN_DISABLED; conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type, sec_level, @@ -6117,14 +6121,21 @@ static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr, __hci_update_background_scan(req); break; case HCI_AUTO_CONN_REPORT: - list_add(¶ms->action, &hdev->pend_le_reports); + if (params->explicit_connect) + list_add(¶ms->action, &hdev->pend_le_conns); + else + list_add(¶ms->action, &hdev->pend_le_reports); __hci_update_background_scan(req); break; case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: if (!is_connected(hdev, addr, addr_type)) { list_add(¶ms->action, &hdev->pend_le_conns); - __hci_update_background_scan(req); + /* If we are in scan phase of connecting, we were + * already added to pend_le_conns and scanning. + */ + if (params->auto_connect != HCI_AUTO_CONN_EXPLICIT) + __hci_update_background_scan(req); } break; } @@ -6379,7 +6390,8 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, goto unlock; } - if (params->auto_connect == HCI_AUTO_CONN_DISABLED) { + if (params->auto_connect == HCI_AUTO_CONN_DISABLED || + params->auto_connect == HCI_AUTO_CONN_EXPLICIT) { err = cmd->cmd_complete(cmd, MGMT_STATUS_INVALID_PARAMS); mgmt_pending_remove(cmd); @@ -6415,6 +6427,10 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev, if (p->auto_connect == HCI_AUTO_CONN_DISABLED) continue; device_removed(sk, hdev, &p->addr, p->addr_type); + if (p->explicit_connect) { + p->auto_connect = HCI_AUTO_CONN_EXPLICIT; + continue; + } list_del(&p->action); list_del(&p->list); kfree(p); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 80b94e37c94a..f79ccac6699f 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -285,6 +285,7 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req, switch (op->op) { case CEPH_OSD_OP_READ: case CEPH_OSD_OP_WRITE: + case CEPH_OSD_OP_WRITEFULL: ceph_osd_data_release(&op->extent.osd_data); break; case CEPH_OSD_OP_CALL: @@ -485,13 +486,14 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req, size_t payload_len = 0; BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && - opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE); + opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO && + opcode != CEPH_OSD_OP_TRUNCATE); op->extent.offset = offset; op->extent.length = length; op->extent.truncate_size = truncate_size; op->extent.truncate_seq = truncate_seq; - if (opcode == CEPH_OSD_OP_WRITE) + if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL) payload_len += length; op->payload_len = payload_len; 
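[Editorial note, not part of the patch] This hunk and the next one in osd_req_encode_op() encode the same rule: CEPH_OSD_OP_WRITEFULL carries outbound data exactly as CEPH_OSD_OP_WRITE does, so it must be counted in the request payload length and have its osd_data attached to the request message rather than the reply. A minimal sketch of that predicate as a standalone helper (the helper name is illustrative and not part of the patch; the opcodes are the ones used by the surrounding code):

	static bool op_sends_request_data(int opcode)
	{
		/* WRITE and WRITEFULL both ship data from the client to the OSD */
		return opcode == CEPH_OSD_OP_WRITE ||
		       opcode == CEPH_OSD_OP_WRITEFULL;
	}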
@@ -670,9 +672,11 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, break; case CEPH_OSD_OP_READ: case CEPH_OSD_OP_WRITE: + case CEPH_OSD_OP_WRITEFULL: case CEPH_OSD_OP_ZERO: case CEPH_OSD_OP_TRUNCATE: - if (src->op == CEPH_OSD_OP_WRITE) + if (src->op == CEPH_OSD_OP_WRITE || + src->op == CEPH_OSD_OP_WRITEFULL) request_data_len = src->extent.length; dst->extent.offset = cpu_to_le64(src->extent.offset); dst->extent.length = cpu_to_le64(src->extent.length); @@ -681,7 +685,8 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, dst->extent.truncate_seq = cpu_to_le32(src->extent.truncate_seq); osd_data = &src->extent.osd_data; - if (src->op == CEPH_OSD_OP_WRITE) + if (src->op == CEPH_OSD_OP_WRITE || + src->op == CEPH_OSD_OP_WRITEFULL) ceph_osdc_msg_data_add(req->r_request, osd_data); else ceph_osdc_msg_data_add(req->r_reply, osd_data); diff --git a/net/core/ethtool.c b/net/core/ethtool.c index b495ab1797fa..29edf74846fc 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1284,7 +1284,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) gstrings.len = ret; - data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); + data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER); if (!data) return -ENOMEM; diff --git a/net/core/filter.c b/net/core/filter.c index 05a04ea87172..bb18c3680001 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1415,6 +1415,7 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) return dev_forward_skb(dev, skb2); skb2->dev = dev; + skb_sender_cpu_clear(skb2); return dev_queue_xmit(skb2); } @@ -1854,9 +1855,13 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, goto out; /* We're copying the filter that has been originally attached, - * so no conversion/decode needed anymore. + * so no conversion/decode needed anymore. eBPF programs that + * have no original program cannot be dumped through this. 
*/ + ret = -EACCES; fprog = filter->prog->orig_prog; + if (!fprog) + goto out; ret = fprog->len; if (!len) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 805a95a48107..830f8a7c1cb1 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -31,7 +31,6 @@ static const char fmt_hex[] = "%#x\n"; static const char fmt_long_hex[] = "%#lx\n"; static const char fmt_dec[] = "%d\n"; -static const char fmt_udec[] = "%u\n"; static const char fmt_ulong[] = "%lu\n"; static const char fmt_u64[] = "%llu\n"; @@ -202,7 +201,7 @@ static ssize_t speed_show(struct device *dev, if (netif_running(netdev)) { struct ethtool_cmd cmd; if (!__ethtool_get_settings(netdev, &cmd)) - ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd)); + ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd)); } rtnl_unlock(); return ret; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index dad4dd37e2aa..fab4599ba8b2 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2958,11 +2958,12 @@ EXPORT_SYMBOL_GPL(skb_append_pagefrags); */ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) { + unsigned char *data = skb->data; + BUG_ON(len > skb->len); - skb->len -= len; - BUG_ON(skb->len < skb->data_len); - skb_postpull_rcsum(skb, skb->data, len); - return skb->data += len; + __skb_pull(skb, len); + skb_postpull_rcsum(skb, data, len); + return skb->data; } EXPORT_SYMBOL_GPL(skb_pull_rcsum); diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index c59fa5d9c22c..adb5325f4934 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "dsa_priv.h" char dsa_driver_version[] = "0.1"; @@ -305,7 +306,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) if (ret < 0) goto out; - ds->slave_mii_bus = mdiobus_alloc(); + ds->slave_mii_bus = devm_mdiobus_alloc(parent); if (ds->slave_mii_bus == NULL) { ret = -ENOMEM; goto out; @@ -314,7 +315,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) ret = mdiobus_register(ds->slave_mii_bus); if (ret < 0) - goto out_free; + goto out; /* @@ -367,10 +368,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent) return ret; -out_free: - mdiobus_free(ds->slave_mii_bus); out: - kfree(ds); return ret; } @@ -400,7 +398,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, /* * Allocate and initialise switch state. */ - ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL); + ds = devm_kzalloc(parent, sizeof(*ds) + drv->priv_size, GFP_KERNEL); if (ds == NULL) return ERR_PTR(-ENOMEM); @@ -420,10 +418,47 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, static void dsa_switch_destroy(struct dsa_switch *ds) { + struct device_node *port_dn; + struct phy_device *phydev; + struct dsa_chip_data *cd = ds->pd; + int port; + #ifdef CONFIG_NET_DSA_HWMON if (ds->hwmon_dev) hwmon_device_unregister(ds->hwmon_dev); #endif + + /* Disable configuration of the CPU and DSA ports */ + for (port = 0; port < DSA_MAX_PORTS; port++) { + if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) + continue; + + port_dn = cd->port_dn[port]; + if (of_phy_is_fixed_link(port_dn)) { + phydev = of_phy_find_device(port_dn); + if (phydev) { + int addr = phydev->addr; + + phy_device_free(phydev); + of_node_put(port_dn); + fixed_phy_del(addr); + } + } + } + + /* Destroy network devices for physical switch ports. 
*/ + for (port = 0; port < DSA_MAX_PORTS; port++) { + if (!(ds->phys_port_mask & (1 << port))) + continue; + + if (!ds->ports[port]) + continue; + + unregister_netdev(ds->ports[port]); + free_netdev(ds->ports[port]); + } + + mdiobus_unregister(ds->slave_mii_bus); } #ifdef CONFIG_PM_SLEEP @@ -802,10 +837,11 @@ static inline void dsa_of_remove(struct device *dev) } #endif -static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, - struct device *parent, struct dsa_platform_data *pd) +static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, + struct device *parent, struct dsa_platform_data *pd) { int i; + unsigned configured = 0; dst->pd = pd; dst->master_netdev = dev; @@ -825,8 +861,16 @@ static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, dst->ds[i] = ds; if (ds->drv->poll_link != NULL) dst->link_poll_needed = 1; + + ++configured; } + /* + * If no switch was found, exit cleanly + */ + if (!configured) + return -EPROBE_DEFER; + /* * If we use a tagging format that doesn't have an ethertype * field, make sure that all packets from this point on get @@ -843,6 +887,8 @@ static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev, dst->link_poll_timer.expires = round_jiffies(jiffies + HZ); add_timer(&dst->link_poll_timer); } + + return 0; } static int dsa_probe(struct platform_device *pdev) @@ -883,7 +929,7 @@ static int dsa_probe(struct platform_device *pdev) goto out; } - dst = kzalloc(sizeof(*dst), GFP_KERNEL); + dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL); if (dst == NULL) { dev_put(dev); ret = -ENOMEM; @@ -892,7 +938,9 @@ static int dsa_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dst); - dsa_setup_dst(dst, dev, &pdev->dev, pd); + ret = dsa_setup_dst(dst, dev, &pdev->dev, pd); + if (ret) + goto out; return 0; @@ -914,7 +962,7 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst) for (i = 0; i < dst->pd->nr_chips; i++) { struct dsa_switch *ds = dst->ds[i]; - if (ds != NULL) + if (ds) dsa_switch_destroy(ds); } } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index cce97385f743..7d91f4612ac0 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -458,12 +458,17 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state) static int dsa_slave_port_attr_set(struct net_device *dev, struct switchdev_attr *attr) { - int ret = 0; + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + int ret; switch (attr->id) { case SWITCHDEV_ATTR_PORT_STP_STATE: - if (attr->trans == SWITCHDEV_TRANS_COMMIT) - ret = dsa_slave_stp_update(dev, attr->u.stp_state); + if (attr->trans == SWITCHDEV_TRANS_PREPARE) + ret = ds->drv->port_stp_update ? 
0 : -EOPNOTSUPP; + else + ret = ds->drv->port_stp_update(ds, p->port, + attr->u.stp_state); break; default: ret = -EOPNOTSUPP; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index f03db8b7abee..0c9c3482e419 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -312,7 +312,7 @@ static void arp_send_dst(int type, int ptype, __be32 dest_ip, if (!skb) return; - skb_dst_set(skb, dst); + skb_dst_set(skb, dst_clone(dst)); arp_xmit(skb); } @@ -384,7 +384,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb) } if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE)) - dst = dst_clone(skb_dst(skb)); + dst = skb_dst(skb); arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr, dst_hw, dev->dev_addr, NULL, dst); } @@ -811,7 +811,7 @@ static int arp_process(struct sock *sk, struct sk_buff *skb) } else { pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb); - return 0; + goto out_free_dst; } goto out; } @@ -865,6 +865,8 @@ static int arp_process(struct sock *sk, struct sk_buff *skb) out: consume_skb(skb); +out_free_dst: + dst_release(reply_dst); return 0; } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 6fcbd215cdbc..690bcbc59f26 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -340,6 +340,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, fl4.flowi4_tos = tos; fl4.flowi4_scope = RT_SCOPE_UNIVERSE; fl4.flowi4_tun_key.tun_id = 0; + fl4.flowi4_flags = 0; no_addr = idev->ifa_list == NULL; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 7bb9c39e0a4d..61b45a17fc73 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -577,21 +577,22 @@ EXPORT_SYMBOL(inet_rtx_syn_ack); static bool reqsk_queue_unlink(struct request_sock_queue *queue, struct request_sock *req) { - struct listen_sock *lopt = queue->listen_opt; struct request_sock **prev; + struct listen_sock *lopt; bool found = false; spin_lock(&queue->syn_wait_lock); - - for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL; - prev = &(*prev)->dl_next) { - if (*prev == req) { - *prev = req->dl_next; - found = true; - break; + lopt = queue->listen_opt; + if (lopt) { + for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL; + prev = &(*prev)->dl_next) { + if (*prev == req) { + *prev = req->dl_next; + found = true; + break; + } } } - spin_unlock(&queue->syn_wait_lock); if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer)) reqsk_put(req); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c6ad99ad0ffb..c81deb85acb4 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1737,6 +1737,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, fl4.flowi4_mark = skb->mark; fl4.flowi4_tos = tos; fl4.flowi4_scope = RT_SCOPE_UNIVERSE; + fl4.flowi4_flags = 0; fl4.daddr = daddr; fl4.saddr = saddr; err = fib_lookup(net, &fl4, &res, 0); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 900113376d4e..36b85bd05ac8 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3119,6 +3119,8 @@ static void addrconf_gre_config(struct net_device *dev) } addrconf_addr_gen(idev, true); + if (dev->flags & IFF_POINTOPOINT) + addrconf_add_mroute(dev); } #endif diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 92b1aa38f121..61d403ee1031 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -376,6 +376,9 @@ int ip6_forward(struct sk_buff *skb) if (skb->pkt_type != PACKET_HOST) goto drop; + if (unlikely(skb->sk)) + goto drop; + if 
(skb_warn_if_lro(skb)) goto drop; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index f204089e854c..968f31c01f89 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -142,6 +142,9 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev) struct net_device *loopback_dev = net->loopback_dev; int cpu; + if (dev == loopback_dev) + return; + for_each_possible_cpu(cpu) { struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); struct rt6_info *rt; @@ -151,14 +154,12 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev) struct inet6_dev *rt_idev = rt->rt6i_idev; struct net_device *rt_dev = rt->dst.dev; - if (rt_idev && (rt_idev->dev == dev || !dev) && - rt_idev->dev != loopback_dev) { + if (rt_idev->dev == dev) { rt->rt6i_idev = in6_dev_get(loopback_dev); in6_dev_put(rt_idev); } - if (rt_dev && (rt_dev == dev || !dev) && - rt_dev != loopback_dev) { + if (rt_dev == dev) { rt->dst.dev = loopback_dev; dev_hold(rt->dst.dev); dev_put(rt_dev); @@ -247,12 +248,6 @@ static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk, { } -static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst, - unsigned long old) -{ - return NULL; -} - static struct dst_ops ip6_dst_blackhole_ops = { .family = AF_INET6, .destroy = ip6_dst_destroy, @@ -261,7 +256,7 @@ static struct dst_ops ip6_dst_blackhole_ops = { .default_advmss = ip6_default_advmss, .update_pmtu = ip6_rt_blackhole_update_pmtu, .redirect = ip6_rt_blackhole_redirect, - .cow_metrics = ip6_rt_blackhole_cow_metrics, + .cow_metrics = dst_cow_metrics_generic, .neigh_lookup = ip6_neigh_lookup, }; @@ -318,6 +313,15 @@ static const struct rt6_info ip6_blk_hole_entry_template = { #endif +static void rt6_info_init(struct rt6_info *rt) +{ + struct dst_entry *dst = &rt->dst; + + memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); + INIT_LIST_HEAD(&rt->rt6i_siblings); + INIT_LIST_HEAD(&rt->rt6i_uncached); +} + /* allocate dst with ip6_dst_ops */ static struct rt6_info *__ip6_dst_alloc(struct net *net, struct net_device *dev, @@ -326,13 +330,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net, struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0, DST_OBSOLETE_FORCE_CHK, flags); - if (rt) { - struct dst_entry *dst = &rt->dst; + if (rt) + rt6_info_init(rt); - memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); - INIT_LIST_HEAD(&rt->rt6i_siblings); - INIT_LIST_HEAD(&rt->rt6i_uncached); - } return rt; } @@ -1193,7 +1193,8 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, fl6->flowi6_iif = LOOPBACK_IFINDEX; - if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr)) + if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) || + fl6->flowi6_oif) flags |= RT6_LOOKUP_F_IFACE; if (!ipv6_addr_any(&fl6->saddr)) @@ -1212,24 +1213,20 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0); if (rt) { - new = &rt->dst; - - memset(new + 1, 0, sizeof(*rt) - sizeof(*new)); + rt6_info_init(rt); + new = &rt->dst; new->__use = 1; new->input = dst_discard; new->output = dst_discard_sk; - if (dst_metrics_read_only(&ort->dst)) - new->_metrics = ort->dst._metrics; - else - dst_copy_metrics(new, &ort->dst); + dst_copy_metrics(new, &ort->dst); rt->rt6i_idev = ort->rt6i_idev; if (rt->rt6i_idev) in6_dev_hold(rt->rt6i_idev); rt->rt6i_gateway = ort->rt6i_gateway; - rt->rt6i_flags = ort->rt6i_flags; + rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU; 
rt->rt6i_metric = 0; memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); @@ -2621,7 +2618,8 @@ void rt6_ifdown(struct net *net, struct net_device *dev) fib6_clean_all(net, fib6_ifdown, &adn); icmp6_clean_all(fib6_ifdown, &adn); - rt6_uncached_list_flush_dev(net, dev); + if (dev) + rt6_uncached_list_flush_dev(net, dev); } struct rt6_mtu_change_arg { diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 30caa289c5db..5cedfda4b241 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -37,6 +37,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = oif; + fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF; memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr)); if (saddr) memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr)); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f6b090df3930..afca2eb4dfa7 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work) tunnel = container_of(work, struct l2tp_tunnel, del_work); sk = l2tp_tunnel_sock_lookup(tunnel); if (!sk) - return; + goto out; sock = sk->sk_socket; @@ -1341,6 +1341,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work) } l2tp_tunnel_sock_put(sk); +out: + l2tp_tunnel_dec_refcount(tunnel); } /* Create a socket for the tunnel, if one isn't set up by @@ -1636,8 +1638,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); */ int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) { + l2tp_tunnel_inc_refcount(tunnel); l2tp_tunnel_closeall(tunnel); - return (false == queue_work(l2tp_wq, &tunnel->del_work)); + if (false == queue_work(l2tp_wq, &tunnel->del_work)) { + l2tp_tunnel_dec_refcount(tunnel); + return 1; + } + return 0; } EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index ced6bf3be8d6..1560c8482bcb 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -149,7 +149,7 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf, for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) { if (test_bit(i, local->hw.flags)) - pos += scnprintf(pos, end - pos, "%s", + pos += scnprintf(pos, end - pos, "%s\n", hw_flag_names[i]); } diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 8ba583243509..3ed7ddfbf8e8 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -101,6 +101,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, * when it wakes up for the next time. 
*/ set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT); + ieee80211_clear_fast_xmit(sta); /* * This code races in the following way: diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 84e0e8c7fb23..7892eb8ed4c8 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1218,8 +1218,10 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, if (!tx->sta) info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; - else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) + else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) { info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT; + ieee80211_check_fast_xmit(tx->sta); + } info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT; @@ -2451,7 +2453,8 @@ void ieee80211_check_fast_xmit(struct sta_info *sta) if (test_sta_flag(sta, WLAN_STA_PS_STA) || test_sta_flag(sta, WLAN_STA_PS_DRIVER) || - test_sta_flag(sta, WLAN_STA_PS_DELIVER)) + test_sta_flag(sta, WLAN_STA_PS_DELIVER) || + test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT)) goto out; if (sdata->noack_map) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 8f060d7f9a0e..0a49a8c7c564 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2785,6 +2785,7 @@ static int netlink_dump(struct sock *sk) struct sk_buff *skb = NULL; struct nlmsghdr *nlh; int len, err = -ENOBUFS; + int alloc_min_size; int alloc_size; mutex_lock(nlk->cb_mutex); @@ -2793,9 +2794,6 @@ static int netlink_dump(struct sock *sk) goto errout_skb; } - cb = &nlk->cb; - alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE); - if (!netlink_rx_is_mmaped(sk) && atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) goto errout_skb; @@ -2805,23 +2803,35 @@ static int netlink_dump(struct sock *sk) * to reduce number of system calls on dump operations, if user * ever provided a big enough buffer. */ - if (alloc_size < nlk->max_recvmsg_len) { - skb = netlink_alloc_skb(sk, - nlk->max_recvmsg_len, - nlk->portid, + cb = &nlk->cb; + alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE); + + if (alloc_min_size < nlk->max_recvmsg_len) { + alloc_size = nlk->max_recvmsg_len; + skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); - /* available room should be exact amount to avoid MSG_TRUNC */ - if (skb) - skb_reserve(skb, skb_tailroom(skb) - - nlk->max_recvmsg_len); } - if (!skb) + if (!skb) { + alloc_size = alloc_min_size; skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL); + } if (!skb) goto errout_skb; + + /* Trim skb to allocated size. User is expected to provide buffer as + * large as max(min_dump_alloc, 16KiB (mac_recvmsg_len capped at + * netlink_recvmsg())). dump will pack as many smaller messages as + * could fit within the allocated skb. skb is typically allocated + * with larger space than required (could be as much as near 2x the + * requested size with align to next power of 2 approach). Allowing + * dump to use the excess space makes it difficult for a user to have a + * reasonable static buffer based on the expected largest dump of a + * single netdev. The outcome is MSG_TRUNC error. 
+ */ + skb_reserve(skb, skb_tailroom(skb) - alloc_size); netlink_skb_set_owner_r(skb, sk); len = cb->dump(skb, cb); diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 315f5330b6e5..c6a39bf2c3b9 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -684,7 +684,7 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru, { if (skb_network_offset(skb) > MAX_L2_LEN) { OVS_NLERR(1, "L2 header too long to fragment"); - return; + goto err; } if (ethertype == htons(ETH_P_IP)) { @@ -708,8 +708,7 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru, struct rt6_info ovs_rt; if (!v6ops) { - kfree_skb(skb); - return; + goto err; } prepare_frag(vport, skb); @@ -728,8 +727,12 @@ static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru, WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.", ovs_vport_name(vport), ntohs(ethertype), mru, vport->dev->mtu); - kfree_skb(skb); + goto err; } + + return; +err: + kfree_skb(skb); } static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, @@ -968,7 +971,7 @@ static int execute_masked_set_action(struct sk_buff *skb, case OVS_KEY_ATTR_CT_STATE: case OVS_KEY_ATTR_CT_ZONE: case OVS_KEY_ATTR_CT_MARK: - case OVS_KEY_ATTR_CT_LABEL: + case OVS_KEY_ATTR_CT_LABELS: err = -EINVAL; break; } @@ -1099,6 +1102,12 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, break; case OVS_ACTION_ATTR_CT: + if (!is_flow_key_valid(key)) { + err = ovs_flow_key_update(skb, key); + if (err) + return err; + } + err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key, nla_data(a)); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 002a755fa07e..80bf702715bb 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -37,9 +37,9 @@ struct md_mark { }; /* Metadata label for masked write to conntrack label. */ -struct md_label { - struct ovs_key_ct_label value; - struct ovs_key_ct_label mask; +struct md_labels { + struct ovs_key_ct_labels value; + struct ovs_key_ct_labels mask; }; /* Conntrack action context for execution. */ @@ -47,10 +47,10 @@ struct ovs_conntrack_info { struct nf_conntrack_helper *helper; struct nf_conntrack_zone zone; struct nf_conn *ct; - u32 flags; + u8 commit : 1; u16 family; struct md_mark mark; - struct md_label label; + struct md_labels labels; }; static u16 key_to_nfproto(const struct sw_flow_key *key) @@ -109,21 +109,21 @@ static u32 ovs_ct_get_mark(const struct nf_conn *ct) #endif } -static void ovs_ct_get_label(const struct nf_conn *ct, - struct ovs_key_ct_label *label) +static void ovs_ct_get_labels(const struct nf_conn *ct, + struct ovs_key_ct_labels *labels) { struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL; if (cl) { size_t len = cl->words * sizeof(long); - if (len > OVS_CT_LABEL_LEN) - len = OVS_CT_LABEL_LEN; - else if (len < OVS_CT_LABEL_LEN) - memset(label, 0, OVS_CT_LABEL_LEN); - memcpy(label, cl->bits, len); + if (len > OVS_CT_LABELS_LEN) + len = OVS_CT_LABELS_LEN; + else if (len < OVS_CT_LABELS_LEN) + memset(labels, 0, OVS_CT_LABELS_LEN); + memcpy(labels, cl->bits, len); } else { - memset(label, 0, OVS_CT_LABEL_LEN); + memset(labels, 0, OVS_CT_LABELS_LEN); } } @@ -134,7 +134,7 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state, key->ct.state = state; key->ct.zone = zone->id; key->ct.mark = ovs_ct_get_mark(ct); - ovs_ct_get_label(ct, &key->ct.label); + ovs_ct_get_labels(ct, &key->ct.labels); } /* Update 'key' based on skb->nfct. 
If 'post_ct' is true, then OVS has @@ -167,7 +167,7 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) { - if (nla_put_u8(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state)) + if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state)) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && @@ -179,8 +179,8 @@ int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && - nla_put(skb, OVS_KEY_ATTR_CT_LABEL, sizeof(key->ct.label), - &key->ct.label)) + nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(key->ct.labels), + &key->ct.labels)) return -EMSGSIZE; return 0; @@ -213,9 +213,9 @@ static int ovs_ct_set_mark(struct sk_buff *skb, struct sw_flow_key *key, #endif } -static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key, - const struct ovs_key_ct_label *label, - const struct ovs_key_ct_label *mask) +static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_key_ct_labels *labels, + const struct ovs_key_ct_labels *mask) { enum ip_conntrack_info ctinfo; struct nf_conn_labels *cl; @@ -235,15 +235,15 @@ static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key, nf_ct_labels_ext_add(ct); cl = nf_ct_labels_find(ct); } - if (!cl || cl->words * sizeof(long) < OVS_CT_LABEL_LEN) + if (!cl || cl->words * sizeof(long) < OVS_CT_LABELS_LEN) return -ENOSPC; - err = nf_connlabels_replace(ct, (u32 *)label, (u32 *)mask, - OVS_CT_LABEL_LEN / sizeof(u32)); + err = nf_connlabels_replace(ct, (u32 *)labels, (u32 *)mask, + OVS_CT_LABELS_LEN / sizeof(u32)); if (err) return err; - ovs_ct_get_label(ct, &key->ct.label); + ovs_ct_get_labels(ct, &key->ct.labels); return 0; } @@ -465,12 +465,12 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, return 0; } -static bool label_nonzero(const struct ovs_key_ct_label *label) +static bool labels_nonzero(const struct ovs_key_ct_labels *labels) { size_t i; - for (i = 0; i < sizeof(*label); i++) - if (label->ct_label[i]) + for (i = 0; i < sizeof(*labels); i++) + if (labels->ct_labels[i]) return true; return false; @@ -493,7 +493,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb, return err; } - if (info->flags & OVS_CT_F_COMMIT) + if (info->commit) err = ovs_ct_commit(net, key, info, skb); else err = ovs_ct_lookup(net, key, info, skb); @@ -506,9 +506,9 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb, if (err) goto err; } - if (label_nonzero(&info->label.mask)) - err = ovs_ct_set_label(skb, key, &info->label.value, - &info->label.mask); + if (labels_nonzero(&info->labels.mask)) + err = ovs_ct_set_labels(skb, key, &info->labels.value, + &info->labels.mask); err: skb_push(skb, nh_ofs); return err; @@ -539,14 +539,13 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name, } static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = { - [OVS_CT_ATTR_FLAGS] = { .minlen = sizeof(u32), - .maxlen = sizeof(u32) }, + [OVS_CT_ATTR_COMMIT] = { .minlen = 0, .maxlen = 0 }, [OVS_CT_ATTR_ZONE] = { .minlen = sizeof(u16), .maxlen = sizeof(u16) }, [OVS_CT_ATTR_MARK] = { .minlen = sizeof(struct md_mark), .maxlen = sizeof(struct md_mark) }, - [OVS_CT_ATTR_LABEL] = { .minlen = sizeof(struct md_label), - .maxlen = sizeof(struct md_label) }, + [OVS_CT_ATTR_LABELS] = { .minlen = sizeof(struct md_labels), + .maxlen = sizeof(struct md_labels) }, [OVS_CT_ATTR_HELPER] = { .minlen = 1, .maxlen = 
NF_CT_HELPER_NAME_LEN } }; @@ -576,8 +575,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, } switch (type) { - case OVS_CT_ATTR_FLAGS: - info->flags = nla_get_u32(a); + case OVS_CT_ATTR_COMMIT: + info->commit = true; break; #ifdef CONFIG_NF_CONNTRACK_ZONES case OVS_CT_ATTR_ZONE: @@ -593,10 +592,10 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info, } #endif #ifdef CONFIG_NF_CONNTRACK_LABELS - case OVS_CT_ATTR_LABEL: { - struct md_label *label = nla_data(a); + case OVS_CT_ATTR_LABELS: { + struct md_labels *labels = nla_data(a); - info->label = *label; + info->labels = *labels; break; } #endif @@ -633,7 +632,7 @@ bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr) attr == OVS_KEY_ATTR_CT_MARK) return true; if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && - attr == OVS_KEY_ATTR_CT_LABEL) { + attr == OVS_KEY_ATTR_CT_LABELS) { struct ovs_net *ovs_net = net_generic(net, ovs_net_id); return ovs_net->xt_label; @@ -701,7 +700,7 @@ int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info, if (!start) return -EMSGSIZE; - if (nla_put_u32(skb, OVS_CT_ATTR_FLAGS, ct_info->flags)) + if (ct_info->commit && nla_put_flag(skb, OVS_CT_ATTR_COMMIT)) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id)) @@ -711,8 +710,8 @@ int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info, &ct_info->mark)) return -EMSGSIZE; if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && - nla_put(skb, OVS_CT_ATTR_LABEL, sizeof(ct_info->label), - &ct_info->label)) + nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels), + &ct_info->labels)) return -EMSGSIZE; if (ct_info->helper) { if (nla_put_string(skb, OVS_CT_ATTR_HELPER, @@ -737,7 +736,7 @@ void ovs_ct_free_action(const struct nlattr *a) void ovs_ct_init(struct net *net) { - unsigned int n_bits = sizeof(struct ovs_key_ct_label) * BITS_PER_BYTE; + unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE; struct ovs_net *ovs_net = net_generic(net, ovs_net_id); if (nf_connlabels_get(net, n_bits)) { diff --git a/net/openvswitch/conntrack.h b/net/openvswitch/conntrack.h index 43f5dd7a5577..da8714942c95 100644 --- a/net/openvswitch/conntrack.h +++ b/net/openvswitch/conntrack.h @@ -34,6 +34,13 @@ int ovs_ct_execute(struct net *, struct sk_buff *, struct sw_flow_key *, void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key); int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb); void ovs_ct_free_action(const struct nlattr *a); + +static inline bool ovs_ct_state_supported(u32 state) +{ + return !(state & ~(OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED | + OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR | + OVS_CS_F_INVALID | OVS_CS_F_TRACKED)); +} #else #include @@ -46,6 +53,11 @@ static inline bool ovs_ct_verify(struct net *net, int attr) return false; } +static inline bool ovs_ct_state_supported(u32 state) +{ + return false; +} + static inline int ovs_ct_copy_action(struct net *net, const struct nlattr *nla, const struct sw_flow_key *key, struct sw_flow_actions **acts, bool log) @@ -72,7 +84,7 @@ static inline void ovs_ct_fill_key(const struct sk_buff *skb, key->ct.state = 0; key->ct.zone = 0; key->ct.mark = 0; - memset(&key->ct.label, 0, sizeof(key->ct.label)); + memset(&key->ct.labels, 0, sizeof(key->ct.labels)); } static inline int ovs_ct_put_key(const struct sw_flow_key *key, diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index fe527d2dd4b7..8cfa15a08668 100644 --- a/net/openvswitch/flow.h +++ 
b/net/openvswitch/flow.h @@ -116,7 +116,7 @@ struct sw_flow_key { u16 zone; u32 mark; u8 state; - struct ovs_key_ct_label label; + struct ovs_key_ct_labels labels; } ct; } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */ diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 5c030a4d7338..171a691f1c32 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -291,10 +291,10 @@ size_t ovs_key_attr_size(void) + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */ + nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */ + nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */ - + nla_total_size(1) /* OVS_KEY_ATTR_CT_STATE */ + + nla_total_size(4) /* OVS_KEY_ATTR_CT_STATE */ + nla_total_size(2) /* OVS_KEY_ATTR_CT_ZONE */ + nla_total_size(4) /* OVS_KEY_ATTR_CT_MARK */ - + nla_total_size(16) /* OVS_KEY_ATTR_CT_LABEL */ + + nla_total_size(16) /* OVS_KEY_ATTR_CT_LABELS */ + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ + nla_total_size(4) /* OVS_KEY_ATTR_VLAN */ @@ -349,10 +349,10 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_TUNNEL] = { .len = OVS_ATTR_NESTED, .next = ovs_tunnel_key_lens, }, [OVS_KEY_ATTR_MPLS] = { .len = sizeof(struct ovs_key_mpls) }, - [OVS_KEY_ATTR_CT_STATE] = { .len = sizeof(u8) }, + [OVS_KEY_ATTR_CT_STATE] = { .len = sizeof(u32) }, [OVS_KEY_ATTR_CT_ZONE] = { .len = sizeof(u16) }, [OVS_KEY_ATTR_CT_MARK] = { .len = sizeof(u32) }, - [OVS_KEY_ATTR_CT_LABEL] = { .len = sizeof(struct ovs_key_ct_label) }, + [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) }, }; static bool check_attr_len(unsigned int attr_len, unsigned int expected_len) @@ -814,7 +814,13 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match, if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) && ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) { - u8 ct_state = nla_get_u8(a[OVS_KEY_ATTR_CT_STATE]); + u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]); + + if (!is_mask && !ovs_ct_state_supported(ct_state)) { + OVS_NLERR(log, "ct_state flags %08x unsupported", + ct_state); + return -EINVAL; + } SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask); *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE); @@ -833,14 +839,14 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match, SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask); *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK); } - if (*attrs & (1 << OVS_KEY_ATTR_CT_LABEL) && - ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABEL)) { - const struct ovs_key_ct_label *cl; + if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) && + ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) { + const struct ovs_key_ct_labels *cl; - cl = nla_data(a[OVS_KEY_ATTR_CT_LABEL]); - SW_FLOW_KEY_MEMCPY(match, ct.label, cl->ct_label, + cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]); + SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels, sizeof(*cl), is_mask); - *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABEL); + *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS); } return 0; } @@ -1973,7 +1979,7 @@ static int validate_set(const struct nlattr *a, case OVS_KEY_ATTR_PRIORITY: case OVS_KEY_ATTR_SKB_MARK: case OVS_KEY_ATTR_CT_MARK: - case OVS_KEY_ATTR_CT_LABEL: + case OVS_KEY_ATTR_CT_LABELS: case OVS_KEY_ATTR_ETHERNET: break; diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index f2ea83ba4763..c7f74aab34b9 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -93,7 +93,8 @@ struct sw_flow *ovs_flow_alloc(void) /* 
Initialize the default stat node. */ stats = kmem_cache_alloc_node(flow_stats_cache, - GFP_KERNEL | __GFP_ZERO, 0); + GFP_KERNEL | __GFP_ZERO, + node_online(0) ? 0 : NUMA_NO_NODE); if (!stats) goto err; diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index dc81dc619aa2..12a36ac21eda 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -280,35 +280,19 @@ void ovs_vport_del(struct vport *vport) */ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats) { - struct net_device *dev = vport->dev; - int i; - - memset(stats, 0, sizeof(*stats)); - stats->rx_errors = dev->stats.rx_errors; - stats->tx_errors = dev->stats.tx_errors; - stats->tx_dropped = dev->stats.tx_dropped; - stats->rx_dropped = dev->stats.rx_dropped; - - stats->rx_dropped += atomic_long_read(&dev->rx_dropped); - stats->tx_dropped += atomic_long_read(&dev->tx_dropped); - - for_each_possible_cpu(i) { - const struct pcpu_sw_netstats *percpu_stats; - struct pcpu_sw_netstats local_stats; - unsigned int start; - - percpu_stats = per_cpu_ptr(dev->tstats, i); - - do { - start = u64_stats_fetch_begin_irq(&percpu_stats->syncp); - local_stats = *percpu_stats; - } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start)); - - stats->rx_bytes += local_stats.rx_bytes; - stats->rx_packets += local_stats.rx_packets; - stats->tx_bytes += local_stats.tx_bytes; - stats->tx_packets += local_stats.tx_packets; - } + const struct rtnl_link_stats64 *dev_stats; + struct rtnl_link_stats64 temp; + + dev_stats = dev_get_stats(vport->dev, &temp); + stats->rx_errors = dev_stats->rx_errors; + stats->tx_errors = dev_stats->tx_errors; + stats->tx_dropped = dev_stats->tx_dropped; + stats->rx_dropped = dev_stats->rx_dropped; + + stats->rx_bytes = dev_stats->rx_bytes; + stats->rx_packets = dev_stats->rx_packets; + stats->tx_bytes = dev_stats->tx_bytes; + stats->tx_packets = dev_stats->tx_packets; } /** @@ -460,6 +444,15 @@ int ovs_vport_receive(struct vport *vport, struct sk_buff *skb, OVS_CB(skb)->input_vport = vport; OVS_CB(skb)->mru = 0; + if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) { + u32 mark; + + mark = skb->mark; + skb_scrub_packet(skb, true); + skb->mark = mark; + tun_info = NULL; + } + /* Extract flow from 'skb' into 'key'. */ error = ovs_flow_key_extract(tun_info, skb, &key); if (unlikely(error)) { diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 2d1be4a760fd..32fcdecdb9e2 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -31,13 +31,17 @@ #define MIRRED_TAB_MASK 7 static LIST_HEAD(mirred_list); +static DEFINE_SPINLOCK(mirred_list_lock); static void tcf_mirred_release(struct tc_action *a, int bind) { struct tcf_mirred *m = to_mirred(a); struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1); + /* We could be called either in a RCU callback or with RTNL lock held. 
*/ + spin_lock_bh(&mirred_list_lock); list_del(&m->tcfm_list); + spin_unlock_bh(&mirred_list_lock); if (dev) dev_put(dev); } @@ -103,10 +107,10 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, } else { if (bind) return 0; - if (!ovr) { - tcf_hash_release(a, bind); + + tcf_hash_release(a, bind); + if (!ovr) return -EEXIST; - } } m = to_mirred(a); @@ -123,7 +127,9 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla, } if (ret == ACT_P_CREATED) { + spin_lock_bh(&mirred_list_lock); list_add(&m->tcfm_list, &mirred_list); + spin_unlock_bh(&mirred_list_lock); tcf_hash_insert(a); } @@ -173,6 +179,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, skb2->skb_iif = skb->dev->ifindex; skb2->dev = dev; + skb_sender_cpu_clear(skb2); err = dev_queue_xmit(skb2); if (err) { @@ -221,7 +228,8 @@ static int mirred_device_event(struct notifier_block *unused, struct tcf_mirred *m; ASSERT_RTNL(); - if (event == NETDEV_UNREGISTER) + if (event == NETDEV_UNREGISTER) { + spin_lock_bh(&mirred_list_lock); list_for_each_entry(m, &mirred_list, tcfm_list) { if (rcu_access_pointer(m->tcfm_dev) == dev) { dev_put(dev); @@ -231,6 +239,8 @@ static int mirred_device_event(struct notifier_block *unused, RCU_INIT_POINTER(m->tcfm_dev, NULL); } } + spin_unlock_bh(&mirred_list_lock); + } return NOTIFY_DONE; } diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 9d15cb6b8cb1..86b04e31e60b 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -368,6 +368,15 @@ static unsigned int hhf_drop(struct Qdisc *sch) return bucket - q->buckets; } +static unsigned int hhf_qdisc_drop(struct Qdisc *sch) +{ + unsigned int prev_backlog; + + prev_backlog = sch->qstats.backlog; + hhf_drop(sch); + return prev_backlog - sch->qstats.backlog; +} + static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct hhf_sched_data *q = qdisc_priv(sch); @@ -696,7 +705,7 @@ static struct Qdisc_ops hhf_qdisc_ops __read_mostly = { .enqueue = hhf_enqueue, .dequeue = hhf_dequeue, .peek = qdisc_peek_dequeued, - .drop = hhf_drop, + .drop = hhf_qdisc_drop, .init = hhf_init, .reset = hhf_reset, .destroy = hhf_destroy, diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 197c3f59ecbf..b00f1f9611d6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1208,20 +1208,22 @@ void sctp_assoc_update(struct sctp_association *asoc, * within this document. * * Our basic strategy is to round-robin transports in priorities - * according to sctp_state_prio_map[] e.g., if no such + * according to sctp_trans_score() e.g., if no such * transport with state SCTP_ACTIVE exists, round-robin through * SCTP_UNKNOWN, etc. You get the picture. 
*/ -static const u8 sctp_trans_state_to_prio_map[] = { - [SCTP_ACTIVE] = 3, /* best case */ - [SCTP_UNKNOWN] = 2, - [SCTP_PF] = 1, - [SCTP_INACTIVE] = 0, /* worst case */ -}; - static u8 sctp_trans_score(const struct sctp_transport *trans) { - return sctp_trans_state_to_prio_map[trans->state]; + switch (trans->state) { + case SCTP_ACTIVE: + return 3; /* best case */ + case SCTP_UNKNOWN: + return 2; + case SCTP_PF: + return 1; + default: /* case SCTP_INACTIVE */ + return 0; /* worst case */ + } } static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1, diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 35df1266bf07..6098d4c42fa9 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer) int error; struct sctp_transport *transport = (struct sctp_transport *) peer; struct sctp_association *asoc = transport->asoc; - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); /* Check whether a task is in the sock. */ - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ @@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer) transport, GFP_ATOMIC); if (error) - asoc->base.sk->sk_err = -error; + sk->sk_err = -error; out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_transport_put(transport); } @@ -285,11 +286,12 @@ out_unlock: static void sctp_generate_timeout_event(struct sctp_association *asoc, sctp_event_timeout_t timeout_type) { - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); int error = 0; - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy: timer %d\n", __func__, timeout_type); @@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc, (void *)timeout_type, GFP_ATOMIC); if (error) - asoc->base.sk->sk_err = -error; + sk->sk_err = -error; out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_association_put(asoc); } @@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data) int error = 0; struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. 
*/ @@ -388,11 +391,11 @@ void sctp_generate_heartbeat_event(unsigned long data) asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); - if (error) - asoc->base.sk->sk_err = -error; + if (error) + sk->sk_err = -error; out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_transport_put(transport); } @@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data) { struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; - struct net *net = sock_net(asoc->base.sk); + struct sock *sk = asoc->base.sk; + struct net *net = sock_net(sk); - bh_lock_sock(asoc->base.sk); - if (sock_owned_by_user(asoc->base.sk)) { + bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { pr_debug("%s: sock is busy\n", __func__); /* Try again later. */ @@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data) asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); out_unlock: - bh_unlock_sock(asoc->base.sk); + bh_unlock_sock(sk); sctp_association_put(asoc); } diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index cb25c89da623..f1e8dafbd507 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c @@ -39,25 +39,6 @@ static int fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct rpcrdma_create_data_internal *cdata) { - struct ib_device_attr *devattr = &ia->ri_devattr; - struct ib_mr *mr; - - /* Obtain an lkey to use for the regbufs, which are - * protected from remote access. - */ - if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) { - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; - } else { - mr = ib_get_dma_mr(ia->ri_pd, IB_ACCESS_LOCAL_WRITE); - if (IS_ERR(mr)) { - pr_err("%s: ib_get_dma_mr for failed with %lX\n", - __func__, PTR_ERR(mr)); - return -ENOMEM; - } - ia->ri_dma_lkey = ia->ri_dma_mr->lkey; - ia->ri_dma_mr = mr; - } - return 0; } diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index d6653f5d0830..5318951b3b53 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -189,11 +189,6 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct ib_device_attr *devattr = &ia->ri_devattr; int depth, delta; - /* Obtain an lkey to use for the regbufs, which are - * protected from remote access. - */ - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; - ia->ri_max_frmr_depth = min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS, devattr->max_fast_reg_page_list_len); diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c index 72cf8b15bbb4..617b76f22154 100644 --- a/net/sunrpc/xprtrdma/physical_ops.c +++ b/net/sunrpc/xprtrdma/physical_ops.c @@ -23,7 +23,6 @@ static int physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, struct rpcrdma_create_data_internal *cdata) { - struct ib_device_attr *devattr = &ia->ri_devattr; struct ib_mr *mr; /* Obtain an rkey to use for RPC data payloads. @@ -37,15 +36,8 @@ physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep, __func__, PTR_ERR(mr)); return -ENOMEM; } - ia->ri_dma_mr = mr; - - /* Obtain an lkey to use for regbufs. 
- */ - if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) - ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; - else - ia->ri_dma_lkey = ia->ri_dma_mr->lkey; + ia->ri_dma_mr = mr; return 0; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index cb5174284074..f0c3ff67ca98 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -136,7 +136,8 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt, ctxt->direction = DMA_FROM_DEVICE; ctxt->read_hdr = head; pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd); - read = min_t(int, pages_needed << PAGE_SHIFT, rs_length); + read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset, + rs_length); for (pno = 0; pno < pages_needed; pno++) { int len = min_t(int, rs_length, PAGE_SIZE - pg_off); @@ -235,7 +236,8 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt, ctxt->direction = DMA_FROM_DEVICE; ctxt->frmr = frmr; pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len); - read = min_t(int, pages_needed << PAGE_SHIFT, rs_length); + read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset, + rs_length); frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]); frmr->direction = DMA_FROM_DEVICE; @@ -531,7 +533,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp, rqstp->rq_arg.page_base = head->arg.page_base; /* rq_respages starts after the last arg page */ - rqstp->rq_respages = &rqstp->rq_arg.pages[page_no]; + rqstp->rq_respages = &rqstp->rq_pages[page_no]; rqstp->rq_next_page = rqstp->rq_respages + 1; /* Rebuild rq_arg head and tail. */ diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 64443eb754ad..41e452bc580c 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -270,8 +270,8 @@ xprt_rdma_destroy(struct rpc_xprt *xprt) xprt_clear_connected(xprt); - rpcrdma_buffer_destroy(&r_xprt->rx_buf); rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia); + rpcrdma_buffer_destroy(&r_xprt->rx_buf); rpcrdma_ia_close(&r_xprt->rx_ia); xprt_rdma_free_addresses(xprt); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 682996779970..5502d4dade74 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -543,11 +543,8 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) } if (memreg == RPCRDMA_FRMR) { - /* Requires both frmr reg and local dma lkey */ - if (((devattr->device_cap_flags & - (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) != - (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) || - (devattr->max_fast_reg_page_list_len == 0)) { + if (!(devattr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) || + (devattr->max_fast_reg_page_list_len == 0)) { dprintk("RPC: %s: FRMR registration " "not supported by HCA\n", __func__); memreg = RPCRDMA_MTHCAFMR; @@ -557,6 +554,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) if (!ia->ri_device->alloc_fmr) { dprintk("RPC: %s: MTHCAFMR registration " "not supported by HCA\n", __func__); + rc = -EINVAL; goto out3; } } @@ -755,19 +753,22 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) cancel_delayed_work_sync(&ep->rep_connect_worker); - if (ia->ri_id->qp) { + if (ia->ri_id->qp) rpcrdma_ep_disconnect(ep, ia); + + rpcrdma_clean_cq(ep->rep_attr.recv_cq); + rpcrdma_clean_cq(ep->rep_attr.send_cq); + + if (ia->ri_id->qp) { rdma_destroy_qp(ia->ri_id); ia->ri_id->qp = NULL; } - 
rpcrdma_clean_cq(ep->rep_attr.recv_cq); rc = ib_destroy_cq(ep->rep_attr.recv_cq); if (rc) dprintk("RPC: %s: ib_destroy_cq returned %i\n", __func__, rc); - rpcrdma_clean_cq(ep->rep_attr.send_cq); rc = ib_destroy_cq(ep->rep_attr.send_cq); if (rc) dprintk("RPC: %s: ib_destroy_cq returned %i\n", @@ -1252,7 +1253,7 @@ rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags) goto out_free; iov->length = size; - iov->lkey = ia->ri_dma_lkey; + iov->lkey = ia->ri_pd->local_dma_lkey; rb->rg_size = size; rb->rg_owner = NULL; return rb; diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 02512221b8bc..c09414e6f91b 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -65,7 +65,6 @@ struct rpcrdma_ia { struct rdma_cm_id *ri_id; struct ib_pd *ri_pd; struct ib_mr *ri_dma_mr; - u32 ri_dma_lkey; struct completion ri_done; int ri_async_rc; unsigned int ri_max_frmr_depth; diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index fda38f830a10..77f5d17e2612 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -634,6 +635,8 @@ static int switchdev_port_br_afspec(struct net_device *dev, if (nla_len(attr) != sizeof(struct bridge_vlan_info)) return -EINVAL; vinfo = nla_data(attr); + if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK) + return -EINVAL; vlan->flags = vinfo->flags; if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) { if (vlan->vid_begin) diff --git a/net/tipc/msg.h b/net/tipc/msg.h index a82c5848d4bc..5351a3f97e8e 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -357,7 +357,7 @@ static inline u32 msg_importance(struct tipc_msg *m) if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m))) return usr; if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)) - return msg_bits(m, 5, 13, 0x7); + return msg_bits(m, 9, 0, 0x7); return TIPC_SYSTEM_IMPORTANCE; } @@ -366,7 +366,7 @@ static inline void msg_set_importance(struct tipc_msg *m, u32 i) int usr = msg_user(m); if (likely((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER))) - msg_set_bits(m, 5, 13, 0x7, i); + msg_set_bits(m, 9, 0, 0x7, i); else if (i < TIPC_SYSTEM_IMPORTANCE) msg_set_user(m, i); else diff --git a/net/tipc/node.c b/net/tipc/node.c index 703875fd6cde..2c32a83037a3 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1116,7 +1116,7 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, } /* Ignore duplicate packets */ - if (less(oseqno, rcv_nxt)) + if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt)) return true; /* Initiate or update failover mode if applicable */ @@ -1146,8 +1146,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, if (!pl || !tipc_link_is_up(pl)) return true; - /* Initiate or update synch mode if applicable */ - if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) { + /* Initiate synch mode if applicable */ + if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) { syncpt = iseqno + exp_pkts - 1; if (!tipc_link_is_up(l)) { tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 03ee4d359f6a..94f658235fb4 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2064,6 +2064,11 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state) goto out; } + if (flags & MSG_PEEK) + skip = sk_peek_offset(sk, flags); + else + skip = 0; + do { int chunk; struct sk_buff *skb, *last; @@ -2112,7 +2117,6 @@ unlock: break; 
} - skip = sk_peek_offset(sk, flags); while (skip >= unix_skb_len(skb)) { skip -= unix_skb_len(skb); last = skb; @@ -2181,6 +2185,17 @@ unlock: sk_peek_offset_fwd(sk, chunk); + if (UNIXCB(skb).fp) + break; + + skip = 0; + last = skb; + last_len = skb->len; + unix_state_lock(sk); + skb = skb_peek_next(skb, &sk->sk_receive_queue); + if (skb) + goto again; + unix_state_unlock(sk); break; } } while (size); diff --git a/samples/kprobes/jprobe_example.c b/samples/kprobes/jprobe_example.c index 9119ac6a8270..c285a3b8a9f1 100644 --- a/samples/kprobes/jprobe_example.c +++ b/samples/kprobes/jprobe_example.c @@ -1,13 +1,13 @@ /* * Here's a sample kernel module showing the use of jprobes to dump - * the arguments of do_fork(). + * the arguments of _do_fork(). * * For more information on theory of operation of jprobes, see * Documentation/kprobes.txt * * Build and insert the kernel module as done in the kprobe example. * You will see the trace data in /var/log/messages and on the - * console whenever do_fork() is invoked to create a new process. + * console whenever _do_fork() is invoked to create a new process. * (Some messages may be suppressed if syslogd is configured to * eliminate duplicate messages.) */ @@ -17,13 +17,13 @@ #include /* - * Jumper probe for do_fork. + * Jumper probe for _do_fork. * Mirror principle enables access to arguments of the probed routine * from the probe handler. */ -/* Proxy routine having the same arguments as actual do_fork() routine */ -static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, +/* Proxy routine having the same arguments as actual _do_fork() routine */ +static long j_do_fork(unsigned long clone_flags, unsigned long stack_start, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { @@ -36,9 +36,9 @@ static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, } static struct jprobe my_jprobe = { - .entry = jdo_fork, + .entry = j_do_fork, .kp = { - .symbol_name = "do_fork", + .symbol_name = "_do_fork", }, }; diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c index 366db1a9fb65..727eb21c9c56 100644 --- a/samples/kprobes/kprobe_example.c +++ b/samples/kprobes/kprobe_example.c @@ -1,13 +1,13 @@ /* * NOTE: This example is works on x86 and powerpc. * Here's a sample kernel module showing the use of kprobes to dump a - * stack trace and selected registers when do_fork() is called. + * stack trace and selected registers when _do_fork() is called. * * For more information on theory of operation of kprobes, see * Documentation/kprobes.txt * * You will see the trace data in /var/log/messages and on the console - * whenever do_fork() is invoked to create a new process. + * whenever _do_fork() is invoked to create a new process. 
*/ #include @@ -16,7 +16,7 @@ /* For each probe you need to allocate a kprobe structure */ static struct kprobe kp = { - .symbol_name = "do_fork", + .symbol_name = "_do_fork", }; /* kprobe pre_handler: called just before the probed instruction is executed */ diff --git a/samples/kprobes/kretprobe_example.c b/samples/kprobes/kretprobe_example.c index 1041b6731598..ebb1d1aed547 100644 --- a/samples/kprobes/kretprobe_example.c +++ b/samples/kprobes/kretprobe_example.c @@ -7,7 +7,7 @@ * * usage: insmod kretprobe_example.ko func= * - * If no func_name is specified, do_fork is instrumented + * If no func_name is specified, _do_fork is instrumented * * For more information on theory of operation of kretprobes, see * Documentation/kprobes.txt @@ -25,7 +25,7 @@ #include #include -static char func_name[NAME_MAX] = "do_fork"; +static char func_name[NAME_MAX] = "_do_fork"; module_param_string(func, func_name, NAME_MAX, S_IRUGO); MODULE_PARM_DESC(func, "Function to kretprobe; this module will report the" " function's execution time"); diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c index 6ce5945a0b89..b071bf476fea 100644 --- a/scripts/extract-cert.c +++ b/scripts/extract-cert.c @@ -17,13 +17,9 @@ #include #include #include -#include #include -#include #include -#include #include -#include #include #include diff --git a/scripts/package/builddeb b/scripts/package/builddeb index 0cd46e129920..b967e4f9fed2 100755 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb @@ -115,7 +115,7 @@ esac BUILD_DEBUG="$(grep -s '^CONFIG_DEBUG_INFO=y' $KCONFIG_CONFIG || true)" # Setup the directory structure -rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" +rm -rf "$tmpdir" "$fwdir" "$kernel_headers_dir" "$libc_headers_dir" "$dbg_dir" $objtree/debian/files mkdir -m 755 -p "$tmpdir/DEBIAN" mkdir -p "$tmpdir/lib" "$tmpdir/boot" mkdir -p "$fwdir/lib/firmware/$version/" @@ -408,7 +408,7 @@ binary-arch: \$(MAKE) KDEB_SOURCENAME=${sourcename} KDEB_PKGVERSION=${packageversion} bindeb-pkg clean: - rm -rf debian/*tmp + rm -rf debian/*tmp debian/files mv debian/ debian.backup # debian/ might be cleaned away \$(MAKE) clean mv debian.backup debian diff --git a/scripts/sign-file.c b/scripts/sign-file.c index c3899ca4811c..250a7a645033 100755 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c @@ -20,13 +20,34 @@ #include #include #include +#include #include #include #include -#include #include #include +/* + * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to + * assume that it's not available and its header file is missing and that we + * should use PKCS#7 instead. Switching to the older PKCS#7 format restricts + * the options we have on specifying the X.509 certificate we want. + * + * Further, older versions of OpenSSL don't support manually adding signers to + * the PKCS#7 message so have to accept that we get a certificate included in + * the signature message. Nor do such older versions of OpenSSL support + * signing with anything other than SHA1 - so we're stuck with that if such is + * the case. 
+ */ +#if OPENSSL_VERSION_NUMBER < 0x10000000L +#define USE_PKCS7 +#endif +#ifndef USE_PKCS7 +#include <openssl/cms.h> +#else +#include <openssl/pkcs7.h> +#endif + struct module_signature { uint8_t algo; /* Public-key crypto algorithm [0] */ uint8_t hash; /* Digest algorithm [0] */ @@ -110,30 +131,42 @@ int main(int argc, char **argv) struct module_signature sig_info = { .id_type = PKEY_ID_PKCS7 }; char *hash_algo = NULL; char *private_key_name, *x509_name, *module_name, *dest_name; - bool save_cms = false, replace_orig; + bool save_sig = false, replace_orig; bool sign_only = false; unsigned char buf[4096]; - unsigned long module_size, cms_size; - unsigned int use_keyid = 0, use_signed_attrs = CMS_NOATTR; + unsigned long module_size, sig_size; + unsigned int use_signed_attrs; const EVP_MD *digest_algo; EVP_PKEY *private_key; +#ifndef USE_PKCS7 CMS_ContentInfo *cms; + unsigned int use_keyid = 0; +#else + PKCS7 *pkcs7; +#endif X509 *x509; BIO *b, *bd = NULL, *bm; int opt, n; - OpenSSL_add_all_algorithms(); ERR_load_crypto_strings(); ERR_clear_error(); key_pass = getenv("KBUILD_SIGN_PIN"); +#ifndef USE_PKCS7 + use_signed_attrs = CMS_NOATTR; +#else + use_signed_attrs = PKCS7_NOATTR; +#endif + do { opt = getopt(argc, argv, "dpk"); switch (opt) { - case 'p': save_cms = true; break; - case 'd': sign_only = true; save_cms = true; break; + case 'p': save_sig = true; break; + case 'd': sign_only = true; save_sig = true; break; +#ifndef USE_PKCS7 case 'k': use_keyid = CMS_USE_KEYID; break; +#endif case -1: break; default: format(); } @@ -157,6 +190,14 @@ int main(int argc, char **argv) replace_orig = true; } +#ifdef USE_PKCS7 + if (strcmp(hash_algo, "sha1") != 0) { + fprintf(stderr, "sign-file: %s only supports SHA1 signing\n", + OPENSSL_VERSION_TEXT); + exit(3); + } +#endif + /* Read the private key and the X.509 cert the PKCS#7 message * will point to. */ @@ -213,7 +254,8 @@ int main(int argc, char **argv) bm = BIO_new_file(module_name, "rb"); ERR(!bm, "%s", module_name); - /* Load the CMS message from the digest buffer. */ +#ifndef USE_PKCS7 + /* Load the signature message from the digest buffer.
*/ cms = CMS_sign(NULL, NULL, NULL, NULL, CMS_NOCERTS | CMS_PARTIAL | CMS_BINARY | CMS_DETACHED | CMS_STREAM); ERR(!cms, "CMS_sign"); @@ -221,17 +263,31 @@ int main(int argc, char **argv) ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo, CMS_NOCERTS | CMS_BINARY | CMS_NOSMIMECAP | use_keyid | use_signed_attrs), - "CMS_sign_add_signer"); + "CMS_add1_signer"); ERR(CMS_final(cms, bm, NULL, CMS_NOCERTS | CMS_BINARY) < 0, "CMS_final"); - if (save_cms) { - char *cms_name; +#else + pkcs7 = PKCS7_sign(x509, private_key, NULL, bm, + PKCS7_NOCERTS | PKCS7_BINARY | + PKCS7_DETACHED | use_signed_attrs); + ERR(!pkcs7, "PKCS7_sign"); +#endif - ERR(asprintf(&cms_name, "%s.p7s", module_name) < 0, "asprintf"); - b = BIO_new_file(cms_name, "wb"); - ERR(!b, "%s", cms_name); - ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, "%s", cms_name); + if (save_sig) { + char *sig_file_name; + + ERR(asprintf(&sig_file_name, "%s.p7s", module_name) < 0, + "asprintf"); + b = BIO_new_file(sig_file_name, "wb"); + ERR(!b, "%s", sig_file_name); +#ifndef USE_PKCS7 + ERR(i2d_CMS_bio_stream(b, cms, NULL, 0) < 0, + "%s", sig_file_name); +#else + ERR(i2d_PKCS7_bio(b, pkcs7) < 0, + "%s", sig_file_name); +#endif BIO_free(b); } @@ -247,9 +303,13 @@ int main(int argc, char **argv) ERR(n < 0, "%s", module_name); module_size = BIO_number_written(bd); +#ifndef USE_PKCS7 ERR(i2d_CMS_bio_stream(bd, cms, NULL, 0) < 0, "%s", dest_name); - cms_size = BIO_number_written(bd) - module_size; - sig_info.sig_len = htonl(cms_size); +#else + ERR(i2d_PKCS7_bio(bd, pkcs7) < 0, "%s", dest_name); +#endif + sig_size = BIO_number_written(bd) - module_size; + sig_info.sig_len = htonl(sig_size); ERR(BIO_write(bd, &sig_info, sizeof(sig_info)) < 0, "%s", dest_name); ERR(BIO_write(bd, magic_number, sizeof(magic_number) - 1) < 0, "%s", dest_name); diff --git a/security/keys/gc.c b/security/keys/gc.c index c7952375ac53..addf060399e0 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c @@ -134,6 +134,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys) kdebug("- %u", key->serial); key_check(key); + /* Throw away the key data if the key is instantiated */ + if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) && + !test_bit(KEY_FLAG_NEGATIVE, &key->flags) && + key->type->destroy) + key->type->destroy(key); + security_key_free(key); /* deal with the user's key tracking and quota */ @@ -148,10 +154,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys) if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) atomic_dec(&key->user->nikeys); - /* now throw away the key memory */ - if (key->type->destroy) - key->type->destroy(key); - key_user_put(key->user); kfree(key->description); diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 486ef6fa393b..0d6253124278 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -440,6 +440,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx, kenter(""); + if (ctx->index_key.type == &key_type_keyring) + return ERR_PTR(-EPERM); + user = key_user_lookup(current_fsuid()); if (!user) return ERR_PTR(-ENOMEM); diff --git a/sound/hda/ext/hdac_ext_bus.c b/sound/hda/ext/hdac_ext_bus.c index 4449d1a99089..2433f7c81472 100644 --- a/sound/hda/ext/hdac_ext_bus.c +++ b/sound/hda/ext/hdac_ext_bus.c @@ -19,6 +19,7 @@ #include #include +#include #include MODULE_DESCRIPTION("HDA extended core"); diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 37f43a1b34ef..a249d5486889 100644 --- a/sound/pci/hda/hda_codec.c +++ 
b/sound/pci/hda/hda_codec.c @@ -3367,10 +3367,8 @@ int snd_hda_codec_build_pcms(struct hda_codec *codec) int dev, err; err = snd_hda_codec_parse_pcms(codec); - if (err < 0) { - snd_hda_codec_reset(codec); + if (err < 0) return err; - } /* attach a new PCM streams */ list_for_each_entry(cpcm, &codec->pcm_list_head, list) { diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 584a0343ab0c..85813de26da8 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -633,6 +633,7 @@ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11), SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6), SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6), + SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11), {} /* terminator */ }; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index ca03c40609fc..2f0ec7c45fc7 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -819,6 +819,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410), SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD), + SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index afec6dc9f91f..16b8dcba5c12 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5306,6 +5306,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 9d947aef2c8b..def5cc8dff02 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -4520,7 +4520,11 @@ static int patch_stac92hd73xx(struct hda_codec *codec) return err; spec = codec->spec; - codec->power_save_node = 1; + /* enable power_save_node only for new 92HD89xx chips, as it causes + * click noises on old 92HD73xx chips. 
+ */ + if ((codec->core.vendor_id & 0xfffffff0) != 0x111d7670) + codec->power_save_node = 1; spec->linear_tone_beep = 0; spec->gen.mixer_nid = 0x1d; spec->have_spdif_mux = 1; diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c index 58c3164802b8..8c907ebea189 100644 --- a/sound/soc/au1x/db1200.c +++ b/sound/soc/au1x/db1200.c @@ -129,6 +129,8 @@ static struct snd_soc_dai_link db1300_i2s_dai = { .cpu_dai_name = "au1xpsc_i2s.2", .platform_name = "au1xpsc-pcm.2", .codec_name = "wm8731.0-001b", + .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF | + SND_SOC_DAIFMT_CBM_CFM, .ops = &db1200_i2s_wm8731_ops, }; @@ -146,6 +148,8 @@ static struct snd_soc_dai_link db1550_i2s_dai = { .cpu_dai_name = "au1xpsc_i2s.3", .platform_name = "au1xpsc-pcm.3", .codec_name = "wm8731.0-001b", + .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF | + SND_SOC_DAIFMT_CBM_CFM, .ops = &db1200_i2s_wm8731_ops, }; diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c index 3c2f0f8d6266..f823eb502367 100644 --- a/sound/soc/codecs/rt298.c +++ b/sound/soc/codecs/rt298.c @@ -50,24 +50,24 @@ struct rt298_priv { }; static struct reg_default rt298_index_def[] = { - { 0x01, 0xaaaa }, - { 0x02, 0x8aaa }, + { 0x01, 0xa5a8 }, + { 0x02, 0x8e95 }, { 0x03, 0x0002 }, - { 0x04, 0xaf01 }, - { 0x08, 0x000d }, - { 0x09, 0xd810 }, - { 0x0a, 0x0120 }, + { 0x04, 0xaf67 }, + { 0x08, 0x200f }, + { 0x09, 0xd010 }, + { 0x0a, 0x0100 }, { 0x0b, 0x0000 }, { 0x0d, 0x2800 }, - { 0x0f, 0x0000 }, - { 0x19, 0x0a17 }, + { 0x0f, 0x0022 }, + { 0x19, 0x0217 }, { 0x20, 0x0020 }, { 0x33, 0x0208 }, { 0x46, 0x0300 }, - { 0x49, 0x0004 }, - { 0x4f, 0x50e9 }, - { 0x50, 0x2000 }, - { 0x63, 0x2902 }, + { 0x49, 0x4004 }, + { 0x4f, 0x50c9 }, + { 0x50, 0x3000 }, + { 0x63, 0x1b02 }, { 0x67, 0x1111 }, { 0x68, 0x1016 }, { 0x69, 0x273f }, @@ -1214,7 +1214,7 @@ static int rt298_i2c_probe(struct i2c_client *i2c, mdelay(10); if (!rt298->pdata.gpio2_en) - regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x4000); + regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0x40); else regmap_write(rt298->regmap, RT298_SET_DMIC2_DEFAULT, 0); diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 268a28bd1df4..5c101af0ac63 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -519,11 +519,11 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = { RT5645_L_VOL_SFT + 1, RT5645_R_VOL_SFT + 1, 63, 0, adc_vol_tlv), /* ADC Boost Volume Control */ - SOC_DOUBLE_TLV("STO1 ADC Boost Gain", RT5645_ADC_BST_VOL1, + SOC_DOUBLE_TLV("ADC Boost Capture Volume", RT5645_ADC_BST_VOL1, RT5645_STO1_ADC_L_BST_SFT, RT5645_STO1_ADC_R_BST_SFT, 3, 0, adc_bst_tlv), - SOC_DOUBLE_TLV("STO2 ADC Boost Gain", RT5645_ADC_BST_VOL1, - RT5645_STO2_ADC_L_BST_SFT, RT5645_STO2_ADC_R_BST_SFT, 3, 0, + SOC_DOUBLE_TLV("Mono ADC Boost Capture Volume", RT5645_ADC_BST_VOL2, + RT5645_MONO_ADC_L_BST_SFT, RT5645_MONO_ADC_R_BST_SFT, 3, 0, adc_bst_tlv), /* I2S2 function select */ diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h index 0e4cfc6ac649..8c964cfb120d 100644 --- a/sound/soc/codecs/rt5645.h +++ b/sound/soc/codecs/rt5645.h @@ -39,8 +39,8 @@ #define RT5645_STO1_ADC_DIG_VOL 0x1c #define RT5645_MONO_ADC_DIG_VOL 0x1d #define RT5645_ADC_BST_VOL1 0x1e -/* Mixer - D-D */ #define RT5645_ADC_BST_VOL2 0x20 +/* Mixer - D-D */ #define RT5645_STO1_ADC_MIXER 0x27 #define RT5645_MONO_ADC_MIXER 0x28 #define RT5645_AD_DA_MIXER 0x29 @@ -315,12 +315,14 @@ #define RT5645_STO1_ADC_R_BST_SFT 12 #define RT5645_STO1_ADC_COMP_MASK (0x3 << 10) #define 
RT5645_STO1_ADC_COMP_SFT 10 -#define RT5645_STO2_ADC_L_BST_MASK (0x3 << 8) -#define RT5645_STO2_ADC_L_BST_SFT 8 -#define RT5645_STO2_ADC_R_BST_MASK (0x3 << 6) -#define RT5645_STO2_ADC_R_BST_SFT 6 -#define RT5645_STO2_ADC_COMP_MASK (0x3 << 4) -#define RT5645_STO2_ADC_COMP_SFT 4 + +/* ADC Boost Volume Control (0x20) */ +#define RT5645_MONO_ADC_L_BST_MASK (0x3 << 14) +#define RT5645_MONO_ADC_L_BST_SFT 14 +#define RT5645_MONO_ADC_R_BST_MASK (0x3 << 12) +#define RT5645_MONO_ADC_R_BST_SFT 12 +#define RT5645_MONO_ADC_COMP_MASK (0x3 << 10) +#define RT5645_MONO_ADC_COMP_SFT 10 /* Stereo2 ADC Mixer Control (0x26) */ #define RT5645_STO2_ADC_SRC_MASK (0x1 << 15) diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index bfda25ef0dd4..f540f82b1f27 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -1376,8 +1376,8 @@ static int sgtl5000_probe(struct snd_soc_codec *codec) sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT); snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL, - SGTL5000_BIAS_R_MASK, - sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT); + SGTL5000_BIAS_VOLT_MASK, + sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT); /* * disable DAP * TODO: @@ -1549,7 +1549,7 @@ static int sgtl5000_i2c_probe(struct i2c_client *client, else { sgtl5000->micbias_voltage = 0; dev_err(&client->dev, - "Unsuitable MicBias resistor\n"); + "Unsuitable MicBias voltage\n"); } } else { sgtl5000->micbias_voltage = 0; diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c index e3a0bca28bcf..cc1d3981fa4b 100644 --- a/sound/soc/codecs/tas2552.c +++ b/sound/soc/codecs/tas2552.c @@ -549,7 +549,7 @@ static struct snd_soc_dai_driver tas2552_dai[] = { /* * DAC digital volumes. From -7 to 24 dB in 1 dB steps */ -static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 0); +static DECLARE_TLV_DB_SCALE(dac_tlv, -700, 100, 0); static const char * const tas2552_din_source_select[] = { "Muted", diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c index 1a82b19b2644..8739126a1f6f 100644 --- a/sound/soc/codecs/tlv320aic3x.c +++ b/sound/soc/codecs/tlv320aic3x.c @@ -1509,14 +1509,17 @@ static int aic3x_init(struct snd_soc_codec *codec) snd_soc_write(codec, PGAL_2_LLOPM_VOL, DEFAULT_VOL); snd_soc_write(codec, PGAR_2_RLOPM_VOL, DEFAULT_VOL); - /* Line2 to HP Bypass default volume, disconnect from Output Mixer */ - snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL); - snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL); - snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL); - snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL); - /* Line2 Line Out default volume, disconnect from Output Mixer */ - snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL); - snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL); + /* On tlv320aic3104, these registers are reserved and must not be written */ + if (aic3x->model != AIC3X_MODEL_3104) { + /* Line2 to HP Bypass default volume, disconnect from Output Mixer */ + snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL); + snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL); + snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL); + snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL); + /* Line2 Line Out default volume, disconnect from Output Mixer */ + snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL); + snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL); + } switch (aic3x->model) { case AIC3X_MODEL_3X: diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 293e47a6ff59..39ebd7bf4f53 
100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c @@ -3760,7 +3760,7 @@ static int wm8962_i2c_probe(struct i2c_client *i2c, ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8962, &wm8962_dai, 1); if (ret < 0) - goto err_enable; + goto err_pm_runtime; regcache_cache_only(wm8962->regmap, true); @@ -3769,6 +3769,8 @@ static int wm8962_i2c_probe(struct i2c_client *i2c, return 0; +err_pm_runtime: + pm_runtime_disable(&i2c->dev); err_enable: regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies); err: @@ -3778,6 +3780,7 @@ err: static int wm8962_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); + pm_runtime_disable(&client->dev); return 0; } @@ -3805,6 +3808,8 @@ static int wm8962_runtime_resume(struct device *dev) wm8962_reset(wm8962); + regcache_mark_dirty(wm8962->regmap); + /* SYSCLK defaults to on; make sure it is off so we can safely * write to registers if the device is declocked. */ diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c index a3e97b46b64e..ba34252b7bba 100644 --- a/sound/soc/dwc/designware_i2s.c +++ b/sound/soc/dwc/designware_i2s.c @@ -131,23 +131,32 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream) if (stream == SNDRV_PCM_STREAM_PLAYBACK) { for (i = 0; i < 4; i++) - i2s_write_reg(dev->i2s_base, TOR(i), 0); + i2s_read_reg(dev->i2s_base, TOR(i)); } else { for (i = 0; i < 4; i++) - i2s_write_reg(dev->i2s_base, ROR(i), 0); + i2s_read_reg(dev->i2s_base, ROR(i)); } } static void i2s_start(struct dw_i2s_dev *dev, struct snd_pcm_substream *substream) { - + u32 i, irq; i2s_write_reg(dev->i2s_base, IER, 1); - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + for (i = 0; i < 4; i++) { + irq = i2s_read_reg(dev->i2s_base, IMR(i)); + i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x30); + } i2s_write_reg(dev->i2s_base, ITER, 1); - else + } else { + for (i = 0; i < 4; i++) { + irq = i2s_read_reg(dev->i2s_base, IMR(i)); + i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x03); + } i2s_write_reg(dev->i2s_base, IRER, 1); + } i2s_write_reg(dev->i2s_base, CER, 1); } diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c index 48b2d24dd1f0..b95132e2f9dc 100644 --- a/sound/soc/fsl/imx-ssi.c +++ b/sound/soc/fsl/imx-ssi.c @@ -95,7 +95,8 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: /* data on rising edge of bclk, frame low 1clk before data */ - strcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0; + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSI | + SSI_STCR_TEFS; scr |= SSI_SCR_NET; if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) { scr &= ~SSI_I2S_MODE_MASK; @@ -104,33 +105,31 @@ static int imx_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) break; case SND_SOC_DAIFMT_LEFT_J: /* data on rising edge of bclk, frame high with data */ - strcr |= SSI_STCR_TXBIT0; + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP; break; case SND_SOC_DAIFMT_DSP_B: /* data on rising edge of bclk, frame high with data */ - strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0; + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL; break; case SND_SOC_DAIFMT_DSP_A: /* data on rising edge of bclk, frame high 1clk before data */ - strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS; + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL | + SSI_STCR_TEFS; break; } /* DAI clock inversion */ switch (fmt & 
SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_IB_IF: - strcr |= SSI_STCR_TFSI; - strcr &= ~SSI_STCR_TSCKP; + strcr ^= SSI_STCR_TSCKP | SSI_STCR_TFSI; break; case SND_SOC_DAIFMT_IB_NF: - strcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI); + strcr ^= SSI_STCR_TSCKP; break; case SND_SOC_DAIFMT_NB_IF: - strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP; + strcr ^= SSI_STCR_TFSI; break; case SND_SOC_DAIFMT_NB_NF: - strcr &= ~SSI_STCR_TFSI; - strcr |= SSI_STCR_TSCKP; break; } diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index 100d92b5b77e..05977ae1ff2a 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -206,6 +206,34 @@ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol, } EXPORT_SYMBOL_GPL(snd_soc_info_volsw); +/** + * snd_soc_info_volsw_sx - Mixer info callback for SX TLV controls + * @kcontrol: mixer control + * @uinfo: control element information + * + * Callback to provide information about a single mixer control, or a double + * mixer control that spans 2 registers of the SX TLV type. SX TLV controls + * have a range that represents both positive and negative values either side + * of zero but without a sign bit. + * + * Returns 0 for success. + */ +int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_info *uinfo) +{ + struct soc_mixer_control *mc = + (struct soc_mixer_control *)kcontrol->private_value; + + snd_soc_info_volsw(kcontrol, uinfo); + /* Max represents the number of levels in an SX control not the + * maximum value, so add the minimum value back on + */ + uinfo->value.integer.max += mc->min; + + return 0; +} +EXPORT_SYMBOL_GPL(snd_soc_info_volsw_sx); + /** * snd_soc_get_volsw - single mixer get callback * @kcontrol: mixer control diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c index 82e350e9501c..ac75816ada7c 100644 --- a/sound/synth/emux/emux_oss.c +++ b/sound/synth/emux/emux_oss.c @@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu) struct snd_seq_oss_reg *arg; struct snd_seq_device *dev; - if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS, + /* using device#1 here for avoiding conflicts with OPL3 */ + if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS, sizeof(struct snd_seq_oss_reg), &dev) < 0) return; diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 2975632d51e2..c8fe6d177119 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -41,6 +41,7 @@ FEATURE_TESTS ?= \ libelf-getphdrnum \ libelf-mmap \ libnuma \ + numa_num_possible_cpus \ libperl \ libpython \ libpython-version \ @@ -51,7 +52,8 @@ FEATURE_TESTS ?= \ timerfd \ libdw-dwarf-unwind \ zlib \ - lzma + lzma \ + get_cpuid FEATURE_DISPLAY ?= \ dwarf \ @@ -61,13 +63,15 @@ FEATURE_DISPLAY ?= \ libbfd \ libelf \ libnuma \ + numa_num_possible_cpus \ libperl \ libpython \ libslang \ libunwind \ libdw-dwarf-unwind \ zlib \ - lzma + lzma \ + get_cpuid # Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features. 
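Note: the two new FEATURE_TESTS/FEATURE_DISPLAY entries add build-time probes whose results later gate HAVE_LIBNUMA_SUPPORT and HAVE_AUXTRACE_SUPPORT in tools/perf/config/Makefile (see the hunks further below). A hedged sketch of how code guarded by those defines typically looks; the helper functions here are illustrative and are not part of this patch.

#ifdef HAVE_LIBNUMA_SUPPORT
#include <numa.h>

static int possible_cpus(void)
{
	/* numa_num_possible_cpus() needs libnuma >= 2.0.8, which is exactly
	 * what the new numa_num_possible_cpus feature test checks for. */
	return numa_num_possible_cpus();
}
#endif

#ifdef HAVE_AUXTRACE_SUPPORT
#include <cpuid.h>

static int cpu_vendor_leaf(unsigned int *ebx, unsigned int *ecx,
			   unsigned int *edx)
{
	unsigned int eax = 0;

	/* __get_cpuid() is the GCC helper the get_cpuid feature test compiles;
	 * it returns 1 if the requested leaf is supported, 0 otherwise. */
	return __get_cpuid(0, &eax, ebx, ecx, edx) ? 0 : -1;
}
#endif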
# If in the future we need per-feature checks/flags for features not diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 74ca42093d70..e43a2971bf56 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -19,6 +19,7 @@ FILES= \ test-libelf-getphdrnum.bin \ test-libelf-mmap.bin \ test-libnuma.bin \ + test-numa_num_possible_cpus.bin \ test-libperl.bin \ test-libpython.bin \ test-libpython-version.bin \ @@ -34,7 +35,8 @@ FILES= \ test-compile-x32.bin \ test-zlib.bin \ test-lzma.bin \ - test-bpf.bin + test-bpf.bin \ + test-get_cpuid.bin CC := $(CROSS_COMPILE)gcc -MD PKG_CONFIG := $(CROSS_COMPILE)pkg-config @@ -87,6 +89,9 @@ test-libelf-getphdrnum.bin: test-libnuma.bin: $(BUILD) -lnuma +test-numa_num_possible_cpus.bin: + $(BUILD) -lnuma + test-libunwind.bin: $(BUILD) -lelf @@ -162,6 +167,9 @@ test-zlib.bin: test-lzma.bin: $(BUILD) -llzma +test-get_cpuid.bin: + $(BUILD) + test-bpf.bin: $(BUILD) diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c index 84689a67814a..33cf6f20bd4e 100644 --- a/tools/build/feature/test-all.c +++ b/tools/build/feature/test-all.c @@ -77,6 +77,10 @@ # include "test-libnuma.c" #undef main +#define main main_test_numa_num_possible_cpus +# include "test-numa_num_possible_cpus.c" +#undef main + #define main main_test_timerfd # include "test-timerfd.c" #undef main @@ -117,6 +121,10 @@ # include "test-lzma.c" #undef main +#define main main_test_get_cpuid +# include "test-get_cpuid.c" +#undef main + int main(int argc, char *argv[]) { main_test_libpython(); @@ -136,6 +144,7 @@ int main(int argc, char *argv[]) main_test_libbfd(); main_test_backtrace(); main_test_libnuma(); + main_test_numa_num_possible_cpus(); main_test_timerfd(); main_test_stackprotector_all(); main_test_libdw_dwarf_unwind(); @@ -143,6 +152,7 @@ int main(int argc, char *argv[]) main_test_zlib(); main_test_pthread_attr_setaffinity_np(); main_test_lzma(); + main_test_get_cpuid(); return 0; } diff --git a/tools/build/feature/test-get_cpuid.c b/tools/build/feature/test-get_cpuid.c new file mode 100644 index 000000000000..d7a2c407130d --- /dev/null +++ b/tools/build/feature/test-get_cpuid.c @@ -0,0 +1,7 @@ +#include + +int main(void) +{ + unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0; + return __get_cpuid(0x15, &eax, &ebx, &ecx, &edx); +} diff --git a/tools/build/feature/test-numa_num_possible_cpus.c b/tools/build/feature/test-numa_num_possible_cpus.c new file mode 100644 index 000000000000..2606e94b0659 --- /dev/null +++ b/tools/build/feature/test-numa_num_possible_cpus.c @@ -0,0 +1,6 @@ +#include + +int main(void) +{ + return numa_num_possible_cpus(); +} diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index 4d885934b919..cf42b090477b 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c @@ -3795,7 +3795,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, struct format_field *field; struct printk_map *printk; long long val, fval; - unsigned long addr; + unsigned long long addr; char *str; unsigned char *hex; int print; @@ -3828,13 +3828,30 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, */ if (!(field->flags & FIELD_IS_ARRAY) && field->size == pevent->long_size) { - addr = *(unsigned long *)(data + field->offset); + + /* Handle heterogeneous recording and processing + * architectures + * + * CASE I: + * Traces recorded on 32-bit devices (32-bit + * addressing) and processed on 64-bit devices: + * In this case, only 32 
bits should be read. + * + * CASE II: + * Traces recorded on 64 bit devices and processed + * on 32-bit devices: + * In this case, 64 bits must be read. + */ + addr = (pevent->long_size == 8) ? + *(unsigned long long *)(data + field->offset) : + (unsigned long long)*(unsigned int *)(data + field->offset); + /* Check if it matches a print format */ printk = find_printk(pevent, addr); if (printk) trace_seq_puts(s, printk->printk); else - trace_seq_printf(s, "%lx", addr); + trace_seq_printf(s, "%llx", addr); break; } str = malloc(len + 1); diff --git a/tools/perf/Documentation/intel-pt.txt b/tools/perf/Documentation/intel-pt.txt index 4a0501d7a3b4..c94c9de3173e 100644 --- a/tools/perf/Documentation/intel-pt.txt +++ b/tools/perf/Documentation/intel-pt.txt @@ -364,21 +364,6 @@ cyc_thresh Specifies how frequently CYC packets are produced - see cyc CYC packets are not requested by default. -no_force_psb This is a driver option and is not in the IA32_RTIT_CTL MSR. - - It stops the driver resetting the byte count to zero whenever - enabling the trace (for example on context switches) which in - turn results in no PSB being forced. However some processors - will produce a PSB anyway. - - In any case, there is still a PSB when the trace is enabled for - the first time. - - no_force_psb can be used to slightly decrease the trace size but - may make it harder for the decoder to recover from errors. - - no_force_psb is not selected by default. - new snapshot option ------------------- diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index 827557fc7511..38a08539f4bf 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile @@ -573,9 +573,14 @@ ifndef NO_LIBNUMA msg := $(warning No numa.h found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev); NO_LIBNUMA := 1 else - CFLAGS += -DHAVE_LIBNUMA_SUPPORT - EXTLIBS += -lnuma - $(call detected,CONFIG_NUMA) + ifeq ($(feature-numa_num_possible_cpus), 0) + msg := $(warning Old numa library found, disables 'perf bench numa mem' benchmark, please install numactl-devel/libnuma-devel/libnuma-dev >= 2.0.8); + NO_LIBNUMA := 1 + else + CFLAGS += -DHAVE_LIBNUMA_SUPPORT + EXTLIBS += -lnuma + $(call detected,CONFIG_NUMA) + endif endif endif @@ -621,8 +626,13 @@ ifdef LIBBABELTRACE endif ifndef NO_AUXTRACE - $(call detected,CONFIG_AUXTRACE) - CFLAGS += -DHAVE_AUXTRACE_SUPPORT + ifeq ($(feature-get_cpuid), 0) + msg := $(warning Your gcc lacks the __get_cpuid() builtin, disables support for auxtrace/Intel PT, please install a newer gcc); + NO_AUXTRACE := 1 + else + $(call detected,CONFIG_AUXTRACE) + CFLAGS += -DHAVE_AUXTRACE_SUPPORT + endif endif # Among the variables below, these: diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 349bc96ca1fe..e5f18a288b74 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -17,6 +17,7 @@ libperf-y += levenshtein.o libperf-y += llvm-utils.o libperf-y += parse-options.o libperf-y += parse-events.o +libperf-y += perf_regs.o libperf-y += path.o libperf-y += rbtree.o libperf-y += bitmap.o @@ -103,7 +104,6 @@ libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o libperf-y += scripting-engines/ -libperf-$(CONFIG_PERF_REGS) += perf_regs.o libperf-$(CONFIG_ZLIB) += zlib.o libperf-$(CONFIG_LZMA) += lzma.o diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c index 885e8ac83997..6b8eb13e14e4 100644 --- a/tools/perf/util/perf_regs.c +++ b/tools/perf/util/perf_regs.c @@ -6,6 +6,7 @@ const struct sample_reg __weak 
sample_reg_masks[] = { SMPL_REG_END }; +#ifdef HAVE_PERF_REGS_SUPPORT int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) { int i, idx = 0; @@ -29,3 +30,4 @@ out: *valp = regs->cache_regs[id]; return 0; } +#endif diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h index 2984dcc54d67..679d6e493962 100644 --- a/tools/perf/util/perf_regs.h +++ b/tools/perf/util/perf_regs.h @@ -2,6 +2,7 @@ #define __PERF_REGS_H #include +#include struct regs_dump; diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index eb5f18b75402..c6f9af78f6f5 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -270,12 +270,13 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso) int ret = 0; if (module) { - list_for_each_entry(dso, &host_machine->dsos.head, node) { - if (!dso->kernel) - continue; - if (strncmp(dso->short_name + 1, module, - dso->short_name_len - 2) == 0) - goto found; + char module_name[128]; + + snprintf(module_name, sizeof(module_name), "[%s]", module); + map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name); + if (map) { + dso = map->dso; + goto found; } pr_debug("Failed to find module %s.\n", module); return -ENOENT; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 8a4537ee9bc3..fc3f7c922f99 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1580,7 +1580,10 @@ static int __perf_session__process_events(struct perf_session *session, file_offset = page_offset; head = data_offset - page_offset; - if (data_size && (data_offset + data_size < file_size)) + if (data_size == 0) + goto out; + + if (data_offset + data_size < file_size) file_size = data_offset + data_size; ui_progress__init(&prog, file_size, "Processing events..."); diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 415c359de465..2d065d065b67 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -196,7 +196,8 @@ static void zero_per_pkg(struct perf_evsel *counter) memset(counter->per_pkg_mask, 0, MAX_NR_CPUS); } -static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip) +static int check_per_pkg(struct perf_evsel *counter, + struct perf_counts_values *vals, int cpu, bool *skip) { unsigned long *mask = counter->per_pkg_mask; struct cpu_map *cpus = perf_evsel__cpus(counter); @@ -218,6 +219,17 @@ static int check_per_pkg(struct perf_evsel *counter, int cpu, bool *skip) counter->per_pkg_mask = mask; } + /* + * we do not consider an event that has not run as a good + * instance to mark a package as used (skip=1). Otherwise + * we may run into a situation where the first CPU in a package + * is not running anything, yet the second is, and this function + * would mark the package as used after the first CPU and would + * not read the values from the second CPU. 
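Note: as the comment above explains, an event instance that never ran must not mark its package as already counted. A small stand-alone sketch of the run/ena convention this relies on; the struct and helpers below are stand-ins for illustration, not perf's own types.

struct sample_counts {			/* stand-in for struct perf_counts_values */
	unsigned long long val;
	unsigned long long ena;		/* time the event was enabled */
	unsigned long long run;		/* time the event was actually on the PMU */
};

static int counts_usable(const struct sample_counts *c)
{
	/* mirrors the !(vals->run && vals->ena) test added above */
	return c->run && c->ena;
}

static unsigned long long counts_scaled(const struct sample_counts *c)
{
	/* the usual multiplexing correction: scale by enabled/running time */
	return counts_usable(c) ? c->val * c->ena / c->run : 0;
}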
+ */ + if (!(vals->run && vals->ena)) + return 0; + s = cpu_map__get_socket(cpus, cpu); if (s < 0) return -1; @@ -235,7 +247,7 @@ process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel static struct perf_counts_values zero; bool skip = false; - if (check_per_pkg(evsel, cpu, &skip)) { + if (check_per_pkg(evsel, count, cpu, &skip)) { pr_err("failed to read per-pkg counter\n"); return -1; } diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 53bb5f59ec58..475d88d0a1c9 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -38,7 +38,7 @@ static inline char *bfd_demangle(void __maybe_unused *v, #endif #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT -int elf_getphdrnum(Elf *elf, size_t *dst) +static int elf_getphdrnum(Elf *elf, size_t *dst) { GElf_Ehdr gehdr; GElf_Ehdr *ehdr; @@ -1271,8 +1271,6 @@ out_close: static int kcore__init(struct kcore *kcore, char *filename, int elfclass, bool temp) { - GElf_Ehdr *ehdr; - kcore->elfclass = elfclass; if (temp) @@ -1289,9 +1287,7 @@ static int kcore__init(struct kcore *kcore, char *filename, int elfclass, if (!gelf_newehdr(kcore->elf, elfclass)) goto out_end; - ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr); - if (!ehdr) - goto out_end; + memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr)); return 0; @@ -1348,23 +1344,18 @@ static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count) static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, u64 addr, u64 len) { - GElf_Phdr gphdr; - GElf_Phdr *phdr; - - phdr = gelf_getphdr(kcore->elf, idx, &gphdr); - if (!phdr) - return -1; - - phdr->p_type = PT_LOAD; - phdr->p_flags = PF_R | PF_W | PF_X; - phdr->p_offset = offset; - phdr->p_vaddr = addr; - phdr->p_paddr = 0; - phdr->p_filesz = len; - phdr->p_memsz = len; - phdr->p_align = page_size; - - if (!gelf_update_phdr(kcore->elf, idx, phdr)) + GElf_Phdr phdr = { + .p_type = PT_LOAD, + .p_flags = PF_R | PF_W | PF_X, + .p_offset = offset, + .p_vaddr = addr, + .p_paddr = 0, + .p_filesz = len, + .p_memsz = len, + .p_align = page_size, + }; + + if (!gelf_update_phdr(kcore->elf, idx, &phdr)) return -1; return 0; diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index 7acafb3c5592..c2cd9bf2348b 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -709,7 +709,7 @@ bool find_process(const char *name) dir = opendir(procfs__mountpoint()); if (!dir) - return -1; + return false; /* Walk through the directory. 
*/ while (ret && (d = readdir(dir)) != NULL) { diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 9655cb49c7cb..bde0ef1a63df 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -71,8 +71,11 @@ unsigned int extra_msr_offset32; unsigned int extra_msr_offset64; unsigned int extra_delta_offset32; unsigned int extra_delta_offset64; +unsigned int aperf_mperf_multiplier = 1; int do_smi; double bclk; +double base_hz; +double tsc_tweak = 1.0; unsigned int show_pkg; unsigned int show_core; unsigned int show_cpu; @@ -502,7 +505,7 @@ int format_counters(struct thread_data *t, struct core_data *c, /* %Busy */ if (has_aperf) { if (!skip_c0) - outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc); + outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc/tsc_tweak); else outp += sprintf(outp, "********"); } @@ -510,7 +513,7 @@ int format_counters(struct thread_data *t, struct core_data *c, /* Bzy_MHz */ if (has_aperf) outp += sprintf(outp, "%8.0f", - 1.0 * t->tsc / units * t->aperf / t->mperf / interval_float); + 1.0 * t->tsc * tsc_tweak / units * t->aperf / t->mperf / interval_float); /* TSC_MHz */ outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float); @@ -984,6 +987,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) return -3; if (get_msr(cpu, MSR_IA32_MPERF, &t->mperf)) return -4; + t->aperf = t->aperf * aperf_mperf_multiplier; + t->mperf = t->mperf * aperf_mperf_multiplier; } if (do_smi) { @@ -1149,6 +1154,19 @@ int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; + +static void +calculate_tsc_tweak() +{ + unsigned long long msr; + unsigned int base_ratio; + + get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr); + base_ratio = (msr >> 8) & 0xFF; + base_hz = base_ratio * bclk * 1000000; + tsc_tweak = base_hz / tsc_hz; +} + static void dump_nhm_platform_info(void) { @@ -1926,8 +1944,6 @@ int has_config_tdp(unsigned int family, unsigned int model) switch (model) { case 0x3A: /* IVB */ - case 0x3E: /* IVB Xeon */ - case 0x3C: /* HSW */ case 0x3F: /* HSX */ case 0x45: /* HSW */ @@ -2543,6 +2559,13 @@ int is_knl(unsigned int family, unsigned int model) return 0; } +unsigned int get_aperf_mperf_multiplier(unsigned int family, unsigned int model) +{ + if (is_knl(family, model)) + return 1024; + return 1; +} + #define SLM_BCLK_FREQS 5 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0}; @@ -2744,6 +2767,9 @@ void process_cpuid() } } + if (has_aperf) + aperf_mperf_multiplier = get_aperf_mperf_multiplier(family, model); + do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); do_snb_cstates = has_snb_msrs(family, model); do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); @@ -2762,6 +2788,9 @@ void process_cpuid() if (debug) dump_cstate_pstate_config_info(); + if (has_skl_msrs(family, model)) + calculate_tsc_tweak(); + return; } @@ -3090,7 +3119,7 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(stderr, "turbostat version 4.7 17-June, 2015" + fprintf(stderr, "turbostat version 4.8 26-Sep, 2015" " - Len Brown \n"); } diff --git a/virt/kvm/arm/arch_timer.c 
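Note: the turbostat hunks above introduce tsc_tweak, computed from MSR_NHM_PLATFORM_INFO when Skylake MSRs are present, to correct %Busy and Bzy_MHz on parts whose TSC does not tick at the base ratio, and scale APERF/MPERF by 1024 on KNL. A minimal user-space sketch of the arithmetic; all input values are invented for illustration.

#include <stdio.h>

int main(void)
{
	double bclk = 100.0;			/* MHz, assumed */
	unsigned int base_ratio = 24;		/* MSR_NHM_PLATFORM_INFO[15:8], assumed */
	double tsc_hz = 2300000000.0;		/* measured TSC rate, assumed */
	double interval = 1.0;			/* seconds between samples */
	unsigned long long aperf = 96000000ULL;
	unsigned long long mperf = 48000000ULL;
	unsigned long long tsc = 2300000000ULL;

	double base_hz = base_ratio * bclk * 1000000;
	double tsc_tweak = base_hz / tsc_hz;

	printf("%%Busy:   %8.2f\n", 100.0 * mperf / tsc / tsc_tweak);
	printf("Bzy_MHz: %8.0f\n",
	       1.0 * tsc * tsc_tweak / 1000000 * aperf / mperf / interval);
	return 0;
}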
b/virt/kvm/arm/arch_timer.c index 48c6e1ac6827..b9d3a32cbc04 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -137,6 +137,8 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu) void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + bool phys_active; + int ret; /* * We're about to run this vcpu again, so there is no need to @@ -151,6 +153,23 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) */ if (kvm_timer_should_fire(vcpu)) kvm_timer_inject_irq(vcpu); + + /* + * We keep track of whether the edge-triggered interrupt has been + * signalled to the vgic/guest, and if so, we mask the interrupt and + * the physical distributor to prevent the timer from raising a + * physical interrupt whenever we run a guest, preventing forward + * VCPU progress. + */ + if (kvm_vgic_get_phys_irq_active(timer->map)) + phys_active = true; + else + phys_active = false; + + ret = irq_set_irqchip_state(timer->map->irq, + IRQCHIP_STATE_ACTIVE, + phys_active); + WARN_ON(ret); } /** diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 6bd1c9bf7ae7..66c66165e712 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -531,6 +531,34 @@ bool vgic_handle_set_pending_reg(struct kvm *kvm, return false; } +/* + * If a mapped interrupt's state has been modified by the guest such that it + * is no longer active or pending, without it have gone through the sync path, + * then the map->active field must be cleared so the interrupt can be taken + * again. + */ +static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu) +{ + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; + struct list_head *root; + struct irq_phys_map_entry *entry; + struct irq_phys_map *map; + + rcu_read_lock(); + + /* Check for PPIs */ + root = &vgic_cpu->irq_phys_map_list; + list_for_each_entry_rcu(entry, root, entry) { + map = &entry->map; + + if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) && + !vgic_irq_is_active(vcpu, map->virt_irq)) + map->active = false; + } + + rcu_read_unlock(); +} + bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, phys_addr_t offset, int vcpu_id) @@ -561,6 +589,7 @@ bool vgic_handle_clear_pending_reg(struct kvm *kvm, vcpu_id, offset); vgic_reg_access(mmio, reg, offset, mode); + vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id)); vgic_update_state(kvm); return true; } @@ -598,6 +627,7 @@ bool vgic_handle_clear_active_reg(struct kvm *kvm, ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT); if (mmio->is_write) { + vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id)); vgic_update_state(kvm); return true; } @@ -982,6 +1012,12 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) pend_percpu = vcpu->arch.vgic_cpu.pending_percpu; pend_shared = vcpu->arch.vgic_cpu.pending_shared; + if (!dist->enabled) { + bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS); + bitmap_zero(pend_shared, nr_shared); + return 0; + } + pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id); enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id); bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS); @@ -1009,11 +1045,6 @@ void vgic_update_state(struct kvm *kvm) struct kvm_vcpu *vcpu; int c; - if (!dist->enabled) { - set_bit(0, dist->irq_pending_on_cpu); - return; - } - kvm_for_each_vcpu(c, vcpu, kvm) { if (compute_pending_for_cpu(vcpu)) set_bit(c, dist->irq_pending_on_cpu); @@ -1092,6 +1123,15 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu) struct vgic_cpu *vgic_cpu = 
&vcpu->arch.vgic_cpu; struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr); + /* + * We must transfer the pending state back to the distributor before + * retiring the LR, otherwise we may loose edge-triggered interrupts. + */ + if (vlr.state & LR_STATE_PENDING) { + vgic_dist_irq_set_pending(vcpu, irq); + vlr.hwirq = 0; + } + vlr.state = 0; vgic_set_lr(vcpu, lr_nr, vlr); clear_bit(lr_nr, vgic_cpu->lr_used); @@ -1132,7 +1172,8 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq, kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state); vgic_irq_clear_active(vcpu, irq); vgic_update_state(vcpu->kvm); - } else if (vgic_dist_irq_is_pending(vcpu, irq)) { + } else { + WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq)); vlr.state |= LR_STATE_PENDING; kvm_debug("Set pending: 0x%x\n", vlr.state); } @@ -1240,7 +1281,7 @@ static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; struct vgic_dist *dist = &vcpu->kvm->arch.vgic; unsigned long *pa_percpu, *pa_shared; - int i, vcpu_id, lr, ret; + int i, vcpu_id; int overflow = 0; int nr_shared = vgic_nr_shared_irqs(dist); @@ -1295,31 +1336,6 @@ epilog: */ clear_bit(vcpu_id, dist->irq_pending_on_cpu); } - - for (lr = 0; lr < vgic->nr_lr; lr++) { - struct vgic_lr vlr; - - if (!test_bit(lr, vgic_cpu->lr_used)) - continue; - - vlr = vgic_get_lr(vcpu, lr); - - /* - * If we have a mapping, and the virtual interrupt is - * presented to the guest (as pending or active), then we must - * set the state to active in the physical world. See - * Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt. - */ - if (vlr.state & LR_HW) { - struct irq_phys_map *map; - map = vgic_irq_map_search(vcpu, vlr.irq); - - ret = irq_set_irqchip_state(map->irq, - IRQCHIP_STATE_ACTIVE, - true); - WARN_ON(ret); - } - } } static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) @@ -1421,7 +1437,7 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr) return 0; map = vgic_irq_map_search(vcpu, vlr.irq); - BUG_ON(!map || !map->active); + BUG_ON(!map); ret = irq_get_irqchip_state(map->irq, IRQCHIP_STATE_ACTIVE, @@ -1429,13 +1445,8 @@ static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr) WARN_ON(ret); - if (map->active) { - ret = irq_set_irqchip_state(map->irq, - IRQCHIP_STATE_ACTIVE, - false); - WARN_ON(ret); + if (map->active) return 0; - } return 1; } @@ -1607,8 +1618,12 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid, } else { if (level_triggered) { vgic_dist_irq_clear_level(vcpu, irq_num); - if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) + if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) { vgic_dist_irq_clear_pending(vcpu, irq_num); + vgic_cpu_irq_clear(vcpu, irq_num); + if (!compute_pending_for_cpu(vcpu)) + clear_bit(cpuid, dist->irq_pending_on_cpu); + } } ret = false;
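Note: the vgic_retire_lr() change above transfers any pending state back to the distributor before the list register is cleared, so an edge-triggered interrupt latched only in the LR is not lost. A simplified sketch of that ordering; the type, flag value and set_dist_pending callback are stand-ins, not the kernel's definitions.

struct example_lr {			/* simplified stand-in for struct vgic_lr */
	unsigned int irq;
	unsigned int hwirq;
	unsigned int state;
};

#define EX_LR_STATE_PENDING	(1U << 0)

/* Push still-pending state back into the software distributor model before
 * dropping the list register, then clear the LR. */
static void example_retire_lr(struct example_lr *vlr,
			      void (*set_dist_pending)(unsigned int virt_irq))
{
	if (vlr->state & EX_LR_STATE_PENDING) {
		set_dist_pending(vlr->irq);
		vlr->hwirq = 0;
	}

	vlr->state = 0;			/* only now is it safe to drop the LR */
}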