git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'rcu-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 10 Jun 2017 17:22:35 +0000 (10:22 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 10 Jun 2017 17:22:35 +0000 (10:22 -0700)
Pull RCU fixes from Ingo Molnar:
 "Fix an SRCU bug affecting KVM IRQ injection"

* 'rcu-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  srcu: Allow use of Classic SRCU from both process and interrupt context
  srcu: Allow use of Tiny/Tree SRCU from both process and interrupt context
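
These two patches make srcu_read_lock()/srcu_read_unlock() safe to call from hard-interrupt context as well as process context, which KVM's irqfd-based IRQ injection relies on. A minimal sketch of the usage pattern this permits — the driver, the srcu_struct name and the handler below are hypothetical illustrations, not code from this merge:

#include <linux/interrupt.h>
#include <linux/srcu.h>

DEFINE_SRCU(demo_srcu);		/* hypothetical SRCU domain */

/* Process context, e.g. an ioctl path reading SRCU-protected state. */
static void demo_process_path(void)
{
	int idx = srcu_read_lock(&demo_srcu);
	/* ... dereference data published under demo_srcu ... */
	srcu_read_unlock(&demo_srcu, idx);
}

/*
 * Hard-IRQ context: taking the same SRCU read lock here was not
 * guaranteed safe before these fixes switched the read-side per-CPU
 * counter updates to interrupt-safe this_cpu_inc().
 */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
	int idx = srcu_read_lock(&demo_srcu);
	/* ... e.g. forward the event towards a guest ... */
	srcu_read_unlock(&demo_srcu, idx);
	return IRQ_HANDLED;
}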

153 files changed:
Documentation/admin-guide/kernel-parameters.txt
MAINTAINERS
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/keystone-k2l-netcp.dtsi
arch/arm/boot/dts/keystone-k2l.dtsi
arch/arm/boot/dts/versatile-pb.dts
arch/arm/include/asm/device.h
arch/arm/mach-at91/Kconfig
arch/arm/mach-davinci/pm.c
arch/arm/mm/dma-mapping.c
arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
arch/arm64/configs/defconfig
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/64/hash-4k.h
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/topology.h
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/mm/mmu_context_book3s64.c
arch/powerpc/perf/power9-pmu.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/cell/spufs/coredump.c
arch/powerpc/platforms/powernv/subcore.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/sysdev/simple_gpio.c
block/bfq-cgroup.c
block/bfq-iosched.c
block/bfq-iosched.h
block/bio-integrity.c
block/blk-mq.c
block/blk-throttle.c
crypto/asymmetric_keys/public_key.c
crypto/drbg.c
crypto/gcm.c
drivers/acpi/arm64/iort.c
drivers/acpi/battery.c
drivers/acpi/button.c
drivers/acpi/device_pm.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/base/power/main.c
drivers/base/power/wakeup.c
drivers/block/loop.c
drivers/cpufreq/intel_pstate.c
drivers/firmware/efi/efi-bgrt.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uc.h
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_vga.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-pre.c
drivers/input/mouse/elantech.c
drivers/input/rmi4/rmi_f03.c
drivers/iommu/of_iommu.c
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/memory/atmel-ebi.c
drivers/misc/cxl/file.c
drivers/misc/cxl/native.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/of/device.c
drivers/reset/hisilicon/hi6220_reset.c
drivers/xen/privcmd.c
include/linux/dma-iommu.h
include/linux/elevator.h
include/linux/suspend.h
kernel/events/core.c
kernel/power/process.c
kernel/power/suspend.c
kernel/printk/printk.c
sound/core/timer.c
sound/pci/hda/patch_realtek.c
sound/soc/atmel/atmel-classd.c
sound/soc/codecs/da7213.c
sound/soc/codecs/rt286.c
sound/soc/generic/simple-card.c
sound/soc/intel/skylake/skl-sst-ipc.c
sound/soc/intel/skylake/skl-topology.c
sound/soc/intel/skylake/skl.c
sound/soc/intel/skylake/skl.h
sound/soc/sh/rcar/adg.c
sound/soc/sh/rcar/cmd.c
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/gen.c
sound/soc/sh/rcar/rsnd.h
sound/soc/sh/rcar/src.c
sound/soc/sh/rcar/ssi.c
sound/soc/sh/rcar/ssiu.c
sound/soc/soc-core.c
tools/perf/Documentation/perf-probe.txt
tools/perf/Documentation/perf-script-perl.txt
tools/perf/Documentation/perf-script-python.txt
tools/perf/arch/common.c
tools/perf/builtin-stat.c
tools/perf/builtin-trace.c
tools/perf/tests/bp_signal.c
tools/perf/tests/builtin-test.c
tools/perf/tests/code-reading.c
tools/perf/tests/tests.h
tools/perf/util/annotate.c
tools/perf/util/build-id.c
tools/perf/util/build-id.h
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/header.c
tools/perf/util/machine.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/symbol-elf.c
tools/perf/util/symbol.c
tools/perf/util/unwind-libdw.c

index 15f79c27748df1611b1643b77ca68e2a5e7cfaab..0f5c3b4347c6f94a82385f193e76bc99dba19db5 100644 (file)
 
        dscc4.setup=    [NET]
 
+       dt_cpu_ftrs=    [PPC]
+                       Format: {"off" | "known"}
+                       Control how the dt_cpu_ftrs device-tree binding is
+                       used for CPU feature discovery and setup (if it
+                       exists).
+                       off: Do not use it, fall back to legacy cpu table.
+                       known: Do not pass through unknown features to guests
+                       or userspace, only those that the kernel is aware of.
+
        dump_apple_properties   [X86]
                        Dump name and content of EFI device properties on
                        x86 Macs.  Useful for driver authors to determine
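
As a usage note for the dt_cpu_ftrs= entry added above: the option is consumed from the kernel command line on powerpc. The lines below are illustrative only — the root device and console arguments are placeholders and are not taken from this commit:

	root=/dev/sda2 console=hvc0 dt_cpu_ftrs=off	(ignore the binding, fall back to the legacy CPU table)
	root=/dev/sda2 console=hvc0 dt_cpu_ftrs=known	(use the binding, but do not pass unknown features through)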
index fb60bf6dfead7f324e759670fc434995fb5581c5..4d8e525b84eeb876cd61235e86ea76f01c1455e2 100644 (file)
@@ -1172,7 +1172,7 @@ N:        clps711x
 
 ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
 M:     Hartley Sweeten <hsweeten@visionengravers.com>
-M:     Ryan Mallon <rmallon@gmail.com>
+M:     Alexander Sverdlin <alexander.sverdlin@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-ep93xx/
@@ -1489,13 +1489,15 @@ M:      Gregory Clement <gregory.clement@free-electrons.com>
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-F:     arch/arm/mach-mvebu/
-F:     drivers/rtc/rtc-armada38x.c
 F:     arch/arm/boot/dts/armada*
 F:     arch/arm/boot/dts/kirkwood*
+F:     arch/arm/configs/mvebu_*_defconfig
+F:     arch/arm/mach-mvebu/
 F:     arch/arm64/boot/dts/marvell/armada*
 F:     drivers/cpufreq/mvebu-cpufreq.c
-F:     arch/arm/configs/mvebu_*_defconfig
+F:     drivers/irqchip/irq-armada-370-xp.c
+F:     drivers/irqchip/irq-mvebu-*
+F:     drivers/rtc/rtc-armada38x.c
 
 ARM/Marvell Berlin SoC support
 M:     Jisheng Zhang <jszhang@marvell.com>
@@ -1721,7 +1723,6 @@ N:        rockchip
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene@kernel.org>
 M:     Krzysztof Kozlowski <krzk@kernel.org>
-R:     Javier Martinez Canillas <javier@osg.samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 Q:     https://patchwork.kernel.org/project/linux-samsung-soc/list/
@@ -1829,7 +1830,6 @@ F:        drivers/edac/altera_edac.
 ARM/STI ARCHITECTURE
 M:     Patrice Chotard <patrice.chotard@st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L:     kernel@stlinux.com
 W:     http://www.stlinux.com
 S:     Maintained
 F:     arch/arm/mach-sti/
@@ -7707,7 +7707,7 @@ F:        drivers/platform/x86/hp_accel.c
 
 LIVE PATCHING
 M:     Josh Poimboeuf <jpoimboe@redhat.com>
-M:     Jessica Yu <jeyu@redhat.com>
+M:     Jessica Yu <jeyu@kernel.org>
 M:     Jiri Kosina <jikos@kernel.org>
 M:     Miroslav Benes <mbenes@suse.cz>
 R:     Petr Mladek <pmladek@suse.com>
@@ -8588,7 +8588,7 @@ S:        Maintained
 F:     drivers/media/dvb-frontends/mn88473*
 
 MODULE SUPPORT
-M:     Jessica Yu <jeyu@redhat.com>
+M:     Jessica Yu <jeyu@kernel.org>
 M:     Rusty Russell <rusty@rustcorp.com.au>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
 S:     Maintained
@@ -11268,7 +11268,6 @@ F:      drivers/media/rc/serial_ir.c
 
 STI CEC DRIVER
 M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
-L:     kernel@stlinux.com
 S:     Maintained
 F:     drivers/staging/media/st-cec/
 F:     Documentation/devicetree/bindings/media/stih-cec.txt
@@ -11778,6 +11777,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
 S:     Supported
 F:     arch/arm/mach-davinci/
 F:     drivers/i2c/busses/i2c-davinci.c
+F:     arch/arm/boot/dts/da850*
 
 TI DAVINCI SERIES MEDIA DRIVER
 M:     "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
@@ -13861,7 +13861,7 @@ S:      Odd fixes
 F:     drivers/net/wireless/wl3501*
 
 WOLFSON MICROELECTRONICS DRIVERS
-L:     patches@opensource.wolfsonmicro.com
+L:     patches@opensource.cirrus.com
 T:     git https://github.com/CirrusLogic/linux-drivers.git
 W:     https://github.com/CirrusLogic/linux-drivers/wiki
 S:     Supported
index 561f27d8d92224fe8f4f8c3224a5441f2d41175a..9444a9a9ba1057e6b594dc8e2595ac1e5ec593fb 100644 (file)
@@ -3,6 +3,11 @@
 #include <dt-bindings/clock/bcm2835-aux.h>
 #include <dt-bindings/gpio/gpio.h>
 
+/* firmware-provided startup stubs live here, where the secondary CPUs are
+ * spinning.
+ */
+/memreserve/ 0x00000000 0x00001000;
+
 /* This include file covers the common peripherals and configuration between
  * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
  * bcm2835.dtsi and bcm2836.dtsi.
index b6f26824e83a96a88e34d33b99a0f4dace08e306..66f615a74118b9da1fe77088b63be8738c17f6c4 100644 (file)
@@ -137,8 +137,8 @@ netcp: netcp@26000000 {
        /* NetCP address range */
        ranges = <0 0x26000000 0x1000000>;
 
-       clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>;
-       clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk";
+       clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>;
+       clock-names = "pa_clk", "ethss_clk", "cpts";
        dma-coherent;
 
        ti,navigator-dmas = <&dma_gbe 0>,
index b58e7ebc091994645dd1adb35c7e7dc843fae7b0..148650406cf701cd7ffc5ac92d9054d606e97bfd 100644 (file)
                        };
                };
 
+               osr: sram@70000000 {
+                       compatible = "mmio-sram";
+                       reg = <0x70000000 0x10000>;
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       clocks = <&clkosr>;
+               };
+
                dspgpio0: keystone_dsp_gpio@02620240 {
                        compatible = "ti,keystone-dsp-gpio";
                        gpio-controller;
index 33a8eb28374eaa8d3b8aca95d8801227bedd87ca..06e2331f666d45fb2a2432ac1ef5401c3c50e37d 100644 (file)
@@ -1,4 +1,4 @@
-#include <versatile-ab.dts>
+#include "versatile-ab.dts"
 
 / {
        model = "ARM Versatile PB";
index 36ec9c8f6e161d59d56caafbbe92fe0d2239d3f9..3234fe9bba6e76196d8a852db8c26d078150d989 100644 (file)
@@ -19,7 +19,8 @@ struct dev_archdata {
 #ifdef CONFIG_XEN
        const struct dma_map_ops *dev_dma_ops;
 #endif
-       bool dma_coherent;
+       unsigned int dma_coherent:1;
+       unsigned int dma_ops_setup:1;
 };
 
 struct omap_device;
index 841e924143f90e089bae9269acacdff65ab597f9..cbd959b73654c43deb72cbe084bdb99043a22b66 100644 (file)
@@ -1,6 +1,7 @@
 menuconfig ARCH_AT91
        bool "Atmel SoCs"
        depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
+       select ARM_CPU_SUSPEND if PM
        select COMMON_CLK_AT91
        select GPIOLIB
        select PINCTRL
index efb80354f3034d856ab259bb1568dab41173d3b3..b5cc05dc2cb27c9e20e5f8290b65fff2efeec8ec 100644 (file)
@@ -153,7 +153,8 @@ int __init davinci_pm_init(void)
        davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
        if (!davinci_sram_suspend) {
                pr_err("PM: cannot allocate SRAM memory\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto no_sram_mem;
        }
 
        davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
@@ -161,6 +162,10 @@ int __init davinci_pm_init(void)
 
        suspend_set_ops(&davinci_pm_ops);
 
+       return 0;
+
+no_sram_mem:
+       iounmap(pm_config.ddrpsc_reg_base);
 no_ddrpsc_mem:
        iounmap(pm_config.ddrpll_reg_base);
 no_ddrpll_mem:
index c742dfd2967bcae6057c3ea59b81979fcf60aa55..bd83c531828a7349eecf233b5c213a65388e9567 100644 (file)
@@ -2311,7 +2311,14 @@ int arm_iommu_attach_device(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 
-static void __arm_iommu_detach_device(struct device *dev)
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
 {
        struct dma_iommu_mapping *mapping;
 
@@ -2324,22 +2331,10 @@ static void __arm_iommu_detach_device(struct device *dev)
        iommu_detach_device(mapping->domain, dev);
        kref_put(&mapping->kref, release_iommu_mapping);
        to_dma_iommu_mapping(dev) = NULL;
+       set_dma_ops(dev, NULL);
 
        pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
-
-/**
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
- */
-void arm_iommu_detach_device(struct device *dev)
-{
-       __arm_iommu_detach_device(dev);
-       set_dma_ops(dev, NULL);
-}
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2379,7 +2374,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
        if (!mapping)
                return;
 
-       __arm_iommu_detach_device(dev);
+       arm_iommu_detach_device(dev);
        arm_iommu_release_mapping(mapping);
 }
 
@@ -2430,9 +2425,13 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                dev->dma_ops = xen_dma_ops;
        }
 #endif
+       dev->archdata.dma_ops_setup = true;
 }
 
 void arch_teardown_dma_ops(struct device *dev)
 {
+       if (!dev->archdata.dma_ops_setup)
+               return;
+
        arm_teardown_iommu_dma_ops(dev);
 }
index ac8df5201cd656d70073bc03cd13436435b79c66..b4bc42ece7541154431a5855c4bbe0f984094445 100644 (file)
                        cpm_crypto: crypto@800000 {
                                compatible = "inside-secure,safexcel-eip197";
                                reg = <0x800000 0x200000>;
-                               interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-                               | IRQ_TYPE_LEVEL_HIGH)>,
+                               interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
index 7740a75a823084d027ffab1c02d221f3083dea87..6e2058847ddcd59ca9fd0d472bfb94f5331b00cd 100644 (file)
                        cps_crypto: crypto@800000 {
                                compatible = "inside-secure,safexcel-eip197";
                                reg = <0x800000 0x200000>;
-                               interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-                               | IRQ_TYPE_LEVEL_HIGH)>,
+                               interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
index 65cdd878cfbd603b323a08006872f5de90e90aee..97c123e09e45bfd80173029de0da0161dd4be0c7 100644 (file)
@@ -68,6 +68,7 @@ CONFIG_PCIE_QCOM=y
 CONFIG_PCIE_ARMADA_8K=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=m
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_ARM64_VA_BITS_48=y
@@ -208,6 +209,8 @@ CONFIG_BRCMFMAC=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_PM8941_PWRKEY=y
@@ -263,6 +266,7 @@ CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
 CONFIG_SPI_S3C64XX=y
 CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
@@ -292,6 +296,7 @@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
 CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
 CONFIG_EXYNOS_THERMAL=y
+CONFIG_ROCKCHIP_THERMAL=m
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
 CONFIG_MESON_GXBB_WATCHDOG=m
@@ -300,12 +305,14 @@ CONFIG_RENESAS_WDT=y
 CONFIG_BCM2835_WDT=y
 CONFIG_MFD_CROS_EC=y
 CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
 CONFIG_MFD_EXYNOS_LPASS=m
 CONFIG_MFD_HI655X_PMIC=y
 CONFIG_MFD_MAX77620=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_RK808=y
 CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FAN53555=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_HI655X=y
@@ -473,8 +480,10 @@ CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
 CONFIG_PWM=y
 CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
 CONFIG_PWM_MESON=m
 CONFIG_PWM_ROCKCHIP=y
 CONFIG_PWM_SAMSUNG=y
@@ -484,6 +493,7 @@ CONFIG_PHY_HI6220_USB=y
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_ROCKCHIP_INNO_USB2=y
 CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
 CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_ARM_SCPI_PROTOCOL=y
index f7c8f9972f618109209e4892512ed903f6b865f3..964da1891ea9cc6b5dc174131db1c7bbdc2f07f4 100644 (file)
@@ -380,22 +380,6 @@ source "arch/powerpc/platforms/Kconfig"
 
 menu "Kernel options"
 
-config PPC_DT_CPU_FTRS
-       bool "Device-tree based CPU feature discovery & setup"
-       depends on PPC_BOOK3S_64
-       default n
-       help
-         This enables code to use a new device tree binding for describing CPU
-         compatibility and features. Saying Y here will attempt to use the new
-         binding if the firmware provides it. Currently only the skiboot
-         firmware provides this binding.
-         If you're not sure say Y.
-
-config PPC_CPUFEATURES_ENABLE_UNKNOWN
-       bool "cpufeatures pass through unknown features to guest/userspace"
-       depends on PPC_DT_CPU_FTRS
-       default y
-
 config HIGHMEM
        bool "High memory support"
        depends on PPC32
index b4b5e6b671ca4dedc27fc35d59b30d4ad488e1c3..0c4e470571ca0faa74d3e9fa38fa57a384cab4bf 100644 (file)
@@ -8,7 +8,7 @@
 #define H_PTE_INDEX_SIZE  9
 #define H_PMD_INDEX_SIZE  7
 #define H_PUD_INDEX_SIZE  9
-#define H_PGD_INDEX_SIZE  12
+#define H_PGD_INDEX_SIZE  9
 
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE       (sizeof(pte_t) << H_PTE_INDEX_SIZE)
index c2d509584a98070accd6be62519fa992e5bc1f3f..d02ad93bf70892f8d342b9d8890a4e1b8065eed6 100644 (file)
@@ -214,7 +214,6 @@ enum {
 #define CPU_FTR_DAWR                   LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX                  LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG               LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE                        LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1             LONG_ASM_CONST(0x4000000000000000)
 
 #ifndef __ASSEMBLY__
@@ -463,7 +462,7 @@ enum {
            CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
            CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
            CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
-           CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+           CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
 #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
index a2123f291ab0c5c8dc13cc9364c3a12848a4bb2c..bb99b651085aaf292e5f98ee23c7cdc53d443cd2 100644 (file)
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
 #define TASK_SIZE_128TB (0x0000800000000000UL)
 #define TASK_SIZE_512TB (0x0002000000000000UL)
 
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
 /*
  * Max value currently used:
  */
-#define TASK_SIZE_USER64       TASK_SIZE_512TB
+#define TASK_SIZE_USER64               TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64      TASK_SIZE_128TB
 #else
-#define TASK_SIZE_USER64       TASK_SIZE_64TB
+#define TASK_SIZE_USER64               TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64      TASK_SIZE_64TB
 #endif
 
 /*
@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
 
 #define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
                TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -143,21 +148,15 @@ void release_thread(struct task_struct *);
  * with 128TB and conditionally enable upto 512TB
  */
 #ifdef CONFIG_PPC_BOOK3S_64
-#define DEFAULT_MAP_WINDOW     ((is_32bit_task()) ? \
-                                TASK_SIZE_USER32 : TASK_SIZE_128TB)
+#define DEFAULT_MAP_WINDOW     ((is_32bit_task()) ?                    \
+                                TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
 #else
 #define DEFAULT_MAP_WINDOW     TASK_SIZE
 #endif
 
 #ifdef __powerpc64__
 
-#ifdef CONFIG_PPC_BOOK3S_64
-/* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
-#else
-#define STACK_TOP_USER64 TASK_SIZE_USER64
-#endif
-
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
 #define STACK_TOP_USER32 TASK_SIZE_USER32
 
 #define STACK_TOP (is_32bit_task() ? \
index 8b3b46b7b0f2795b6195eb95ee649d3dece6dc9a..329771559cbbb16048d67d27450865703a248c90 100644 (file)
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 
+static inline int early_cpu_to_node(int cpu)
+{
+       int nid;
+
+       nid = numa_cpu_lookup_table[cpu];
+
+       /*
+        * Fall back to node 0 if nid is unset (it should be, except bugs).
+        * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+        */
+       return (nid < 0) ? 0 : nid;
+}
 #else
 
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
index fcc7588a96d694265899935eae34521eda29690d..4c7656dc4e04f09bed8b9bbc8d8f979876237202 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/jump_label.h>
+#include <linux/libfdt.h>
 #include <linux/memblock.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
@@ -642,7 +643,6 @@ static struct dt_cpu_feature_match __initdata
        {"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
        {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
        {"processor-utilization-of-resources-register", feat_enable_purr, 0},
-       {"subcore", feat_enable, CPU_FTR_SUBCORE},
        {"no-execute", feat_enable, 0},
        {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
        {"cache-inhibited-large-page", feat_enable_large_ci, 0},
@@ -671,12 +671,24 @@ static struct dt_cpu_feature_match __initdata
        {"wait-v3", feat_enable, 0},
 };
 
-/* XXX: how to configure this? Default + boot time? */
-#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
-#define CPU_FEATURE_ENABLE_UNKNOWN 1
-#else
-#define CPU_FEATURE_ENABLE_UNKNOWN 0
-#endif
+static bool __initdata using_dt_cpu_ftrs;
+static bool __initdata enable_unknown = true;
+
+static int __init dt_cpu_ftrs_parse(char *str)
+{
+       if (!str)
+               return 0;
+
+       if (!strcmp(str, "off"))
+               using_dt_cpu_ftrs = false;
+       else if (!strcmp(str, "known"))
+               enable_unknown = false;
+       else
+               return 1;
+
+       return 0;
+}
+early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
 
 static void __init cpufeatures_setup_start(u32 isa)
 {
@@ -707,7 +719,7 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
                }
        }
 
-       if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
+       if (!known && enable_unknown) {
                if (!feat_try_enable_unknown(f)) {
                        pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
                                f->name);
@@ -756,6 +768,26 @@ static void __init cpufeatures_setup_finished(void)
                cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
 }
 
+static int __init disabled_on_cmdline(void)
+{
+       unsigned long root, chosen;
+       const char *p;
+
+       root = of_get_flat_dt_root();
+       chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+       if (chosen == -FDT_ERR_NOTFOUND)
+               return false;
+
+       p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
+       if (!p)
+               return false;
+
+       if (strstr(p, "dt_cpu_ftrs=off"))
+               return true;
+
+       return false;
+}
+
 static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
                                        int depth, void *data)
 {
@@ -766,8 +798,6 @@ static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
        return 0;
 }
 
-static bool __initdata using_dt_cpu_ftrs = false;
-
 bool __init dt_cpu_ftrs_in_use(void)
 {
        return using_dt_cpu_ftrs;
@@ -775,6 +805,8 @@ bool __init dt_cpu_ftrs_in_use(void)
 
 bool __init dt_cpu_ftrs_init(void *fdt)
 {
+       using_dt_cpu_ftrs = false;
+
        /* Setup and verify the FDT, if it fails we just bail */
        if (!early_init_dt_verify(fdt))
                return false;
@@ -782,6 +814,9 @@ bool __init dt_cpu_ftrs_init(void *fdt)
        if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
                return false;
 
+       if (disabled_on_cmdline())
+               return false;
+
        cpufeatures_setup_cpu();
 
        using_dt_cpu_ftrs = true;
@@ -1027,5 +1062,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 
 void __init dt_cpu_ftrs_scan(void)
 {
+       if (!using_dt_cpu_ftrs)
+               return;
+
        of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
 }
index baae104b16c7ba9f7cdf4a305ab5227ebf002467..2ad725ef4368a3e525681b0ce4a56ef8e960bf48 100644 (file)
@@ -1666,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
 #endif
+       current->thread.load_fp = 0;
        memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
        current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1674,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
        current->thread.vr_save_area = NULL;
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
+       current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1685,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
        current->thread.tm_tfhar = 0;
        current->thread.tm_texasr = 0;
        current->thread.tm_tfiar = 0;
+       current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);
index 71dcda91755d51a2e5705f29308239e7e9c7e506..857129acf960a1bf93c0c5791a6de1d84585caa3 100644 (file)
@@ -928,7 +928,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_PPC_MM_SLICES
 #ifdef CONFIG_PPC64
-       init_mm.context.addr_limit = TASK_SIZE_128TB;
+       init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 #else
 #error "context.addr_limit not initialized."
 #endif
index f35ff9dea4fb4607459c10d42a29c47f2984e613..a8c1f99e96072530cb1f2d9ed702dffd78665720 100644 (file)
@@ -661,7 +661,7 @@ void __init emergency_stack_init(void)
 
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-       return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+       return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
                                    __pa(MAX_DMA_ADDRESS));
 }
 
@@ -672,7 +672,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-       if (cpu_to_node(from) == cpu_to_node(to))
+       if (early_cpu_to_node(from) == early_cpu_to_node(to))
                return LOCAL_DISTANCE;
        else
                return REMOTE_DISTANCE;
index c6dca2ae78ef9f1225dd6a13e0997034132315a4..a3edf813d4556c547e5b00155c5f9a0dc411872d 100644 (file)
@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
         * mm->context.addr_limit. Default to max task size so that we copy the
         * default values to paca which will help us to handle slb miss early.
         */
-       mm->context.addr_limit = TASK_SIZE_128TB;
+       mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 
        /*
         * The old code would re-promote on fork, we don't do that when using
index 018f8e90ac35fd19bf37bdbb99438f970621634a..bb28e1a412576ea15492be658a5bf78ee75950a5 100644 (file)
@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
        .name                   = "POWER9",
        .n_counter              = MAX_PMU_COUNTERS,
        .add_fields             = ISA207_ADD_FIELDS,
-       .test_adder             = ISA207_TEST_ADDER,
+       .test_adder             = P9_DD1_TEST_ADDER,
        .compute_mmcr           = isa207_compute_mmcr,
        .config_bhrb            = power9_config_bhrb,
        .bhrb_filter_map        = power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
        .name                   = "POWER9",
        .n_counter              = MAX_PMU_COUNTERS,
        .add_fields             = ISA207_ADD_FIELDS,
-       .test_adder             = P9_DD1_TEST_ADDER,
+       .test_adder             = ISA207_TEST_ADDER,
        .compute_mmcr           = isa207_compute_mmcr,
        .config_bhrb            = power9_config_bhrb,
        .bhrb_filter_map        = power9_bhrb_filter_map,
index 33244e3d9375eae3ccd1224b7dbac87b3822f92a..4fd64d3f5c4429206b8c838ca99387e6668aee11 100644 (file)
@@ -59,6 +59,17 @@ config PPC_OF_BOOT_TRAMPOLINE
 
          In case of doubt, say Y
 
+config PPC_DT_CPU_FTRS
+       bool "Device-tree based CPU feature discovery & setup"
+       depends on PPC_BOOK3S_64
+       default y
+       help
+         This enables code to use a new device tree binding for describing CPU
+         compatibility and features. Saying Y here will attempt to use the new
+         binding if the firmware provides it. Currently only the skiboot
+         firmware provides this binding.
+         If you're not sure say Y.
+
 config UDBG_RTAS_CONSOLE
        bool "RTAS based debug console"
        depends on PPC_RTAS
index e5a891ae80ee5e6881bd10be148c04519d3dffd0..84b7ac926ce65682e83781ec6ab4ac97b6d0727f 100644 (file)
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
        skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
        if (!dump_skip(cprm, skip))
                goto Eio;
+
+       rc = 0;
 out:
        free_page((unsigned long)buf);
        return rc;
index 0babef11136fc8daba7f2666fed3f3b649cc2bd8..8c6119280c1306afd399d2c86ce88381882ab5df 100644 (file)
@@ -407,7 +407,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,
 
 static int subcore_init(void)
 {
-       if (!cpu_has_feature(CPU_FTR_SUBCORE))
+       unsigned pvr_ver;
+
+       pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+       if (pvr_ver != PVR_POWER8 &&
+           pvr_ver != PVR_POWER8E &&
+           pvr_ver != PVR_POWER8NVL)
                return 0;
 
        /*
index e104c71ea44ab5bf715fa5720c0a1ed47c6221c7..1fb162ba9d1c6aaa730123d07258a3923b3d245d 100644 (file)
@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
        for (i = 0; i < num_lmbs; i++) {
                lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
                lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+               lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
                lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
        }
 
@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
        for (i = 0; i < num_lmbs; i++) {
                lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
                lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+               lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
                lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
        }
 
index ef470b470b04ae85488d1a9ffcbe27519884a4db..6afddae2fb4796dc61de3258f683bcc158b15a2e 100644 (file)
@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-       struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+       struct u8_gpio_chip *u8_gc =
+               container_of(mm_gc, struct u8_gpio_chip, mm_gc);
 
        u8_gc->data = in_8(mm_gc->regs);
 }
index c8a32fb345cf5db7bac8d1a7b6a6c5e2b1d1a0fe..78b2e0db4fb2c0adba7f0b7ec089cf61f0f311c8 100644 (file)
@@ -52,7 +52,7 @@ BFQG_FLAG_FNS(idling)
 BFQG_FLAG_FNS(empty)
 #undef BFQG_FLAG_FNS
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
 {
        unsigned long long now;
@@ -67,7 +67,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
        bfqg_stats_clear_waiting(stats);
 }
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
                                                 struct bfq_group *curr_bfqg)
 {
@@ -81,7 +81,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
        bfqg_stats_mark_waiting(stats);
 }
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
 {
        unsigned long long now;
@@ -203,12 +203,30 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
 
 static void bfqg_get(struct bfq_group *bfqg)
 {
-       return blkg_get(bfqg_to_blkg(bfqg));
+       bfqg->ref++;
 }
 
 void bfqg_put(struct bfq_group *bfqg)
 {
-       return blkg_put(bfqg_to_blkg(bfqg));
+       bfqg->ref--;
+
+       if (bfqg->ref == 0)
+               kfree(bfqg);
+}
+
+static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+{
+       /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+       bfqg_get(bfqg);
+
+       blkg_get(bfqg_to_blkg(bfqg));
+}
+
+void bfqg_and_blkg_put(struct bfq_group *bfqg)
+{
+       bfqg_put(bfqg);
+
+       blkg_put(bfqg_to_blkg(bfqg));
 }
 
 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
@@ -312,7 +330,11 @@ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
-               bfqg_get(bfqg);
+               /*
+                * Make sure that bfqg and its associated blkg do not
+                * disappear before entity.
+                */
+               bfqg_and_blkg_get(bfqg);
        }
        entity->parent = bfqg->my_entity; /* NULL for root group */
        entity->sched_data = &bfqg->sched_data;
@@ -399,6 +421,8 @@ struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
                return NULL;
        }
 
+       /* see comments in bfq_bic_update_cgroup for why refcounting */
+       bfqg_get(bfqg);
        return &bfqg->pd;
 }
 
@@ -426,7 +450,7 @@ void bfq_pd_free(struct blkg_policy_data *pd)
        struct bfq_group *bfqg = pd_to_bfqg(pd);
 
        bfqg_stats_exit(&bfqg->stats);
-       return kfree(bfqg);
+       bfqg_put(bfqg);
 }
 
 void bfq_pd_reset_stats(struct blkg_policy_data *pd)
@@ -496,9 +520,10 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
  * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
  * it on the new one.  Avoid putting the entity on the old group idle tree.
  *
- * Must be called under the queue lock; the cgroup owning @bfqg must
- * not disappear (by now this just means that we are called under
- * rcu_read_lock()).
+ * Must be called under the scheduler lock, to make sure that the blkg
+ * owning @bfqg does not disappear (see comments in
+ * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
+ * objects).
  */
 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg)
@@ -519,16 +544,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                bfq_deactivate_bfqq(bfqd, bfqq, false, false);
        else if (entity->on_st)
                bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
-       bfqg_put(bfqq_group(bfqq));
+       bfqg_and_blkg_put(bfqq_group(bfqq));
 
-       /*
-        * Here we use a reference to bfqg.  We don't need a refcounter
-        * as the cgroup reference will not be dropped, so that its
-        * destroy() callback will not be invoked.
-        */
        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;
-       bfqg_get(bfqg);
+       /* pin down bfqg and its associated blkg  */
+       bfqg_and_blkg_get(bfqg);
 
        if (bfq_bfqq_busy(bfqq)) {
                bfq_pos_tree_add_move(bfqd, bfqq);
@@ -545,8 +566,9 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  * @bic: the bic to move.
  * @blkcg: the blk-cgroup to move to.
  *
- * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
- * has to make sure that the reference to cgroup is valid across the call.
+ * Move bic to blkcg, assuming that bfqd->lock is held; which makes
+ * sure that the reference to cgroup is valid across the call (see
+ * comments in bfq_bic_update_cgroup on this issue)
  *
  * NOTE: an alternative approach might have been to store the current
  * cgroup in bfqq and getting a reference to it, reducing the lookup
@@ -604,6 +626,57 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
                goto out;
 
        bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
+       /*
+        * Update blkg_path for bfq_log_* functions. We cache this
+        * path, and update it here, for the following
+        * reasons. Operations on blkg objects in blk-cgroup are
+        * protected with the request_queue lock, and not with the
+        * lock that protects the instances of this scheduler
+        * (bfqd->lock). This exposes BFQ to the following sort of
+        * race.
+        *
+        * The blkg_lookup performed in bfq_get_queue, protected
+        * through rcu, may happen to return the address of a copy of
+        * the original blkg. If this is the case, then the
+        * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
+        * the blkg, is useless: it does not prevent blk-cgroup code
+        * from destroying both the original blkg and all objects
+        * directly or indirectly referred by the copy of the
+        * blkg.
+        *
+        * On the bright side, destroy operations on a blkg invoke, as
+        * a first step, hooks of the scheduler associated with the
+        * blkg. And these hooks are executed with bfqd->lock held for
+        * BFQ. As a consequence, for any blkg associated with the
+        * request queue this instance of the scheduler is attached
+        * to, we are guaranteed that such a blkg is not destroyed, and
+        * that all the pointers it contains are consistent, while we
+        * are holding bfqd->lock. A blkg_lookup performed with
+        * bfqd->lock held then returns a fully consistent blkg, which
+        * remains consistent until this lock is held.
+        *
+        * Thanks to the last fact, and to the fact that: (1) bfqg has
+        * been obtained through a blkg_lookup in the above
+        * assignment, and (2) bfqd->lock is being held, here we can
+        * safely use the policy data for the involved blkg (i.e., the
+        * field bfqg->pd) to get to the blkg associated with bfqg,
+        * and then we can safely use any field of blkg. After we
+        * release bfqd->lock, even just getting blkg through this
+        * bfqg may cause dangling references to be traversed, as
+        * bfqg->pd may not exist any more.
+        *
+        * In view of the above facts, here we cache, in the bfqg, any
+        * blkg data we may need for this bic, and for its associated
+        * bfq_queue. As of now, we need to cache only the path of the
+        * blkg, which is used in the bfq_log_* functions.
+        *
+        * Finally, note that bfqg itself needs to be protected from
+        * destruction on the blkg_free of the original blkg (which
+        * invokes bfq_pd_free). We use an additional private
+        * refcounter for bfqg, to let it disappear only after no
+        * bfq_queue refers to it any longer.
+        */
+       blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
        bic->blkcg_serial_nr = serial_nr;
 out:
        rcu_read_unlock();
@@ -640,8 +713,6 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
  * @bfqd: the device data structure with the root group.
  * @bfqg: the group to move from.
  * @st: the service tree with the entities.
- *
- * Needs queue_lock to be taken and reference to be valid over the call.
  */
 static void bfq_reparent_active_entities(struct bfq_data *bfqd,
                                         struct bfq_group *bfqg,
@@ -692,8 +763,7 @@ void bfq_pd_offline(struct blkg_policy_data *pd)
                /*
                 * The idle tree may still contain bfq_queues belonging
                 * to exited task because they never migrated to a different
-                * cgroup from the one being destroyed now.  No one else
-                * can access them so it's safe to act without any lock.
+                * cgroup from the one being destroyed now.
                 */
                bfq_flush_idle_tree(st);
 
index 08ce45096350561896fb6c8959c5c04603e98555..ed93da2462abbc94ab75c10a0d9c7ce251f3f0fb 100644 (file)
@@ -3665,7 +3665,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
 
        kmem_cache_free(bfq_pool, bfqq);
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
-       bfqg_put(bfqg);
+       bfqg_and_blkg_put(bfqg);
 #endif
 }
 
index ae783c06dfd9ca73c9a3832e41e5617c510bdaf5..5c3bf986149215b3d98f753548cba9b4880f6e62 100644 (file)
@@ -759,6 +759,12 @@ struct bfq_group {
        /* must be the first member */
        struct blkg_policy_data pd;
 
+       /* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
+       char blkg_path[128];
+
+       /* reference counter (see comments in bfq_bic_update_cgroup) */
+       int ref;
+
        struct bfq_entity entity;
        struct bfq_sched_data sched_data;
 
@@ -838,7 +844,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
-void bfqg_put(struct bfq_group *bfqg);
+void bfqg_and_blkg_put(struct bfq_group *bfqg);
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 extern struct cftype bfq_blkcg_legacy_files[];
@@ -910,20 +916,13 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 
 #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do {                    \
-       char __pbuf[128];                                               \
-                                                                       \
-       blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
-       blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \
+       blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\
                        bfq_bfqq_sync((bfqq)) ? 'S' : 'A',              \
-                         __pbuf, ##args);                              \
+                       bfqq_group(bfqq)->blkg_path, ##args);           \
 } while (0)
 
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {                    \
-       char __pbuf[128];                                               \
-                                                                       \
-       blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf));          \
-       blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args);    \
-} while (0)
+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) \
+       blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args)
 
 #else /* CONFIG_BFQ_GROUP_IOSCHED */
 
index 5384713d48bc9929e2a4dc8b1b9f22b2e1c5bcd3..b5009a896a7faa1dfc9fe4320181798cc42ccfa5 100644 (file)
@@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio)
        if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
                return false;
 
+       if (!bio_sectors(bio))
+               return false;
+
        /* Already protected? */
        if (bio_integrity(bio))
                return false;
index 1bcccedcc74f0b48f58363640acb1eae04704800..bb66c96850b18cb419b0e44aab1894169352f9af 100644 (file)
@@ -1461,22 +1461,28 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
        return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
-                                     bool may_sleep)
+static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+                                       struct request *rq,
+                                       blk_qc_t *cookie, bool may_sleep)
 {
        struct request_queue *q = rq->q;
        struct blk_mq_queue_data bd = {
                .rq = rq,
                .last = true,
        };
-       struct blk_mq_hw_ctx *hctx;
        blk_qc_t new_cookie;
        int ret;
+       bool run_queue = true;
+
+       if (blk_mq_hctx_stopped(hctx)) {
+               run_queue = false;
+               goto insert;
+       }
 
        if (q->elevator)
                goto insert;
 
-       if (!blk_mq_get_driver_tag(rq, &hctx, false))
+       if (!blk_mq_get_driver_tag(rq, NULL, false))
                goto insert;
 
        new_cookie = request_to_qc_t(hctx, rq);
@@ -1500,7 +1506,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
 
        __blk_mq_requeue_request(rq);
 insert:
-       blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
+       blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
 }
 
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
@@ -1508,7 +1514,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 {
        if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
                rcu_read_lock();
-               __blk_mq_try_issue_directly(rq, cookie, false);
+               __blk_mq_try_issue_directly(hctx, rq, cookie, false);
                rcu_read_unlock();
        } else {
                unsigned int srcu_idx;
@@ -1516,7 +1522,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                might_sleep();
 
                srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
-               __blk_mq_try_issue_directly(rq, cookie, true);
+               __blk_mq_try_issue_directly(hctx, rq, cookie, true);
                srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
        }
 }
@@ -1619,9 +1625,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
                blk_mq_put_ctx(data.ctx);
 
-               if (same_queue_rq)
+               if (same_queue_rq) {
+                       data.hctx = blk_mq_map_queue(q,
+                                       same_queue_rq->mq_ctx->cpu);
                        blk_mq_try_issue_directly(data.hctx, same_queue_rq,
                                        &cookie);
+               }
        } else if (q->nr_hw_queues > 1 && is_sync) {
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
index fc13dd0c6e3956a84913d9e71132c0f321a67280..a7285bf2831c7bdbb89b753fccb198f8640e8780 100644 (file)
@@ -27,6 +27,13 @@ static int throtl_quantum = 32;
 #define MIN_THROTL_IOPS (10)
 #define DFL_LATENCY_TARGET (-1L)
 #define DFL_IDLE_THRESHOLD (0)
+#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
+#define LATENCY_FILTERED_SSD (0)
+/*
+ * For HD, very small latency comes from sequential IO. Such IO is helpless to
+ * help determine if its IO is impacted by others, hence we ignore the IO
+ */
+#define LATENCY_FILTERED_HD (1000L) /* 1ms */
 
 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
 
@@ -212,6 +219,7 @@ struct throtl_data
        struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
        struct latency_bucket __percpu *latency_buckets;
        unsigned long last_calculate_time;
+       unsigned long filtered_latency;
 
        bool track_bio_latency;
 };
@@ -698,7 +706,7 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
                                          unsigned long expires)
 {
-       unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice;
+       unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
 
        /*
         * Since we are adjusting the throttle limit dynamically, the sleep
@@ -2281,7 +2289,7 @@ void blk_throtl_bio_endio(struct bio *bio)
                throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
                        bio_op(bio), lat);
 
-       if (tg->latency_target) {
+       if (tg->latency_target && lat >= tg->td->filtered_latency) {
                int bucket;
                unsigned int threshold;
 
@@ -2417,14 +2425,20 @@ void blk_throtl_exit(struct request_queue *q)
 void blk_throtl_register_queue(struct request_queue *q)
 {
        struct throtl_data *td;
+       int i;
 
        td = q->td;
        BUG_ON(!td);
 
-       if (blk_queue_nonrot(q))
+       if (blk_queue_nonrot(q)) {
                td->throtl_slice = DFL_THROTL_SLICE_SSD;
-       else
+               td->filtered_latency = LATENCY_FILTERED_SSD;
+       } else {
                td->throtl_slice = DFL_THROTL_SLICE_HD;
+               td->filtered_latency = LATENCY_FILTERED_HD;
+               for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
+                       td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
+       }
 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
        /* if no low limit, use previous default */
        td->throtl_slice = DFL_THROTL_SLICE_HD;
index d3a989e718f53518bafe93ddb9efea419e5d9b30..3cd6e12cfc467d27ccf2830fffde82e1aaf0f45e 100644 (file)
@@ -141,7 +141,7 @@ int public_key_verify_signature(const struct public_key *pkey,
         * signature and returns that to us.
         */
        ret = crypto_akcipher_verify(req);
-       if (ret == -EINPROGRESS) {
+       if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
                wait_for_completion(&compl.completion);
                ret = compl.err;
        }
index fa749f47013508d562366fb0484e7ea80535ba0a..cdb27ac4b2266eccff2ba89a5388ec2fbf6d18bc 100644 (file)
@@ -1767,9 +1767,8 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
                        break;
                case -EINPROGRESS:
                case -EBUSY:
-                       ret = wait_for_completion_interruptible(
-                               &drbg->ctr_completion);
-                       if (!ret && !drbg->ctr_async_err) {
+                       wait_for_completion(&drbg->ctr_completion);
+                       if (!drbg->ctr_async_err) {
                                reinit_completion(&drbg->ctr_completion);
                                break;
                        }
index b7ad808be3d4ec6c3822ce2cc5c0428d8f3b3dd0..3841b5eafa7ee244f605c28fd56c5a8c5dcaba9b 100644 (file)
@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 
        err = crypto_skcipher_encrypt(&data->req);
        if (err == -EINPROGRESS || err == -EBUSY) {
-               err = wait_for_completion_interruptible(
-                       &data->result.completion);
-               if (!err)
-                       err = data->result.err;
+               wait_for_completion(&data->result.completion);
+               err = data->result.err;
        }
 
        if (err)
index c5fecf97ee2f52bd11188a0cd2295bd82d5d02db..797b28dc7b3410cda1e775e55d2e1d2362254dd9 100644 (file)
@@ -666,14 +666,6 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
        int ret = -ENODEV;
        struct fwnode_handle *iort_fwnode;
 
-       /*
-        * If we already translated the fwspec there
-        * is nothing left to do, return the iommu_ops.
-        */
-       ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
-       if (ops)
-               return ops;
-
        if (node) {
                iort_fwnode = iort_get_fwnode(node);
                if (!iort_fwnode)
@@ -735,6 +727,14 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
        u32 streamid = 0;
        int err;
 
+       /*
+        * If we already translated the fwspec there
+        * is nothing left to do, return the iommu_ops.
+        */
+       ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
+       if (ops)
+               return ops;
+
        if (dev_is_pci(dev)) {
                struct pci_bus *bus = to_pci_dev(dev)->bus;
                u32 rid;
@@ -782,6 +782,12 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
        if (err)
                ops = ERR_PTR(err);
 
+       /* Ignore all other errors apart from EPROBE_DEFER */
+       if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
+               dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
+               ops = NULL;
+       }
+
        return ops;
 }
 
index a9a9ab3399d47ff8087e62d495f1d2ba930fc1d3..d42eeef9d9287815ce5f4c82d8d915ae5deabe51 100644 (file)
@@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
        if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
            (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
             (battery->capacity_now <= battery->alarm)))
-               pm_wakeup_hard_event(&battery->device->dev);
+               pm_wakeup_event(&battery->device->dev, 0);
 
        return result;
 }
index 9ad8cdb58743b765a6daa615729d78745d70af30..e19f530f1083a13732328516925e3bbeb6493e14 100644 (file)
@@ -217,7 +217,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
        }
 
        if (state)
-               pm_wakeup_hard_event(&device->dev);
+               pm_wakeup_event(&device->dev, 0);
 
        ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
        if (ret == NOTIFY_DONE)
@@ -402,7 +402,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
                } else {
                        int keycode;
 
-                       pm_wakeup_hard_event(&device->dev);
+                       pm_wakeup_event(&device->dev, 0);
                        if (button->suspended)
                                break;
 
@@ -534,7 +534,6 @@ static int acpi_button_add(struct acpi_device *device)
                lid_device = device;
        }
 
-       device_init_wakeup(&device->dev, true);
        printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
        return 0;
 
index 798d5003a039d876f275fc2d933be71cb7ebfbed..993fd31394c854c99e5ce0c2af824f36c50b7a22 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/pm_qos.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
-#include <linux/suspend.h>
 
 #include "internal.h"
 
@@ -400,7 +399,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
        mutex_lock(&acpi_pm_notifier_lock);
 
        if (adev->wakeup.flags.notifier_present) {
-               pm_wakeup_ws_event(adev->wakeup.ws, 0, true);
+               __pm_wakeup_event(adev->wakeup.ws, 0);
                if (adev->wakeup.context.work.func)
                        queue_pm_work(&adev->wakeup.context.work);
        }
index e39ec7b7cb674fbad3cab60f4e5139d7e06a1f09..3a10d7573477e7dea0139c5f885e9514a1886a7a 100644 (file)
@@ -1371,8 +1371,8 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
        iort_set_dma_mask(dev);
 
        iommu = iort_iommu_configure(dev);
-       if (IS_ERR(iommu))
-               return PTR_ERR(iommu);
+       if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
 
        size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
        /*
index a6574d62634031ac6e351418b935a333b556b665..097d630ab8867267326121f9f4db2525cf06ef4b 100644 (file)
@@ -663,40 +663,14 @@ static int acpi_freeze_prepare(void)
        acpi_os_wait_events_complete();
        if (acpi_sci_irq_valid())
                enable_irq_wake(acpi_sci_irq);
-
        return 0;
 }
 
-static void acpi_freeze_wake(void)
-{
-       /*
-        * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
-        * that the SCI has triggered while suspended, so cancel the wakeup in
-        * case it has not been a wakeup event (the GPEs will be checked later).
-        */
-       if (acpi_sci_irq_valid() &&
-           !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
-               pm_system_cancel_wakeup();
-}
-
-static void acpi_freeze_sync(void)
-{
-       /*
-        * Process all pending events in case there are any wakeup ones.
-        *
-        * The EC driver uses the system workqueue, so that one needs to be
-        * flushed too.
-        */
-       acpi_os_wait_events_complete();
-       flush_scheduled_work();
-}
-
 static void acpi_freeze_restore(void)
 {
        acpi_disable_wakeup_devices(ACPI_STATE_S0);
        if (acpi_sci_irq_valid())
                disable_irq_wake(acpi_sci_irq);
-
        acpi_enable_all_runtime_gpes();
 }
 
@@ -708,8 +682,6 @@ static void acpi_freeze_end(void)
 static const struct platform_freeze_ops acpi_freeze_ops = {
        .begin = acpi_freeze_begin,
        .prepare = acpi_freeze_prepare,
-       .wake = acpi_freeze_wake,
-       .sync = acpi_freeze_sync,
        .restore = acpi_freeze_restore,
        .end = acpi_freeze_end,
 };
index e987a6f55d36747f79b470b0372a1abcff6390d7..9faee1c893e53c8dea6e14d472a73a8b7131bf96 100644 (file)
@@ -1091,6 +1091,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
        if (async_error)
                goto Complete;
 
+       if (pm_wakeup_pending()) {
+               async_error = -EBUSY;
+               goto Complete;
+       }
+
        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;
 
index 9c36b27996fc2b56a141bb388acf4947a45b104b..c313b600d356260fd9b98fe4848340f9cbdcf9ae 100644 (file)
@@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly;
 /* First wakeup IRQ seen by the kernel in the last cycle. */
 unsigned int pm_wakeup_irq __read_mostly;
 
-/* If greater than 0 and the system is suspending, terminate the suspend. */
-static atomic_t pm_abort_suspend __read_mostly;
+/* If set and the system is suspending, terminate the suspend. */
+static bool pm_abort_suspend __read_mostly;
 
 /*
  * Combined counters of registered wakeup events and wakeup events in progress.
@@ -855,26 +855,20 @@ bool pm_wakeup_pending(void)
                pm_print_active_wakeup_sources();
        }
 
-       return ret || atomic_read(&pm_abort_suspend) > 0;
+       return ret || pm_abort_suspend;
 }
 
 void pm_system_wakeup(void)
 {
-       atomic_inc(&pm_abort_suspend);
+       pm_abort_suspend = true;
        freeze_wake();
 }
 EXPORT_SYMBOL_GPL(pm_system_wakeup);
 
-void pm_system_cancel_wakeup(void)
-{
-       atomic_dec(&pm_abort_suspend);
-}
-
-void pm_wakeup_clear(bool reset)
+void pm_wakeup_clear(void)
 {
+       pm_abort_suspend = false;
        pm_wakeup_irq = 0;
-       if (reset)
-               atomic_set(&pm_abort_suspend, 0);
 }
 
 void pm_system_irq_wakeup(unsigned int irq_number)
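
The wakeup.c hunk reverts pm_abort_suspend from an atomic counter back to a plain flag: the counter allowed pm_system_wakeup()/pm_system_cancel_wakeup() pairs to nest, while the flag only records that some wakeup happened since the last pm_wakeup_clear(). A small standalone sketch contrasting the two shapes (not the kernel API; names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Flag variant: a single event marks the suspend as aborted until cleared. */
static bool abort_flag;

static void flag_wakeup(void)  { abort_flag = true; }
static void flag_clear(void)   { abort_flag = false; }
static bool flag_pending(void) { return abort_flag; }

/* Counter variant: every wakeup must be cancelled individually. */
static atomic_int abort_count;

static void count_wakeup(void)  { atomic_fetch_add(&abort_count, 1); }
static void count_cancel(void)  { atomic_fetch_sub(&abort_count, 1); }
static bool count_pending(void) { return atomic_load(&abort_count) > 0; }

int main(void)
{
	flag_wakeup();
	printf("flag pending: %d\n", flag_pending());	/* 1 */
	flag_clear();

	count_wakeup();
	count_wakeup();
	count_cancel();
	printf("count pending: %d\n", count_pending());	/* still 1 */
	return 0;
}
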
index 28d932906f24c19e7250e6c69ec903b4a4460761..ebbd0c3fe0ed997973271ba9b57020d2519843d0 100644 (file)
@@ -608,6 +608,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
  */
 static int loop_flush(struct loop_device *lo)
 {
+       /* loop not yet configured, no running thread, nothing to flush */
+       if (lo->lo_state != Lo_bound)
+               return 0;
        return loop_switch(lo, NULL);
 }
 
index b7de5bd76a31743f52cc9095845495e596d0817d..eb1158532de31e7aee418162135a7495b10f9860 100644 (file)
@@ -571,9 +571,10 @@ static inline void update_turbo_state(void)
 static int min_perf_pct_min(void)
 {
        struct cpudata *cpu = all_cpu_data[0];
+       int turbo_pstate = cpu->pstate.turbo_pstate;
 
-       return DIV_ROUND_UP(cpu->pstate.min_pstate * 100,
-                           cpu->pstate.turbo_pstate);
+       return turbo_pstate ?
+               DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0;
 }
 
 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
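
min_perf_pct_min() can now run before the P-state limits have been read, so turbo_pstate may still be zero; the hunk guards the division against that. A minimal sketch of the same guard (the macro matches the kernel's DIV_ROUND_UP definition; the wrapper and sample numbers are illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Return min_pstate as a percentage of turbo_pstate, rounded up. */
static int min_perf_pct_min(int min_pstate, int turbo_pstate)
{
	/* Avoid dividing by zero before the limits have been read. */
	return turbo_pstate ?
		DIV_ROUND_UP(min_pstate * 100, turbo_pstate) : 0;
}

int main(void)
{
	printf("%d%%\n", min_perf_pct_min(8, 35));	/* 23 */
	printf("%d%%\n", min_perf_pct_min(8, 0));	/* 0, no crash */
	return 0;
}
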
index 8bf27323f7a37c34591c45f8b39d2091ae096260..b58233e4ed71ac41709318e03ce06c1d1b2c20ce 100644 (file)
@@ -27,6 +27,26 @@ struct bmp_header {
        u32 size;
 } __packed;
 
+static bool efi_bgrt_addr_valid(u64 addr)
+{
+       efi_memory_desc_t *md;
+
+       for_each_efi_memory_desc(md) {
+               u64 size;
+               u64 end;
+
+               if (md->type != EFI_BOOT_SERVICES_DATA)
+                       continue;
+
+               size = md->num_pages << EFI_PAGE_SHIFT;
+               end = md->phys_addr + size;
+               if (addr >= md->phys_addr && addr < end)
+                       return true;
+       }
+
+       return false;
+}
+
 void __init efi_bgrt_init(struct acpi_table_header *table)
 {
        void *image;
@@ -36,7 +56,7 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
        if (acpi_disabled)
                return;
 
-       if (!efi_enabled(EFI_BOOT))
+       if (!efi_enabled(EFI_MEMMAP))
                return;
 
        if (table->length < sizeof(bgrt_tab)) {
@@ -65,6 +85,10 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
                goto out;
        }
 
+       if (!efi_bgrt_addr_valid(bgrt->image_address)) {
+               pr_notice("Ignoring BGRT: invalid image address\n");
+               goto out;
+       }
        image = early_memremap(bgrt->image_address, sizeof(bmp_header));
        if (!image) {
                pr_notice("Ignoring BGRT: failed to map image header memory\n");
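
efi_bgrt_addr_valid() only accepts a BGRT image address that falls inside an EFI_BOOT_SERVICES_DATA region of the memory map, which is also why the early-out now checks EFI_MEMMAP rather than EFI_BOOT. A standalone sketch of that containment test over a made-up descriptor table; the page shift and the type value follow the UEFI spec, but treat the constants here as assumptions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT		12	/* EFI pages are 4 KiB */
#define EFI_BOOT_SERVICES_DATA	4	/* descriptor type from the UEFI spec */

struct mem_desc {
	uint32_t type;
	uint64_t phys_addr;
	uint64_t num_pages;
};

static bool bgrt_addr_valid(const struct mem_desc *map, int n, uint64_t addr)
{
	for (int i = 0; i < n; i++) {
		const struct mem_desc *md = &map[i];
		uint64_t end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

		if (md->type != EFI_BOOT_SERVICES_DATA)
			continue;
		if (addr >= md->phys_addr && addr < end)
			return true;
	}
	return false;
}

int main(void)
{
	const struct mem_desc map[] = {
		{ 7 /* conventional */,   0x00000000, 0x100 },
		{ EFI_BOOT_SERVICES_DATA, 0x80000000, 0x040 },
	};

	printf("%d\n", bgrt_addr_valid(map, 2, 0x80001000));	/* 1 */
	printf("%d\n", bgrt_addr_valid(map, 2, 0x10000000));	/* 0 */
	return 0;
}
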
index 8be9719284b047f3d6046fd6a22c80bb617fa0f7..aa885a614e27c9882ea597456c4b2e16fb25b62b 100644 (file)
@@ -508,6 +508,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
                bool has_connectors =
                        !!new_crtc_state->connector_mask;
 
+               WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
                if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
                        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
                                         crtc->base.id, crtc->name);
@@ -551,6 +553,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
        for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
                const struct drm_connector_helper_funcs *funcs = connector->helper_private;
 
+               WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
                /*
                 * This only sets crtc->connectors_changed for routing changes,
                 * drivers must set crtc->connectors_changed themselves when
@@ -650,6 +654,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
                const struct drm_plane_helper_funcs *funcs;
 
+               WARN_ON(!drm_modeset_is_locked(&plane->mutex));
+
                funcs = plane->helper_private;
 
                drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
@@ -2663,7 +2669,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
 
        drm_modeset_acquire_init(&ctx, 0);
        while (1) {
+               err = drm_modeset_lock_all_ctx(dev, &ctx);
+               if (err)
+                       goto out;
+
                err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
+out:
                if (err != -EDEADLK)
                        break;
 
index b5c6bb46a4251bdac218832ba61e52109e5a1768..37b8ad3e30d80440aea9ea2654a7a99696b50a57 100644 (file)
@@ -358,7 +358,12 @@ EXPORT_SYMBOL(drm_put_dev);
 void drm_unplug_dev(struct drm_device *dev)
 {
        /* for a USB device */
-       drm_dev_unregister(dev);
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_modeset_unregister_all(dev);
+
+       drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
+       drm_minor_unregister(dev, DRM_MINOR_RENDER);
+       drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 
        mutex_lock(&drm_global_mutex);
 
index 5abc69c9630fc28789a32c87b19e44397d1d58ba..f77dcfaade6c5dfb74d7d600d8181d4fbb006f76 100644 (file)
@@ -760,7 +760,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
         * Get the endpoint node. In our case, dsi has one output port1
         * to which the external HDMI bridge is connected.
         */
-       ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge);
+       ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge);
        if (ret)
                return ret;
 
index c994fe6e65b2eafe6a133fccb70f7c5db5019b00..48428672fc6ece0927416d17a8dde8c41f00f500 100644 (file)
@@ -1235,6 +1235,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_fini;
 
        pci_set_drvdata(pdev, &dev_priv->drm);
+       /*
+        * Disable the system suspend direct complete optimization, which can
+        * leave the device suspended skipping the driver's suspend handlers
+        * if the device was already runtime suspended. This is needed due to
+        * the difference in our runtime and system suspend sequence and
+        * because the HDA driver may require us to enable the audio power
+        * domain during system suspend.
+        */
+       pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
 
        ret = i915_driver_init_early(dev_priv, ent);
        if (ret < 0)
index 963f6d4481f76ec54b5aeab138b0cca3f4ff90e5..2c453a4e97d5ba28ca3a21da372f73eed0131268 100644 (file)
@@ -2991,6 +2991,16 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
        return false;
 }
 
+static inline bool
+intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+       if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped)
+               return true;
+#endif
+       return false;
+}
+
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                                int enable_ppgtt);
 
index b6ac3df18b582534b118ab44aae1dbfe9f75186e..462031cbd77f714b23a3b7645039c0d8dba71f40 100644 (file)
@@ -3298,6 +3298,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 {
        int ret;
 
+       /* If the device is asleep, we have no requests outstanding */
+       if (!READ_ONCE(i915->gt.awake))
+               return 0;
+
        if (flags & I915_WAIT_LOCKED) {
                struct i915_gem_timeline *tl;
 
index 50b8f1139ff99d6dc8d3ec225abf251d6af4465d..f1989b8792dd6f21ba1a944113b424fb8dc3184d 100644 (file)
@@ -2191,6 +2191,101 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
                gen8_set_pte(&gtt_base[i], scratch_pte);
 }
 
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+       struct drm_i915_private *dev_priv = vm->i915;
+
+       /*
+        * Make sure the internal GAM fifo has been cleared of all GTT
+        * writes before exiting stop_machine(). This guarantees that
+        * any aperture accesses waiting to start in another process
+        * cannot back up behind the GTT writes causing a hang.
+        * The register can be any arbitrary GAM register.
+        */
+       POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+       struct i915_address_space *vm;
+       dma_addr_t addr;
+       u64 offset;
+       enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+       struct insert_page *arg = _arg;
+
+       gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+       bxt_vtd_ggtt_wa(arg->vm);
+
+       return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+                                         dma_addr_t addr,
+                                         u64 offset,
+                                         enum i915_cache_level level,
+                                         u32 unused)
+{
+       struct insert_page arg = { vm, addr, offset, level };
+
+       stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+       struct i915_address_space *vm;
+       struct sg_table *st;
+       u64 start;
+       enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+       struct insert_entries *arg = _arg;
+
+       gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
+       bxt_vtd_ggtt_wa(arg->vm);
+
+       return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+                                            struct sg_table *st,
+                                            u64 start,
+                                            enum i915_cache_level level,
+                                            u32 unused)
+{
+       struct insert_entries arg = { vm, st, start, level };
+
+       stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+struct clear_range {
+       struct i915_address_space *vm;
+       u64 start;
+       u64 length;
+};
+
+static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+{
+       struct clear_range *arg = _arg;
+
+       gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+       bxt_vtd_ggtt_wa(arg->vm);
+
+       return 0;
+}
+
+static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+                                         u64 start,
+                                         u64 length)
+{
+       struct clear_range arg = { vm, start, length };
+
+       stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+}
+
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  u64 start, u64 length)
 {
@@ -2785,6 +2880,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 
        ggtt->base.insert_entries = gen8_ggtt_insert_entries;
 
+       /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+       if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
+               ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+               ggtt->base.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
+               if (ggtt->base.clear_range != nop_clear_range)
+                       ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+       }
+
        ggtt->invalidate = gen6_ggtt_invalidate;
 
        return ggtt_probe_common(ggtt, size);
@@ -2997,7 +3100,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
 
 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
 {
-       i915->ggtt.invalidate = gen6_ggtt_invalidate;
+       if (i915->ggtt.invalidate == guc_ggtt_invalidate)
+               i915->ggtt.invalidate = gen6_ggtt_invalidate;
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
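
The Broxton VT-d workaround funnels every GGTT update through stop_machine(), which only takes an int (*fn)(void *), so each operation packs its arguments into a small struct first (insert_page, insert_entries, clear_range above). A standalone sketch of that marshalling pattern; run_serialized() is merely a placeholder for stop_machine() and the printf stands in for the real PTE write:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for stop_machine(): run the callback with its packed arguments. */
static int run_serialized(int (*fn)(void *), void *data)
{
	return fn(data);
}

struct insert_page {
	uint64_t addr;
	uint64_t offset;
	int	 level;
};

static int insert_page_cb(void *_arg)
{
	struct insert_page *arg = _arg;

	printf("insert page: addr=%#llx offset=%#llx level=%d\n",
	       (unsigned long long)arg->addr,
	       (unsigned long long)arg->offset, arg->level);
	/* A real implementation would flush the write here (the _wa step). */
	return 0;
}

static void insert_page_serialized(uint64_t addr, uint64_t offset, int level)
{
	struct insert_page arg = { addr, offset, level };

	run_serialized(insert_page_cb, &arg);
}

int main(void)
{
	insert_page_serialized(0x1000, 0x200000, 1);
	return 0;
}
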
index a0d6d4317a490bba6487891a4048ddef6b358fe4..fb5231f98c0d620f1ccf03a9872607b1373dc0e2 100644 (file)
@@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                        obj->mm.quirked = false;
                }
                if (!i915_gem_object_is_tiled(obj)) {
-                       GEM_BUG_ON(!obj->mm.quirked);
+                       GEM_BUG_ON(obj->mm.quirked);
                        __i915_gem_object_pin_pages(obj);
                        obj->mm.quirked = true;
                }
index f87b0c4e564d8b85de91e93f7a8d9a6e6f219b61..1a78363c7f4a9e974edbc4e4f31ec7d64d26b6ad 100644 (file)
@@ -208,7 +208,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
 static const struct intel_device_info intel_ironlake_m_info = {
        GEN5_FEATURES,
        .platform = INTEL_IRONLAKE,
-       .is_mobile = 1,
+       .is_mobile = 1, .has_fbc = 1,
 };
 
 #define GEN6_FEATURES \
@@ -390,7 +390,6 @@ static const struct intel_device_info intel_skylake_gt3_info = {
        .has_hw_contexts = 1, \
        .has_logical_ring_contexts = 1, \
        .has_guc = 1, \
-       .has_decoupled_mmio = 1, \
        .has_aliasing_ppgtt = 1, \
        .has_full_ppgtt = 1, \
        .has_full_48bit_ppgtt = 1, \
index 3cabe52a4e3b168e176d1f55abdae65f67219ef7..569717a1272367a91cf682a9cae7640f9ae32777 100644 (file)
@@ -12203,6 +12203,15 @@ static void update_scanline_offset(struct intel_crtc *crtc)
         * type. For DP ports it behaves like most other platforms, but on HDMI
         * there's an extra 1 line difference. So we need to add two instead of
         * one to the value.
+        *
+        * On VLV/CHV DSI the scanline counter would appear to increment
+        * approx. 1/3 of a scanline before start of vblank. Unfortunately
+        * that means we can't tell whether we're in vblank or not while
+        * we're on that particular line. We must still set scanline_offset
+        * to 1 so that the vblank timestamps come out correct when we query
+        * the scanline counter from within the vblank interrupt handler.
+        * However if queried just before the start of vblank we'll get an
+        * answer that's slightly in the future.
         */
        if (IS_GEN2(dev_priv)) {
                const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
index 854e8e0c836bd2099c1cfcb72e12e3ec5ff21915..f94eacff196c5d0980690ae95cda45c42e3a4e9b 100644 (file)
@@ -1075,6 +1075,22 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
        return 0;
 }
 
+static bool ring_is_idle(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       bool idle = true;
+
+       intel_runtime_pm_get(dev_priv);
+
+       /* No bit for gen2, so assume the CS parser is idle */
+       if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+               idle = false;
+
+       intel_runtime_pm_put(dev_priv);
+
+       return idle;
+}
+
 /**
  * intel_engine_is_idle() - Report if the engine has finished processing all work
  * @engine: the intel_engine_cs
@@ -1084,8 +1100,6 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
  */
 bool intel_engine_is_idle(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
-
        /* Any inflight/incomplete requests? */
        if (!i915_seqno_passed(intel_engine_get_seqno(engine),
                               intel_engine_last_submit(engine)))
@@ -1100,7 +1114,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
                return false;
 
        /* Ring stopped? */
-       if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+       if (!ring_is_idle(engine))
                return false;
 
        return true;
index ded2add18b26122d7f6395d0d5532da26dd21f34..d93c58410bffe9701d148e546db753dff84c4083 100644 (file)
@@ -82,20 +82,10 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
 static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
                                            int *width, int *height)
 {
-       int w, h;
-
-       if (drm_rotation_90_or_270(cache->plane.rotation)) {
-               w = cache->plane.src_h;
-               h = cache->plane.src_w;
-       } else {
-               w = cache->plane.src_w;
-               h = cache->plane.src_h;
-       }
-
        if (width)
-               *width = w;
+               *width = cache->plane.src_w;
        if (height)
-               *height = h;
+               *height = cache->plane.src_h;
 }
 
 static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
@@ -746,6 +736,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
 
        cache->plane.rotation = plane_state->base.rotation;
+       /*
+        * Src coordinates are already rotated by 270 degrees for
+        * the 90/270 degree plane rotation cases (to match the
+        * GTT mapping), hence no need to account for rotation here.
+        */
        cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
        cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
        cache->plane.visible = plane_state->base.visible;
index 570bd603f401d513ac3f08c67fc78d6d1523b762..2ca481b5aa691872d39263605ef67b9c7335cec6 100644 (file)
@@ -4335,10 +4335,18 @@ skl_compute_wm(struct drm_atomic_state *state)
        struct drm_crtc_state *cstate;
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct skl_wm_values *results = &intel_state->wm_results;
+       struct drm_device *dev = state->dev;
        struct skl_pipe_wm *pipe_wm;
        bool changed = false;
        int ret, i;
 
+       /*
+        * When we distrust bios wm we always need to recompute to set the
+        * expected DDB allocations for each CRTC.
+        */
+       if (to_i915(dev)->wm.distrust_bios_wm)
+               changed = true;
+
        /*
         * If this transaction isn't actually touching any CRTC's, don't
         * bother with watermark calculation.  Note that if we pass this
@@ -4349,6 +4357,7 @@ skl_compute_wm(struct drm_atomic_state *state)
         */
        for_each_new_crtc_in_state(state, crtc, cstate, i)
                changed = true;
+
        if (!changed)
                return 0;
 
index c3780d0d2baf752ce9d590b6f6c8db67674ec745..559f1ab42bfc23e005020d9bb3cb88e0f0d57943 100644 (file)
@@ -435,8 +435,9 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
        }
 
        /* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
-       if (intel_crtc->config->pipe_src_w > 3200 ||
-                               intel_crtc->config->pipe_src_h > 2000) {
+       if (dev_priv->psr.psr2_support &&
+           (intel_crtc->config->pipe_src_w > 3200 ||
+            intel_crtc->config->pipe_src_h > 2000)) {
                dev_priv->psr.psr2_support = false;
                return false;
        }
index 8c87c717c7cda92c4256cf277828e594f96a0ad1..e6517edcd16b55608c125452b56904f2b48e90df 100644 (file)
@@ -83,10 +83,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
  */
 void intel_pipe_update_start(struct intel_crtc *crtc)
 {
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
        long timeout = msecs_to_jiffies_timeout(1);
        int scanline, min, max, vblank_start;
        wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+       bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+               intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
        DEFINE_WAIT(wait);
 
        vblank_start = adjusted_mode->crtc_vblank_start;
@@ -139,6 +142,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
 
        drm_crtc_vblank_put(&crtc->base);
 
+       /*
+        * On VLV/CHV DSI the scanline counter would appear to
+        * increment approx. 1/3 of a scanline before start of vblank.
+        * The registers still get latched at start of vblank however.
+        * This means we must not write any registers on the first
+        * line of vblank (since not the whole line is actually in
+        * vblank). And unfortunately we can't use the interrupt to
+        * wait here since it will fire too soon. We could use the
+        * frame start interrupt instead since it will fire after the
+        * critical scanline, but that would require more changes
+        * in the interrupt code. So for now we'll just do the nasty
+        * thing and poll for the bad scanline to pass us by.
+        *
+        * FIXME figure out if BXT+ DSI suffers from this as well
+        */
+       while (need_vlv_dsi_wa && scanline == vblank_start)
+               scanline = intel_get_crtc_scanline(crtc);
+
        crtc->debug.scanline_start = scanline;
        crtc->debug.start_vbl_time = ktime_get();
        crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
index 4b7f73aeddac6475db31d184853f833c8ba3d510..f84115261ae78b02591a64cb77de96c3fda167bb 100644 (file)
@@ -59,8 +59,6 @@ struct drm_i915_gem_request;
  *                available in the work queue (note, the queue is shared,
  *                not per-engine). It is OK for this to be nonzero, but
  *                it should not be huge!
- *   q_fail: failed to enqueue a work item. This should never happen,
- *           because we check for space beforehand.
  *   b_fail: failed to ring the doorbell. This should never happen, unless
  *           somehow the hardware misbehaves, or maybe if the GuC firmware
  *           crashes? We probably need to reset the GPU to recover.
index 8fb801fab039b10225765b044a4e535cf7a4201d..8b05ecb8fdefccafeed07755d501e8902ccba0c3 100644 (file)
@@ -673,7 +673,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                ret = drm_of_find_panel_or_bridge(child,
                                                  imx_ldb->lvds_mux ? 4 : 2, 0,
                                                  &channel->panel, &channel->bridge);
-               if (ret)
+               if (ret && ret != -ENODEV)
                        return ret;
 
                /* panel ddc only if there is no bridge */
index 808b995a990f5529b303e23cb1085b4b7f478355..b5cc6e12334cf96e8faacc01a1a8fb5dcec48202 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_of.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/iopoll.h>
 #include <linux/irq.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -900,16 +901,12 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
 
 static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
 {
-       u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */
-
-       while (timeout_ms--) {
-               if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
-                       break;
-
-               usleep_range(2, 4);
-       }
+       int ret;
+       u32 val;
 
-       if (timeout_ms == 0) {
+       ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
+                                4, 2000000);
+       if (ret) {
                DRM_WARN("polling dsi wait not busy timeout!\n");
 
                mtk_dsi_enable(dsi);
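
The mtk_dsi hunk replaces a hand-rolled sleep-and-count loop with readl_poll_timeout(), which bounds the wait by wall-clock time instead of an iteration count. A standalone sketch of such a poll-with-timeout helper (CLOCK_MONOTONIC plus usleep; the condition callback and names are invented for the demo):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

/* Poll cond(arg) every sleep_us until it is true or timeout_us elapses. */
static int poll_timeout(bool (*cond)(void *), void *arg,
			unsigned int sleep_us, unsigned int timeout_us)
{
	long long deadline = now_us() + timeout_us;

	for (;;) {
		if (cond(arg))
			return 0;
		if (now_us() > deadline)
			return cond(arg) ? 0 : -ETIMEDOUT;
		usleep(sleep_us);
	}
}

static bool not_busy(void *arg)
{
	return --*(int *)arg <= 0;	/* pretend the busy bit clears */
}

int main(void)
{
	int countdown = 5;

	printf("%d\n", poll_timeout(not_busy, &countdown, 4, 2000000));
	return 0;
}
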
index 41a1c03b03476b620a511731518b8d0f7772417d..0a4ffd7241468dcbd064fa3a210f17094d10697b 100644 (file)
@@ -1062,7 +1062,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
        }
 
        err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
-       if (err) {
+       if (err < 0) {
                dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
                        err);
                return err;
index 75382f5f0fcec00a8749df932cfd7dba9eb19542..10b227d83e9ac7af98b8177188bb56d48823f2b1 100644 (file)
@@ -152,7 +152,7 @@ static struct regmap_config meson_regmap_config = {
        .max_register   = 0x1000,
 };
 
-static int meson_drv_bind(struct device *dev)
+static int meson_drv_bind_master(struct device *dev, bool has_components)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct meson_drm *priv;
@@ -233,10 +233,12 @@ static int meson_drv_bind(struct device *dev)
        if (ret)
                goto free_drm;
 
-       ret = component_bind_all(drm->dev, drm);
-       if (ret) {
-               dev_err(drm->dev, "Couldn't bind all components\n");
-               goto free_drm;
+       if (has_components) {
+               ret = component_bind_all(drm->dev, drm);
+               if (ret) {
+                       dev_err(drm->dev, "Couldn't bind all components\n");
+                       goto free_drm;
+               }
        }
 
        ret = meson_plane_create(priv);
@@ -276,6 +278,11 @@ free_drm:
        return ret;
 }
 
+static int meson_drv_bind(struct device *dev)
+{
+       return meson_drv_bind_master(dev, true);
+}
+
 static void meson_drv_unbind(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
@@ -357,6 +364,9 @@ static int meson_drv_probe(struct platform_device *pdev)
                count += meson_probe_remote(pdev, &match, np, remote);
        }
 
+       if (count && !match)
+               return meson_drv_bind_master(&pdev->dev, false);
+
        /* If some endpoints were found, initialize the nodes */
        if (count) {
                dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count);
index 6a567fe347b369a2c01d5e89c67ac0a881a49420..820a4805916f1da8115b798cf3c93d5750ae8196 100644 (file)
@@ -4,6 +4,7 @@
 
 struct nvkm_alarm {
        struct list_head head;
+       struct list_head exec;
        u64 timestamp;
        void (*func)(struct nvkm_alarm *);
 };
index 36268e1802b5afcd65c6b3d623b273c4ac60af87..15a13d09d431c9a8d4822fb5f997bcf3225a2d4c 100644 (file)
@@ -80,7 +80,7 @@ int nouveau_modeset = -1;
 module_param_named(modeset, nouveau_modeset, int, 0400);
 
 MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
-int nouveau_runtime_pm = -1;
+static int nouveau_runtime_pm = -1;
 module_param_named(runpm, nouveau_runtime_pm, int, 0400);
 
 static struct drm_driver driver_stub;
@@ -495,7 +495,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
        nouveau_fbcon_init(dev);
        nouveau_led_init(dev);
 
-       if (nouveau_runtime_pm != 0) {
+       if (nouveau_pmops_runtime()) {
                pm_runtime_use_autosuspend(dev->dev);
                pm_runtime_set_autosuspend_delay(dev->dev, 5000);
                pm_runtime_set_active(dev->dev);
@@ -527,7 +527,7 @@ nouveau_drm_unload(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
 
-       if (nouveau_runtime_pm != 0) {
+       if (nouveau_pmops_runtime()) {
                pm_runtime_get_sync(dev->dev);
                pm_runtime_forbid(dev->dev);
        }
@@ -726,6 +726,14 @@ nouveau_pmops_thaw(struct device *dev)
        return nouveau_do_resume(drm_dev, false);
 }
 
+bool
+nouveau_pmops_runtime()
+{
+       if (nouveau_runtime_pm == -1)
+               return nouveau_is_optimus() || nouveau_is_v1_dsm();
+       return nouveau_runtime_pm == 1;
+}
+
 static int
 nouveau_pmops_runtime_suspend(struct device *dev)
 {
@@ -733,14 +741,7 @@ nouveau_pmops_runtime_suspend(struct device *dev)
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        int ret;
 
-       if (nouveau_runtime_pm == 0) {
-               pm_runtime_forbid(dev);
-               return -EBUSY;
-       }
-
-       /* are we optimus enabled? */
-       if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
-               DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+       if (!nouveau_pmops_runtime()) {
                pm_runtime_forbid(dev);
                return -EBUSY;
        }
@@ -765,8 +766,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
        struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
        int ret;
 
-       if (nouveau_runtime_pm == 0)
-               return -EINVAL;
+       if (!nouveau_pmops_runtime()) {
+               pm_runtime_forbid(dev);
+               return -EBUSY;
+       }
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
@@ -796,14 +799,7 @@ nouveau_pmops_runtime_idle(struct device *dev)
        struct nouveau_drm *drm = nouveau_drm(drm_dev);
        struct drm_crtc *crtc;
 
-       if (nouveau_runtime_pm == 0) {
-               pm_runtime_forbid(dev);
-               return -EBUSY;
-       }
-
-       /* are we optimus enabled? */
-       if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
-               DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+       if (!nouveau_pmops_runtime()) {
                pm_runtime_forbid(dev);
                return -EBUSY;
        }
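
nouveau_pmops_runtime() folds the tri-state runpm module parameter (-1 auto, 0 off, 1 force) and the Optimus/DSM detection into a single predicate, so the suspend, resume, idle, and VGA-switcheroo paths all make the same decision. A tiny sketch of that shape; the platform probe is a stand-in and the names are illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Module-parameter style knob: -1 = auto, 0 = disabled, 1 = forced on. */
static int runtime_pm = -1;

/* Stand-in for the Optimus / _DSM platform detection. */
static bool platform_supports_runtime_pm(void)
{
	return true;
}

static bool pmops_runtime(void)
{
	if (runtime_pm == -1)
		return platform_supports_runtime_pm();
	return runtime_pm == 1;
}

int main(void)
{
	printf("runtime pm enabled: %d\n", pmops_runtime());
	return 0;
}
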
index eadec2f49ad318cf44d3464ff39dbe201e7074cb..a11b6aaed325f17ddf6f82c8fd8ced531191574a 100644 (file)
@@ -108,8 +108,6 @@ nouveau_cli(struct drm_file *fpriv)
 #include <nvif/object.h>
 #include <nvif/device.h>
 
-extern int nouveau_runtime_pm;
-
 struct nouveau_drm {
        struct nouveau_cli client;
        struct drm_device *dev;
@@ -195,6 +193,7 @@ nouveau_drm(struct drm_device *dev)
 
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
+bool nouveau_pmops_runtime(void);
 
 #include <nvkm/core/tegra.h>
 
index a4aacbc0cec8efe603d18152099aaecdeab93dce..02fe0efb9e1643f3a4802b947b3ab306bdd690bd 100644 (file)
@@ -87,7 +87,7 @@ void
 nouveau_vga_init(struct nouveau_drm *drm)
 {
        struct drm_device *dev = drm->dev;
-       bool runtime = false;
+       bool runtime = nouveau_pmops_runtime();
 
        /* only relevant for PCI devices */
        if (!dev->pdev)
@@ -99,10 +99,6 @@ nouveau_vga_init(struct nouveau_drm *drm)
        if (pci_is_thunderbolt_attached(dev->pdev))
                return;
 
-       if (nouveau_runtime_pm == 1)
-               runtime = true;
-       if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
-               runtime = true;
        vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
 
        if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
@@ -113,18 +109,13 @@ void
 nouveau_vga_fini(struct nouveau_drm *drm)
 {
        struct drm_device *dev = drm->dev;
-       bool runtime = false;
+       bool runtime = nouveau_pmops_runtime();
 
        vga_client_register(dev->pdev, NULL, NULL, NULL);
 
        if (pci_is_thunderbolt_attached(dev->pdev))
                return;
 
-       if (nouveau_runtime_pm == 1)
-               runtime = true;
-       if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
-               runtime = true;
-
        vga_switcheroo_unregister_client(dev->pdev);
        if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
                vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
index a7663249b3baf2df1c5c75d87d3b32109984ba97..06e564a9ccb253b3018b45d2f0a96cc53430ab2c 100644 (file)
@@ -2107,7 +2107,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
                                        asyc->set.dither = true;
                        }
                } else {
-                       asyc->set.mask = ~0;
+                       if (asyc)
+                               asyc->set.mask = ~0;
                        asyh->set.mask = ~0;
                }
 
index f2a86eae0a0d624b31cb8ee9a65e6487705a6c1a..2437f7d41ca20de616a7193f2d6fdec6d7daea00 100644 (file)
@@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
                /* Move to completed list.  We'll drop the lock before
                 * executing the callback so it can reschedule itself.
                 */
-               list_move_tail(&alarm->head, &exec);
+               list_del_init(&alarm->head);
+               list_add(&alarm->exec, &exec);
        }
 
        /* Shut down interrupt if no more pending alarms. */
@@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
        spin_unlock_irqrestore(&tmr->lock, flags);
 
        /* Execute completed callbacks. */
-       list_for_each_entry_safe(alarm, atemp, &exec, head) {
-               list_del_init(&alarm->head);
+       list_for_each_entry_safe(alarm, atemp, &exec, exec) {
+               list_del(&alarm->exec);
                alarm->func(alarm);
        }
 }
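
The nvkm timer fix gives each alarm a second list hook so the expired alarms can be executed from a private list while the callback remains free to re-arm the alarm onto the pending list through the original hook. A compact single-threaded sketch of that two-hook layout (the kernel version does the same under tmr->lock; all names here are illustrative):

#include <stdio.h>

struct alarm {
	struct alarm	*next;		/* hook on the pending list */
	struct alarm	*exec_next;	/* hook on the temporary exec list */
	unsigned long	timestamp;
	void		(*func)(struct alarm *);
	const char	*name;
};

static struct alarm *pending;

static void alarm_schedule(struct alarm *a, unsigned long when)
{
	a->timestamp = when;
	a->next = pending;
	pending = a;
}

static void alarm_trigger(unsigned long now)
{
	struct alarm *exec = NULL, **p = &pending;

	/* Move expired alarms off the pending list via their second hook. */
	while (*p) {
		struct alarm *a = *p;

		if (a->timestamp <= now) {
			*p = a->next;
			a->exec_next = exec;
			exec = a;
		} else {
			p = &a->next;
		}
	}

	/* The callback may call alarm_schedule() again; that only touches
	 * ->next, so walking the exec list via ->exec_next stays safe. */
	while (exec) {
		struct alarm *a = exec;

		exec = a->exec_next;
		a->func(a);
	}
}

static void rearm(struct alarm *a)
{
	printf("%s fired, re-arming\n", a->name);
	alarm_schedule(a, a->timestamp + 10);
}

int main(void)
{
	struct alarm a = { .func = rearm, .name = "vblank" };

	alarm_schedule(&a, 5);
	alarm_trigger(7);	/* fires and re-queues itself */
	alarm_trigger(20);	/* fires again */
	return 0;
}
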
index d8fa7a9c9240bdf53f06d214f5952af7f85e5ee4..ce5f2d1f9994113b6322a708f47f1e23049ef3ba 100644 (file)
@@ -245,8 +245,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
                                      struct drm_connector_state *conn_state)
 {
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
-       struct rockchip_dp_device *dp = to_dp(encoder);
-       int ret;
 
        /*
         * The hardware IC designed that VOP must output the RGB10 video
@@ -258,16 +256,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
 
        s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
        s->output_type = DRM_MODE_CONNECTOR_eDP;
-       if (dp->data->chip_type == RK3399_EDP) {
-               /*
-                * For RK3399, VOP Lit must code the out mode to RGB888,
-                * VOP Big must code the out mode to RGB10.
-                */
-               ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
-                                                       encoder);
-               if (ret > 0)
-                       s->output_mode = ROCKCHIP_OUT_MODE_P888;
-       }
 
        return 0;
 }
index a2169dd3d26b915c851bd089f25373495c188174..14fa1f8351e8df22ab30560fbb6a1906841ba43d 100644 (file)
@@ -615,7 +615,6 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 {
        struct cdn_dp_device *dp = encoder_to_dp(encoder);
        int ret, val;
-       struct rockchip_crtc_state *state;
 
        ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
        if (ret < 0) {
@@ -625,14 +624,10 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 
        DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
                          (ret) ? "LIT" : "BIG");
-       state = to_rockchip_crtc_state(encoder->crtc->state);
-       if (ret) {
+       if (ret)
                val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
-               state->output_mode = ROCKCHIP_OUT_MODE_P888;
-       } else {
+       else
                val = DP_SEL_VOP_LIT << 16;
-               state->output_mode = ROCKCHIP_OUT_MODE_AAAA;
-       }
 
        ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
        if (ret)
index 3f7a82d1e0956e6a37e1478412210955db38aa19..45589d6ce65ed0fd0a7e1be60f83dd03bd3d47b5 100644 (file)
@@ -875,6 +875,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
 static void vop_crtc_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
+       const struct vop_data *vop_data = vop->data;
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
        struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
        u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
@@ -967,6 +968,13 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
                DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
                              s->output_type);
        }
+
+       /*
+        * If the VOP does not support RGB10 output, force RGB10 down to RGB888.
+        */
+       if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
+           !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
+               s->output_mode = ROCKCHIP_OUT_MODE_P888;
        VOP_CTRL_SET(vop, out_mode, s->output_mode);
 
        VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
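
The RGB10 decision now lives in the VOP driver itself: each vop_data advertises VOP_FEATURE_OUTPUT_RGB10 and vop_crtc_enable() demotes the 10-bit (AAAA) output mode to P888 when the bit is missing. A minimal sketch of that feature-bit pattern outside the kernel; the enum values and names here are illustrative, not the rockchip driver's:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1ULL << (n))
#define FEATURE_OUTPUT_RGB10	BIT(0)

enum out_mode { OUT_MODE_P888 = 0, OUT_MODE_AAAA = 15 };

struct vop_variant {
	const char *name;
	uint64_t    feature;
};

static enum out_mode pick_out_mode(const struct vop_variant *v,
				   enum out_mode requested)
{
	/* Demote 10-bit output when the block cannot drive it. */
	if (requested == OUT_MODE_AAAA && !(v->feature & FEATURE_OUTPUT_RGB10))
		return OUT_MODE_P888;
	return requested;
}

int main(void)
{
	struct vop_variant big = { "vop-big", FEATURE_OUTPUT_RGB10 };
	struct vop_variant lit = { "vop-lit", 0 };

	printf("%s -> %d\n", big.name, pick_out_mode(&big, OUT_MODE_AAAA));
	printf("%s -> %d\n", lit.name, pick_out_mode(&lit, OUT_MODE_AAAA));
	return 0;
}
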
index 5a4faa85dbd29d91af08ae51a8c2ac69012cb33c..9979fd0c22821d7efa3d7054468e0914619e0692 100644 (file)
@@ -142,6 +142,9 @@ struct vop_data {
        const struct vop_intr *intr;
        const struct vop_win_data *win;
        unsigned int win_size;
+
+#define VOP_FEATURE_OUTPUT_RGB10       BIT(0)
+       u64 feature;
 };
 
 /* interrupt define */
index 0da44442aab097b8f4b40d67c8995be625bccfcd..bafd698a28b1b491c01823d2be293a41e67c3722 100644 (file)
@@ -275,6 +275,7 @@ static const struct vop_intr rk3288_vop_intr = {
 static const struct vop_data rk3288_vop = {
        .init_table = rk3288_init_reg_table,
        .table_size = ARRAY_SIZE(rk3288_init_reg_table),
+       .feature = VOP_FEATURE_OUTPUT_RGB10,
        .intr = &rk3288_vop_intr,
        .ctrl = &rk3288_ctrl_data,
        .win = rk3288_vop_win_data,
@@ -343,6 +344,7 @@ static const struct vop_reg_data rk3399_init_reg_table[] = {
 static const struct vop_data rk3399_vop_big = {
        .init_table = rk3399_init_reg_table,
        .table_size = ARRAY_SIZE(rk3399_init_reg_table),
+       .feature = VOP_FEATURE_OUTPUT_RGB10,
        .intr = &rk3399_vop_intr,
        .ctrl = &rk3399_ctrl_data,
        /*
index 130d51c5ec6a2dab1211337d71313e6b1de15323..4b948fba9eec274b794a0546fec0179b3de1cadf 100644 (file)
@@ -41,9 +41,9 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20170221"
+#define VMWGFX_DRIVER_DATE "20170607"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 12
+#define VMWGFX_DRIVER_MINOR 13
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
index b6a0806b06bffaf6da9178905f9b2f6bb037d384..a1c68e6a689e32fd0dd4d74c805ee4afd0836a99 100644 (file)
@@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
                                return fifo_state->static_buffer;
                        else {
                                fifo_state->dynamic_buffer = vmalloc(bytes);
+                               if (!fifo_state->dynamic_buffer)
+                                       goto out_err;
                                return fifo_state->dynamic_buffer;
                        }
                }
index ef9f3a2a40303290287b5259b7a71d2a8791ddb4..1d2db5d912b03c572b50f9b64b2f5d2a39de1365 100644 (file)
@@ -274,108 +274,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 }
 
 
-
-/**
- * vmw_du_cursor_plane_update() - Update cursor image and location
- *
- * @plane: plane object to update
- * @crtc: owning CRTC of @plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of plane on crtc
- * @crtc_y: y offset of plane on crtc
- * @crtc_w: width of plane rectangle on crtc
- * @crtc_h: height of plane rectangle on crtc
- * @src_x: Not used
- * @src_y: Not used
- * @src_w: Not used
- * @src_h: Not used
- *
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
-                              struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb,
-                              int crtc_x, int crtc_y,
-                              unsigned int crtc_w,
-                              unsigned int crtc_h,
-                              uint32_t src_x, uint32_t src_y,
-                              uint32_t src_w, uint32_t src_h)
-{
-       struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-       struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-       struct vmw_surface *surface = NULL;
-       struct vmw_dma_buffer *dmabuf = NULL;
-       s32 hotspot_x, hotspot_y;
-       int ret;
-
-       hotspot_x = du->hotspot_x + fb->hot_x;
-       hotspot_y = du->hotspot_y + fb->hot_y;
-
-       /* A lot of the code assumes this */
-       if (crtc_w != 64 || crtc_h != 64) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (vmw_framebuffer_to_vfb(fb)->dmabuf)
-               dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
-       else
-               surface = vmw_framebuffer_to_vfbs(fb)->surface;
-
-       if (surface && !surface->snooper.image) {
-               DRM_ERROR("surface not suitable for cursor\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
-       /* setup new image */
-       ret = 0;
-       if (surface) {
-               /* vmw_user_surface_lookup takes one reference */
-               du->cursor_surface = surface;
-
-               du->cursor_age = du->cursor_surface->snooper.age;
-
-               ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
-                                             64, 64, hotspot_x, hotspot_y);
-       } else if (dmabuf) {
-               /* vmw_user_surface_lookup takes one reference */
-               du->cursor_dmabuf = dmabuf;
-
-               ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h,
-                                              hotspot_x, hotspot_y);
-       } else {
-               vmw_cursor_update_position(dev_priv, false, 0, 0);
-               goto out;
-       }
-
-       if (!ret) {
-               du->cursor_x = crtc_x + du->set_gui_x;
-               du->cursor_y = crtc_y + du->set_gui_y;
-
-               vmw_cursor_update_position(dev_priv, true,
-                                          du->cursor_x + hotspot_x,
-                                          du->cursor_y + hotspot_y);
-       }
-
-out:
-       return ret;
-}
-
-
-int vmw_du_cursor_plane_disable(struct drm_plane *plane)
-{
-       if (plane->fb) {
-               drm_framebuffer_unreference(plane->fb);
-               plane->fb = NULL;
-       }
-
-       return -EINVAL;
-}
-
-
 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
 {
        vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
@@ -472,18 +370,6 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 }
 
 
-void
-vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
-                                  struct drm_plane_state *old_state)
-{
-       struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
-       struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-
-       drm_atomic_set_fb_for_plane(plane->state, NULL);
-       vmw_cursor_update_position(dev_priv, false, 0, 0);
-}
-
-
 void
 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
                                  struct drm_plane_state *old_state)
@@ -1498,6 +1384,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
         */
        if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
            dmabuf && only_2d &&
+           mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
            dev_priv->active_display_unit == vmw_du_screen_target) {
                ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
                                              dmabuf, &surface);
index 13f2f1d2818a755012098df126938989ba1fb297..5f8d678ae675156178dc306efc2fc83338c390ba 100644 (file)
@@ -256,10 +256,6 @@ int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
                           u16 *r, u16 *g, u16 *b,
                           uint32_t size,
                           struct drm_modeset_acquire_ctx *ctx);
-int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
-                           uint32_t handle, uint32_t width, uint32_t height,
-                           int32_t hot_x, int32_t hot_y);
-int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
 int vmw_du_connector_set_property(struct drm_connector *connector,
                                  struct drm_property *property,
                                  uint64_t val);
@@ -339,15 +335,6 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
 /* Universal Plane Helpers */
 void vmw_du_primary_plane_destroy(struct drm_plane *plane);
 void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
-int vmw_du_cursor_plane_disable(struct drm_plane *plane);
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
-                              struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb,
-                              int crtc_x, int crtc_y,
-                              unsigned int crtc_w,
-                              unsigned int crtc_h,
-                              uint32_t src_x, uint32_t src_y,
-                              uint32_t src_w, uint32_t src_h);
 
 /* Atomic Helpers */
 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
@@ -356,8 +343,6 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
                                     struct drm_plane_state *state);
 void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
                                       struct drm_plane_state *old_state);
-void vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
-                                       struct drm_plane_state *old_state);
 int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
                                   struct drm_plane_state *new_state);
 void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
index bad31bdf09b6c1d8bd31663c7973e1ebb912f340..50be1f034f9efa701f2c6feda57fe28d8cf6d596 100644 (file)
@@ -56,6 +56,8 @@ enum stdu_content_type {
  * @right: Right side of bounding box.
  * @top: Top side of bounding box.
  * @bottom: Bottom side of bounding box.
+ * @fb_left: Left side of the framebuffer/content bounding box
+ * @fb_top: Top of the framebuffer/content bounding box
  * @buf: DMA buffer when DMA-ing between buffer and screen targets.
  * @sid: Surface ID when copying between surface and screen targets.
  */
@@ -63,6 +65,7 @@ struct vmw_stdu_dirty {
        struct vmw_kms_dirty base;
        SVGA3dTransferType  transfer;
        s32 left, right, top, bottom;
+       s32 fb_left, fb_top;
        u32 pitch;
        union {
                struct vmw_dma_buffer *buf;
@@ -647,7 +650,7 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
  *
  * @dirty: The closure structure.
  *
- * This function calculates the bounding box for all the incoming clips
+ * This function calculates the bounding box for all the incoming clips.
  */
 static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 {
@@ -656,11 +659,19 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 
        dirty->num_hits = 1;
 
-       /* Calculate bounding box */
+       /* Calculate destination bounding box */
        ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
        ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
        ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
        ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+
+       /*
+        * Calculate content bounding box.  We only need the top-left
+        * coordinate because width and height will be the same as the
+        * destination bounding box above
+        */
+       ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x);
+       ddirty->fb_top  = min_t(s32, ddirty->fb_top, dirty->fb_y);
 }
 
 
@@ -697,11 +708,11 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
        src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
        src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used);
-       src += dirty->unit_y1 * src_pitch + dirty->unit_x1 * stdu->cpp;
+       src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
 
        dst_pitch = ddirty->pitch;
        dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
-       dst += dirty->fb_y * dst_pitch + dirty->fb_x * stdu->cpp;
+       dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
 
 
        /* Figure out the real direction */
@@ -760,7 +771,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        }
 
 out_cleanup:
-       ddirty->left = ddirty->top = S32_MAX;
+       ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
        ddirty->right = ddirty->bottom = S32_MIN;
 }
 
@@ -812,6 +823,7 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
                SVGA3D_READ_HOST_VRAM;
        ddirty.left = ddirty.top = S32_MAX;
        ddirty.right = ddirty.bottom = S32_MIN;
+       ddirty.fb_left = ddirty.fb_top = S32_MAX;
        ddirty.pitch = vfb->base.pitches[0];
        ddirty.buf = buf;
        ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
@@ -1355,6 +1367,11 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
                DRM_ERROR("Failed to bind surface to STDU.\n");
        else
                crtc->primary->fb = plane->state->fb;
+
+       ret = vmw_stdu_update_st(dev_priv, stdu);
+
+       if (ret)
+               DRM_ERROR("Failed to update STDU.\n");
 }
 
 
index 7681341fe32b8725840d70b137782f5f1f316bc0..6b70bd259953580204ccecd4ec4334c73e73eed7 100644 (file)
@@ -1274,11 +1274,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint32_t size;
-       uint32_t backup_handle;
+       uint32_t backup_handle = 0;
 
        if (req->multisample_count != 0)
                return -EINVAL;
 
+       if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+               return -EINVAL;
+
        if (unlikely(vmw_user_surface_size == 0))
                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
                        128;
@@ -1314,12 +1317,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
                                             &res->backup,
                                             &user_srf->backup_base);
-               if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
-                   res->backup_size) {
-                       DRM_ERROR("Surface backup buffer is too small.\n");
-                       vmw_dmabuf_unreference(&res->backup);
-                       ret = -EINVAL;
-                       goto out_unlock;
+               if (ret == 0) {
+                       if (res->backup->base.num_pages * PAGE_SIZE <
+                           res->backup_size) {
+                               DRM_ERROR("Surface backup buffer is too small.\n");
+                               vmw_dmabuf_unreference(&res->backup);
+                               ret = -EINVAL;
+                               goto out_unlock;
+                       } else {
+                               backup_handle = req->buffer_handle;
+                       }
                }
        } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
@@ -1491,7 +1498,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
                                 dev_priv->stdu_max_height);
 
                if (size.width > max_width || size.height > max_height) {
-                       DRM_ERROR("%ux%u\n, exeeds max surface size %ux%u",
+                       DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
                                  size.width, size.height,
                                  max_width, max_height);
                        return -EINVAL;
index 16d556816b5fcaa62758549d9bceaa88bd4bc839..2fb5f432a54c1afd0f7c6104facb1860dbcb3f3a 100644 (file)
@@ -725,15 +725,16 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
        spin_lock_irqsave(&ipu->lock, flags);
 
        val = ipu_cm_read(ipu, IPU_CONF);
-       if (vdi) {
+       if (vdi)
                val |= IPU_CONF_IC_INPUT;
-       } else {
+       else
                val &= ~IPU_CONF_IC_INPUT;
-               if (csi_id == 1)
-                       val |= IPU_CONF_CSI_SEL;
-               else
-                       val &= ~IPU_CONF_CSI_SEL;
-       }
+
+       if (csi_id == 1)
+               val |= IPU_CONF_CSI_SEL;
+       else
+               val &= ~IPU_CONF_CSI_SEL;
+
        ipu_cm_write(ipu, val, IPU_CONF);
 
        spin_unlock_irqrestore(&ipu->lock, flags);
index c55563379e2e3ca2a1957ce777b2ac2c3586d9b7..c35f74c830657f26a3e29c34f7cef7e9f864f71a 100644 (file)
@@ -131,8 +131,6 @@ int ipu_pre_get(struct ipu_pre *pre)
        if (pre->in_use)
                return -EBUSY;
 
-       clk_prepare_enable(pre->clk_axi);
-
        /* first get the engine out of reset and remove clock gating */
        writel(0, pre->regs + IPU_PRE_CTRL);
 
@@ -149,12 +147,7 @@ int ipu_pre_get(struct ipu_pre *pre)
 
 void ipu_pre_put(struct ipu_pre *pre)
 {
-       u32 val;
-
-       val = IPU_PRE_CTRL_SFTRST | IPU_PRE_CTRL_CLKGATE;
-       writel(val, pre->regs + IPU_PRE_CTRL);
-
-       clk_disable_unprepare(pre->clk_axi);
+       writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL);
 
        pre->in_use = false;
 }
@@ -249,6 +242,8 @@ static int ipu_pre_probe(struct platform_device *pdev)
        if (!pre->buffer_virt)
                return -ENOMEM;
 
+       clk_prepare_enable(pre->clk_axi);
+
        pre->dev = dev;
        platform_set_drvdata(pdev, pre);
        mutex_lock(&ipu_pre_list_mutex);
@@ -268,6 +263,8 @@ static int ipu_pre_remove(struct platform_device *pdev)
        available_pres--;
        mutex_unlock(&ipu_pre_list_mutex);
 
+       clk_disable_unprepare(pre->clk_axi);
+
        if (pre->buffer_virt)
                gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
                              IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
index e73d968023f7ce7de418dcf1315b7c554773d604..f1fa1f172107722ef13d8c98e0c2d539d096c2aa 100644 (file)
@@ -1118,8 +1118,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
+ * Fujitsu LIFEBOOK E546   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E547   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
+ * Fujitsu LIFEBOOK E557   0x570f01        40, 14, 0c      2 hw buttons
  * Fujitsu T725            0x470f01        05, 12, 09      2 hw buttons
  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
@@ -1524,6 +1526,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
                },
        },
+       {
+               /* Fujitsu LIFEBOOK E546  does not work with crc_enabled == 0 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
+               },
+       },
        {
                /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
                .matches = {
@@ -1545,6 +1554,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
                },
        },
+       {
+               /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
+               },
+       },
        {
                /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
                .matches = {
index 77dad045a4683026460dca491419112d730897d7..ad71a5e768dc46432b18b84fb344a7bb514bb56c 100644 (file)
@@ -146,7 +146,7 @@ static int rmi_f03_register_pt(struct f03_data *f03)
        if (!serio)
                return -ENOMEM;
 
-       serio->id.type = SERIO_8042;
+       serio->id.type = SERIO_PS_PSTHRU;
        serio->write = rmi_f03_pt_write;
        serio->port_data = f03;
 
index 9f44ee8ea1bc8a10a6ff4c0dad297b78b8c2f57a..19779b88a47973eef332b88b05d1ac76193f788d 100644 (file)
@@ -118,6 +118,7 @@ static const struct iommu_ops
 
        ops = iommu_ops_from_fwnode(fwnode);
        if ((ops && !ops->of_xlate) ||
+           !of_device_is_available(iommu_spec->np) ||
            (!ops && !of_iommu_driver_present(iommu_spec->np)))
                return NULL;
 
@@ -236,6 +237,12 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
                        ops = ERR_PTR(err);
        }
 
+       /* Ignore all other errors apart from EPROBE_DEFER */
+       if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
+               dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
+               ops = NULL;
+       }
+
        return ops;
 }
 
index 212a6777ff3172dd9e20401dd7bf87ad5d2f7468..87edc342ccb3d5c51bb45313f1218f9839528917 100644 (file)
@@ -5174,6 +5174,18 @@ static void mddev_delayed_delete(struct work_struct *ws)
 
 static void no_op(struct percpu_ref *r) {}
 
+int mddev_init_writes_pending(struct mddev *mddev)
+{
+       if (mddev->writes_pending.percpu_count_ptr)
+               return 0;
+       if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
+               return -ENOMEM;
+       /* We want to start with the refcount at zero */
+       percpu_ref_put(&mddev->writes_pending);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
+
 static int md_alloc(dev_t dev, char *name)
 {
        /*
@@ -5239,10 +5251,6 @@ static int md_alloc(dev_t dev, char *name)
        blk_queue_make_request(mddev->queue, md_make_request);
        blk_set_stacking_limits(&mddev->queue->limits);
 
-       if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
-               goto abort;
-       /* We want to start with the refcount at zero */
-       percpu_ref_put(&mddev->writes_pending);
        disk = alloc_disk(1 << shift);
        if (!disk) {
                blk_cleanup_queue(mddev->queue);
index 11f15146ce5177de0468c706a5f82a037b42c132..0fa1de42c42bcb328276a42fc56809d53217a285 100644 (file)
@@ -648,6 +648,7 @@ extern void md_unregister_thread(struct md_thread **threadp);
 extern void md_wakeup_thread(struct md_thread *thread);
 extern void md_check_recovery(struct mddev *mddev);
 extern void md_reap_sync_thread(struct mddev *mddev);
+extern int mddev_init_writes_pending(struct mddev *mddev);
 extern void md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
 extern void md_write_end(struct mddev *mddev);
index af5056d568788a53f6c3a2456a353cba3bbfe35a..e1a7e3d4c5e4f17d0dedb4f171ad3bd47ff70022 100644 (file)
@@ -3063,6 +3063,8 @@ static int raid1_run(struct mddev *mddev)
                        mdname(mddev));
                return -EIO;
        }
+       if (mddev_init_writes_pending(mddev) < 0)
+               return -ENOMEM;
        /*
         * copy the already verified devices into our private RAID1
         * bookkeeping area. [whatever we allocate in run(),
index 4343d7ff9916bee9a9a399572c2bd3313723fa3a..797ed60abd5e27cd2f32d0f80d36c660ed1e4419 100644 (file)
@@ -3611,6 +3611,9 @@ static int raid10_run(struct mddev *mddev)
        int first = 1;
        bool discard_supported = false;
 
+       if (mddev_init_writes_pending(mddev) < 0)
+               return -ENOMEM;
+
        if (mddev->private == NULL) {
                conf = setup_conf(mddev);
                if (IS_ERR(conf))
index 722064689e822f3b876411f076921e244abbec2f..ec0f951ae19fbc8fa392306b7c6c45a38f3eb0e4 100644 (file)
@@ -7118,6 +7118,9 @@ static int raid5_run(struct mddev *mddev)
        long long min_offset_diff = 0;
        int first = 1;
 
+       if (mddev_init_writes_pending(mddev) < 0)
+               return -ENOMEM;
+
        if (mddev->recovery_cp != MaxSector)
                pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
                          mdname(mddev));
index 35910f945bfad02823f7146c0746feb90aa2cda9..99e644cda4d13db301b713a5752788c0f646dfa1 100644 (file)
@@ -581,7 +581,7 @@ static int atmel_ebi_probe(struct platform_device *pdev)
        return of_platform_populate(np, NULL, NULL, dev);
 }
 
-static int atmel_ebi_resume(struct device *dev)
+static __maybe_unused int atmel_ebi_resume(struct device *dev)
 {
        struct atmel_ebi *ebi = dev_get_drvdata(dev);
        struct atmel_ebi_dev *ebid;
index 17b433f1ce23b7deeb2e3f35139d3bef57ef20d0..0761271d68c5613b23152c03522f6a388950ea86 100644 (file)
@@ -159,11 +159,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
        /* Do this outside the status_mutex to avoid a circular dependency with
         * the locking in cxl_mmap_fault() */
-       if (copy_from_user(&work, uwork,
-                          sizeof(struct cxl_ioctl_start_work))) {
-               rc = -EFAULT;
-               goto out;
-       }
+       if (copy_from_user(&work, uwork, sizeof(work)))
+               return -EFAULT;
 
        mutex_lock(&ctx->status_mutex);
        if (ctx->status != OPENED) {
index 871a2f09c71845b2803bab920618c36de634e4fc..8d6ea9712dbd1830fcdc5d6eecda3d28d69a9376 100644 (file)
@@ -1302,13 +1302,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
 
 void cxl_native_release_psl_err_irq(struct cxl *adapter)
 {
-       if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
+       if (adapter->native->err_virq == 0 ||
+           adapter->native->err_virq !=
+           irq_find_mapping(NULL, adapter->native->err_hwirq))
                return;
 
        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
        cxl_unmap_irq(adapter->native->err_virq, adapter);
        cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
        kfree(adapter->irq_name);
+       adapter->native->err_virq = 0;
 }
 
 int cxl_native_register_serr_irq(struct cxl_afu *afu)
@@ -1346,13 +1349,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)
 
 void cxl_native_release_serr_irq(struct cxl_afu *afu)
 {
-       if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+       if (afu->serr_virq == 0 ||
+           afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
                return;
 
        cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
        cxl_unmap_irq(afu->serr_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
        kfree(afu->err_irq_name);
+       afu->serr_virq = 0;
 }
 
 int cxl_native_register_psl_irq(struct cxl_afu *afu)
@@ -1375,12 +1380,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
 
 void cxl_native_release_psl_irq(struct cxl_afu *afu)
 {
-       if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
+       if (afu->native->psl_virq == 0 ||
+           afu->native->psl_virq !=
+           irq_find_mapping(NULL, afu->native->psl_hwirq))
                return;
 
        cxl_unmap_irq(afu->native->psl_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
        kfree(afu->psl_irq_name);
+       afu->native->psl_virq = 0;
 }
 
 static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
index a60926410438b98c2e414de081f7c8093bac5862..903d5813023a93588c08857ff0db1339bbb99c86 100644 (file)
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
 static int nvme_char_major;
 module_param(nvme_char_major, int, 0);
 
-static unsigned long default_ps_max_latency_us = 25000;
+static unsigned long default_ps_max_latency_us = 100000;
 module_param(default_ps_max_latency_us, ulong, 0644);
 MODULE_PARM_DESC(default_ps_max_latency_us,
                 "max power saving latency for new devices; use PM QOS to change per device");
@@ -1342,7 +1342,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
         * transitioning between power states.  Therefore, when running
         * in any given state, we will enter the next lower-power
         * non-operational state after waiting 50 * (enlat + exlat)
-        * microseconds, as long as that state's total latency is under
+        * microseconds, as long as that state's exit latency is under
         * the requested maximum latency.
         *
         * We will not autonomously enter any non-operational state for
@@ -1387,7 +1387,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
                 * lowest-power state, not the number of states.
                 */
                for (state = (int)ctrl->npss; state >= 0; state--) {
-                       u64 total_latency_us, transition_ms;
+                       u64 total_latency_us, exit_latency_us, transition_ms;
 
                        if (target)
                                table->entries[state] = target;
@@ -1408,12 +1408,15 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
                              NVME_PS_FLAGS_NON_OP_STATE))
                                continue;
 
-                       total_latency_us =
-                               (u64)le32_to_cpu(ctrl->psd[state].entry_lat) +
-                               + le32_to_cpu(ctrl->psd[state].exit_lat);
-                       if (total_latency_us > ctrl->ps_max_latency_us)
+                       exit_latency_us =
+                               (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
+                       if (exit_latency_us > ctrl->ps_max_latency_us)
                                continue;
 
+                       total_latency_us =
+                               exit_latency_us +
+                               le32_to_cpu(ctrl->psd[state].entry_lat);
+
                        /*
                         * This state is good.  Use it as the APST idle
                         * target for higher power states.
@@ -2438,6 +2441,10 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
        struct nvme_ns *ns;
 
        mutex_lock(&ctrl->namespaces_mutex);
+
+       /* Forcibly start all queues to avoid having stuck requests */
+       blk_mq_start_hw_queues(ctrl->admin_q);
+
        list_for_each_entry(ns, &ctrl->namespaces, list) {
                /*
                 * Revalidating a dead namespace sets capacity to 0. This will
index 5b14cbefb7240d5e7d50bb1ade8fd958417282e8..92964cef0f4be5795bed3e874407c74a3e3cc725 100644 (file)
@@ -1139,6 +1139,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 /* *********************** NVME Ctrl Routines **************************** */
 
 static void __nvme_fc_final_op_cleanup(struct request *rq);
+static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
 
 static int
 nvme_fc_reinit_request(void *data, struct request *rq)
@@ -1265,7 +1266,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
        struct nvme_command *sqe = &op->cmd_iu.sqe;
        __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
        union nvme_result result;
-       bool complete_rq;
+       bool complete_rq, terminate_assoc = true;
 
        /*
         * WARNING:
@@ -1294,6 +1295,14 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
         * fabricate a CQE, the following fields will not be set as they
         * are not referenced:
         *      cqe.sqid,  cqe.sqhd,  cqe.command_id
+        *
+        * Failure or error of an individual i/o, in a transport
+        * detected fashion unrelated to the nvme completion status,
+        * can potentially cause the initiator and target sides to get out
+        * of sync on SQ head/tail (aka outstanding io count allowed).
+        * Per FC-NVME spec, failure of an individual command requires
+        * the connection to be terminated, which in turn requires the
+        * association to be terminated.
         */
 
        fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
@@ -1359,6 +1368,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
                goto done;
        }
 
+       terminate_assoc = false;
+
 done:
        if (op->flags & FCOP_FLAGS_AEN) {
                nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
@@ -1366,7 +1377,7 @@ done:
                atomic_set(&op->state, FCPOP_STATE_IDLE);
                op->flags = FCOP_FLAGS_AEN;     /* clear other flags */
                nvme_fc_ctrl_put(ctrl);
-               return;
+               goto check_error;
        }
 
        complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
@@ -1379,6 +1390,10 @@ done:
                nvme_end_request(rq, status, result);
        } else
                __nvme_fc_final_op_cleanup(rq);
+
+check_error:
+       if (terminate_assoc)
+               nvme_fc_error_recovery(ctrl, "transport detected io error");
 }
 
 static int
@@ -2791,6 +2806,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
                ctrl->ctrl.opts = NULL;
                /* initiate nvme ctrl ref counting teardown */
                nvme_uninit_ctrl(&ctrl->ctrl);
+               nvme_put_ctrl(&ctrl->ctrl);
 
                /* as we're past the point where we transition to the ref
                 * counting teardown path, if we return a bad pointer here,
index d52701df72457d0fa2b85a168c500fd022b8b717..951042a375d6b22dbd34988e38fef7114593c366 100644 (file)
@@ -1367,7 +1367,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
        bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
 
        /* If there is a reset ongoing, we shouldn't reset again. */
-       if (work_busy(&dev->reset_work))
+       if (dev->ctrl.state == NVME_CTRL_RESETTING)
                return false;
 
        /* We shouldn't reset unless the controller is on fatal error state
@@ -1903,7 +1903,7 @@ static void nvme_reset_work(struct work_struct *work)
        bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
        int result = -ENODEV;
 
-       if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
+       if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
                goto out;
 
        /*
@@ -1913,9 +1913,6 @@ static void nvme_reset_work(struct work_struct *work)
        if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
                nvme_dev_disable(dev, false);
 
-       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
-               goto out;
-
        result = nvme_pci_enable(dev);
        if (result)
                goto out;
@@ -2009,8 +2006,8 @@ static int nvme_reset(struct nvme_dev *dev)
 {
        if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
                return -ENODEV;
-       if (work_busy(&dev->reset_work))
-               return -ENODEV;
+       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+               return -EBUSY;
        if (!queue_work(nvme_workq, &dev->reset_work))
                return -EBUSY;
        return 0;
@@ -2136,6 +2133,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto release_pools;
 
+       nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
        dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
        queue_work(nvme_workq, &dev->reset_work);
@@ -2179,6 +2177,7 @@ static void nvme_remove(struct pci_dev *pdev)
 
        nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
 
+       cancel_work_sync(&dev->reset_work);
        pci_set_drvdata(pdev, NULL);
 
        if (!pci_device_is_present(pdev)) {
index 28bd255c144dcca10aa60cede2c9a51cd101426a..24397d306d532213cf66e1ca0de9aa43bf12d3d5 100644 (file)
@@ -753,28 +753,26 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        if (ret)
                goto requeue;
 
-       blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
-
        ret = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (ret)
-               goto stop_admin_q;
+               goto requeue;
 
        set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
 
        ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
        if (ret)
-               goto stop_admin_q;
+               goto requeue;
 
        nvme_start_keep_alive(&ctrl->ctrl);
 
        if (ctrl->queue_count > 1) {
                ret = nvme_rdma_init_io_queues(ctrl);
                if (ret)
-                       goto stop_admin_q;
+                       goto requeue;
 
                ret = nvme_rdma_connect_io_queues(ctrl);
                if (ret)
-                       goto stop_admin_q;
+                       goto requeue;
        }
 
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -782,7 +780,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        ctrl->ctrl.opts->nr_reconnects = 0;
 
        if (ctrl->queue_count > 1) {
-               nvme_start_queues(&ctrl->ctrl);
                nvme_queue_scan(&ctrl->ctrl);
                nvme_queue_async_events(&ctrl->ctrl);
        }
@@ -791,8 +788,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 
        return;
 
-stop_admin_q:
-       blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
 requeue:
        dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
                        ctrl->ctrl.opts->nr_reconnects);
@@ -823,6 +818,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
 
+       /*
+        * queues are not alive anymore, so restart the queues to fail fast
+        * new IO
+        */
+       blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
+       nvme_start_queues(&ctrl->ctrl);
+
        nvme_rdma_reconnect_or_remove(ctrl);
 }
 
@@ -1433,7 +1435,7 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
 /*
  * We cannot accept any other command until the Connect command has completed.
  */
-static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
+static inline int nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
                struct request *rq)
 {
        if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
@@ -1441,11 +1443,22 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
 
                if (!blk_rq_is_passthrough(rq) ||
                    cmd->common.opcode != nvme_fabrics_command ||
-                   cmd->fabrics.fctype != nvme_fabrics_type_connect)
-                       return false;
+                   cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+                       /*
+                        * reconnecting state means transport disruption, which
+                        * can take a long time and even might fail permanently,
+                        * so we can't let incoming I/O be requeued forever.
+                        * fail it fast to allow upper layers a chance to
+                        * failover.
+                        */
+                       if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
+                               return -EIO;
+                       else
+                               return -EAGAIN;
+               }
        }
 
-       return true;
+       return 0;
 }
 
 static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1463,8 +1476,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        WARN_ON_ONCE(rq->tag < 0);
 
-       if (!nvme_rdma_queue_is_ready(queue, rq))
-               return BLK_MQ_RQ_QUEUE_BUSY;
+       ret = nvme_rdma_queue_is_ready(queue, rq);
+       if (unlikely(ret))
+               goto err;
 
        dev = queue->device->dev;
        ib_dma_sync_single_for_cpu(dev, sqe->dma,
index 9416d052cb89474e811d8c4e8ca5ca3a81732fbb..28c38c756f92858906ca2aee54a9c922522f9253 100644 (file)
@@ -144,8 +144,8 @@ int of_dma_configure(struct device *dev, struct device_node *np)
                coherent ? " " : " not ");
 
        iommu = of_iommu_configure(dev, np);
-       if (IS_ERR(iommu))
-               return PTR_ERR(iommu);
+       if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
 
        dev_dbg(dev, "device is%sbehind an iommu\n",
                iommu ? " " : " not ");
index 35ce53edabf90009efcd228e71a109e410337708..d5e5229308f2291136ebe7c6061346507a733907 100644 (file)
@@ -155,3 +155,5 @@ static int __init hi6220_reset_init(void)
 }
 
 postcore_initcall(hi6220_reset_init);
+
+MODULE_LICENSE("GPL v2");
index 7a92a5e1d40c6f17227936ee2b6925286deab511..feca75b07fddce01e6e121542d3e0b74d321f85f 100644 (file)
@@ -362,8 +362,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
                                st->global_error = 1;
                }
        }
-       st->va += PAGE_SIZE * nr;
-       st->index += nr;
+       st->va += XEN_PAGE_SIZE * nr;
+       st->index += nr / XEN_PFN_PER_PAGE;
 
        return 0;
 }
index 4eac2670bfa1a016679971f36ad698fe1d9f6a4e..92f20832fd28770c16ab9d34c5560efce2a888c6 100644 (file)
@@ -78,6 +78,7 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 
 struct iommu_domain;
 struct msi_msg;
+struct device;
 
 static inline int iommu_dma_init(void)
 {
index 9ec5e22846e0f302e7e1b1d174d9ead17e619ee3..0e306c5a86d6ee90debc824aa5d18e8f6d078f4d 100644 (file)
@@ -153,7 +153,7 @@ struct elevator_type
 #endif
 
        /* managed by elevator core */
-       char icq_cache_name[ELV_NAME_MAX + 5];  /* elvname + "_io_cq" */
+       char icq_cache_name[ELV_NAME_MAX + 6];  /* elvname + "_io_cq" */
        struct list_head list;
 };
 
index 0b1cf32edfd7ba1c456252124e23c68450d5bcc3..d9718378a8bee0b327d08c2e80a6fd3b5490b967 100644 (file)
@@ -189,8 +189,6 @@ struct platform_suspend_ops {
 struct platform_freeze_ops {
        int (*begin)(void);
        int (*prepare)(void);
-       void (*wake)(void);
-       void (*sync)(void);
        void (*restore)(void);
        void (*end)(void);
 };
@@ -430,8 +428,7 @@ extern unsigned int pm_wakeup_irq;
 
 extern bool pm_wakeup_pending(void);
 extern void pm_system_wakeup(void);
-extern void pm_system_cancel_wakeup(void);
-extern void pm_wakeup_clear(bool reset);
+extern void pm_wakeup_clear(void);
 extern void pm_system_irq_wakeup(unsigned int irq_number);
 extern bool pm_get_wakeup_count(unsigned int *count, bool block);
 extern bool pm_save_wakeup_count(unsigned int count);
@@ -481,7 +478,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
 
 static inline bool pm_wakeup_pending(void) { return false; }
 static inline void pm_system_wakeup(void) {}
-static inline void pm_wakeup_clear(bool reset) {}
+static inline void pm_wakeup_clear(void) {}
 static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
 
 static inline void lock_system_sleep(void) {}
index 6e75a5c9412dee17daabc1cb131bb517ad431207..6c4e523dc1e2e6b53d44bf6a540bc168bcde1eca 100644 (file)
@@ -7316,6 +7316,21 @@ int perf_event_account_interrupt(struct perf_event *event)
        return __perf_event_account_interrupt(event, 1);
 }
 
+static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+{
+       /*
+        * Due to interrupt latency (AKA "skid"), we may enter the
+        * kernel before taking an overflow, even if the PMU is only
+        * counting user events.
+        * To avoid leaking information to userspace, we must always
+        * reject kernel samples when exclude_kernel is set.
+        */
+       if (event->attr.exclude_kernel && !user_mode(regs))
+               return false;
+
+       return true;
+}
+
 /*
  * Generic event overflow handling, sampling.
  */
@@ -7336,6 +7351,12 @@ static int __perf_event_overflow(struct perf_event *event,
 
        ret = __perf_event_account_interrupt(event, throttle);
 
+       /*
+        * For security, drop the skid kernel samples if necessary.
+        */
+       if (!sample_is_allowed(event, regs))
+               return ret;
+
        /*
         * XXX event_limit might not quite work as expected on inherited
         * events
index 78672d324a6ef95394ad72a0b0ba29c7d1155d5d..c7209f060eeb7c8672cf8f07ba9c83ac6c9460ac 100644 (file)
@@ -132,7 +132,7 @@ int freeze_processes(void)
        if (!pm_freezing)
                atomic_inc(&system_freezing_cnt);
 
-       pm_wakeup_clear(true);
+       pm_wakeup_clear();
        pr_info("Freezing user space processes ... ");
        pm_freezing = true;
        error = try_to_freeze_tasks(true);
index c0248c74d6d4cef6dbf09f485f36686862c29094..15e6baef5c73f90b6817c0b1c4e871ea40e30318 100644 (file)
@@ -72,8 +72,6 @@ static void freeze_begin(void)
 
 static void freeze_enter(void)
 {
-       trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
-
        spin_lock_irq(&suspend_freeze_lock);
        if (pm_wakeup_pending())
                goto out;
@@ -100,27 +98,6 @@ static void freeze_enter(void)
  out:
        suspend_freeze_state = FREEZE_STATE_NONE;
        spin_unlock_irq(&suspend_freeze_lock);
-
-       trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
-}
-
-static void s2idle_loop(void)
-{
-       do {
-               freeze_enter();
-
-               if (freeze_ops && freeze_ops->wake)
-                       freeze_ops->wake();
-
-               dpm_resume_noirq(PMSG_RESUME);
-               if (freeze_ops && freeze_ops->sync)
-                       freeze_ops->sync();
-
-               if (pm_wakeup_pending())
-                       break;
-
-               pm_wakeup_clear(false);
-       } while (!dpm_suspend_noirq(PMSG_SUSPEND));
 }
 
 void freeze_wake(void)
@@ -394,8 +371,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
         * all the devices are suspended.
         */
        if (state == PM_SUSPEND_FREEZE) {
-               s2idle_loop();
-               goto Platform_early_resume;
+               trace_suspend_resume(TPS("machine_suspend"), state, true);
+               freeze_enter();
+               trace_suspend_resume(TPS("machine_suspend"), state, false);
+               goto Platform_wake;
        }
 
        error = disable_nonboot_cpus();
index a1aecf44ab07c70ab9f33d455646559313926344..a1db38abac5b750e8ce228b441b27900360665ec 100644 (file)
@@ -269,7 +269,6 @@ static struct console *exclusive_console;
 #define MAX_CMDLINECONSOLES 8
 
 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
-static int console_cmdline_cnt;
 
 static int preferred_console = -1;
 int console_set_on_cmdline;
@@ -1906,25 +1905,12 @@ static int __add_preferred_console(char *name, int idx, char *options,
         *      See if this tty is not yet registered, and
         *      if we have a slot free.
         */
-       for (i = 0, c = console_cmdline; i < console_cmdline_cnt; i++, c++) {
+       for (i = 0, c = console_cmdline;
+            i < MAX_CMDLINECONSOLES && c->name[0];
+            i++, c++) {
                if (strcmp(c->name, name) == 0 && c->index == idx) {
-                       if (brl_options)
-                               return 0;
-
-                       /*
-                        * Maintain an invariant that will help to find if
-                        * the matching console is preferred, see
-                        * register_console():
-                        *
-                        * The last non-braille console is always
-                        * the preferred one.
-                        */
-                       if (i != console_cmdline_cnt - 1)
-                               swap(console_cmdline[i],
-                                    console_cmdline[console_cmdline_cnt - 1]);
-
-                       preferred_console = console_cmdline_cnt - 1;
-
+                       if (!brl_options)
+                               preferred_console = i;
                        return 0;
                }
        }
@@ -1937,7 +1923,6 @@ static int __add_preferred_console(char *name, int idx, char *options,
        braille_set_options(c, brl_options);
 
        c->index = idx;
-       console_cmdline_cnt++;
        return 0;
 }
 /*
@@ -2477,23 +2462,12 @@ void register_console(struct console *newcon)
        }
 
        /*
-        * See if this console matches one we selected on the command line.
-        *
-        * There may be several entries in the console_cmdline array matching
-        * with the same console, one with newcon->match(), another by
-        * name/index:
-        *
-        *      pl011,mmio,0x87e024000000,115200 -- added from SPCR
-        *      ttyAMA0 -- added from command line
-        *
-        * Traverse the console_cmdline array in reverse order to be
-        * sure that if this console is preferred then it will be the first
-        * matching entry.  We use the invariant that is maintained in
-        * __add_preferred_console().
+        *      See if this console matches one we selected on
+        *      the command line.
         */
-       for (i = console_cmdline_cnt - 1; i >= 0; i--) {
-               c = console_cmdline + i;
-
+       for (i = 0, c = console_cmdline;
+            i < MAX_CMDLINECONSOLES && c->name[0];
+            i++, c++) {
                if (!newcon->match ||
                    newcon->match(newcon, c->name, c->index, c->options) != 0) {
                        /* default matching */
index 2f836ca09860e83995630de7be6546731523489a..cd67d1c12cf1ca9a32daa4de797dc0a5ec7bbb86 100644 (file)
@@ -1618,6 +1618,7 @@ static int snd_timer_user_tselect(struct file *file,
        if (err < 0)
                goto __err;
 
+       tu->qhead = tu->qtail = tu->qused = 0;
        kfree(tu->queue);
        tu->queue = NULL;
        kfree(tu->tqueue);
@@ -1959,6 +1960,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
        tu = file->private_data;
        unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
+       mutex_lock(&tu->ioctl_lock);
        spin_lock_irq(&tu->qlock);
        while ((long)count - result >= unit) {
                while (!tu->qused) {
@@ -1974,7 +1976,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                        add_wait_queue(&tu->qchange_sleep, &wait);
 
                        spin_unlock_irq(&tu->qlock);
+                       mutex_unlock(&tu->ioctl_lock);
                        schedule();
+                       mutex_lock(&tu->ioctl_lock);
                        spin_lock_irq(&tu->qlock);
 
                        remove_wait_queue(&tu->qchange_sleep, &wait);
@@ -1994,7 +1998,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                tu->qused--;
                spin_unlock_irq(&tu->qlock);
 
-               mutex_lock(&tu->ioctl_lock);
                if (tu->tread) {
                        if (copy_to_user(buffer, &tu->tqueue[qhead],
                                         sizeof(struct snd_timer_tread)))
@@ -2004,7 +2007,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                                         sizeof(struct snd_timer_read)))
                                err = -EFAULT;
                }
-               mutex_unlock(&tu->ioctl_lock);
 
                spin_lock_irq(&tu->qlock);
                if (err < 0)
@@ -2014,6 +2016,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
        }
  _error:
        spin_unlock_irq(&tu->qlock);
+       mutex_unlock(&tu->ioctl_lock);
        return result > 0 ? result : err;
 }
 
index a57988d617e934847bff6b56f08b64b813071875..cbeebc0a9711e8283090762450525a84022db600 100644 (file)
@@ -5854,7 +5854,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
@@ -5862,13 +5866,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+       SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
-       SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
        SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
index 7ae46c2647d453bcad1176b6877fcbbae110416b..b7ef8c59b49a2bdb2895f2203e0c207170227c6f 100644 (file)
@@ -301,6 +301,14 @@ static int atmel_classd_codec_probe(struct snd_soc_codec *codec)
        return 0;
 }
 
+static int atmel_classd_codec_resume(struct snd_soc_codec *codec)
+{
+       struct snd_soc_card *card = snd_soc_codec_get_drvdata(codec);
+       struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
+
+       return regcache_sync(dd->regmap);
+}
+
 static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
 {
        return dev_get_regmap(dev, NULL);
@@ -308,6 +316,7 @@ static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
 
 static struct snd_soc_codec_driver soc_codec_dev_classd = {
        .probe          = atmel_classd_codec_probe,
+       .resume         = atmel_classd_codec_resume,
        .get_regmap     = atmel_classd_codec_get_remap,
        .component_driver = {
                .controls               = atmel_classd_snd_controls,
index 6dd7578f0bb8da118adfdb6da76e3247289bfc32..024d83fa6a7f78b81da8b55c8dbdec977871ef46 100644 (file)
@@ -772,7 +772,7 @@ static int da7213_dai_event(struct snd_soc_dapm_widget *w,
                                ++i;
                                msleep(50);
                        }
-               } while ((i < DA7213_SRM_CHECK_RETRIES) & (!srm_lock));
+               } while ((i < DA7213_SRM_CHECK_RETRIES) && (!srm_lock));
 
                if (!srm_lock)
                        dev_warn(codec->dev, "SRM failed to lock\n");
index 9c365a7f758dbb9f1227c726f148b19672402f7c..7899a2cdeb42f46c5d76cd051319a4b547b1ed04 100644 (file)
@@ -1108,6 +1108,13 @@ static const struct dmi_system_id force_combo_jack_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
                }
        },
+       {
+               .ident = "Thinkpad Helix 2nd",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix 2nd")
+               }
+       },
 
        { }
 };
index 2c9dedab5184ff74909caf163f7d0697d8b67949..bc136d2bd7cdeb68b5ca7a24db94a193ede323b3 100644 (file)
@@ -202,7 +202,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
        if (ret < 0)
                return ret;
 
-       ret = asoc_simple_card_init_mic(rtd->card, &priv->hp_jack, PREFIX);
+       ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
        if (ret < 0)
                return ret;
 
index 58c525096a7cbcd6ea4fd833d06e03a0127201fe..498b15345b1a657d608a3fcff773dae206e819b0 100644 (file)
@@ -413,8 +413,11 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
        u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
        u64 *ipc_header = (u64 *)(&header);
        struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
+       unsigned long flags;
 
+       spin_lock_irqsave(&ipc->dsp->spinlock, flags);
        msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
+       spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
        if (msg == NULL) {
                dev_dbg(ipc->dev, "ipc: rx list is empty\n");
                return;
@@ -456,8 +459,10 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
                }
        }
 
+       spin_lock_irqsave(&ipc->dsp->spinlock, flags);
        list_del(&msg->list);
        sst_ipc_tx_msg_reply_complete(ipc, msg);
+       spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
 }
 
 irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
index 3a99712e44a80df81f7ad27d52e69501f617e948..64a0f8ed33e135eb5c0af683624afa3ebdb616b9 100644 (file)
@@ -2502,7 +2502,7 @@ static int skl_tplg_get_manifest_tkn(struct device *dev,
 
                        if (ret < 0)
                                return ret;
-                       tkn_count += ret;
+                       tkn_count = ret;
 
                        tuple_size += tkn_count *
                                sizeof(struct snd_soc_tplg_vendor_string_elem);
index 6df3b317a4768e008b539f0a619308e9545a3992..4c9b5781282bb149e8d749b32f2a1bd5c2987338 100644 (file)
@@ -410,7 +410,7 @@ static int skl_free(struct hdac_ext_bus *ebus)
        struct skl *skl  = ebus_to_skl(ebus);
        struct hdac_bus *bus = ebus_to_hbus(ebus);
 
-       skl->init_failed = 1; /* to be sure */
+       skl->init_done = 0; /* to be sure */
 
        snd_hdac_ext_stop_streams(ebus);
 
@@ -428,8 +428,10 @@ static int skl_free(struct hdac_ext_bus *ebus)
 
        snd_hdac_ext_bus_exit(ebus);
 
+       cancel_work_sync(&skl->probe_work);
        if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
                snd_hdac_i915_exit(&ebus->bus);
+
        return 0;
 }
 
@@ -566,6 +568,84 @@ static const struct hdac_bus_ops bus_core_ops = {
        .get_response = snd_hdac_bus_get_response,
 };
 
+static int skl_i915_init(struct hdac_bus *bus)
+{
+       int err;
+
+       /*
+        * The HDMI codec is in GPU so we need to ensure that it is powered
+        * up and ready for probe
+        */
+       err = snd_hdac_i915_init(bus);
+       if (err < 0)
+               return err;
+
+       err = snd_hdac_display_power(bus, true);
+       if (err < 0)
+               dev_err(bus->dev, "Cannot turn on display power on i915\n");
+
+       return err;
+}
+
+static void skl_probe_work(struct work_struct *work)
+{
+       struct skl *skl = container_of(work, struct skl, probe_work);
+       struct hdac_ext_bus *ebus = &skl->ebus;
+       struct hdac_bus *bus = ebus_to_hbus(ebus);
+       struct hdac_ext_link *hlink = NULL;
+       int err;
+
+       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+               err = skl_i915_init(bus);
+               if (err < 0)
+                       return;
+       }
+
+       err = skl_init_chip(bus, true);
+       if (err < 0) {
+               dev_err(bus->dev, "Init chip failed with err: %d\n", err);
+               goto out_err;
+       }
+
+       /* codec detection */
+       if (!bus->codec_mask)
+               dev_info(bus->dev, "no hda codecs found!\n");
+
+       /* create codec instances */
+       err = skl_codec_create(ebus);
+       if (err < 0)
+               goto out_err;
+
+       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+               err = snd_hdac_display_power(bus, false);
+               if (err < 0) {
+                       dev_err(bus->dev, "Cannot turn off display power on i915\n");
+                       return;
+               }
+       }
+
+       /* register platform dai and controls */
+       err = skl_platform_register(bus->dev);
+       if (err < 0)
+               return;
+       /*
+        * we are done probing so decrement link counts
+        */
+       list_for_each_entry(hlink, &ebus->hlink_list, list)
+               snd_hdac_ext_bus_link_put(ebus, hlink);
+
+       /* configure PM */
+       pm_runtime_put_noidle(bus->dev);
+       pm_runtime_allow(bus->dev);
+       skl->init_done = 1;
+
+       return;
+
+out_err:
+       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+               err = snd_hdac_display_power(bus, false);
+}
+
 /*
  * constructor
  */
@@ -593,6 +673,7 @@ static int skl_create(struct pci_dev *pci,
        snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
        ebus->bus.use_posbuf = 1;
        skl->pci = pci;
+       INIT_WORK(&skl->probe_work, skl_probe_work);
 
        ebus->bus.bdl_pos_adj = 0;
 
@@ -601,27 +682,6 @@ static int skl_create(struct pci_dev *pci,
        return 0;
 }
 
-static int skl_i915_init(struct hdac_bus *bus)
-{
-       int err;
-
-       /*
-        * The HDMI codec is in GPU so we need to ensure that it is powered
-        * up and ready for probe
-        */
-       err = snd_hdac_i915_init(bus);
-       if (err < 0)
-               return err;
-
-       err = snd_hdac_display_power(bus, true);
-       if (err < 0) {
-               dev_err(bus->dev, "Cannot turn on display power on i915\n");
-               return err;
-       }
-
-       return err;
-}
-
 static int skl_first_init(struct hdac_ext_bus *ebus)
 {
        struct skl *skl = ebus_to_skl(ebus);
@@ -684,20 +744,7 @@ static int skl_first_init(struct hdac_ext_bus *ebus)
        /* initialize chip */
        skl_init_pci(skl);
 
-       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
-               err = skl_i915_init(bus);
-               if (err < 0)
-                       return err;
-       }
-
-       skl_init_chip(bus, true);
-
-       /* codec detection */
-       if (!bus->codec_mask) {
-               dev_info(bus->dev, "no hda codecs found!\n");
-       }
-
-       return 0;
+       return skl_init_chip(bus, true);
 }
 
 static int skl_probe(struct pci_dev *pci,
@@ -706,7 +753,6 @@ static int skl_probe(struct pci_dev *pci,
        struct skl *skl;
        struct hdac_ext_bus *ebus = NULL;
        struct hdac_bus *bus = NULL;
-       struct hdac_ext_link *hlink = NULL;
        int err;
 
        /* we use ext core ops, so provide NULL for ops here */
@@ -729,7 +775,7 @@ static int skl_probe(struct pci_dev *pci,
 
        if (skl->nhlt == NULL) {
                err = -ENODEV;
-               goto out_display_power_off;
+               goto out_free;
        }
 
        err = skl_nhlt_create_sysfs(skl);
@@ -760,56 +806,24 @@ static int skl_probe(struct pci_dev *pci,
        if (bus->mlcap)
                snd_hdac_ext_bus_get_ml_capabilities(ebus);
 
+       snd_hdac_bus_stop_chip(bus);
+
        /* create device for soc dmic */
        err = skl_dmic_device_register(skl);
        if (err < 0)
                goto out_dsp_free;
 
-       /* register platform dai and controls */
-       err = skl_platform_register(bus->dev);
-       if (err < 0)
-               goto out_dmic_free;
-
-       /* create codec instances */
-       err = skl_codec_create(ebus);
-       if (err < 0)
-               goto out_unregister;
-
-       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
-               err = snd_hdac_display_power(bus, false);
-               if (err < 0) {
-                       dev_err(bus->dev, "Cannot turn off display power on i915\n");
-                       return err;
-               }
-       }
-
-       /*
-        * we are done probling so decrement link counts
-        */
-       list_for_each_entry(hlink, &ebus->hlink_list, list)
-               snd_hdac_ext_bus_link_put(ebus, hlink);
-
-       /* configure PM */
-       pm_runtime_put_noidle(bus->dev);
-       pm_runtime_allow(bus->dev);
+       schedule_work(&skl->probe_work);
 
        return 0;
 
-out_unregister:
-       skl_platform_unregister(bus->dev);
-out_dmic_free:
-       skl_dmic_device_unregister(skl);
 out_dsp_free:
        skl_free_dsp(skl);
 out_mach_free:
        skl_machine_device_unregister(skl);
 out_nhlt_free:
        skl_nhlt_free(skl->nhlt);
-out_display_power_off:
-       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
-               snd_hdac_display_power(bus, false);
 out_free:
-       skl->init_failed = 1;
        skl_free(ebus);
 
        return err;
@@ -828,7 +842,7 @@ static void skl_shutdown(struct pci_dev *pci)
 
        skl = ebus_to_skl(ebus);
 
-       if (skl->init_failed)
+       if (!skl->init_done)
                return;
 
        snd_hdac_ext_stop_streams(ebus);
index a454f6035f3e64b3be01ea4c53153c141e3a9561..2a630fcb7f088c1d548f06de31933ca662eafca2 100644 (file)
@@ -46,7 +46,7 @@ struct skl {
        struct hdac_ext_bus ebus;
        struct pci_dev *pci;
 
-       unsigned int init_failed:1; /* delayed init failed */
+       unsigned int init_done:1; /* delayed init status */
        struct platform_device *dmic_dev;
        struct platform_device *i2s_dev;
        struct snd_soc_platform *platform;
@@ -64,6 +64,8 @@ struct skl {
        const struct firmware *tplg;
 
        int supend_active;
+
+       struct work_struct probe_work;
 };
 
 #define skl_to_ebus(s) (&(s)->ebus)
index 66203d107a11e5ff17e150adb145b15a0d27934e..d3b0dc145a560c8a35ddc4320810039f481db5aa 100644 (file)
@@ -507,7 +507,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
                                rbga = rbgx;
                                adg->rbga_rate_for_441khz = rate / div;
                                ckr |= brg_table[i] << 20;
-                               if (req_441kHz_rate)
+                               if (req_441kHz_rate &&
+                                   !(adg_mode_flags(adg) & AUDIO_OUT_48))
                                        parent_clk_name = __clk_get_name(clk);
                        }
                }
@@ -522,7 +523,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
                                rbgb = rbgx;
                                adg->rbgb_rate_for_48khz = rate / div;
                                ckr |= brg_table[i] << 16;
-                               if (req_48kHz_rate)
+                               if (req_48kHz_rate &&
+                                   (adg_mode_flags(adg) & AUDIO_OUT_48))
                                        parent_clk_name = __clk_get_name(clk);
                        }
                }
index 7d92a24b7cfa558afbb8331401c974c59d5f1ae5..d879c010cf03c4607ebdab3c854a582d816ddf62 100644 (file)
@@ -89,6 +89,7 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
        dev_dbg(dev, "ctu/mix path = 0x%08x", data);
 
        rsnd_mod_write(mod, CMD_ROUTE_SLCT, data);
+       rsnd_mod_write(mod, CMD_BUSIF_MODE, rsnd_get_busif_shift(io, mod) | 1);
        rsnd_mod_write(mod, CMD_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
 
        rsnd_adg_set_cmd_timsel_gen2(mod, io);
index 1744015408c38f2ad530fbcbae9a22027f5f0828..8c1f4e2e0c4fb8c3ac09a641b4b8a928defb871c 100644 (file)
@@ -343,6 +343,57 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
                return 0x76543210;
 }
 
+u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
+{
+       enum rsnd_mod_type playback_mods[] = {
+               RSND_MOD_SRC,
+               RSND_MOD_CMD,
+               RSND_MOD_SSIU,
+       };
+       enum rsnd_mod_type capture_mods[] = {
+               RSND_MOD_CMD,
+               RSND_MOD_SRC,
+               RSND_MOD_SSIU,
+       };
+       struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+       struct rsnd_mod *tmod = NULL;
+       enum rsnd_mod_type *mods =
+               rsnd_io_is_play(io) ?
+               playback_mods : capture_mods;
+       int i;
+
+       /*
+        * This is needed for 24bit data
+        * We need to shift 8bit
+        *
+        * Linux 24bit data is located as 0x00******
+        * HW    24bit data is located as 0x******00
+        *
+        */
+       switch (runtime->sample_bits) {
+       case 16:
+               return 0;
+       case 32:
+               break;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
+               tmod = rsnd_io_to_mod(io, mods[i]);
+               if (tmod)
+                       break;
+       }
+
+       if (tmod != mod)
+               return 0;
+
+       if (rsnd_io_is_play(io))
+               return  (0 << 20) | /* shift to Left */
+                       (8 << 16);  /* 8bit */
+       else
+               return  (1 << 20) | /* shift to Right */
+                       (8 << 16);  /* 8bit */
+}
+
 /*
  *     rsnd_dai functions
  */
index 63b6d3c28021024b1f06278c5c4f217a394faf8c..4b0980728e13ec75f18ac07135ab4971290b5310 100644 (file)
@@ -236,6 +236,7 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
                RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc,    0x20),
                RSND_GEN_M_REG(SRC_CTRL,        0x10,   0x20),
                RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18,   0x20),
+               RSND_GEN_M_REG(CMD_BUSIF_MODE,  0x184,  0x20),
                RSND_GEN_M_REG(CMD_BUSIF_DALIGN,0x188,  0x20),
                RSND_GEN_M_REG(CMD_ROUTE_SLCT,  0x18c,  0x20),
                RSND_GEN_M_REG(CMD_CTRL,        0x190,  0x20),
index dbf4163427e808d62dbc37aa62f64b482e65e7c4..323af41ecfcb8ffea222f25fbe6c524968f29fd5 100644 (file)
@@ -73,6 +73,7 @@ enum rsnd_reg {
        RSND_REG_SCU_SYS_INT_EN0,
        RSND_REG_SCU_SYS_INT_EN1,
        RSND_REG_CMD_CTRL,
+       RSND_REG_CMD_BUSIF_MODE,
        RSND_REG_CMD_BUSIF_DALIGN,
        RSND_REG_CMD_ROUTE_SLCT,
        RSND_REG_CMDOUT_TIMSEL,
@@ -204,6 +205,7 @@ void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
                    u32 mask, u32 data);
 u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
 u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
 
 /*
  *     R-Car DMA
index 20b5b2ec625ea7b1e1812ea83d07d35b48b948ea..76a477a3ccb5d88e18fd8398d9ad2b2616a99f48 100644 (file)
@@ -190,11 +190,13 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
        struct device *dev = rsnd_priv_to_dev(priv);
        struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+       int is_play = rsnd_io_is_play(io);
        int use_src = 0;
        u32 fin, fout;
        u32 ifscr, fsrate, adinr;
        u32 cr, route;
        u32 bsdsr, bsisr;
+       u32 i_busif, o_busif, tmp;
        uint ratio;
 
        if (!runtime)
@@ -270,6 +272,11 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
                break;
        }
 
+       /* BUSIF_MODE */
+       tmp = rsnd_get_busif_shift(io, mod);
+       i_busif = ( is_play ? tmp : 0) | 1;
+       o_busif = (!is_play ? tmp : 0) | 1;
+
        rsnd_mod_write(mod, SRC_ROUTE_MODE0, route);
 
        rsnd_mod_write(mod, SRC_SRCIR, 1);      /* initialize */
@@ -281,8 +288,9 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
        rsnd_mod_write(mod, SRC_BSISR, bsisr);
        rsnd_mod_write(mod, SRC_SRCIR, 0);      /* cancel initialize */
 
-       rsnd_mod_write(mod, SRC_I_BUSIF_MODE, 1);
-       rsnd_mod_write(mod, SRC_O_BUSIF_MODE, 1);
+       rsnd_mod_write(mod, SRC_I_BUSIF_MODE, i_busif);
+       rsnd_mod_write(mod, SRC_O_BUSIF_MODE, o_busif);
+
        rsnd_mod_write(mod, SRC_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
 
        rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout);
index 135c5669f7963bd228c9a1187bc6f5dfd13bc04e..91e5c07911b4a5b14364becf64c568d7a61cc1c4 100644 (file)
@@ -302,7 +302,7 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
         * always use 32bit system word.
         * see also rsnd_ssi_master_clk_enable()
         */
-       cr_own = FORCE | SWL_32 | PDTA;
+       cr_own = FORCE | SWL_32;
 
        if (rdai->bit_clk_inv)
                cr_own |= SCKP;
@@ -550,6 +550,13 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
                struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
                u32 *buf = (u32 *)(runtime->dma_area +
                                   rsnd_dai_pointer_offset(io, 0));
+               int shift = 0;
+
+               switch (runtime->sample_bits) {
+               case 32:
+                       shift = 8;
+                       break;
+               }
 
                /*
                 * 8/16/32 data can be assesse to TDR/RDR register
@@ -557,9 +564,9 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
                 * see rsnd_ssi_init()
                 */
                if (rsnd_io_is_play(io))
-                       rsnd_mod_write(mod, SSITDR, *buf);
+                       rsnd_mod_write(mod, SSITDR, (*buf) << shift);
                else
-                       *buf = rsnd_mod_read(mod, SSIRDR);
+                       *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
 
                elapsed = rsnd_dai_pointer_update(io, sizeof(*buf));
        }
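
The PIO interrupt path now shifts 32-bit-slot samples by 8 bits, left on transmit into SSITDR and right on receive from SSIRDR; my reading is that this compensates for dropping PDTA from SSICR earlier in this file's hunks, which leaves the register data MSB-aligned. A self-contained sketch of the transform on one sample; sample_shift() is a hypothetical stand-in and the sample value is made up.

----
#include <stdint.h>
#include <stdio.h>

/* Mirrors the PIO hunk above: only the 32-bit-slot case gets a shift. */
static int sample_shift(unsigned int sample_bits)
{
        switch (sample_bits) {
        case 32:
                return 8;
        default:
                return 0;
        }
}

int main(void)
{
        uint32_t sample = 0x00123456;           /* 24 valid bits in a 32-bit slot */
        int shift = sample_shift(32);

        uint32_t to_tdr   = sample << shift;    /* value written to SSITDR */
        uint32_t from_rdr = to_tdr >> shift;    /* value read back from SSIRDR */

        printf("mem=0x%08x tdr=0x%08x back=0x%08x\n", sample, to_tdr, from_rdr);
        return 0;
}
----
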
@@ -709,6 +716,11 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
                               struct rsnd_priv *priv)
 {
        struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+       struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
+
+       /* Do nothing for SSI parent mod */
+       if (ssi_parent_mod == mod)
+               return 0;
 
        /* PIO will request IRQ again */
        free_irq(ssi->irq, mod);
index 14fafdaf1395f9737191df18599ee58fc4f858fd..512d238b79e2895f13a4b1b7be3e145859a65280 100644 (file)
@@ -144,7 +144,8 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
                               (rsnd_io_is_play(io) ?
                                rsnd_runtime_channel_after_ctu(io) :
                                rsnd_runtime_channel_original(io)));
-               rsnd_mod_write(mod, SSI_BUSIF_MODE,  1);
+               rsnd_mod_write(mod, SSI_BUSIF_MODE,
+                              rsnd_get_busif_shift(io, mod) | 1);
                rsnd_mod_write(mod, SSI_BUSIF_DALIGN,
                               rsnd_get_dalign(mod, io));
        }
index aae099c0e50280d67f153d6769ac4237d531e169..754e3ef8d7ae1b8b188c3e52986f2c306fb7b763 100644 (file)
@@ -2286,6 +2286,9 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
        list_for_each_entry(rtd, &card->rtd_list, list)
                flush_delayed_work(&rtd->delayed_work);
 
+       /* free the ALSA card at first; this syncs with pending operations */
+       snd_card_free(card->snd_card);
+
        /* remove and free each DAI */
        soc_remove_dai_links(card);
        soc_remove_pcm_runtimes(card);
@@ -2300,9 +2303,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
        if (card->remove)
                card->remove(card);
 
-       snd_card_free(card->snd_card);
        return 0;
-
 }
 
 /* removes a socdev */
index e6c9902c6d82b0e2535059c1a915356e84856589..165c2b1d43177b7df8870d85d7911a1a615ce7e7 100644 (file)
@@ -240,9 +240,13 @@ Add a probe on schedule() function 12th line with recording cpu local variable:
  or
  ./perf probe --add='schedule:12 cpu'
 
- this will add one or more probes which has the name start with "schedule".
+Add one or more probes which have a name starting with "schedule".
 
- Add probes on lines in schedule() function which calls update_rq_clock().
+ ./perf probe schedule*
+ or
+ ./perf probe --add='schedule*'
+
+Add probes on lines in the schedule() function which call update_rq_clock().
 
  ./perf probe 'schedule;update_rq_clock*'
  or
index dfbb506d2c349744399637c78148fe6446b124bc..142606c0ec9c1dcbb9329f23cb9b9bc65085c419 100644 (file)
@@ -39,7 +39,7 @@ EVENT HANDLERS
 When perf script is invoked using a trace script, a user-defined
 'handler function' is called for each event in the trace.  If there's
 no handler function defined for a given event type, the event is
-ignored (or passed to a 'trace_handled' function, see below) and the
+ignored (or passed to a 'trace_unhandled' function, see below) and the
 next event is processed.
 
 Most of the event's field values are passed as arguments to the
index 54acba22155865a8fafc7629b9e3987c061fed47..51ec2d20068ad5df7b0a00a0983b5ec08d49979a 100644 (file)
@@ -149,10 +149,8 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
                print "id=%d, args=%s\n" % \
                (id, args),
 
-def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
-               common_pid, common_comm):
-               print_header(event_name, common_cpu, common_secs, common_nsecs,
-               common_pid, common_comm)
+def trace_unhandled(event_name, context, event_fields_dict):
+               print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
 
 def print_header(event_name, cpu, secs, nsecs, pid, comm):
        print "%-20s %5u %05u.%09u %8u %-20s " % \
@@ -321,7 +319,7 @@ So those are the essential steps in writing and running a script.  The
 process can be generalized to any tracepoint or set of tracepoints
 you're interested in - basically find the tracepoint(s) you're
 interested in by looking at the list of available events shown by
-'perf list' and/or look in /sys/kernel/debug/tracing events for
+'perf list' and/or look in /sys/kernel/debug/tracing/events/ for
 detailed event and field info, record the corresponding trace data
 using 'perf record', passing it the list of interesting events,
 generate a skeleton script using 'perf script -g python' and modify the
@@ -334,7 +332,7 @@ right place, you can have your script listed alongside the other
 scripts listed by the 'perf script -l' command e.g.:
 
 ----
-root@tropicana:~# perf script -l
+# perf script -l
 List of available trace scripts:
   wakeup-latency                       system-wide min/max/avg wakeup latency
   rw-by-file <comm>                    r/w activity for a program, by file
@@ -383,8 +381,6 @@ source tree:
 
 ----
 # ls -al kernel-source/tools/perf/scripts/python
-
-root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python
 total 32
 drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 .
 drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 ..
@@ -399,7 +395,7 @@ otherwise your script won't show up at run-time), 'perf script -l'
 should show a new entry for your script:
 
 ----
-root@tropicana:~# perf script -l
+# perf script -l
 List of available trace scripts:
   wakeup-latency                       system-wide min/max/avg wakeup latency
   rw-by-file <comm>                    r/w activity for a program, by file
@@ -437,7 +433,7 @@ EVENT HANDLERS
 When perf script is invoked using a trace script, a user-defined
 'handler function' is called for each event in the trace.  If there's
 no handler function defined for a given event type, the event is
-ignored (or passed to a 'trace_handled' function, see below) and the
+ignored (or passed to a 'trace_unhandled' function, see below) and the
 next event is processed.
 
 Most of the event's field values are passed as arguments to the
@@ -532,7 +528,7 @@ can implement a set of optional functions:
 gives scripts a chance to do setup tasks:
 
 ----
-def trace_begin:
+def trace_begin():
     pass
 ----
 
@@ -541,7 +537,7 @@ def trace_begin:
  as display results:
 
 ----
-def trace_end:
+def trace_end():
     pass
 ----
 
@@ -550,8 +546,7 @@ def trace_end:
  of common arguments are passed into it:
 
 ----
-def trace_unhandled(event_name, context, common_cpu, common_secs,
-        common_nsecs, common_pid, common_comm):
+def trace_unhandled(event_name, context, event_fields_dict):
     pass
 ----
 
index 837067f48a4c54a88b883e8de6dc95df0dd5b3d0..6b40e9f017404f87877668645f7afa8f28649788 100644 (file)
@@ -26,6 +26,7 @@ const char *const arm64_triplets[] = {
 
 const char *const powerpc_triplets[] = {
        "powerpc-unknown-linux-gnu-",
+       "powerpc-linux-gnu-",
        "powerpc64-unknown-linux-gnu-",
        "powerpc64-linux-gnu-",
        "powerpc64le-linux-gnu-",
index a935b502373253217d6f43680e6b88a6ab143bfe..ad9324d1daf9f29a990a0d8f903563873ac91ef9 100644 (file)
@@ -1578,6 +1578,7 @@ static void print_header(int argc, const char **argv)
 static void print_footer(void)
 {
        FILE *output = stat_config.output;
+       int n;
 
        if (!null_run)
                fprintf(output, "\n");
@@ -1590,7 +1591,9 @@ static void print_footer(void)
        }
        fprintf(output, "\n\n");
 
-       if (print_free_counters_hint)
+       if (print_free_counters_hint &&
+           sysctl__read_int("kernel/nmi_watchdog", &n) >= 0 &&
+           n > 0)
                fprintf(output,
 "Some events weren't counted. Try disabling the NMI watchdog:\n"
 "      echo 0 > /proc/sys/kernel/nmi_watchdog\n"
index d014350adc526722da3a12f29421cd6d3c00f4af..4b2a5d2981970baf86e4e458ef4ced256fc98c0e 100644 (file)
@@ -681,6 +681,10 @@ static struct syscall_fmt {
        { .name     = "mlockall",   .errmsg = true,
          .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
        { .name     = "mmap",       .hexret = true,
+/* The standard mmap maps to old_mmap on s390x */
+#if defined(__s390x__)
+       .alias = "old_mmap",
+#endif
          .arg_scnprintf = { [0] = SCA_HEX,       /* addr */
                             [2] = SCA_MMAP_PROT, /* prot */
                             [3] = SCA_MMAP_FLAGS, /* flags */ }, },
index e7664fe3bd33739fd92be2579c30102e481e8f03..8ba2c4618fe90231d1157e8218bf10a2cb82f6a0 100644 (file)
@@ -288,3 +288,17 @@ int test__bp_signal(int subtest __maybe_unused)
        return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ?
                TEST_OK : TEST_FAIL;
 }
+
+bool test__bp_signal_is_supported(void)
+{
+/*
+ * The powerpc so far does not have support to even create
+ * instruction breakpoint using the perf event interface.
+ * Once it's there we can release this.
+ */
+#ifdef __powerpc__
+       return false;
+#else
+       return true;
+#endif
+}
index 9e08d297f1a905f57554bf6fd7b9555e980ad10a..3ccfd58a8c3cf3e8b16cc513a67324b8f11eae7b 100644 (file)
@@ -97,10 +97,12 @@ static struct test generic_tests[] = {
        {
                .desc = "Breakpoint overflow signal handler",
                .func = test__bp_signal,
+               .is_supported = test__bp_signal_is_supported,
        },
        {
                .desc = "Breakpoint overflow sampling",
                .func = test__bp_signal_overflow,
+               .is_supported = test__bp_signal_is_supported,
        },
        {
                .desc = "Number of exit events of a simple workload",
@@ -401,6 +403,11 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
                if (!perf_test__matches(t, curr, argc, argv))
                        continue;
 
+               if (t->is_supported && !t->is_supported()) {
+                       pr_debug("%2d: %-*s: Disabled\n", i, width, t->desc);
+                       continue;
+               }
+
                pr_info("%2d: %-*s:", i, width, t->desc);
 
                if (intlist__find(skiplist, i)) {
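
The new .is_supported hook lets a test opt out on architectures where it cannot run (here the breakpoint tests on powerpc), and the runner skips it before printing the usual result line. A generic, self-contained sketch of the same table-plus-optional-predicate pattern; the struct, test names and predicates below are invented for illustration and are not perf's.

----
#include <stdbool.h>
#include <stdio.h>

struct test {
        const char *desc;
        int (*func)(void);
        bool (*is_supported)(void);     /* optional: NULL means "always run" */
};

static int dummy_pass(void) { return 0; }
static bool never_supported(void) { return false; }

static const struct test tests[] = {
        { .desc = "always runs",   .func = dummy_pass },
        { .desc = "arch-specific", .func = dummy_pass,
          .is_supported = never_supported },
};

int main(void)
{
        for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
                const struct test *t = &tests[i];

                /* Same shape as the __cmd_test() hunk: consult the
                 * predicate first and skip unsupported tests. */
                if (t->is_supported && !t->is_supported()) {
                        printf("%2zu: %s: Disabled\n", i + 1, t->desc);
                        continue;
                }
                printf("%2zu: %s: %s\n", i + 1, t->desc,
                       t->func() == 0 ? "Ok" : "FAILED");
        }
        return 0;
}
----
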
index 1f14e7612cbb1615e993c73758a3c9ac7024a0e1..94b7c7b02bdefbb33f2987677c11b3577ab062ec 100644 (file)
@@ -229,6 +229,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
        unsigned char buf2[BUFSZ];
        size_t ret_len;
        u64 objdump_addr;
+       const char *objdump_name;
+       char decomp_name[KMOD_DECOMP_LEN];
        int ret;
 
        pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
@@ -289,9 +291,25 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
                state->done[state->done_cnt++] = al.map->start;
        }
 
+       objdump_name = al.map->dso->long_name;
+       if (dso__needs_decompress(al.map->dso)) {
+               if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
+                                                decomp_name,
+                                                sizeof(decomp_name)) < 0) {
+                       pr_debug("decompression failed\n");
+                       return -1;
+               }
+
+               objdump_name = decomp_name;
+       }
+
        /* Read the object code using objdump */
        objdump_addr = map__rip_2objdump(al.map, al.addr);
-       ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
+       ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
+
+       if (dso__needs_decompress(al.map->dso))
+               unlink(objdump_name);
+
        if (ret > 0) {
                /*
                 * The kernel maps are inaccurate - assume objdump is right in
index 6318596294032602b2858acf42c2aa80993fdac9..577363809c9b1b54731f7e80b291278bf2764e78 100644 (file)
@@ -34,6 +34,7 @@ struct test {
                int (*get_nr)(void);
                const char *(*get_desc)(int subtest);
        } subtest;
+       bool (*is_supported)(void);
 };
 
 /* Tests */
@@ -99,6 +100,8 @@ const char *test__clang_subtest_get_desc(int subtest);
 int test__clang_subtest_get_nr(void);
 int test__unit_number__scnprint(int subtest);
 
+bool test__bp_signal_is_supported(void);
+
 #if defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
 struct thread;
index 683f8340460c1777f82f35860b3bb581a07f222a..ddbd56df91878884a4de5da2a29aae991e0689a0 100644 (file)
@@ -239,10 +239,20 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
        const char *s = strchr(ops->raw, '+');
        const char *c = strchr(ops->raw, ',');
 
-       if (c++ != NULL)
+       /*
+        * skip over possible up to 2 operands to get to address, e.g.:
+        * tbnz  w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
+        */
+       if (c++ != NULL) {
                ops->target.addr = strtoull(c, NULL, 16);
-       else
+               if (!ops->target.addr) {
+                       c = strchr(c, ',');
+                       if (c++ != NULL)
+                               ops->target.addr = strtoull(c, NULL, 16);
+               }
+       } else {
                ops->target.addr = strtoull(ops->raw, NULL, 16);
+       }
 
        if (s++ != NULL) {
                ops->target.offset = strtoull(s, NULL, 16);
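
The parse hunk copes with branch instructions that carry operands before the target, like the tbnz line quoted in the new comment: take the text after the first comma, and if it does not yield an address, retry after the second comma. A standalone sketch of that two-step scan; parse_target() is a hypothetical condensation of jump__parse(), and the sample strings are objdump-style text modelled on the comment, not captured output.

----
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mimics the jump__parse() hunk: the target address is the last
 * comma-separated operand, at most two commas in. */
static unsigned long long parse_target(const char *raw)
{
        const char *c = strchr(raw, ',');
        unsigned long long addr;

        if (c == NULL)
                return strtoull(raw, NULL, 16);

        addr = strtoull(c + 1, NULL, 16);
        if (!addr) {
                c = strchr(c + 1, ',');
                if (c != NULL)
                        addr = strtoull(c + 1, NULL, 16);
        }
        return addr;
}

int main(void)
{
        /* Sample operand strings in objdump style (illustrative only). */
        const char *ops[] = {
                "ffff0000083cd190 <security_file_permission+0xd0>",
                "w0, #26, ffff0000083cd190 <security_file_permission+0xd0>",
        };

        for (size_t i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
                printf("%-60s -> %llx\n", ops[i], parse_target(ops[i]));
        return 0;
}
----
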
@@ -257,10 +267,27 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
 static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
                           struct ins_operands *ops)
 {
+       const char *c = strchr(ops->raw, ',');
+
        if (!ops->target.addr || ops->target.offset < 0)
                return ins__raw_scnprintf(ins, bf, size, ops);
 
-       return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
+       if (c != NULL) {
+               const char *c2 = strchr(c + 1, ',');
+
+               /* check for 3-op insn */
+               if (c2 != NULL)
+                       c = c2;
+               c++;
+
+               /* mirror arch objdump's space-after-comma style */
+               if (*c == ' ')
+                       c++;
+       }
+
+       return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64,
+                        ins->name, c ? c - ops->raw : 0, ops->raw,
+                        ops->target.offset);
 }
 
 static struct ins_ops jump_ops = {
@@ -1294,6 +1321,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
        char linkname[PATH_MAX];
        char *build_id_filename;
        char *build_id_path = NULL;
+       char *pos;
 
        if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
            !dso__is_kcore(dso))
@@ -1313,7 +1341,14 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
        if (!build_id_path)
                return -1;
 
-       dirname(build_id_path);
+       /*
+        * old style build-id cache has name of XX/XXXXXXX.. while
+        * new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
+        * extract the build-id part of dirname in the new style only.
+        */
+       pos = strrchr(build_id_path, '/');
+       if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
+               dirname(build_id_path);
 
        if (dso__is_kcore(dso) ||
            readlink(build_id_path, linkname, sizeof(linkname)) < 0 ||
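
dirname() is now applied only to new-style build-id cache entries, whose path ends in a file name (elf, kallsyms or vdso) rather than in the 38-character remainder of the build-id, and the length test is what tells the two layouts apart. A standalone sketch of that discrimination; normalize() is hypothetical, and SBUILD_ID_SIZE being 41 (a 40-character sha1 plus NUL) is my assumption about perf's definition.

----
#include <stdio.h>
#include <string.h>
#include <libgen.h>

#define SBUILD_ID_SIZE 41       /* assumed: 40-char sha1 + NUL, as in perf */

/* Decide whether a build-id cache path already names the directory
 * (old style) or points at a file inside it (new style), mirroring
 * the dso__disassemble_filename() hunk above.  As in the hunk, the
 * dirname() return value is ignored; glibc edits the buffer in place. */
static void normalize(char *path)
{
        char *pos = strrchr(path, '/');

        if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
                dirname(path);          /* strip "/elf", "/kallsyms", ... */
}

int main(void)
{
        char old_style[] = ".debug/.build-id/ab/cdef0123456789abcdef0123456789abcdef01";
        char new_style[] = ".debug/.build-id/ab/cdef0123456789abcdef0123456789abcdef01/elf";

        normalize(old_style);
        normalize(new_style);
        printf("old: %s\nnew: %s\n", old_style, new_style);
        return 0;
}
----
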
@@ -1396,31 +1431,10 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
                                sizeof(symfs_filename));
                }
        } else if (dso__needs_decompress(dso)) {
-               char tmp[PATH_MAX];
-               struct kmod_path m;
-               int fd;
-               bool ret;
-
-               if (kmod_path__parse_ext(&m, symfs_filename))
-                       goto out;
-
-               snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX");
-
-               fd = mkstemp(tmp);
-               if (fd < 0) {
-                       free(m.ext);
-                       goto out;
-               }
-
-               ret = decompress_to_file(m.ext, symfs_filename, fd);
-
-               if (ret)
-                       pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename);
-
-               free(m.ext);
-               close(fd);
+               char tmp[KMOD_DECOMP_LEN];
 
-               if (!ret)
+               if (dso__decompress_kmodule_path(dso, symfs_filename,
+                                                tmp, sizeof(tmp)) < 0)
                        goto out;
 
                strcpy(symfs_filename, tmp);
@@ -1429,7 +1443,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
        snprintf(command, sizeof(command),
                 "%s %s%s --start-address=0x%016" PRIx64
                 " --stop-address=0x%016" PRIx64
-                " -l -d %s %s -C %s 2>/dev/null|grep -v %s:|expand",
+                " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
                 objdump_path ? objdump_path : "objdump",
                 disassembler_style ? "-M " : "",
                 disassembler_style ? disassembler_style : "",
index 168cc49654e7a4b18f69d177e29373f8a13e3471..e0148b081bdfbb7cead2c118a51ca81bd238e740 100644 (file)
@@ -278,51 +278,6 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
        return bf;
 }
 
-bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size)
-{
-       char *id_name = NULL, *ch;
-       struct stat sb;
-       char sbuild_id[SBUILD_ID_SIZE];
-
-       if (!dso->has_build_id)
-               goto err;
-
-       build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
-       id_name = build_id_cache__linkname(sbuild_id, NULL, 0);
-       if (!id_name)
-               goto err;
-       if (access(id_name, F_OK))
-               goto err;
-       if (lstat(id_name, &sb) == -1)
-               goto err;
-       if ((size_t)sb.st_size > size - 1)
-               goto err;
-       if (readlink(id_name, bf, size - 1) < 0)
-               goto err;
-
-       bf[sb.st_size] = '\0';
-
-       /*
-        * link should be:
-        * ../../lib/modules/4.4.0-rc4/kernel/net/ipv4/netfilter/nf_nat_ipv4.ko/a09fe3eb3147dafa4e3b31dbd6257e4d696bdc92
-        */
-       ch = strrchr(bf, '/');
-       if (!ch)
-               goto err;
-       if (ch - 3 < bf)
-               goto err;
-
-       free(id_name);
-       return strncmp(".ko", ch - 3, 3) == 0;
-err:
-       pr_err("Invalid build id: %s\n", id_name ? :
-                                        dso->long_name ? :
-                                        dso->short_name ? :
-                                        "[unknown]");
-       free(id_name);
-       return false;
-}
-
 #define dsos__for_each_with_build_id(pos, head)        \
        list_for_each_entry(pos, head, node)    \
                if (!pos->has_build_id)         \
index 8a89b195c1fc3a5c36d7ca260dafe9c6b3fa8262..96690a55c62c40394444a8f23df1cd03b840acfe 100644 (file)
@@ -17,7 +17,6 @@ char *build_id_cache__kallsyms_path(const char *sbuild_id, char *bf,
                                    size_t size);
 
 char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
-bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size);
 
 int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
                           struct perf_sample *sample, struct perf_evsel *evsel,
index a96a99d2369f800634025bcdfa9838d1d6bc9d97..4e7ab611377acd56c1be78cbd983058e5ce5142b 100644 (file)
@@ -248,6 +248,64 @@ bool dso__needs_decompress(struct dso *dso)
                dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
 }
 
+static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf)
+{
+       int fd = -1;
+       struct kmod_path m;
+
+       if (!dso__needs_decompress(dso))
+               return -1;
+
+       if (kmod_path__parse_ext(&m, dso->long_name))
+               return -1;
+
+       if (!m.comp)
+               goto out;
+
+       fd = mkstemp(tmpbuf);
+       if (fd < 0) {
+               dso->load_errno = errno;
+               goto out;
+       }
+
+       if (!decompress_to_file(m.ext, name, fd)) {
+               dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
+               close(fd);
+               fd = -1;
+       }
+
+out:
+       free(m.ext);
+       return fd;
+}
+
+int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
+{
+       char tmpbuf[] = KMOD_DECOMP_NAME;
+       int fd;
+
+       fd = decompress_kmodule(dso, name, tmpbuf);
+       unlink(tmpbuf);
+       return fd;
+}
+
+int dso__decompress_kmodule_path(struct dso *dso, const char *name,
+                                char *pathname, size_t len)
+{
+       char tmpbuf[] = KMOD_DECOMP_NAME;
+       int fd;
+
+       fd = decompress_kmodule(dso, name, tmpbuf);
+       if (fd < 0) {
+               unlink(tmpbuf);
+               return -1;
+       }
+
+       strncpy(pathname, tmpbuf, len);
+       close(fd);
+       return 0;
+}
+
 /*
  * Parses kernel module specified in @path and updates
  * @m argument like:
@@ -335,6 +393,21 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
        return 0;
 }
 
+void dso__set_module_info(struct dso *dso, struct kmod_path *m,
+                         struct machine *machine)
+{
+       if (machine__is_host(machine))
+               dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
+       else
+               dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
+
+       /* _KMODULE_COMP should be next to _KMODULE */
+       if (m->kmod && m->comp)
+               dso->symtab_type++;
+
+       dso__set_short_name(dso, strdup(m->name), true);
+}
+
 /*
  * Global list of open DSOs and the counter.
  */
@@ -381,7 +454,7 @@ static int do_open(char *name)
 
 static int __open_dso(struct dso *dso, struct machine *machine)
 {
-       int fd;
+       int fd = -EINVAL;
        char *root_dir = (char *)"";
        char *name = malloc(PATH_MAX);
 
@@ -392,15 +465,30 @@ static int __open_dso(struct dso *dso, struct machine *machine)
                root_dir = machine->root_dir;
 
        if (dso__read_binary_type_filename(dso, dso->binary_type,
-                                           root_dir, name, PATH_MAX)) {
-               free(name);
-               return -EINVAL;
-       }
+                                           root_dir, name, PATH_MAX))
+               goto out;
 
        if (!is_regular_file(name))
-               return -EINVAL;
+               goto out;
+
+       if (dso__needs_decompress(dso)) {
+               char newpath[KMOD_DECOMP_LEN];
+               size_t len = sizeof(newpath);
+
+               if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
+                       fd = -dso->load_errno;
+                       goto out;
+               }
+
+               strcpy(name, newpath);
+       }
 
        fd = do_open(name);
+
+       if (dso__needs_decompress(dso))
+               unlink(name);
+
+out:
        free(name);
        return fd;
 }
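
The helpers added to dso.c, and their caller in __open_dso() above, share one lifecycle: copy the fixed mkstemp() template into a writable array, decompress the module into the resulting temporary file, let the consumer open or exec against it, then unlink it so nothing lingers in /tmp. A self-contained sketch of that pattern using plain libc; DECOMP_NAME, fill_file() and decompress_to_path() are invented stand-ins, and the "decompression" here is just a placeholder write.

----
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define DECOMP_NAME "/tmp/demo-kmod-XXXXXX"     /* stand-in for KMOD_DECOMP_NAME */

/* Placeholder for the real decompression step. */
static int fill_file(int fd)
{
        static const char payload[] = "fake module contents\n";

        if (write(fd, payload, sizeof(payload) - 1) < 0)
                return -1;
        return 0;
}

static int decompress_to_path(char *pathname, size_t len)
{
        char tmpbuf[] = DECOMP_NAME;            /* mkstemp() rewrites the XXXXXX part */
        int fd = mkstemp(tmpbuf);

        if (fd < 0)
                return -1;

        if (fill_file(fd) < 0) {
                close(fd);
                unlink(tmpbuf);
                return -1;
        }

        close(fd);
        snprintf(pathname, len, "%s", tmpbuf);
        return 0;
}

int main(void)
{
        char path[sizeof(DECOMP_NAME)];

        if (decompress_to_path(path, sizeof(path)) < 0)
                return 1;

        printf("consumer would run objdump on, or open, %s\n", path);
        unlink(path);                           /* caller cleans up, as in __open_dso() */
        return 0;
}
----
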
index 12350b17172730adf0dffdf324cb9e0b4a4be2ba..bd061ba7b47cc8eab2ff05e4027dc22990d7c7ef 100644 (file)
@@ -244,6 +244,12 @@ bool is_supported_compression(const char *ext);
 bool is_kernel_module(const char *pathname, int cpumode);
 bool decompress_to_file(const char *ext, const char *filename, int output_fd);
 bool dso__needs_decompress(struct dso *dso);
+int dso__decompress_kmodule_fd(struct dso *dso, const char *name);
+int dso__decompress_kmodule_path(struct dso *dso, const char *name,
+                                char *pathname, size_t len);
+
+#define KMOD_DECOMP_NAME  "/tmp/perf-kmod-XXXXXX"
+#define KMOD_DECOMP_LEN   sizeof(KMOD_DECOMP_NAME)
 
 struct kmod_path {
        char *name;
@@ -259,6 +265,9 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
 #define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true , false)
 #define kmod_path__parse_ext(__m, __p)  __kmod_path__parse(__m, __p, false, true)
 
+void dso__set_module_info(struct dso *dso, struct kmod_path *m,
+                         struct machine *machine);
+
 /*
  * The dso__data_* external interface provides following functions:
  *   dso__data_get_fd
index 314a07151fb772377752dae62658b79ffdc87cd6..5cac8d5e009a88ff096d9e2f8026e39e8567c595 100644 (file)
@@ -1469,8 +1469,16 @@ static int __event_process_build_id(struct build_id_event *bev,
 
                dso__set_build_id(dso, &bev->build_id);
 
-               if (!is_kernel_module(filename, cpumode))
-                       dso->kernel = dso_type;
+               if (dso_type != DSO_TYPE_USER) {
+                       struct kmod_path m = { .name = NULL, };
+
+                       if (!kmod_path__parse_name(&m, filename) && m.kmod)
+                               dso__set_module_info(dso, &m, machine);
+                       else
+                               dso->kernel = dso_type;
+
+                       free(m.name);
+               }
 
                build_id__sprintf(dso->build_id, sizeof(dso->build_id),
                                  sbuild_id);
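
The build-id processing hunk now treats a non-user DSO as a kernel module when kmod_path__parse_name() recognizes the file name, and only otherwise marks it as a plain kernel DSO. A rough standalone approximation of that file-name test; looks_like_kmod() is invented and deliberately ignores the bracketed short name and compression-extension handling the real parser performs.

----
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Very rough approximation of the m.kmod decision: the name is a
 * module if it ends in ".ko" or ".ko.<compression suffix>". */
static bool looks_like_kmod(const char *filename)
{
        const char *base = strrchr(filename, '/');
        const char *ko;

        base = base ? base + 1 : filename;
        ko = strstr(base, ".ko");
        if (!ko)
                return false;

        return ko[3] == '\0' || ko[3] == '.';   /* ".ko" or ".ko.gz", ".ko.xz", ... */
}

int main(void)
{
        const char *names[] = {
                "/lib/modules/4.12.0/kernel/net/ipv4/netfilter/nf_nat_ipv4.ko",
                "/lib/modules/4.12.0/kernel/fs/xfs/xfs.ko.gz",
                "/lib/modules/4.12.0/vmlinux",
        };

        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                printf("%-60s -> %s\n", names[i],
                       looks_like_kmod(names[i]) ? "module" : "not a module");
        return 0;
}
----
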
index d97e014c3df395e51e61da31f9624e12927a43b2..d7f31cb0a4cbeb41e6c02d58323a8848ff82cfc4 100644 (file)
@@ -572,16 +572,7 @@ static struct dso *machine__findnew_module_dso(struct machine *machine,
                if (dso == NULL)
                        goto out_unlock;
 
-               if (machine__is_host(machine))
-                       dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
-               else
-                       dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
-
-               /* _KMODULE_COMP should be next to _KMODULE */
-               if (m->kmod && m->comp)
-                       dso->symtab_type++;
-
-               dso__set_short_name(dso, strdup(m->name), true);
+               dso__set_module_info(dso, m, machine);
                dso__set_long_name(dso, strdup(filename), true);
        }
 
index 9d92af7d07182e662b1a6d7ad5e66c0147a78234..40de3cb40d2100fe918f26795ae29ae702d62665 100644 (file)
@@ -1219,7 +1219,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
        fprintf(ofp, "# be retrieved using Python functions of the form "
                "common_*(context).\n");
 
-       fprintf(ofp, "# See the perf-trace-python Documentation for the list "
+       fprintf(ofp, "# See the perf-script-python Documentation for the list "
                "of available functions.\n\n");
 
        fprintf(ofp, "import os\n");
index e7ee47f7377ab17bbb414be4240cf01565ae863a..502505cf236af30226b22ff82878098060f19862 100644 (file)
@@ -637,43 +637,6 @@ static int dso__swap_init(struct dso *dso, unsigned char eidata)
        return 0;
 }
 
-static int decompress_kmodule(struct dso *dso, const char *name,
-                             enum dso_binary_type type)
-{
-       int fd = -1;
-       char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
-       struct kmod_path m;
-
-       if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
-           type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
-           type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
-               return -1;
-
-       if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
-               name = dso->long_name;
-
-       if (kmod_path__parse_ext(&m, name) || !m.comp)
-               return -1;
-
-       fd = mkstemp(tmpbuf);
-       if (fd < 0) {
-               dso->load_errno = errno;
-               goto out;
-       }
-
-       if (!decompress_to_file(m.ext, name, fd)) {
-               dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
-               close(fd);
-               fd = -1;
-       }
-
-       unlink(tmpbuf);
-
-out:
-       free(m.ext);
-       return fd;
-}
-
 bool symsrc__possibly_runtime(struct symsrc *ss)
 {
        return ss->dynsym || ss->opdsec;
@@ -705,9 +668,11 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
        int fd;
 
        if (dso__needs_decompress(dso)) {
-               fd = decompress_kmodule(dso, name, type);
+               fd = dso__decompress_kmodule_fd(dso, name);
                if (fd < 0)
                        return -1;
+
+               type = dso->symtab_type;
        } else {
                fd = open(name, O_RDONLY);
                if (fd < 0) {
index 8f2b068ff7564900c989d1cdec96757c1b3ab45b..e7a98dbd2aed9133d99f38b71b3cac9fb297e0d6 100644 (file)
@@ -1562,10 +1562,6 @@ int dso__load(struct dso *dso, struct map *map)
        if (!runtime_ss && syms_ss)
                runtime_ss = syms_ss;
 
-       if (syms_ss && syms_ss->type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
-               if (dso__build_id_is_kmod(dso, name, PATH_MAX))
-                       kmod = true;
-
        if (syms_ss)
                ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
        else
index 943a06291587b064c3649569dd85b356805a7fab..da45c4be5fb3e77ee59131602667b4d675bc3a40 100644 (file)
@@ -39,6 +39,14 @@ static int __report_module(struct addr_location *al, u64 ip,
                return 0;
 
        mod = dwfl_addrmodule(ui->dwfl, ip);
+       if (mod) {
+               Dwarf_Addr s;
+
+               dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
+               if (s != al->map->start)
+                       mod = 0;
+       }
+
        if (!mod)
                mod = dwfl_report_elf(ui->dwfl, dso->short_name,
                                      dso->long_name, -1, al->map->start,
@@ -224,7 +232,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 
        err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui);
 
-       if (err && !ui->max_stack)
+       if (err && ui->max_stack != max_stack)
                err = 0;
 
        /*