Merge branches 'arm/omap', 'arm/msm', 'arm/smmu', 'arm/tegra', 'x86/vt-d', 'x86/amd...
author Joerg Roedel <jroedel@suse.de>
Tue, 25 Aug 2015 09:39:50 +0000 (11:39 +0200)
committer Joerg Roedel <jroedel@suse.de>
Tue, 25 Aug 2015 09:39:50 +0000 (11:39 +0200)
290 files changed:
.get_maintainer.ignore [new file with mode: 0644]
.mailmap
Documentation/Intel-IOMMU.txt
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/iommu/arm,smmu.txt
Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
MAINTAINERS
Makefile
arch/arm/Makefile
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/k2e.dtsi
arch/arm/boot/dts/k2hk.dtsi
arch/arm/boot/dts/k2l.dtsi
arch/arm/boot/dts/keystone.dtsi
arch/arm/boot/dts/omap2430.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/ste-dbx5x0.dtsi
arch/arm/kernel/entry-common.S
arch/arm/kernel/head.S
arch/arm/kernel/vdso.c
arch/arm/lib/uaccess_with_memcpy.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-omap2/omap-wakeupgen.c
arch/arm/vdso/Makefile
arch/arm64/kernel/vdso.c
arch/mips/kernel/genex.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/x86/entry/entry_64_compat.S
arch/x86/include/asm/sigcontext.h
arch/x86/include/asm/switch_to.h
arch/x86/include/uapi/asm/sigcontext.h
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_cqm.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/init.c
arch/x86/kernel/process.c
arch/x86/kernel/signal.c
arch/x86/kernel/step.c
arch/x86/kvm/x86.c
arch/x86/math-emu/fpu_entry.c
arch/x86/math-emu/fpu_system.h
arch/x86/math-emu/get_address.c
arch/x86/xen/Kconfig
arch/x86/xen/Makefile
arch/x86/xen/xen-ops.h
block/blk-settings.c
crypto/authencesn.c
drivers/acpi/video_detect.c
drivers/ata/ahci_brcmstb.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata.h
drivers/ata/sata_sx4.c
drivers/base/regmap/regcache-rbtree.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkfront.c
drivers/block/zram/zram_drv.c
drivers/clk/pxa/clk-pxa3xx.c
drivers/clocksource/sh_cmt.c
drivers/clocksource/timer-imx-gpt.c
drivers/cpufreq/exynos-cpufreq.c
drivers/crypto/caam/caamhash.c
drivers/crypto/nx/nx-sha256.c
drivers/crypto/nx/nx-sha512.c
drivers/dma/dmaengine.c
drivers/edac/ppc4xx_edac.c
drivers/firmware/broadcom/bcm47xx_nvram.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/hid/hid-input.c
drivers/hid/hid-uclogic.c
drivers/hid/wacom_sys.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/input/keyboard/gpio_keys_polled.c
drivers/iommu/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_v2.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/dmar.c
drivers/iommu/fsl_pamu.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/io-pgtable.c
drivers/iommu/io-pgtable.h
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/irq_remapping.c
drivers/iommu/of_iommu.c
drivers/iommu/omap-iommu-debug.c
drivers/iommu/omap-iommu.c
drivers/iommu/omap-iommu.h
drivers/iommu/omap-iopgtable.h
drivers/iommu/tegra-smmu.c
drivers/irqchip/irq-crossbar.c
drivers/md/dm-cache-policy-mq.c
drivers/md/dm-cache-policy-smq.c
drivers/md/dm-thin-metadata.c
drivers/md/persistent-data/dm-btree-internal.h
drivers/md/persistent-data/dm-btree-remove.c
drivers/md/persistent-data/dm-btree-spine.c
drivers/md/persistent-data/dm-btree.c
drivers/media/dvb-frontends/Kconfig
drivers/media/pci/cobalt/Kconfig
drivers/media/pci/cobalt/cobalt-irq.c
drivers/media/pci/mantis/mantis_dma.c
drivers/media/rc/ir-rc5-decoder.c
drivers/media/rc/ir-rc6-decoder.c
drivers/media/rc/nuvoton-cir.c
drivers/media/rc/nuvoton-cir.h
drivers/media/rc/rc-core-priv.h
drivers/media/rc/rc-ir-raw.c
drivers/media/rc/rc-loopback.c
drivers/media/rc/rc-main.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/memory/omap-gpmc.c
drivers/memory/tegra/tegra114.c
drivers/memory/tegra/tegra124.c
drivers/memory/tegra/tegra30.c
drivers/mfd/Kconfig
drivers/mfd/arizona-core.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/fs_enet/mac-fec.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/micrel/ks8842.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
drivers/net/ethernet/ti/netcp.h
drivers/net/ethernet/ti/netcp_core.c
drivers/net/hamradio/mkiss.c
drivers/net/ntb_netdev.c
drivers/net/phy/phy.c
drivers/net/phy/smsc.c
drivers/net/ppp/ppp_generic.c
drivers/net/usb/qmi_wwan.c
drivers/net/virtio_net.c
drivers/net/wan/cosa.c
drivers/net/wireless/b43/tables_nphy.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
drivers/net/wireless/rsi/rsi_91x_usb_ops.c
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/rtl8723be/sw.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/ntb/ntb.c
drivers/ntb/ntb_transport.c
drivers/pci/Kconfig
drivers/pci/probe.c
drivers/platform/chrome/Kconfig
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libiscsi.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_pm.c
drivers/scsi/sd.c
drivers/target/iscsi/iscsi_target.c
drivers/target/target_core_configfs.c
drivers/target/target_core_hba.c
drivers/target/target_core_spc.c
drivers/thermal/cpu_cooling.c
drivers/thermal/power_allocator.c
drivers/video/console/fbcon.c
drivers/video/fbdev/Kconfig
drivers/video/fbdev/omap2/dss/dss-of.c
drivers/video/fbdev/pxa3xx-gcu.c
drivers/video/of_videomode.c
drivers/xen/events/events_base.c
drivers/xen/events/events_fifo.c
drivers/xen/events/events_internal.h
drivers/xen/xenbus/xenbus_client.c
fs/fuse/dev.c
fs/namei.c
include/drm/drm_crtc.h
include/drm/drm_edid.h
include/drm/drm_pciids.h
include/linux/ata.h
include/linux/intel-iommu.h
include/linux/irq.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/skbuff.h
include/media/rc-core.h
include/media/videobuf2-core.h
include/scsi/scsi_eh.h
include/soc/tegra/mc.h
include/sound/soc-topology.h
include/uapi/sound/asoc.h
ipc/sem.c
kernel/cpuset.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/irq/chip.c
kernel/locking/qspinlock_paravirt.h
kernel/time/timer.c
mm/cma.h
mm/kasan/kasan.c
mm/kasan/report.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/page_alloc.c
mm/slab.c
mm/slub.c
net/9p/client.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/gateway_client.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/bluetooth/mgmt.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/core/datagram.c
net/core/pktgen.c
net/core/request_sock.c
net/core/skbuff.c
net/dsa/slave.c
net/ipv4/fib_trie.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv6/ip6_fib.c
net/ipv6/mcast_snoop.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/mac80211/rc80211_minstrel.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_synproxy_core.c
net/netfilter/xt_CT.c
net/netlink/af_netlink.c
net/openvswitch/actions.c
net/rds/info.c
net/sched/act_mirred.c
net/sched/sch_fq_codel.c
scripts/kconfig/streamline_config.pl
sound/pci/hda/patch_realtek.c
sound/soc/Kconfig
sound/soc/Makefile
sound/usb/card.c
tools/perf/builtin-record.c
tools/perf/builtin-top.c
tools/perf/config/Makefile
tools/perf/util/machine.c
tools/perf/util/stat-shadow.c
tools/perf/util/thread.c

diff --git a/.get_maintainer.ignore b/.get_maintainer.ignore
new file mode 100644
index 0000000..cca6d87
--- /dev/null
+++ b/.get_maintainer.ignore
@@ -0,0 +1 @@
+Christoph Hellwig <hch@lst.de>
index b4091b7a78fe11ccd0e5f44f0703ace69dc09707..4b31af54ccd5864359c0810f9733f3026181a631 100644
--- a/.mailmap
+++ b/.mailmap
@@ -17,6 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
 Andreas Herrmann <aherrman@de.ibm.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
index cf9431db873150d38a3d2477280d481ff42bfa83..7b57fc087088f49756eeb8eaabf403bfbbd92b93 100644
--- a/Documentation/Intel-IOMMU.txt
+++ b/Documentation/Intel-IOMMU.txt
@@ -10,7 +10,7 @@ This guide gives a quick cheat sheet for some basic understanding.
 Some Keywords
 
 DMAR - DMA remapping
-DRHD - DMA Engine Reporting Structure
+DRHD - DMA Remapping Hardware Unit Definition
 RMRR - Reserved memory Region Reporting Structure
 ZLR  - Zero length reads from PCI devices
 IOVA - IO Virtual address.
index d6b794cef0b8b9907ab5a055a6502180b4350148..91e6e5c478d006245c5a88e7ae7e304d6fa7f097 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -199,6 +199,7 @@ nodes to be present and contain the properties described below.
                            "qcom,kpss-acc-v1"
                            "qcom,kpss-acc-v2"
                            "rockchip,rk3066-smp"
+                           "ste,dbx500-smp"
 
        - cpu-release-addr
                Usage: required for systems that have an "enable-method"
index 06760503a819f5fbc15e747626ce3f734b2888ff..718074501fcbc97b5c7b5c33cab2736e433f5220 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -43,6 +43,12 @@ conditions.
 
 ** System MMU optional properties:
 
+- dma-coherent  : Present if page table walks made by the SMMU are
+                  cache coherent with the CPU.
+
+                  NOTE: this only applies to the SMMU itself, not
+                  masters connected upstream of the SMMU.
+
 - calxeda,smmu-secure-config-access : Enable proper handling of buggy
                   implementations that always use secure access to
                   SMMU configuration registers. In this case non-secure
index 42531dc387aa6babaed955c1a821bf6a3eb77b84..869699925fd599e3d34c39155dfbb027d2262dca 100644
--- a/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
@@ -8,6 +8,11 @@ Required properties:
 - ti,hwmods  : Name of the hwmod associated with the IOMMU instance
 - reg        : Address space for the configuration registers
 - interrupts : Interrupt specifier for the IOMMU instance
+- #iommu-cells : Should be 0. OMAP IOMMUs are all "single-master" devices,
+                 and needs no additional data in the pargs specifier. Please
+                 also refer to the generic bindings document for more info
+                 on this property,
+                     Documentation/devicetree/bindings/iommu/iommu.txt
 
 Optional properties:
 - ti,#tlb-entries : Number of entries in the translation look-aside buffer.
@@ -18,6 +23,7 @@ Optional properties:
 Example:
        /* OMAP3 ISP MMU */
        mmu_isp: mmu@480bd400 {
+               #iommu-cells = <0>;
                compatible = "ti,omap2-iommu";
                reg = <0x480bd400 0x80>;
                interrupts = <24>;
index a9ae6c105520011994801168a7841b4d713b716e..569568f6644f2092211b7bb2690c7defe49977dd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3587,6 +3587,15 @@ S:       Maintained
 F:     drivers/gpu/drm/rockchip/
 F:     Documentation/devicetree/bindings/video/rockchip*
 
+DRM DRIVERS FOR STI
+M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M:     Vincent Abriou <vincent.abriou@st.com>
+L:     dri-devel@lists.freedesktop.org
+T:     git http://git.linaro.org/people/benjamin.gaignard/kernel.git
+S:     Maintained
+F:     drivers/gpu/drm/sti
+F:     Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+
 DSBR100 USB FM RADIO DRIVER
 M:     Alexey Klimov <klimov.linux@gmail.com>
 L:     linux-media@vger.kernel.org
index 35b4c196c171306f8ce66791eb6f91f57fbbb215..246053f04fb5cecf72e477cd94257e052955d21f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc8
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
index 07ab3d203916732337f909ec5f903db4c7bb1294..7451b447cc2d2cb8cc68a9bf59f125f2dd2ce347 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -312,6 +312,9 @@ INSTALL_TARGETS     = zinstall uinstall install
 
 PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
 
+bootpImage uImage: zImage
+zImage: Image
+
 $(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
index 4a0718ccf68e3fe6c262c81596e35c0ec2bfd697..1e29ccf77ea24f56fd16f8960d2b585ccc7ce9fc 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
                                ranges = <0 0x2000 0x2000>;
 
                                scm_conf: scm_conf@0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon", "simple-bus";
                                        reg = <0x0 0x1400>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index e6d13592080d7c701056c2f6a73326aa11e715b5..b57033e8c633187a5f52c367a788f46196967fdc 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
                        interrupt-names = "msi";
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
-                       interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
                                 <&clks IMX6QDL_CLK_LVDS1_GATE>,
                                 <&clks IMX6QDL_CLK_PCIE_REF_125M>;
index 1b6494fbdb91b9301c607652efbd9a9fb34a9f36..675fb8e492c6aa0478a6d5df01b30fbe1e281b7d 100644
--- a/arch/arm/boot/dts/k2e.dtsi
+++ b/arch/arm/boot/dts/k2e.dtsi
                                        <GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
                        };
                };
+
+               mdio: mdio@24200f00 {
+                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x24200f00 0x100>;
+                       status = "disabled";
+                       clocks = <&clkcpgmac>;
+                       clock-names = "fck";
+                       bus_freq        = <2500000>;
+               };
                /include/ "k2e-netcp.dtsi"
        };
 };
-
-&mdio {
-       reg = <0x24200f00 0x100>;
-};
index ae6472407b2277012096d733bb80951592555d03..d0810a5f296857394397c7f1c60bffa0011bb6e1 100644
--- a/arch/arm/boot/dts/k2hk.dtsi
+++ b/arch/arm/boot/dts/k2hk.dtsi
                        #gpio-cells = <2>;
                        gpio,syscon-dev = <&devctrl 0x25c>;
                };
+
+               mdio: mdio@02090300 {
+                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x02090300 0x100>;
+                       status = "disabled";
+                       clocks = <&clkcpgmac>;
+                       clock-names = "fck";
+                       bus_freq        = <2500000>;
+               };
                /include/ "k2hk-netcp.dtsi"
        };
 };
index 0e007483615e4f097bb747a2d882b2e2d3a030aa..49fd414f680c93ab50cf0dae72d2e9261181da21 100644
--- a/arch/arm/boot/dts/k2l.dtsi
+++ b/arch/arm/boot/dts/k2l.dtsi
@@ -29,7 +29,6 @@
        };
 
        soc {
-
                /include/ "k2l-clocks.dtsi"
 
                uart2: serial@02348400 {
                        #gpio-cells = <2>;
                        gpio,syscon-dev = <&devctrl 0x24c>;
                };
+
+               mdio: mdio@26200f00 {
+                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x26200f00 0x100>;
+                       status = "disabled";
+                       clocks = <&clkcpgmac>;
+                       clock-names = "fck";
+                       bus_freq        = <2500000>;
+               };
                /include/ "k2l-netcp.dtsi"
        };
 };
        /* Pin muxed. Enabled and configured by Bootloader */
        status = "disabled";
 };
-
-&mdio {
-       reg = <0x26200f00 0x100>;
-};
index e7a6f6deabb6c0d89d4ca1e2c2ae63639249d010..72816d65f7ec3fcf5d7c47ce792ae57db369754b 100644
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
                                  1 0 0x21000A00 0x00000100>;
                };
 
-               mdio: mdio@02090300 {
-                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       reg             = <0x02090300 0x100>;
-                       status = "disabled";
-                       clocks = <&clkpa>;
-                       clock-names = "fck";
-                       bus_freq        = <2500000>;
-               };
-
                kirq0: keystone_irq@26202a0 {
                        compatible = "ti,keystone-irq";
                        interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
index 11a7963be0035a002fa77c2bec6809b34444e584..2390f387c27163bb76e918bb73e26966bee7fb48 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -51,7 +51,8 @@
                                };
 
                                scm_conf: scm_conf@270 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x270 0x240>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index 7d31c6ff246f47b14afd5eeb332d01a955faef35..abc4473e6f8a17e51d5e66416be089ab24d7b472 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
                                };
 
                                omap4_padconf_global: omap4_padconf_global@5a0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x5a0 0x170>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index c8fd648a7108515def0e9492936fd3760f156579..b1a1263e600168291091a963f9f56aefc87fd59e 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
                                };
 
                                omap5_padconf_global: omap5_padconf_global@5a0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x5a0 0xec>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index a75f3289e653ab2973e2d7dd1cb12c8a12724451..b8f81fb418ce60039ad4e8e04f2892ca34d26bc8 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
 #include "skeleton.dtsi"
 
 / {
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               enable-method = "ste,dbx500-smp";
+
+               cpu-map {
+                       cluster0 {
+                               core0 {
+                                       cpu = <&CPU0>;
+                               };
+                               core1 {
+                                       cpu = <&CPU1>;
+                               };
+                       };
+               };
+               CPU0: cpu@300 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a9";
+                       reg = <0x300>;
+               };
+               CPU1: cpu@301 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a9";
+                       reg = <0x301>;
+               };
+       };
+
        soc {
                #address-cells = <1>;
                #size-cells = <1>;
                interrupt-parent = <&intc>;
                ranges;
 
-               cpus {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-
-                       cpu-map {
-                               cluster0 {
-                                       core0 {
-                                               cpu = <&CPU0>;
-                                       };
-                                       core1 {
-                                               cpu = <&CPU1>;
-                                       };
-                               };
-                       };
-                       CPU0: cpu@0 {
-                               device_type = "cpu";
-                               compatible = "arm,cortex-a9";
-                               reg = <0>;
-                       };
-                       CPU1: cpu@1 {
-                               device_type = "cpu";
-                               compatible = "arm,cortex-a9";
-                               reg = <1>;
-                       };
-               };
-
                ptm@801ae000 {
                        compatible = "arm,coresight-etm3x", "arm,primecell";
                        reg = <0x801ae000 0x1000>;
index 92828a1dec80c1c33d051d9b76063727598495d5..b48dd4f37f8067e781ee3e135ed7aff27940371f 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -61,6 +61,7 @@ work_pending:
        movlt   scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
        ldmia   sp, {r0 - r6}                   @ have to reload r0 - r6
        b       local_restart                   @ ... and off we go
+ENDPROC(ret_fast_syscall)
 
 /*
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
index bd755d97e459d77ff05cc8a1264f336c58c1b598..29e2991465cb27b579f729deec65e2293a0a04b5 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
        sub     lr, r4, r5                      @ mmu has been enabled
        add     r3, r7, lr
        ldrd    r4, [r3, #0]                    @ get secondary_data.pgdir
+ARM_BE8(eor    r4, r4, r5)                     @ Swap r5 and r4 in BE:
+ARM_BE8(eor    r5, r4, r5)                     @ it can be done in 3 steps
+ARM_BE8(eor    r4, r4, r5)                     @ without using a temp reg.
        ldr     r8, [r3, #8]                    @ get secondary_data.swapper_pg_dir
        badr    lr, __enable_mmu                @ return address
        mov     r13, r12                        @ __secondary_switched address
index efe17dd9b9218b7ef16299700a0f2a6d74ca61c1..54a5aeab988d3526657b8e3089942ca8cfe4fe5e 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
  */
 void update_vsyscall(struct timekeeper *tk)
 {
-       struct timespec xtime_coarse;
        struct timespec64 *wtm = &tk->wall_to_monotonic;
 
        if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
 
        vdso_write_begin(vdso_data);
 
-       xtime_coarse = __current_kernel_time();
        vdso_data->tk_is_cntvct                 = tk_is_cntvct(tk);
-       vdso_data->xtime_coarse_sec             = xtime_coarse.tv_sec;
-       vdso_data->xtime_coarse_nsec            = xtime_coarse.tv_nsec;
+       vdso_data->xtime_coarse_sec             = tk->xtime_sec;
+       vdso_data->xtime_coarse_nsec            = (u32)(tk->tkr_mono.xtime_nsec >>
+                                                       tk->tkr_mono.shift);
        vdso_data->wtm_clock_sec                = wtm->tv_sec;
        vdso_data->wtm_clock_nsec               = wtm->tv_nsec;
 
index 3e58d710013c3ad9b377fc76e6dad58f377e88a7..4b39af2dfda9963345afe18c89131c1056a90b41 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
        }
 
        /* the mmap semaphore is taken only if not in an atomic context */
-       atomic = in_atomic();
+       atomic = faulthandler_disabled();
 
        if (!atomic)
                down_read(&current->mm->mmap_sem);
index 6001f1c9d136f45fabd7d61e97638855d0beb46a..4a87e86dec45d1546153ca0ebb7310bbd5f82d93 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void)
                pd->base = of_iomap(np, 0);
                if (!pd->base) {
                        pr_warn("%s: failed to map memory\n", __func__);
-                       kfree(pd->pd.name);
+                       kfree_const(pd->pd.name);
                        kfree(pd);
-                       of_node_put(np);
                        continue;
                }
 
index 8e52621b5a6bf3ab42ddef8a5c3db79c3391fcf1..e1d2e991d17a31fc15f1c44616c9b4b7f9de3a84 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
        .irq_mask               = wakeupgen_mask,
        .irq_unmask             = wakeupgen_unmask,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
+       .irq_set_type           = irq_chip_set_type_parent,
        .flags                  = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
index 9d259d94e429c4cc493542ad4cf238a513b13743..1160434eece0509c3797733b49e8fcb1262e42e7 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 VDSO_LDFLAGS += -nostdlib -shared
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
 
 obj-$(CONFIG_VDSO) += vdso.o
 extra-$(CONFIG_VDSO) += vdso.lds
index ec37ab3f524f303419d2cc3a82b79c119e61de1d..97bc68f4c689f28eac7188f5e0b792b5293c37da 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -199,16 +199,15 @@ up_fail:
  */
 void update_vsyscall(struct timekeeper *tk)
 {
-       struct timespec xtime_coarse;
        u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
 
        ++vdso_data->tb_seq_count;
        smp_wmb();
 
-       xtime_coarse = __current_kernel_time();
        vdso_data->use_syscall                  = use_syscall;
-       vdso_data->xtime_coarse_sec             = xtime_coarse.tv_sec;
-       vdso_data->xtime_coarse_nsec            = xtime_coarse.tv_nsec;
+       vdso_data->xtime_coarse_sec             = tk->xtime_sec;
+       vdso_data->xtime_coarse_nsec            = tk->tkr_mono.xtime_nsec >>
+                                                       tk->tkr_mono.shift;
        vdso_data->wtm_clock_sec                = tk->wall_to_monotonic.tv_sec;
        vdso_data->wtm_clock_nsec               = tk->wall_to_monotonic.tv_nsec;
 
index af42e7003f12d025cd31e2a5d167f2f4b158d37a..baa7b6fc0a60b1879976c2d4158f73d01e0ca53b 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
        .set    noat
        SAVE_ALL
        FEXPORT(handle_\exception\ext)
-       __BUILD_clear_\clear
+       __build_clear_\clear
        .set    at
        __BUILD_\verbose \exception
        move    a0, sp
index ad4d44635c7601162ca0dd8f1b626df28eeeafb2..a6f6b762c47a4c5a2d395e13a1d564964595abe1 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -80,7 +80,7 @@ syscall_trace_entry:
        SAVE_STATIC
        move    s0, t2
        move    a0, sp
-       daddiu  a1, v0, __NR_64_Linux
+       move    a1, v0
        jal     syscall_trace_enter
 
        bltz    v0, 2f                  # seccomp failed? Skip syscall
index 446cc654da56c5f5fcaad749242dd98d593776e1..4b2010654c463158b7dee80194de736195c04595 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
        SAVE_STATIC
        move    s0, t2
        move    a0, sp
-       daddiu  a1, v0, __NR_N32_Linux
+       move    a1, v0
        jal     syscall_trace_enter
 
        bltz    v0, 2f                  # seccomp failed? Skip syscall
index 5a1844765a7aba6dab47b878daf6eb723c044c03..a7e257d9cb90b9f34ecb03180fec8c54f2afd82f 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -140,6 +140,7 @@ sysexit_from_sys_call:
         */
        andl    $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        movl    RIP(%rsp), %ecx         /* User %eip */
+       movq    RAX(%rsp), %rax
        RESTORE_RSI_RDI
        xorl    %edx, %edx              /* Do not leak kernel information */
        xorq    %r8, %r8
@@ -219,7 +220,6 @@ sysexit_from_sys_call:
 1:     setbe   %al                     /* 1 if error, 0 if not */
        movzbl  %al, %edi               /* zero-extend that into %edi */
        call    __audit_syscall_exit
-       movq    RAX(%rsp), %rax         /* reload syscall return value */
        movl    $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
@@ -368,6 +368,7 @@ sysretl_from_sys_call:
        RESTORE_RSI_RDI_RDX
        movl    RIP(%rsp), %ecx
        movl    EFLAGS(%rsp), %r11d
+       movq    RAX(%rsp), %rax
        xorq    %r10, %r10
        xorq    %r9, %r9
        xorq    %r8, %r8
index 6fe6b182c9981dd891a9a5bc9a55b3e6591a6f9f..9dfce4e0417d92adc623d32ff93f67109316b451 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -57,9 +57,9 @@ struct sigcontext {
        unsigned long ip;
        unsigned long flags;
        unsigned short cs;
-       unsigned short __pad2;  /* Was called gs, but was always zero. */
-       unsigned short __pad1;  /* Was called fs, but was always zero. */
-       unsigned short ss;
+       unsigned short gs;
+       unsigned short fs;
+       unsigned short __pad0;
        unsigned long err;
        unsigned long trapno;
        unsigned long oldmask;
index 751bf4b7bf114da12231a56f4217c2583ddeafb2..d7f3b3b78ac313ca8a871d3a53b3d118850a657d 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -79,12 +79,12 @@ do {                                                                        \
 #else /* CONFIG_X86_32 */
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
 
 #define __EXTRA_CLOBBER  \
        , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-         "r12", "r13", "r14", "r15", "flags"
+         "r12", "r13", "r14", "r15"
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #define __switch_canary                                                          \
@@ -100,11 +100,7 @@ do {                                                                       \
 #define __switch_canary_iparam
 #endif /* CC_STACKPROTECTOR */
 
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
- * has no effect.
- */
+/* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
        asm volatile(SAVE_CONTEXT                                         \
             "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
index 0e8a973de9ee8aec0c555a5e9e8b23348e2cc10b..40836a9a7250c99a16dc5969057177f66aa57186 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -177,24 +177,9 @@ struct sigcontext {
        __u64 rip;
        __u64 eflags;           /* RFLAGS */
        __u16 cs;
-
-       /*
-        * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
-        * Linux saved and restored fs and gs in these slots.  This
-        * was counterproductive, as fsbase and gsbase were never
-        * saved, so arch_prctl was presumably unreliable.
-        *
-        * If these slots are ever needed for any other purpose, there
-        * is some risk that very old 64-bit binaries could get
-        * confused.  I doubt that many such binaries still work,
-        * though, since the same patch in 2.5.64 also removed the
-        * 64-bit set_thread_area syscall, so it appears that there is
-        * no TLS API that works in both pre- and post-2.5.64 kernels.
-        */
-       __u16 __pad2;           /* Was gs. */
-       __u16 __pad1;           /* Was fs. */
-
-       __u16 ss;
+       __u16 gs;
+       __u16 fs;
+       __u16 __pad0;
        __u64 err;
        __u64 trapno;
        __u64 oldmask;
index f813261d97405710c99cd0982d047a227a4c4fd3..2683f36e4e0a5e67311a7bcdbceea61c17382ec6 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
                irq_data->chip = &lapic_controller;
                irq_data->chip_data = data;
                irq_data->hwirq = virq + i;
-               err = assign_irq_vector_policy(virq, irq_data->node, data,
+               err = assign_irq_vector_policy(virq + i, irq_data->node, data,
                                               info);
                if (err)
                        goto error;
index b9826a981fb20fa45a7c1255e277e9ad1cd5d150..6326ae24e4d5b4f3d228111c10f5c85df0e40d3f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
        if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
                cpuc->shared_regs = allocate_shared_regs(cpu);
                if (!cpuc->shared_regs)
-                       return NOTIFY_BAD;
+                       goto err;
        }
 
        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
 
                cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
                if (!cpuc->constraint_list)
-                       return NOTIFY_BAD;
+                       goto err_shared_regs;
 
                cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
-               if (!cpuc->excl_cntrs) {
-                       kfree(cpuc->constraint_list);
-                       kfree(cpuc->shared_regs);
-                       return NOTIFY_BAD;
-               }
+               if (!cpuc->excl_cntrs)
+                       goto err_constraint_list;
+
                cpuc->excl_thread_id = 0;
        }
 
        return NOTIFY_OK;
+
+err_constraint_list:
+       kfree(cpuc->constraint_list);
+       cpuc->constraint_list = NULL;
+
+err_shared_regs:
+       kfree(cpuc->shared_regs);
+       cpuc->shared_regs = NULL;
+
+err:
+       return NOTIFY_BAD;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
index 63eb68b73589bcbbc21f9c526193adca0de2e52d..377e8f8ed39186ad4ef57b33264592ed8459a037 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
        cpumask_set_cpu(cpu, &cqm_cpumask);
 }
 
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
 {
        struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
        unsigned int cpu  = (unsigned long)hcpu;
 
        switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_UP_PREPARE:
-               intel_cqm_cpu_prepare(cpu);
-               break;
        case CPU_DOWN_PREPARE:
                intel_cqm_cpu_exit(cpu);
                break;
        case CPU_STARTING:
+               intel_cqm_cpu_starting(cpu);
                cqm_pick_event_reader(cpu);
                break;
        }
@@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
                goto out;
 
        for_each_online_cpu(i) {
-               intel_cqm_cpu_prepare(i);
+               intel_cqm_cpu_starting(i);
                cqm_pick_event_reader(i);
        }
 
index 79de954626fd971f1d24553078bed1f199d52267..d25097c3fc1d1af8af35c156f05121f9f4d46a94 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
        dst_fpu->fpregs_active = 0;
        dst_fpu->last_cpu = -1;
 
-       if (src_fpu->fpstate_active)
+       if (src_fpu->fpstate_active && cpu_has_fpu)
                fpu_copy(dst_fpu, src_fpu);
 
        return 0;
index 1e173f6285c73b76b2e6ab41daed7681406c5d15..d14e9ac3235a1ac73174ffb95b2990d5163d2933 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -40,7 +40,12 @@ static void fpu__init_cpu_generic(void)
        write_cr0(cr0);
 
        /* Flush out any pending x87 state: */
-       asm volatile ("fninit");
+#ifdef CONFIG_MATH_EMULATION
+       if (!cpu_has_fpu)
+               fpstate_init_soft(&current->thread.fpu.state.soft);
+       else
+#endif
+               asm volatile ("fninit");
 }
 
 /*
index 397688beed4be5ce7d9445d7847d44613d2d84b5..c27cad7267655c3794972344adf0b7924e38c138 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 static void mwait_idle(void)
 {
        if (!current_set_polling_and_test()) {
+               trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
                        smp_mb(); /* quirk */
                        clflush((void *)&current_thread_info()->flags);
@@ -419,6 +420,7 @@ static void mwait_idle(void)
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
+               trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
        }
index 206996c1669db344aba7ff072f734552723e7938..71820c42b6ce6bc1020bbc44277967d7e23f011e 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                COPY(r15);
 #endif /* CONFIG_X86_64 */
 
+#ifdef CONFIG_X86_32
                COPY_SEG_CPL3(cs);
                COPY_SEG_CPL3(ss);
+#else /* !CONFIG_X86_32 */
+               /* Kernel saves and restores only the CS segment register on signals,
+                * which is the bare minimum needed to allow mixed 32/64-bit code.
+                * App's signal handler can save/restore other segments if needed. */
+               COPY_SEG_CPL3(cs);
+#endif /* CONFIG_X86_32 */
 
                get_user_ex(tmpflags, &sc->flags);
                regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 #else /* !CONFIG_X86_32 */
                put_user_ex(regs->flags, &sc->flags);
                put_user_ex(regs->cs, &sc->cs);
-               put_user_ex(0, &sc->__pad2);
-               put_user_ex(0, &sc->__pad1);
-               put_user_ex(regs->ss, &sc->ss);
+               put_user_ex(0, &sc->gs);
+               put_user_ex(0, &sc->fs);
 #endif /* CONFIG_X86_32 */
 
                put_user_ex(fpstate, &sc->fpstate);
@@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 
        regs->sp = (unsigned long)frame;
 
-       /*
-        * Set up the CS and SS registers to run signal handlers in
-        * 64-bit mode, even if the handler happens to be interrupting
-        * 32-bit or 16-bit code.
-        *
-        * SS is subtle.  In 64-bit mode, we don't need any particular
-        * SS descriptor, but we do need SS to be valid.  It's possible
-        * that the old SS is entirely bogus -- this can happen if the
-        * signal we're trying to deliver is #GP or #SS caused by a bad
-        * SS value.
-        */
+       /* Set up the CS register to run signal handlers in 64-bit mode,
+          even if the handler happens to be interrupting 32-bit code. */
        regs->cs = __USER_CS;
-       regs->ss = __USER_DS;
 
        return 0;
 }
index 6273324186ac5ca7adba69be5ded69f23d8882f7..0ccb53a9fcd9361b83c7acd26e1f64601816a3d1 100644 (file)
@@ -28,11 +28,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
                struct desc_struct *desc;
                unsigned long base;
 
-               seg &= ~7UL;
+               seg >>= 3;
 
                mutex_lock(&child->mm->context.lock);
                if (unlikely(!child->mm->context.ldt ||
-                            (seg >> 3) >= child->mm->context.ldt->size))
+                            seg >= child->mm->context.ldt->size))
                        addr = -1L; /* bogus selector, access would fault */
                else {
                        desc = &child->mm->context.ldt->entries[seg];
index 5ef2560075bfb80e6fdabcdf51f71258091e4339..8f0f6eca69da1dc6db95c16782871580bf57091d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2105,7 +2105,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (guest_cpuid_has_tsc_adjust(vcpu)) {
                        if (!msr_info->host_initiated) {
                                s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
-                               kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+                               adjust_tsc_offset_guest(vcpu, adj);
                        }
                        vcpu->arch.ia32_tsc_adjust_msr = data;
                }
@@ -6327,6 +6327,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 static void process_smi(struct kvm_vcpu *vcpu)
 {
        struct kvm_segment cs, ds;
+       struct desc_ptr dt;
        char buf[512];
        u32 cr0;
 
@@ -6359,6 +6360,10 @@ static void process_smi(struct kvm_vcpu *vcpu)
 
        kvm_x86_ops->set_cr4(vcpu, 0);
 
+       /* Undocumented: IDT limit is set to zero on entry to SMM.  */
+       dt.address = dt.size = 0;
+       kvm_x86_ops->set_idt(vcpu, &dt);
+
        __kvm_set_dr(vcpu, 7, DR7_FIXED_1);
 
        cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
index f37e84ab49f38e335bde57880a6cbe8640fb2c4b..3d8f2e421466a8af255eba9602748fee8753a377 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -29,7 +29,6 @@
 
 #include <asm/uaccess.h>
 #include <asm/traps.h>
-#include <asm/desc.h>
 #include <asm/user.h>
 #include <asm/fpu/internal.h>
 
@@ -181,7 +180,7 @@ void math_emulate(struct math_emu_info *info)
                        math_abort(FPU_info, SIGILL);
                }
 
-               code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+               code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
                if (SEG_D_SIZE(code_descriptor)) {
                        /* The above test may be wrong, the book is not clear */
                        /* Segmented 32 bit protected mode */
index 9ccecb61a4fa129a82028b27edc18b91a2f99042..5e044d506b7aae8b17b2142966b11477cfe8e372 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
 #include <linux/kernel.h>
 #include <linux/mm.h>
 
-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s)      (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+       static struct desc_struct zero_desc;
+       struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+       seg >>= 3;
+       mutex_lock(&current->mm->context.lock);
+       if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+               ret = current->mm->context.ldt->entries[seg];
+       mutex_unlock(&current->mm->context.lock);
+#endif
+       return ret;
+}
+
 #define SEG_D_SIZE(x)          ((x).b & (3 << 21))
 #define SEG_G_BIT(x)           ((x).b & (1 << 23))
 #define SEG_GRANULARITY(x)     (((x).b & (1 << 23)) ? 4096 : 1)
index 6ef5e99380f92134ba86a6a693b5ac6d3434e6d4..8300db71c2a62681006e137350961742190ec9dc 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -20,7 +20,6 @@
 #include <linux/stddef.h>
 
 #include <asm/uaccess.h>
-#include <asm/desc.h>
 
 #include "fpu_system.h"
 #include "exception.h"
@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
                addr->selector = PM_REG_(segment);
        }
 
-       descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+       descriptor = FPU_get_ldt_descriptor(addr->selector);
        base_address = SEG_BASE_ADDR(descriptor);
        address = base_address + offset;
        limit = base_address
index e88fda867a33b198bc356aded57d59f48fcfb4ee..484145368a241207d8aa80a5f758a7d0f3ef54cb 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -8,7 +8,7 @@ config XEN
        select PARAVIRT_CLOCK
        select XEN_HAVE_PVMMU
        depends on X86_64 || (X86_32 && X86_PAE)
-       depends on X86_TSC
+       depends on X86_LOCAL_APIC && X86_TSC
        help
          This is the Linux Xen port.  Enabling this will allow the
          kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@ config XEN
 config XEN_DOM0
        def_bool y
        depends on XEN && PCI_XEN && SWIOTLB_XEN
-       depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+       depends on X86_IO_APIC && ACPI && PCI
 
 config XEN_PVHVM
        def_bool y
index 7322755f337af760db6086450591c584c4dcda77..4b6e29ac0968c1a76451d3ff773652bc4afed138 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,13 +13,13 @@ CFLAGS_mmu.o                        := $(nostackp)
 obj-y          := enlighten.o setup.o multicalls.o mmu.o irq.o \
                        time.o xen-asm.o xen-asm_$(BITS).o \
                        grant-table.o suspend.o platform-pci-unplug.o \
-                       p2m.o
+                       p2m.o apic.o
 
 obj-$(CONFIG_EVENT_TRACING) += trace.o
 
 obj-$(CONFIG_SMP)              += smp.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS)     += debugfs.o
-obj-$(CONFIG_XEN_DOM0)         += apic.o vga.o
+obj-$(CONFIG_XEN_DOM0)         += vga.o
 obj-$(CONFIG_SWIOTLB_XEN)      += pci-swiotlb-xen.o
 obj-$(CONFIG_XEN_EFI)          += efi.o
index c20fe29e65f48b4706789e0ad59bb33b1a3acc18..2292721b1d103844ade9f8a7f436a649c811f77d 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -101,17 +101,15 @@ struct dom0_vga_console_info;
 
 #ifdef CONFIG_XEN_DOM0
 void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
-void __init xen_init_apic(void);
 #else
 static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
                                       size_t size)
 {
 }
-static inline void __init xen_init_apic(void)
-{
-}
 #endif
 
+void __init xen_init_apic(void);
+
 #ifdef CONFIG_XEN_EFI
 extern void xen_efi_init(void);
 #else
index 12600bfffca93f4547e2325eeda9669ff443a7a7..e0057d035200c4dd5e42d191f0395a7769489905 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  * Description:
  *    Enables a low level driver to set a hard upper limit,
  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
- *    the device driver based upon the combined capabilities of I/O
- *    controller and storage device.
+ *    the device driver based upon the capabilities of the I/O
+ *    controller.
  *
  *    max_sectors is a soft limit imposed by the block layer for
  *    filesystem type requests.  This value can be overridden on a
index a3da6770bc9ed2bf66d59e8e74461829eeb4fe4e..b8efe36ce1142d0c6b0b8e45ec23965ec7135c40 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
        struct scatterlist *cipher = areq_ctx->cipher;
        struct scatterlist *hsg = areq_ctx->hsg;
        struct scatterlist *tsg = areq_ctx->tsg;
-       struct scatterlist *assoc1;
-       struct scatterlist *assoc2;
        unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
        unsigned int cryptlen = req->cryptlen;
        struct page *dstp;
@@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
                cryptlen += ivsize;
        }
 
-       if (sg_is_last(assoc))
-               return -EINVAL;
-
-       assoc1 = assoc + 1;
-       if (sg_is_last(assoc1))
-               return -EINVAL;
-
-       assoc2 = assoc + 2;
-       if (!sg_is_last(assoc2))
+       if (assoc->length < 12)
                return -EINVAL;
 
        sg_init_table(hsg, 2);
-       sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-       sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+       sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+       sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
        sg_init_table(tsg, 1);
-       sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+       sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
        areq_ctx->cryptlen = cryptlen;
-       areq_ctx->headlen = assoc->length + assoc2->length;
-       areq_ctx->trailen = assoc1->length;
+       areq_ctx->headlen = 8;
+       areq_ctx->trailen = 4;
        areq_ctx->sg = dst;
 
        areq_ctx->complete = authenc_esn_geniv_ahash_done;
@@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
        struct scatterlist *cipher = areq_ctx->cipher;
        struct scatterlist *hsg = areq_ctx->hsg;
        struct scatterlist *tsg = areq_ctx->tsg;
-       struct scatterlist *assoc1;
-       struct scatterlist *assoc2;
        unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
        struct page *srcp;
        u8 *vsrc;
@@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
                cryptlen += ivsize;
        }
 
-       if (sg_is_last(assoc))
-               return -EINVAL;
-
-       assoc1 = assoc + 1;
-       if (sg_is_last(assoc1))
-               return -EINVAL;
-
-       assoc2 = assoc + 2;
-       if (!sg_is_last(assoc2))
+       if (assoc->length < 12)
                return -EINVAL;
 
        sg_init_table(hsg, 2);
-       sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-       sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+       sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+       sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
        sg_init_table(tsg, 1);
-       sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+       sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
        areq_ctx->cryptlen = cryptlen;
-       areq_ctx->headlen = assoc->length + assoc2->length;
-       areq_ctx->trailen = assoc1->length;
+       areq_ctx->headlen = 8;
+       areq_ctx->trailen = 4;
        areq_ctx->sg = src;
 
        areq_ctx->complete = authenc_esn_verify_ahash_done;
index 815f75ef24119eab28ce3c0c2047295c6e464c58..2922f1f252d58aafd2d6c233404ae7ca21abb524 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 #include <acpi/video.h>
 
 ACPI_MODULE_NAME("video");
@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
 
 static bool backlight_notifier_registered;
 static struct notifier_block backlight_nb;
+static struct work_struct backlight_notify_work;
 
 static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
 static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
        { },
 };
 
+/* This uses a workqueue to avoid various locking ordering issues */
+static void acpi_video_backlight_notify_work(struct work_struct *work)
+{
+       if (acpi_video_get_backlight_type() != acpi_backlight_video)
+               acpi_video_unregister_backlight();
+}
+
 static int acpi_video_backlight_notify(struct notifier_block *nb,
                                       unsigned long val, void *bd)
 {
@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
 
        /* A raw bl registering may change video -> native */
        if (backlight->props.type == BACKLIGHT_RAW &&
-           val == BACKLIGHT_REGISTERED &&
-           acpi_video_get_backlight_type() != acpi_backlight_video)
-               acpi_video_unregister_backlight();
+           val == BACKLIGHT_REGISTERED)
+               schedule_work(&backlight_notify_work);
 
        return NOTIFY_OK;
 }
@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
                acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
                                    ACPI_UINT32_MAX, find_video, NULL,
                                    &video_caps, NULL);
+               INIT_WORK(&backlight_notify_work,
+                         acpi_video_backlight_notify_work);
                backlight_nb.notifier_call = acpi_video_backlight_notify;
                backlight_nb.priority = 0;
                if (backlight_register_notifier(&backlight_nb) == 0)
index ce1e3a8859815ca5724e376de6d6ab0d549c9831..14b7305d2ba0b3cc24aa101e76c87e242f01f537 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
         * Other architectures (e.g., ARM) either do not support big endian, or
         * else leave I/O in little endian mode.
         */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                return __raw_readl(addr);
        else
                return readl_relaxed(addr);
@@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
 static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
 {
        /* See brcm_sata_readreg() comments */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                __raw_writel(val, addr);
        else
                writel_relaxed(val, addr);
@@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
                           priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int brcm_ahci_suspend(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
@@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
        brcm_sata_phys_enable(priv);
        return ahci_platform_resume(dev);
 }
+#endif
 
 static struct scsi_host_template ahci_platform_sht = {
        AHCI_SHT(DRV_NAME),
index db5d9f79a247c5ceb2cb590f206927c22f6f2b7c..19bcb80b20313932021b1ee613eed97f4473e17e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
  *     RETURNS:
  *     Block address read from @tf.
  */
-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
 {
        u64 block = 0;
 
-       if (!dev || tf->flags & ATA_TFLAG_LBA) {
+       if (tf->flags & ATA_TFLAG_LBA) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        block |= (u64)tf->hob_lbah << 40;
                        block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
        return 0;
 }
 
-static void ata_dev_config_sense_reporting(struct ata_device *dev)
-{
-       unsigned int err_mask;
-
-       if (!ata_id_has_sense_reporting(dev->id))
-               return;
-
-       if (ata_id_sense_reporting_enabled(dev->id))
-               return;
-
-       err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
-       if (err_mask) {
-               ata_dev_dbg(dev,
-                           "failed to enable Sense Data Reporting, Emask 0x%x\n",
-                           err_mask);
-       }
-}
-
 /**
  *     ata_dev_configure - Configure the specified ATA/ATAPI device
  *     @dev: Target device to configure
@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
                                        dev->devslp_timing[i] = sata_setting[j];
                                }
                }
-               ata_dev_config_sense_reporting(dev);
+
                dev->cdb_len = 16;
        }
 
index 7465031a893c60c9e61f2c911abf218b39c81d2e..cb0508af1459ac43f4aa26f1a16d94134bd9d0bc 100644 (file)
@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
        tf->hob_lbah = buf[10];
        tf->nsect = buf[12];
        tf->hob_nsect = buf[13];
-       if (ata_id_has_ncq_autosense(dev->id))
-               tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
 
        return 0;
 }
@@ -1629,70 +1627,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
        return err_mask;
 }
 
-/**
- *     ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- *     @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
- *     @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
- *     @dfl_sense_key: default sense key to use
- *
- *     Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
- *     SENSE.  This function is EH helper.
- *
- *     LOCKING:
- *     Kernel thread context (may sleep).
- *
- *     RETURNS:
- *     encoded sense data on success, 0 on failure or if sense data
- *     is not available.
- */
-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
-                               struct scsi_cmnd *cmd)
-{
-       struct ata_device *dev = qc->dev;
-       struct ata_taskfile tf;
-       unsigned int err_mask;
-
-       if (!cmd)
-               return 0;
-
-       DPRINTK("ATA request sense\n");
-       ata_dev_warn(dev, "request sense\n");
-       if (!ata_id_sense_reporting_enabled(dev->id)) {
-               ata_dev_warn(qc->dev, "sense data reporting disabled\n");
-               return 0;
-       }
-       ata_tf_init(dev, &tf);
-
-       tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-       tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
-       tf.command = ATA_CMD_REQ_SENSE_DATA;
-       tf.protocol = ATA_PROT_NODATA;
-
-       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
-       /*
-        * ACS-4 states:
-        * The device may set the SENSE DATA AVAILABLE bit to one in the
-        * STATUS field and clear the ERROR bit to zero in the STATUS field
-        * to indicate that the command returned completion without an error
-        * and the sense data described in table 306 is available.
-        *
-        * IOW the 'ATA_SENSE' bit might not be set even though valid
-        * sense data is available.
-        * So check for both.
-        */
-       if ((tf.command & ATA_SENSE) ||
-               tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
-               ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
-               qc->flags |= ATA_QCFLAG_SENSE_VALID;
-               ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
-                            tf.lbah, tf.lbam, tf.lbal);
-       } else {
-               ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
-                            tf.command, err_mask);
-       }
-       return err_mask;
-}
-
 /**
  *     atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
  *     @dev: device to perform REQUEST_SENSE to
@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
        memcpy(&qc->result_tf, &tf, sizeof(tf));
        qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
        qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
-       if (qc->result_tf.auxiliary) {
-               char sense_key, asc, ascq;
-
-               sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
-               asc = (qc->result_tf.auxiliary >> 8) & 0xff;
-               ascq = qc->result_tf.auxiliary & 0xff;
-               ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
-                           sense_key, asc, ascq);
-               ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
-               ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
-               qc->flags |= ATA_QCFLAG_SENSE_VALID;
-       }
-
        ehc->i.err_mask &= ~AC_ERR_DEV;
 }
 
@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
                return ATA_EH_RESET;
        }
 
-       /*
-        * Sense data reporting does not work if the
-        * device fault bit is set.
-        */
-       if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
-           !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
-               if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
-                       tmp = ata_eh_request_sense(qc, qc->scsicmd);
-                       if (tmp)
-                               qc->err_mask |= tmp;
-                       else
-                               ata_scsi_set_sense_information(qc->scsicmd, tf);
-               } else {
-                       ata_dev_warn(qc->dev, "sense data available but port frozen\n");
-               }
-       }
-
-       /* Set by NCQ autosense or request sense above */
-       if (qc->flags & ATA_QCFLAG_SENSE_VALID)
-               return 0;
-
        if (stat & (ATA_ERR | ATA_DF))
                qc->err_mask |= AC_ERR_DEV;
        else
@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
 
 #ifdef CONFIG_ATA_VERBOSE_ERROR
                if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
-                                   ATA_SENSE | ATA_ERR)) {
+                                   ATA_ERR)) {
                        if (res->command & ATA_BUSY)
                                ata_dev_err(qc->dev, "status: { Busy }\n");
                        else
-                               ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+                               ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
                                  res->command & ATA_DRDY ? "DRDY " : "",
                                  res->command & ATA_DF ? "DF " : "",
                                  res->command & ATA_DRQ ? "DRQ " : "",
-                                 res->command & ATA_SENSE ? "SENSE " : "",
                                  res->command & ATA_ERR ? "ERR " : "");
                }
 
index 641a61a59e89c00036af65d3a31fe2cf67eb22b8..0d7f0da3a26929622080f94a2a3125c63676999e 100644 (file)
@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
            ata_scsi_park_show, ata_scsi_park_store);
 EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
 
-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
 {
-       if (!cmd)
-               return;
-
        cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
        scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
 }
 
-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
-                                   const struct ata_taskfile *tf)
-{
-       u64 information;
-
-       if (!cmd)
-               return;
-
-       information = ata_tf_read_block(tf, NULL);
-       scsi_set_sense_information(cmd->sense_buffer, information);
-}
-
 static ssize_t
 ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
            ((cdb[2] & 0x20) || need_sense)) {
                ata_gen_passthru_sense(qc);
        } else {
-               if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
-                       cmd->result = SAM_STAT_CHECK_CONDITION;
-               } else if (!need_sense) {
+               if (!need_sense) {
                        cmd->result = SAM_STAT_GOOD;
                } else {
                        /* TODO: decide which descriptor format to use
index a998a175f9f144b50e4df782bbf7d1afd5f506cb..f840ca18a7c014f5151d22e4bc55dff9fca459de 100644 (file)
@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                           u64 block, u32 n_block, unsigned int tf_flags,
                           unsigned int tag);
-extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
-                            struct ata_device *dev);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
 extern unsigned ata_exec_internal(struct ata_device *dev,
                                  struct ata_taskfile *tf, const u8 *cdb,
                                  int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
                              struct scsi_host_template *sht);
 extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
-                                          const struct ata_taskfile *tf);
 extern void ata_scsi_media_change_notify(struct ata_device *dev);
 extern void ata_scsi_hotplug(struct work_struct *work);
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
index 3a18a8a719b4ff1fa562a515b4701da241e4aeb7..fab504fd9cfd7ace54d772927a01650373d02206 100644 (file)
@@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
        readl(mmio + PDC_SDRAM_CONTROL);
 
        /* Turn on for ECC */
-       pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
-                         PDC_DIMM_SPD_TYPE, &spd0);
+       if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+                              PDC_DIMM_SPD_TYPE, &spd0)) {
+               pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+                      PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+               return 1;
+       }
        if (spd0 == 0x02) {
                data |= (0x01 << 16);
                writel(data, mmio + PDC_SDRAM_CONTROL);
@@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
 
        /* ECC initialization. */
 
-       pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
-                         PDC_DIMM_SPD_TYPE, &spd0);
+       if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+                              PDC_DIMM_SPD_TYPE, &spd0)) {
+               pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+                      PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+               return 1;
+       }
        if (spd0 == 0x02) {
                void *buf;
                VPRINTK("Start ECC initialization\n");
index 81751a49d8bf2334612350bba52406b9af352258..56486d92c4e72bd583630baea0fba541f36c926f 100644 (file)
@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
        if (!blk)
                return -ENOMEM;
 
-       present = krealloc(rbnode->cache_present,
-                   BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
-       if (!present) {
-               kfree(blk);
-               return -ENOMEM;
+       if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+               present = krealloc(rbnode->cache_present,
+                                  BITS_TO_LONGS(blklen) * sizeof(*present),
+                                  GFP_KERNEL);
+               if (!present) {
+                       kfree(blk);
+                       return -ENOMEM;
+               }
+
+               memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+                      (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+                      * sizeof(*present));
+       } else {
+               present = rbnode->cache_present;
        }
 
        /* insert the register value in the correct place in the rbnode block */
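
The added memset() is the crux: krealloc(), like realloc(), leaves the newly grown tail uninitialized, so only the first BITS_TO_LONGS(rbnode->blklen) longs of the presence bitmap carry valid bits after the resize. A self-contained sketch of the same pattern with generic names (not the regmap ones):

    #include <linux/bitops.h>
    #include <linux/slab.h>

    /* Grow a bitmap in place; returns NULL on failure (old map untouched). */
    static unsigned long *grow_bitmap(unsigned long *map,
                                      size_t old_bits, size_t new_bits)
    {
            size_t old_longs = BITS_TO_LONGS(old_bits);
            size_t new_longs = BITS_TO_LONGS(new_bits);
            unsigned long *tmp;

            if (new_longs <= old_longs)
                    return map;             /* nothing to grow */

            tmp = krealloc(map, new_longs * sizeof(*tmp), GFP_KERNEL);
            if (!tmp)
                    return NULL;

            /* krealloc() does not zero the extension; do it explicitly */
            memset(tmp + old_longs, 0,
                   (new_longs - old_longs) * sizeof(*tmp));
            return tmp;
    }
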
index ced96777b677b9bcddd65bae004a7a51b5cf0dc3..954c0029fb3babc49d1a1f490f9d420934701e30 100644 (file)
@@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
                return;
        }
 
-       if (work_pending(&blkif->persistent_purge_work)) {
-               pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
+       if (work_busy(&blkif->persistent_purge_work)) {
+               pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
                return;
        }
 
index 6d89ed35d80c0caaf8bf57ba82c7e9f3a9194bb9..7a8a73f1fc0462feab5bad706573ff6eb4536ef7 100644 (file)
@@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock);
        ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 
 static int blkfront_setup_indirect(struct blkfront_info *info);
+static int blkfront_gather_backend_features(struct blkfront_info *info);
 
 static int get_id_from_freelist(struct blkfront_info *info)
 {
@@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                                 * Add the used indirect page back to the list of
                                 * available pages for indirect grefs.
                                 */
-                               indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
-                               list_add(&indirect_page->lru, &info->indirect_pages);
+                               if (!info->feature_persistent) {
+                                       indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+                                       list_add(&indirect_page->lru, &info->indirect_pages);
+                               }
                                s->indirect_grants[i]->gref = GRANT_INVALID_REF;
                                list_add_tail(&s->indirect_grants[i]->node, &info->grants);
                        }
@@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info)
        info->shadow_free = info->ring.req_prod_pvt;
        info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
 
-       rc = blkfront_setup_indirect(info);
+       rc = blkfront_gather_backend_features(info);
        if (rc) {
                kfree(copy);
                return rc;
@@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
 
 static int blkfront_setup_indirect(struct blkfront_info *info)
 {
-       unsigned int indirect_segments, segs;
+       unsigned int segs;
        int err, i;
 
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-max-indirect-segments", "%u", &indirect_segments,
-                           NULL);
-       if (err) {
-               info->max_indirect_segments = 0;
+       if (info->max_indirect_segments == 0)
                segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
-       } else {
-               info->max_indirect_segments = min(indirect_segments,
-                                                 xen_blkif_max_segments);
+       else
                segs = info->max_indirect_segments;
-       }
 
        err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
        if (err)
@@ -1796,6 +1792,68 @@ out_of_memory:
        return -ENOMEM;
 }
 
+/*
+ * Gather all backend feature-*
+ */
+static int blkfront_gather_backend_features(struct blkfront_info *info)
+{
+       int err;
+       int barrier, flush, discard, persistent;
+       unsigned int indirect_segments;
+
+       info->feature_flush = 0;
+
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                       "feature-barrier", "%d", &barrier,
+                       NULL);
+
+       /*
+        * If there's no "feature-barrier" defined, then it means
+        * we're dealing with a very old backend which writes
+        * synchronously; nothing to do.
+        *
+        * If there are barriers, then we use flush.
+        */
+       if (!err && barrier)
+               info->feature_flush = REQ_FLUSH | REQ_FUA;
+       /*
+        * And if there is "feature-flush-cache", use that in
+        * preference to barriers.
+        */
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                       "feature-flush-cache", "%d", &flush,
+                       NULL);
+
+       if (!err && flush)
+               info->feature_flush = REQ_FLUSH;
+
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                       "feature-discard", "%d", &discard,
+                       NULL);
+
+       if (!err && discard)
+               blkfront_setup_discard(info);
+
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                       "feature-persistent", "%u", &persistent,
+                       NULL);
+       if (err)
+               info->feature_persistent = 0;
+       else
+               info->feature_persistent = persistent;
+
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                           "feature-max-indirect-segments", "%u", &indirect_segments,
+                           NULL);
+       if (err)
+               info->max_indirect_segments = 0;
+       else
+               info->max_indirect_segments = min(indirect_segments,
+                                                 xen_blkif_max_segments);
+
+       return blkfront_setup_indirect(info);
+}
+
 /*
  * Invoked when the backend is finally 'ready' (and has produced
  * the details about the physical device - #sectors, size, etc).
@@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info)
        unsigned int physical_sector_size;
        unsigned int binfo;
        int err;
-       int barrier, flush, discard, persistent;
 
        switch (info->connected) {
        case BLKIF_STATE_CONNECTED:
@@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info)
        if (err != 1)
                physical_sector_size = sector_size;
 
-       info->feature_flush = 0;
-
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-barrier", "%d", &barrier,
-                           NULL);
-
-       /*
-        * If there's no "feature-barrier" defined, then it means
-        * we're dealing with a very old backend which writes
-        * synchronously; nothing to do.
-        *
-        * If there are barriers, then we use flush.
-        */
-       if (!err && barrier)
-               info->feature_flush = REQ_FLUSH | REQ_FUA;
-       /*
-        * And if there is "feature-flush-cache" use that above
-        * barriers.
-        */
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-flush-cache", "%d", &flush,
-                           NULL);
-
-       if (!err && flush)
-               info->feature_flush = REQ_FLUSH;
-
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-discard", "%d", &discard,
-                           NULL);
-
-       if (!err && discard)
-               blkfront_setup_discard(info);
-
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-persistent", "%u", &persistent,
-                           NULL);
-       if (err)
-               info->feature_persistent = 0;
-       else
-               info->feature_persistent = persistent;
-
-       err = blkfront_setup_indirect(info);
+       err = blkfront_gather_backend_features(info);
        if (err) {
                xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
                                 info->xbdev->otherend);
index fb655e8d1e3b17bf4cda9fd09593bc7dc770f78d..763301c7828c72650f2abaa1c723425bdd3c73f4 100644 (file)
@@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
        kfree(meta);
 }
 
-static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
+static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
 {
        size_t num_pages;
-       char pool_name[8];
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
 
        if (!meta)
@@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
                goto out_error;
        }
 
-       snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
        meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
@@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev,
                return -EINVAL;
 
        disksize = PAGE_ALIGN(disksize);
-       meta = zram_meta_alloc(zram->disk->first_minor, disksize);
+       meta = zram_meta_alloc(zram->disk->disk_name, disksize);
        if (!meta)
                return -ENOMEM;
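
Passing the disk's own name sidesteps the fixed-size local buffer, which could silently truncate the pool name for high device ids and make distinct devices collide on one zsmalloc pool. A quick illustration with a hypothetical id:

    /* "zram%d" needs 9 bytes once the id reaches 4 digits */
    char pool_name[8];

    snprintf(pool_name, sizeof(pool_name), "zram%d", 1000);
    /* pool_name is now "zram100": the last digit was truncated, so ids
     * 1000..1009 would all map to the same pool name */
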
 
index 4b93a1efb36d11fa7171735d29bac283e4bb6d97..ac03ba49e9d1952dff14e9383ed86874690a7176 100644 (file)
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
 PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
 PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
 
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
 #define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
                    div_hp, bit, is_lp, flags)                          \
        PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp,         \
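
The one-character swap reads correctly once the register split is in view: assuming the usual PXA3xx layout, clock-enable bits 0-31 live in CKENA and bits 32 and up in CKENB, so a bit number above 31 must select CKENB. Open-coded for illustration only:

    /* bit 0-31 -> CKENA, bit 32+ -> CKENB; the position within the
     * register is bit % 32 (sketch; CKENA/CKENB as in the PXA headers) */
    reg  = (bit > 31) ? &CKENB : &CKENA;
    mask = 1u << (bit % 32);
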
index b8ff3c64cc452a16fc4108426fb6e5b1c54e91e8..c96de14036a0adebfc7628dc9f9cd5413b5c5495 100644 (file)
@@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
 {
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
+       if (!ch->cs_enabled)
+               return;
+
        sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
        pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
 }
@@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
 {
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
+       if (!ch->cs_enabled)
+               return;
+
        pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
        sh_cmt_start(ch, FLAG_CLOCKSOURCE);
 }
index 2d59038dec43512e40ca46f4a0989c3b1253e0af..86c7eb66bdfb2e18628997319aa047b1aedc1da1 100644 (file)
@@ -462,6 +462,7 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
        BUG_ON(!imxtm->base);
 
        imxtm->type = type;
+       imxtm->irq = irq;
 
        _mxc_timer_init(imxtm);
 }
index ae5b2bd3a9785c63646e3e922fbe17330678b481..fa3dd840a83771735e474a658a5c6516c62f76a0 100644 (file)
@@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
                ret = exynos5250_cpufreq_init(exynos_info);
        } else {
                pr_err("%s: Unknown SoC type\n", __func__);
-               return -ENODEV;
+               ret = -ENODEV;
        }
 
        if (ret)
@@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 
        if (exynos_info->set_freq == NULL) {
                dev_err(&pdev->dev, "No set_freq function (ERR)\n");
+               ret = -EINVAL;
                goto err_vdd_arm;
        }
 
        arm_regulator = regulator_get(NULL, "vdd_arm");
        if (IS_ERR(arm_regulator)) {
                dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
+               ret = -EINVAL;
                goto err_vdd_arm;
        }
 
@@ -225,7 +227,7 @@ err_cpufreq_reg:
        regulator_put(arm_regulator);
 err_vdd_arm:
        kfree(exynos_info);
-       return -EINVAL;
+       return ret;
 }
 
 static struct platform_driver exynos_cpufreq_platdrv = {
index dae1e8099969a192b302703ec291da96ebac3429..f9c78751989ec865491570ed13bf19dbc6b1a799 100644 (file)
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
                          state->buflen_1;
        u32 *sh_desc = ctx->sh_desc_fin, *desc;
        dma_addr_t ptr = ctx->sh_desc_fin_dma;
-       int sec4_sg_bytes;
+       int sec4_sg_bytes, sec4_sg_src_index;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret = 0;
        int sh_len;
 
-       sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+       sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+       sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
                                                buf, state->buf_dma, buflen,
                                                last_buflen);
-       (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+       (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
 
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
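
The new sec4_sg_src_index matters because edesc->sec4_sg is a struct sec4_sg_entry pointer and pointer arithmetic already scales by the element size. Worked through, assuming 16-byte descriptor entries and buflen != 0:

    /* sec4_sg_src_index = 1 + 1 = 2 entries
     * sec4_sg_bytes     = 2 * sizeof(struct sec4_sg_entry) = 32 bytes
     *
     * old: (edesc->sec4_sg + sec4_sg_bytes - 1)     -> entry 31, far
     *      past the two entries that were allocated
     * new: (edesc->sec4_sg + sec4_sg_src_index - 1) -> entry 1, the
     *      final entry, which is the one that needs SEC4_SG_LEN_FIN */
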
index 08f8d5cd633491e3ff0e28ca8204d7f51be2b05b..becb738c897b1b5d93b632e3ab80ed2b146ead5a 100644 (file)
@@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        struct sha256_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-       struct nx_sg *in_sg;
        struct nx_sg *out_sg;
        u64 to_process = 0, leftover, total;
        unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-       in_sg = nx_ctx->in_sg;
        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                        nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
@@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        }
 
        do {
-               /*
-                * to_process: the SHA256_BLOCK_SIZE data chunk to process in
-                * this update. This value is also restricted by the sg list
-                * limits.
-                */
-               to_process = total - to_process;
-               to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+               int used_sgs = 0;
+               struct nx_sg *in_sg = nx_ctx->in_sg;
 
                if (buf_len) {
                        data_len = buf_len;
-                       in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                       in_sg = nx_build_sg_list(in_sg,
                                                 (u8 *) sctx->buf,
                                                 &data_len,
                                                 max_sg_len);
@@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
                                rc = -EINVAL;
                                goto out;
                        }
+                       used_sgs = in_sg - nx_ctx->in_sg;
                }
 
+               /* to_process: the SHA256_BLOCK_SIZE-aligned chunk to be
+                * processed in this iteration. The value is restricted by
+                * the sg list limits and by the number of sgs we already
+                * used for leftover data (see above). Ideally we could
+                * allow NX_PAGE_SIZE * max_sg_len, but the data may not
+                * be page-aligned, so we have to account for that too. */
+               to_process = min_t(u64, total,
+                       (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+               to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
                data_len = to_process - buf_len;
                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         &data_len, max_sg_len);
 
                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 
-               to_process = (data_len + buf_len);
+               to_process = data_len + buf_len;
                leftover = total - to_process;
 
                /*
index aff0fe58eac0b7aba11b465a192c280ef19fdbac..b6e183d58d73d5a4e38fff2925344783e8e581bc 100644 (file)
@@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-       struct nx_sg *in_sg;
        struct nx_sg *out_sg;
        u64 to_process, leftover = 0, total;
        unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-       in_sg = nx_ctx->in_sg;
        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                        nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
@@ -114,18 +112,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        }
 
        do {
-               /*
-                * to_process: the SHA512_BLOCK_SIZE data chunk to process in
-                * this update. This value is also restricted by the sg list
-                * limits.
-                */
-               to_process = total - leftover;
-               to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-               leftover = total - to_process;
+               int used_sgs = 0;
+               struct nx_sg *in_sg = nx_ctx->in_sg;
 
                if (buf_len) {
                        data_len = buf_len;
-                       in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                       in_sg = nx_build_sg_list(in_sg,
                                                 (u8 *) sctx->buf,
                                                 &data_len, max_sg_len);
 
@@ -133,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
                                rc = -EINVAL;
                                goto out;
                        }
+                       used_sgs = in_sg - nx_ctx->in_sg;
                }
 
+               /* to_process: the SHA512_BLOCK_SIZE-aligned chunk to be
+                * processed in this iteration. The value is restricted by
+                * the sg list limits and by the number of sgs we already
+                * used for leftover data (see above). Ideally we could
+                * allow NX_PAGE_SIZE * max_sg_len, but the data may not
+                * be page-aligned, so we have to account for that too. */
+               to_process = min_t(u64, total,
+                       (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+               to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
                data_len = to_process - buf_len;
                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         &data_len, max_sg_len);
@@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
                        goto out;
                }
 
-               to_process = (data_len + buf_len);
+               to_process = data_len + buf_len;
                leftover = total - to_process;
 
                /*
index 4a4cce15f25dd65c6a720d949ed0d9c922ff1cba..3ff284c8e3d5aef72f229017c883c73cbe13403f 100644 (file)
@@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
        struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
        if (IS_ERR(ch))
                return NULL;
+
+       dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
+       ch->device->privatecnt++;
+
        return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
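
Setting DMA_PRIVATE and bumping privatecnt keeps a by-name slave channel out of the public allocator while a consumer holds it. Sketched from the consumer's side; the channel name "tx" is hypothetical, and the release-side bookkeeping is assumed to mirror the request side shown here:

    struct dma_chan *chan;

    chan = dma_request_slave_channel(dev, "tx");  /* now marked DMA_PRIVATE */
    if (!chan)
            return -ENODEV;

    /* ... prepare and submit descriptors ... */

    dma_release_channel(chan);  /* expected to drop privatecnt and clear
                                 * DMA_PRIVATE once the count hits zero */
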
index 3515b381c1312612f56953bc267ee7d5d23b0f84..711d8ad74f116ebdcc7fd3833fbc0672c7a6359b 100644 (file)
@@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
         */
 
        for (row = 0; row < mci->nr_csrows; row++) {
-               struct csrow_info *csi = &mci->csrows[row];
+               struct csrow_info *csi = mci->csrows[row];
 
                /*
                 * Get the configuration settings for this
index 87add3fdce529b1ad6890cb24adb345275e4655c..e41594510b978291de179182c46b0196b3a377e3 100644 (file)
@@ -245,4 +245,4 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
 }
 EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
 
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
index 2f7a5efa21c23ab0fda25ee0ebbb360efae966ea..f5c22556ec2c17ff145c48440dfe5e3563e67606 100644 (file)
@@ -374,7 +374,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
        unsigned height_in_mb = ALIGN(height / 16, 2);
        unsigned fs_in_mb = width_in_mb * height_in_mb;
 
-       unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
+       unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size;
 
        image_size = width * height;
        image_size += image_size / 2;
@@ -466,6 +466,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
 
                num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
                min_dpb_size = image_size * num_dpb_buffer;
+               min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
+                                          * 16 * num_dpb_buffer + 52 * 1024;
                break;
 
        default:
@@ -486,6 +488,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
 
        buf_sizes[0x1] = dpb_size;
        buf_sizes[0x2] = image_size;
+       buf_sizes[0x4] = min_ctx_size;
        return 0;
 }
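
For a sense of scale, the new minimum context-buffer size works out as follows for a hypothetical 1920x1088 stream with num_dpb_buffer = 17 (illustrative numbers only):

    /* min_ctx_size = ((1920 + 255) / 16) * ((1088 + 255) / 16)
     *                * 16 * 17 + 52 * 1024
     *              = 135 * 83 * 16 * 17 + 53248
     *              = 3047760 + 53248 = 3101008 bytes (~3 MiB) */
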
 
@@ -628,6 +631,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
                        return -EINVAL;
                }
 
+       } else if (cmd == 0x206) {
+               if ((end - start) < ctx->buf_sizes[4]) {
+                       DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
+                                         (unsigned)(end - start),
+                                         ctx->buf_sizes[4]);
+                       return -EINVAL;
+               }
        } else if ((cmd != 0x100) && (cmd != 0x204)) {
                DRM_ERROR("invalid UVD command %X!\n", cmd);
                return -EINVAL;
@@ -755,9 +765,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
        struct amdgpu_uvd_cs_ctx ctx = {};
        unsigned buf_sizes[] = {
                [0x00000000]    =       2048,
-               [0x00000001]    =       32 * 1024 * 1024,
-               [0x00000002]    =       2048 * 1152 * 3,
+               [0x00000001]    =       0xFFFFFFFF,
+               [0x00000002]    =       0xFFFFFFFF,
                [0x00000003]    =       2048,
+               [0x00000004]    =       0xFFFFFFFF,
        };
        struct amdgpu_ib *ib = &parser->ibs[ib_idx];
        int r;
index f5a42ab1f65c070ed55192758793c04c9a2d5f7f..20e2cfd521d5352202070f357de89234175cb800 100644 (file)
@@ -3135,7 +3135,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
                                WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
                                       AMDGPU_DOORBELL_KIQ << 2);
                                WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
-                                               0x7FFFF << 2);
+                                      AMDGPU_DOORBELL_MEC_RING7 << 2);
                        }
                        tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
                        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
index 6fad1f9648f38870b2162cb74a6320f50c34aabc..ef6182bc8e5eef229dc990354f6d31c18915f3bc 100644 (file)
@@ -559,7 +559,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
index b0487c9f018cfd09d040ffb08fe4585f68ab6022..eb603f1defc2250ea158864ea4371e24655138e1 100644 (file)
@@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref)
                   from an EDID retrieval */
                if (port->connector) {
                        mutex_lock(&mgr->destroy_connector_lock);
-                       list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
+                       list_add(&port->next, &mgr->destroy_connector_list);
                        mutex_unlock(&mgr->destroy_connector_lock);
                        schedule_work(&mgr->destroy_connector_work);
+                       return;
                }
                drm_dp_port_teardown_pdt(port, port->pdt);
 
@@ -2659,7 +2660,7 @@ static void drm_dp_tx_work(struct work_struct *work)
 static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
-       struct drm_connector *connector;
+       struct drm_dp_mst_port *port;
 
        /*
         * Not a regular list traverse as we have to drop the destroy
         * connector lock before destroying the connector.
         */
         */
        for (;;) {
                mutex_lock(&mgr->destroy_connector_lock);
-               connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
-               if (!connector) {
+               port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
+               if (!port) {
                        mutex_unlock(&mgr->destroy_connector_lock);
                        break;
                }
-               list_del(&connector->destroy_list);
+               list_del(&port->next);
                mutex_unlock(&mgr->destroy_connector_lock);
 
-               mgr->cbs->destroy_connector(mgr, connector);
+               mgr->cbs->destroy_connector(mgr, port->connector);
+
+               drm_dp_port_teardown_pdt(port, port->pdt);
+
+               if (!port->input && port->vcpi.vcpi > 0)
+                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+               kfree(port);
        }
 }
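
The rewritten worker is the standard pop-one-then-work shape: hold the list lock only long enough to detach a single entry, then do the heavy teardown unlocked so it may take other locks without inversion. A generic sketch (struct and function names are illustrative):

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct item {
            struct list_head node;
            /* payload ... */
    };

    static void destroy_worker_body(struct list_head *pending,
                                    struct mutex *lock)
    {
            struct item *it;

            for (;;) {
                    mutex_lock(lock);
                    it = list_first_entry_or_null(pending, struct item, node);
                    if (it)
                            list_del(&it->node);
                    mutex_unlock(lock);

                    if (!it)
                            break;

                    /* teardown runs unlocked here, so it can safely
                     * take e.g. modeset locks */
                    kfree(it);
            }
    }
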
 
index 842d6b8dc3c435ee7d836402d4169847aef75d31..2a652359af644b51f257cde7528d70b6016897da 100644 (file)
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
        spin_lock_init(&ctx->lock);
        platform_set_drvdata(pdev, ctx);
 
-       pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
 
        ret = exynos_drm_ippdrv_register(ippdrv);
index 8040ed2a831f9a6f226baf8aee3ce00b213be8e6..f1c6b76c127f4db02388267775431fcd25ac7eb8 100644 (file)
@@ -593,8 +593,7 @@ static int gsc_src_set_transf(struct device *dev,
 
        gsc_write(cfg, GSC_IN_CON);
 
-       ctx->rotation = cfg &
-               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
        *swap = ctx->rotation;
 
        return 0;
@@ -857,8 +856,7 @@ static int gsc_dst_set_transf(struct device *dev,
 
        gsc_write(cfg, GSC_IN_CON);
 
-       ctx->rotation = cfg &
-               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
        *swap = ctx->rotation;
 
        return 0;
index 99e286489031c4a2931565823e0158428548aef2..4a00990e4ae4e8459b94a9a007044af1cc11af62 100644 (file)
@@ -1064,6 +1064,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
 {
        struct hdmi_context *hdata = ctx_from_connector(connector);
        struct edid *edid;
+       int ret;
 
        if (!hdata->ddc_adpt)
                return -ENODEV;
@@ -1079,7 +1080,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
 
        drm_mode_connector_update_edid_property(connector, edid);
 
-       return drm_add_edid_modes(connector, edid);
+       ret = drm_add_edid_modes(connector, edid);
+
+       kfree(edid);
+
+       return ret;
 }
 
 static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
index cae98db3306205e2628b2090b731cf6cfdf79d4f..4706b56902b44f5ba205b30d3aa6e53678bbad52 100644 (file)
@@ -718,6 +718,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
        /* handling VSYNC */
        if (val & MXR_INT_STATUS_VSYNC) {
+               /* the vsync interrupt uses different bits for read and clear */
+               val |= MXR_INT_CLEAR_VSYNC;
+               val &= ~MXR_INT_STATUS_VSYNC;
+
                /* interlaced scan needs to check the shadow register */
                if (ctx->interlace) {
                        base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -743,11 +747,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
 out:
        /* clear interrupts */
-       if (~val & MXR_INT_EN_VSYNC) {
-               /* vsync interrupt use different bit for read and clear */
-               val &= ~MXR_INT_EN_VSYNC;
-               val |= MXR_INT_CLEAR_VSYNC;
-       }
        mixer_reg_write(res, MXR_INT_STATUS, val);
 
        spin_unlock(&res->reg_slock);
@@ -907,8 +906,8 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
        }
 
        /* enable vsync interrupt */
-       mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
-                       MXR_INT_EN_VSYNC);
+       mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+       mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
 
        return 0;
 }
@@ -918,7 +917,13 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
        struct mixer_context *mixer_ctx = crtc->ctx;
        struct mixer_resources *res = &mixer_ctx->mixer_res;
 
+       if (!mixer_ctx->powered) {
+               mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+               return;
+       }
+
        /* disable vsync interrupt */
+       mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
        mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
@@ -1047,6 +1052,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 
        mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
 
+       if (ctx->int_en & MXR_INT_EN_VSYNC)
+               mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
        mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
        mixer_win_reset(ctx);
 }
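
All three hunks follow from the quirk the new comment records: in MXR_INT_STATUS the vsync status bit and the vsync clear bit are different bits, so acknowledging the interrupt means writing MXR_INT_CLEAR_VSYNC rather than echoing the status bit back. The acknowledge pattern in isolation (register names reused for illustration):

    u32 val = mixer_reg_read(res, MXR_INT_STATUS);

    if (val & MXR_INT_STATUS_VSYNC) {
            /* handle vsync, then translate status bit into clear bit */
            val &= ~MXR_INT_STATUS_VSYNC;
            val |= MXR_INT_CLEAR_VSYNC;
    }
    mixer_reg_write(res, MXR_INT_STATUS, val);
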
index 7ed8033aae6097af69d90e83bb6c97f7dc6f7225..8e35e0d013df556d8ac04fc27f9ba2bd7354fae3 100644 (file)
@@ -129,8 +129,9 @@ int intel_atomic_commit(struct drm_device *dev,
                        struct drm_atomic_state *state,
                        bool async)
 {
-       int ret;
-       int i;
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc *crtc;
+       int ret, i;
 
        if (async) {
                DRM_DEBUG_KMS("i915 does not yet support async commit\n");
@@ -142,48 +143,18 @@ int intel_atomic_commit(struct drm_device *dev,
                return ret;
 
        /* Point of no return */
-
-       /*
-        * FIXME:  The proper sequence here will eventually be:
-        *
-        * drm_atomic_helper_swap_state(dev, state)
-        * drm_atomic_helper_commit_modeset_disables(dev, state);
-        * drm_atomic_helper_commit_planes(dev, state);
-        * drm_atomic_helper_commit_modeset_enables(dev, state);
-        * drm_atomic_helper_wait_for_vblanks(dev, state);
-        * drm_atomic_helper_cleanup_planes(dev, state);
-        * drm_atomic_state_free(state);
-        *
-        * once we have full atomic modeset.  For now, just manually update
-        * plane states to avoid clobbering good states with dummy states
-        * while nuclear pageflipping.
-        */
-       for (i = 0; i < dev->mode_config.num_total_plane; i++) {
-               struct drm_plane *plane = state->planes[i];
-
-               if (!plane)
-                       continue;
-
-               plane->state->state = state;
-               swap(state->plane_states[i], plane->state);
-               plane->state->state = NULL;
-       }
+       drm_atomic_helper_swap_state(dev, state);
 
        /* swap crtc_scaler_state */
-       for (i = 0; i < dev->mode_config.num_crtc; i++) {
-               struct drm_crtc *crtc = state->crtcs[i];
-               if (!crtc) {
-                       continue;
-               }
-
-               to_intel_crtc(crtc)->config->scaler_state =
-                       to_intel_crtc_state(state->crtc_states[i])->scaler_state;
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
 
                if (INTEL_INFO(dev)->gen >= 9)
                        skl_detach_scalers(to_intel_crtc(crtc));
+
+               drm_atomic_helper_commit_planes_on_crtc(crtc_state);
        }
 
-       drm_atomic_helper_commit_planes(dev, state);
        drm_atomic_helper_wait_for_vblanks(dev, state);
        drm_atomic_helper_cleanup_planes(dev, state);
        drm_atomic_state_free(state);
index 3dcd59e694db9e6f32c8e49ea04cbf21bbdc0ad8..198fc3c3291b2ac05540ea36ef853c9826b23efa 100644 (file)
@@ -1075,34 +1075,15 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
        const union child_device_config *p_child;
        union child_device_config *child_dev_ptr;
        int i, child_device_num, count;
-       u8 expected_size;
-       u16 block_size;
+       u16     block_size;
 
        p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
        if (!p_defs) {
                DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
                return;
        }
-       if (bdb->version < 195) {
-               expected_size = 33;
-       } else if (bdb->version == 195) {
-               expected_size = 37;
-       } else if (bdb->version <= 197) {
-               expected_size = 38;
-       } else {
-               expected_size = 38;
-               DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n",
-                                expected_size, bdb->version);
-       }
-
-       if (expected_size > sizeof(*p_child)) {
-               DRM_ERROR("child_device_config cannot fit in p_child\n");
-               return;
-       }
-
-       if (p_defs->child_dev_size != expected_size) {
-               DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n",
-                         p_defs->child_dev_size, expected_size, bdb->version);
+       if (p_defs->child_dev_size < sizeof(*p_child)) {
+               DRM_ERROR("General definiton block child device size is too small.\n");
                return;
        }
        /* get the block size of general definitions */
@@ -1149,7 +1130,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 
                child_dev_ptr = dev_priv->vbt.child_dev + count;
                count++;
-               memcpy(child_dev_ptr, p_child, p_defs->child_dev_size);
+               memcpy(child_dev_ptr, p_child, sizeof(*p_child));
        }
        return;
 }
index 30e0f54ba19d1284107958bb6e5d49f6309b63de..87476ff181ddbef0967d948c37119cfcbd758315 100644 (file)
@@ -11826,7 +11826,9 @@ encoder_retry:
                goto encoder_retry;
        }
 
-       pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
+       /* Dithering seems not to pass bits through correctly when it should,
+        * so only enable it on 6bpc panels. */
+       pipe_config->dither = pipe_config->pipe_bpp == 6*3;
        DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
                      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
@@ -12624,17 +12626,17 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
 
        modeset_update_crtc_power_domains(state);
 
-       drm_atomic_helper_commit_planes(dev, state);
-
        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               if (!needs_modeset(crtc->state) || !crtc->state->enable)
+               if (!needs_modeset(crtc->state) || !crtc->state->enable) {
+                       drm_atomic_helper_commit_planes_on_crtc(crtc_state);
                        continue;
+               }
 
                update_scanline_offset(to_intel_crtc(crtc));
 
                dev_priv->display.crtc_enable(crtc);
-               intel_crtc_enable_planes(crtc);
+               drm_atomic_helper_commit_planes_on_crtc(crtc_state);
        }
 
        /* FIXME: add subpixel order */
@@ -12891,20 +12893,11 @@ intel_modeset_stage_output_state(struct drm_device *dev,
        return 0;
 }
 
-static bool primary_plane_visible(struct drm_crtc *crtc)
-{
-       struct intel_plane_state *plane_state =
-               to_intel_plane_state(crtc->primary->state);
-
-       return plane_state->visible;
-}
-
 static int intel_crtc_set_config(struct drm_mode_set *set)
 {
        struct drm_device *dev;
        struct drm_atomic_state *state = NULL;
        struct intel_crtc_state *pipe_config;
-       bool primary_plane_was_visible;
        int ret;
 
        BUG_ON(!set);
@@ -12943,38 +12936,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 
        intel_update_pipe_size(to_intel_crtc(set->crtc));
 
-       primary_plane_was_visible = primary_plane_visible(set->crtc);
-
        ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
 
-       if (ret == 0 &&
-           pipe_config->base.enable &&
-           pipe_config->base.planes_changed &&
-           !needs_modeset(&pipe_config->base)) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
-
-               /*
-                * We need to make sure the primary plane is re-enabled if it
-                * has previously been turned off.
-                */
-               if (ret == 0 && !primary_plane_was_visible &&
-                   primary_plane_visible(set->crtc)) {
-                       WARN_ON(!intel_crtc->active);
-                       intel_post_enable_primary(set->crtc);
-               }
-
-               /*
-                * In the fastboot case this may be our only check of the
-                * state after boot.  It would be better to only do it on
-                * the first update, but we don't have a nice way of doing that
-                * (and really, set_config isn't used much for high freq page
-                * flipping, so increasing its cost here shouldn't be a big
-                * deal).
-                */
-               if (i915.fastboot && ret == 0)
-                       intel_modeset_check_state(set->crtc->dev);
-       }
-
        if (ret) {
                DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
                              set->crtc->base.id, ret);
@@ -13305,6 +13268,9 @@ intel_check_primary_plane(struct drm_plane *plane,
                         */
                        if (IS_BROADWELL(dev))
                                intel_crtc->atomic.wait_vblank = true;
+
+                       if (crtc_state)
+                               intel_crtc->atomic.post_enable_primary = true;
                }
 
                /*
@@ -13317,6 +13283,10 @@ intel_check_primary_plane(struct drm_plane *plane,
                if (!state->visible || !fb)
                        intel_crtc->atomic.disable_ips = true;
 
+               if (!state->visible && old_state->visible &&
+                   crtc_state && !needs_modeset(&crtc_state->base))
+                       intel_crtc->atomic.pre_disable_primary = true;
+
                intel_crtc->atomic.fb_bits |=
                        INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 
@@ -15034,6 +15004,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                struct intel_plane_state *plane_state;
 
                memset(crtc->config, 0, sizeof(*crtc->config));
+               crtc->config->base.crtc = &crtc->base;
 
                crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
 
index 6e8faa25379240cab60f57631adf42612f28df33..1df0e1fe235f112830a0e82a1004b0e949613c78 100644 (file)
@@ -93,9 +93,6 @@ static const struct dp_link_dpll chv_dpll[] = {
 
 static const int skl_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
-static const int chv_rates[] = { 162000, 202500, 210000, 216000,
-                                243000, 270000, 324000, 405000,
-                                420000, 432000, 540000 };
 static const int default_rates[] = { 162000, 270000, 540000 };
 
 /**
@@ -1169,24 +1166,31 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
        return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
 }
 
+static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+{
+       /* WaDisableHBR2:skl */
+       if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+               return false;
+
+       if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+           (INTEL_INFO(dev)->gen >= 9))
+               return true;
+       else
+               return false;
+}
+
 static int
 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
        if (IS_SKYLAKE(dev)) {
                *source_rates = skl_rates;
                return ARRAY_SIZE(skl_rates);
-       } else if (IS_CHERRYVIEW(dev)) {
-               *source_rates = chv_rates;
-               return ARRAY_SIZE(chv_rates);
        }
 
        *source_rates = default_rates;
 
-       if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
-               /* WaDisableHBR2:skl */
-               return (DP_LINK_BW_2_7 >> 3) + 1;
-       else if (INTEL_INFO(dev)->gen >= 8 ||
-           (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+       /* This depends on the fact that 5.4 is the last value in the array */
+       if (intel_dp_source_supports_hbr2(dev))
                return (DP_LINK_BW_5_4 >> 3) + 1;
        else
                return (DP_LINK_BW_2_7 >> 3) + 1;
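
The return values are easier to read once decoded. Assuming the standard DPCD encoding (DP_LINK_BW_1_62 = 0x06, DP_LINK_BW_2_7 = 0x0a, DP_LINK_BW_5_4 = 0x14, i.e. the rate in units of 0.27 Gbit/s), shifting right by 3 and adding 1 turns a maximum link-bandwidth code into a count of usable default_rates entries:

    /* (0x0a >> 3) + 1 == 2  -> { 162000, 270000 }
     * (0x14 >> 3) + 1 == 3  -> { 162000, 270000, 540000 }
     * hence the comment that 5.4 must stay the last value in the array */
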
@@ -3941,10 +3945,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
                }
        }
 
-       /* Training Pattern 3 support, both source and sink */
+       /* Training Pattern 3 support: only the Intel platforms that support
+        * HBR2 also support TP3, so that check is used along with the DPCD
+        * check to decide whether TP3 can be enabled.
+        * SKL < B0 is the only exception: because of its WaDisableHBR2
+        * workaround, TP3 is supported but still not enabled.
+        */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
            intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
-           (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
+           intel_dp_source_supports_hbr2(dev)) {
                intel_dp->use_tps3 = true;
                DRM_DEBUG_KMS("Displayport TPS3 supported\n");
        } else
index 9b74ffae5f5a7bab8ef545525d361c29fd4c3bf6..7f2161a1ff5d4d40c7b6dca346c20518b084bf6b 100644 (file)
@@ -1012,6 +1012,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
                ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
                if (ret)
                        goto unpin_ctx_obj;
+
+               ctx_obj->dirty = true;
        }
 
        return ret;
index 52c22b02600598cfa7d18e424d69a99cce4879e7..e10f9644140f5d9fcd6e73446c74634d2b13906a 100644 (file)
@@ -165,31 +165,15 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
        return 0;
 }
 
-static int
-gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
-{
-       struct nvkm_object *obj = (void *)chan;
-       struct gk104_fifo_priv *priv = (void *)obj->engine;
-
-       nv_wr32(priv, 0x002634, chan->base.chid);
-       if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
-               nv_error(priv, "channel %d [%s] kick timeout\n",
-                        chan->base.chid, nvkm_client_name(chan));
-               return -EBUSY;
-       }
-
-       return 0;
-}
-
 static int
 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                          struct nvkm_object *object)
 {
        struct nvkm_bar *bar = nvkm_bar(parent);
+       struct gk104_fifo_priv *priv = (void *)parent->engine;
        struct gk104_fifo_base *base = (void *)parent->parent;
        struct gk104_fifo_chan *chan = (void *)parent;
        u32 addr;
-       int ret;
 
        switch (nv_engidx(object->engine)) {
        case NVDEV_ENGINE_SW    : return 0;
@@ -204,9 +188,13 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                return -EINVAL;
        }
 
-       ret = gk104_fifo_chan_kick(chan);
-       if (ret && suspend)
-               return ret;
+       nv_wr32(priv, 0x002634, chan->base.chid);
+       if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+               nv_error(priv, "channel %d [%s] kick timeout\n",
+                        chan->base.chid, nvkm_client_name(chan));
+               if (suspend)
+                       return -EBUSY;
+       }
 
        if (addr) {
                nv_wo32(base, addr + 0x00, 0x00000000);
@@ -331,7 +319,6 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
                gk104_fifo_runlist_update(priv, chan->engine);
        }
 
-       gk104_fifo_chan_kick(chan);
        nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
        return nvkm_fifo_channel_fini(&chan->base, suspend);
 }
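
The kick sequence above writes the channel id to register 0x002634 and polls
it back; a rough model of the nv_wait() idiom it relies on (assumed
semantics: spin until (read(addr) & mask) == data, bounded by a timeout):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical register-read callback standing in for nv_rd32(). */
    typedef uint32_t (*rd32_fn)(uint32_t addr);

    static bool poll_reg(rd32_fn rd, uint32_t addr, uint32_t mask,
                         uint32_t data)
    {
            unsigned long spins = 2000000; /* arbitrary bound for the sketch */

            while (spins--) {
                    if ((rd(addr) & mask) == data)
                            return true;
            }
            return false; /* timed out: the "kick timeout" error path above */
    }

    static uint32_t fake_rd(uint32_t addr) { (void)addr; return 7; }

    int main(void)
    {
            return poll_reg(fake_rd, 0x002634, 0xffffffff, 7) ? 0 : 1;
    }
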
index 1162bfa464f3036192854f4d0f3f363eb1ee8cff..171d3e43c30cc02257df75645c6c77f0a726de33 100644
@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
 
+       /* we can race here at startup, some boards seem to trigger
+        * hotplug irqs when they shouldn't. */
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        mutex_lock(&mode_config->mutex);
        if (mode_config->num_connector) {
                list_for_each_entry(connector, &mode_config->connector_list, head)
index 654c8daeb5ab3d0dd84a2ed1d32af633d6955ac9..97ad3bcb99a75a441a54150f779415dc59236ac7 100644
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
                                     true, NULL);
        if (unlikely(ret != 0))
-               goto out_err;
+               goto out_err_nores;
 
        ret = vmw_validate_buffers(dev_priv, sw_context);
        if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        vmw_resource_relocations_free(&sw_context->res_relocations);
 
        vmw_fifo_commit(dev_priv, command_size);
+       mutex_unlock(&dev_priv->binding_mutex);
 
        vmw_query_bo_switch_commit(dev_priv, sw_context);
        ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                DRM_ERROR("Fence submission error. Syncing.\n");
 
        vmw_resource_list_unreserve(&sw_context->resource_list, false);
-       mutex_unlock(&dev_priv->binding_mutex);
 
        ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
                                    (void *) fence);
index 3511bbaba505a4524ad382297ec1e486e21e7e48..e3c63640df737d5527c6d2609622417a4e03c8d3 100644
@@ -462,12 +462,15 @@ out:
 
 static void hidinput_cleanup_battery(struct hid_device *dev)
 {
+       const struct power_supply_desc *psy_desc;
+
        if (!dev->battery)
                return;
 
+       psy_desc = dev->battery->desc;
        power_supply_unregister(dev->battery);
-       kfree(dev->battery->desc->name);
-       kfree(dev->battery->desc);
+       kfree(psy_desc->name);
+       kfree(psy_desc);
        dev->battery = NULL;
 }
 #else  /* !CONFIG_HID_BATTERY_STRENGTH */
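
The hunk above fixes a use-after-free: power_supply_unregister() can free
dev->battery, after which battery->desc must not be dereferenced. The
general shape of the fix, as a self-contained sketch:

    #include <stdlib.h>

    struct desc { char *name; };
    struct battery { struct desc *desc; };

    /* Stand-in for power_supply_unregister(): frees the battery object. */
    static void unregister_battery(struct battery *b)
    {
            free(b);
    }

    static void cleanup_battery(struct battery *b)
    {
            struct desc *d = b->desc; /* snapshot before b is freed */

            unregister_battery(b);    /* b is dangling from here on */
            free(d->name);            /* safe: d was saved up front */
            free(d);
    }

    int main(void)
    {
            struct battery *b = calloc(1, sizeof(*b));

            b->desc = calloc(1, sizeof(*b->desc));
            b->desc->name = calloc(1, 8);
            cleanup_battery(b);
            return 0;
    }
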
index 94167310e15a4c95001d339d4da3b7319f9005eb..b905d501e752d607b6fc1731ad89ff3d23ce7cd0 100644
@@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
        for (p = drvdata->rdesc;
             p <= drvdata->rdesc + drvdata->rsize - 4;) {
                if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
-                   p[3] < sizeof(params)) {
+                   p[3] < ARRAY_SIZE(params)) {
                        v = params[p[3]];
                        put_unaligned(cpu_to_le32(v), (s32 *)p);
                        p += 4;
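
That one-token change matters because params[] holds 32-bit values:
sizeof() counts bytes while ARRAY_SIZE() counts elements, so the old bound
accepted indices up to four times past the end of the array. Illustrated
standalone:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            int params[4] = { 10, 20, 30, 40 };

            /* Prints "sizeof=16 ARRAY_SIZE=4": bounding an index by
             * sizeof() would accept 4..15, all of them out of range. */
            printf("sizeof=%zu ARRAY_SIZE=%zu\n",
                   sizeof(params), ARRAY_SIZE(params));
            return 0;
    }
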
index 44958d79d598dfc3a7e6938a2babbf3e1fdc2188..01b937e63cf37ec1424a1aad9eee0caef682c010 100644
@@ -1284,6 +1284,39 @@ fail_register_pen_input:
        return error;
 }
 
+/*
+ * Not all devices report physical dimensions from HID.
+ * Compute the defaults from the hardcoded logical dimensions
+ * and resolution before the driver overwrites them.
+ */
+static void wacom_set_default_phy(struct wacom_features *features)
+{
+       if (features->x_resolution) {
+               features->x_phy = (features->x_max * 100) /
+                                       features->x_resolution;
+               features->y_phy = (features->y_max * 100) /
+                                       features->y_resolution;
+       }
+}
+
+static void wacom_calculate_res(struct wacom_features *features)
+{
+       /* set unit to "100th of a mm" for devices that do not report one via HID */
+       if (!features->unit) {
+               features->unit = 0x11;
+               features->unitExpo = -3;
+       }
+
+       features->x_resolution = wacom_calc_hid_res(features->x_max,
+                                                   features->x_phy,
+                                                   features->unit,
+                                                   features->unitExpo);
+       features->y_resolution = wacom_calc_hid_res(features->y_max,
+                                                   features->y_phy,
+                                                   features->unit,
+                                                   features->unitExpo);
+}
+
 static void wacom_wireless_work(struct work_struct *work)
 {
        struct wacom *wacom = container_of(work, struct wacom, work);
@@ -1341,6 +1374,8 @@ static void wacom_wireless_work(struct work_struct *work)
                if (wacom_wac1->features.type != INTUOSHT &&
                    wacom_wac1->features.type != BAMBOO_PT)
                        wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
+               wacom_set_default_phy(&wacom_wac1->features);
+               wacom_calculate_res(&wacom_wac1->features);
                snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
                         wacom_wac1->features.name);
                snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
@@ -1359,7 +1394,9 @@ static void wacom_wireless_work(struct work_struct *work)
                        wacom_wac2->features =
                                *((struct wacom_features *)id->driver_data);
                        wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
+                       wacom_set_default_phy(&wacom_wac2->features);
                        wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
+                       wacom_calculate_res(&wacom_wac2->features);
                        snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
                                 "%s (WL) Finger",wacom_wac2->features.name);
                        snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
@@ -1407,39 +1444,6 @@ void wacom_battery_work(struct work_struct *work)
        }
 }
 
-/*
- * Not all devices report physical dimensions from HID.
- * Compute the default from hardcoded logical dimension
- * and resolution before driver overwrites them.
- */
-static void wacom_set_default_phy(struct wacom_features *features)
-{
-       if (features->x_resolution) {
-               features->x_phy = (features->x_max * 100) /
-                                       features->x_resolution;
-               features->y_phy = (features->y_max * 100) /
-                                       features->y_resolution;
-       }
-}
-
-static void wacom_calculate_res(struct wacom_features *features)
-{
-       /* set unit to "100th of a mm" for devices not reported by HID */
-       if (!features->unit) {
-               features->unit = 0x11;
-               features->unitExpo = -3;
-       }
-
-       features->x_resolution = wacom_calc_hid_res(features->x_max,
-                                                   features->x_phy,
-                                                   features->unit,
-                                                   features->unitExpo);
-       features->y_resolution = wacom_calc_hid_res(features->y_max,
-                                                   features->y_phy,
-                                                   features->unit,
-                                                   features->unitExpo);
-}
-
 static size_t wacom_compute_pktlen(struct hid_device *hdev)
 {
        struct hid_report_enum *report_enum;
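
To see what the relocated wacom_set_default_phy() computes, a worked
example (the device numbers below are made up; the formula is the one in
the function above):

    #include <stdio.h>

    int main(void)
    {
            int x_max = 21648;      /* hypothetical logical extent */
            int x_resolution = 100; /* hypothetical counts per mm  */
            int x_phy = (x_max * 100) / x_resolution;

            /* 21648 hundredths of a mm, i.e. a 216.48 mm wide surface */
            printf("x_phy = %d (%d.%02d mm)\n",
                   x_phy, x_phy / 100, x_phy % 100);
            return 0;
    }
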
index c7aab48f07cdfcdebf3efb6374416619c9095e04..92d518382a9fce90c3e1dbae45034675072da274 100644
@@ -814,7 +814,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                        printk(KERN_ERR MOD
                               "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
-                       ret = -EINVAL;
+                       wc->status = IB_WC_FATAL_ERR;
                }
        }
 out:
index 097d7216d98ee4e4d394726e9d4c9743067d14f8..c6dc644aa5806b37cb4e0dcbbc6217d915e8a562 100644
@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
                 * convert it to descriptor.
                 */
                if (!button->gpiod && gpio_is_valid(button->gpio)) {
-                       unsigned flags = 0;
+                       unsigned flags = GPIOF_IN;
 
                        if (button->active_low)
                                flags |= GPIOF_ACTIVE_LOW;
index f1fb1d3ccc56e679dd72334b6f81195981fa17c1..d77a848d50deb3efb566b9c851e31aa2643e0b21 100644
@@ -23,7 +23,8 @@ config IOMMU_IO_PGTABLE
 config IOMMU_IO_PGTABLE_LPAE
        bool "ARMv7/v8 Long Descriptor Format"
        select IOMMU_IO_PGTABLE
-       depends on ARM || ARM64 || COMPILE_TEST
+       # SWIOTLB guarantees a dma_to_phys() implementation
+       depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB)
        help
          Enable support for the ARM long descriptor pagetable format.
          This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
index 658ee39e65696898422bcd9c825d8a49fbc37359..f82060e778a23bb7a8901ef2356d42b5363d93a6 100644
@@ -1835,8 +1835,8 @@ static void free_gcr3_table(struct protection_domain *domain)
                free_gcr3_tbl_level2(domain->gcr3_tbl);
        else if (domain->glx == 1)
                free_gcr3_tbl_level1(domain->gcr3_tbl);
-       else if (domain->glx != 0)
-               BUG();
+       else
+               BUG_ON(domain->glx != 0);
 
        free_page((unsigned long)domain->gcr3_tbl);
 }
@@ -3947,11 +3947,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
        if (ret < 0)
                return ret;
 
-       ret = -ENOMEM;
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
-       if (!data)
-               goto out_free_parent;
-
        if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
                if (get_irq_table(devid, true))
                        index = info->ioapic_pin;
@@ -3962,7 +3957,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
        }
        if (index < 0) {
                pr_warn("Failed to allocate IRTE\n");
-               kfree(data);
                goto out_free_parent;
        }
 
@@ -3974,17 +3968,18 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
                        goto out_free_data;
                }
 
-               if (i > 0) {
-                       data = kzalloc(sizeof(*data), GFP_KERNEL);
-                       if (!data)
-                               goto out_free_data;
-               }
+               ret = -ENOMEM;
+               data = kzalloc(sizeof(*data), GFP_KERNEL);
+               if (!data)
+                       goto out_free_data;
+
                irq_data->hwirq = (devid << 16) + i;
                irq_data->chip_data = data;
                irq_data->chip = &amd_ir_chip;
                irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
                irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
        }
+
        return 0;
 
 out_free_data:
index a24495eb4e26c5c596efa79c084b14c19fe5932c..5ef347a13cb5d54789c07869b0527d81cb24365e 100644
@@ -154,7 +154,7 @@ bool amd_iommu_iotlb_sup __read_mostly = true;
 u32 amd_iommu_max_pasid __read_mostly = ~0;
 
 bool amd_iommu_v2_present __read_mostly;
-bool amd_iommu_pc_present __read_mostly;
+static bool amd_iommu_pc_present __read_mostly;
 
 bool amd_iommu_force_isolation __read_mostly;
 
index f7b875bb70d42138027f49ebde8150d27ce14cd2..1131664b918b0a574c7cc654a6a3cd04107f8e81 100644
@@ -356,8 +356,8 @@ static void free_pasid_states(struct device_state *dev_state)
                free_pasid_states_level2(dev_state->states);
        else if (dev_state->pasid_levels == 1)
                free_pasid_states_level1(dev_state->states);
-       else if (dev_state->pasid_levels != 0)
-               BUG();
+       else
+               BUG_ON(dev_state->pasid_levels != 0);
 
        free_page((unsigned long)dev_state->states);
 }
index da902baaa7946aac569b7ebe8a316c647dfd8187..dafaf59dc3b82833fb78d55e8f194ff728999d35 100644
 
 #define ARM_SMMU_IRQ_CTRL              0x50
 #define IRQ_CTRL_EVTQ_IRQEN            (1 << 2)
+#define IRQ_CTRL_PRIQ_IRQEN            (1 << 1)
 #define IRQ_CTRL_GERROR_IRQEN          (1 << 0)
 
 #define ARM_SMMU_IRQ_CTRLACK           0x54
 #define ARM_SMMU_PRIQ_IRQ_CFG2         0xdc
 
 /* Common MSI config fields */
-#define MSI_CFG0_SH_SHIFT              60
-#define MSI_CFG0_SH_NSH                        (0UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_SH_OSH                        (2UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_SH_ISH                        (3UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_MEMATTR_SHIFT         56
-#define MSI_CFG0_MEMATTR_DEVICE_nGnRE  (0x1 << MSI_CFG0_MEMATTR_SHIFT)
 #define MSI_CFG0_ADDR_SHIFT            2
 #define MSI_CFG0_ADDR_MASK             0x3fffffffffffUL
+#define MSI_CFG2_SH_SHIFT              4
+#define MSI_CFG2_SH_NSH                        (0UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_SH_OSH                        (2UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_SH_ISH                        (3UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_MEMATTR_SHIFT         0
+#define MSI_CFG2_MEMATTR_DEVICE_nGnRE  (0x1 << MSI_CFG2_MEMATTR_SHIFT)
 
 #define Q_IDX(q, p)                    ((p) & ((1 << (q)->max_n_shift) - 1))
 #define Q_WRP(q, p)                    ((p) & (1 << (q)->max_n_shift))
@@ -1330,33 +1331,10 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
        arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 }
 
-static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
-{
-       struct arm_smmu_domain *smmu_domain = cookie;
-       struct arm_smmu_device *smmu = smmu_domain->smmu;
-       unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-       if (smmu->features & ARM_SMMU_FEAT_COHERENCY) {
-               dsb(ishst);
-       } else {
-               dma_addr_t dma_addr;
-               struct device *dev = smmu->dev;
-
-               dma_addr = dma_map_page(dev, virt_to_page(addr), offset, size,
-                                       DMA_TO_DEVICE);
-
-               if (dma_mapping_error(dev, dma_addr))
-                       dev_err(dev, "failed to flush pgtable at %p\n", addr);
-               else
-                       dma_unmap_page(dev, dma_addr, size, DMA_TO_DEVICE);
-       }
-}
-
 static struct iommu_gather_ops arm_smmu_gather_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync,
-       .flush_pgtable  = arm_smmu_flush_pgtable,
 };
 
 /* IOMMU API */
@@ -1531,6 +1509,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
                .ias            = ias,
                .oas            = oas,
                .tlb            = &arm_smmu_gather_ops,
+               .iommu_dev      = smmu->dev,
        };
 
        pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
@@ -2053,9 +2032,17 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
        int ret;
        struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 
-       /* Calculate the L1 size, capped to the SIDSIZE */
-       size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
-       size = min(size, smmu->sid_bits - STRTAB_SPLIT);
+       /*
+        * If we can resolve everything with a single L2 table, then we
+        * just need a single L1 descriptor. Otherwise, calculate the L1
+        * size, capped to the SIDSIZE.
+        */
+       if (smmu->sid_bits < STRTAB_SPLIT) {
+               size = 0;
+       } else {
+               size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
+               size = min(size, smmu->sid_bits - STRTAB_SPLIT);
+       }
        cfg->num_l1_ents = 1 << size;
 
        size += STRTAB_SPLIT;
@@ -2198,6 +2185,7 @@ static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
 static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
 {
        int ret, irq;
+       u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
 
        /* Disable IRQs first */
        ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
@@ -2252,13 +2240,13 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
                        if (IS_ERR_VALUE(ret))
                                dev_warn(smmu->dev,
                                         "failed to enable priq irq\n");
+                       else
+                               irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
                }
        }
 
        /* Enable interrupt generation on the SMMU */
-       ret = arm_smmu_write_reg_sync(smmu,
-                                     IRQ_CTRL_EVTQ_IRQEN |
-                                     IRQ_CTRL_GERROR_IRQEN,
+       ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
                                      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
        if (ret)
                dev_warn(smmu->dev, "failed to enable irqs\n");
@@ -2540,12 +2528,12 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
        case IDR5_OAS_44_BIT:
                smmu->oas = 44;
                break;
+       default:
+               dev_info(smmu->dev,
+                       "unknown output address size. Truncating to 48-bit\n");
+               /* Fallthrough */
        case IDR5_OAS_48_BIT:
                smmu->oas = 48;
-               break;
-       default:
-               dev_err(smmu->dev, "unknown output address size!\n");
-               return -ENXIO;
        }
 
        /* Set the DMA mask for our table walker */
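
Worked through, the new two-level stream-table sizing behaves like this
(a sketch; STRTAB_SPLIT, STRTAB_L1_SZ_SHIFT and STRTAB_L1_DESC_DWORDS are
assumed to keep the driver's values of 8, 20 and 1):

    #include <stdio.h>

    #define STRTAB_SPLIT           8 /* SID bits resolved per L2 table */
    #define STRTAB_L1_SZ_SHIFT    20
    #define STRTAB_L1_DESC_DWORDS  1

    static unsigned int num_l1_ents(unsigned int sid_bits)
    {
            unsigned int size;

            if (sid_bits < STRTAB_SPLIT) {
                    size = 0; /* one L2 table resolves every SID */
            } else {
                    /* 0 == ilog2(STRTAB_L1_DESC_DWORDS) */
                    size = STRTAB_L1_SZ_SHIFT - (0 + 3);
                    if (size > sid_bits - STRTAB_SPLIT)
                            size = sid_bits - STRTAB_SPLIT;
            }
            return 1u << size;
    }

    int main(void)
    {
            printf("%u\n", num_l1_ents(6));  /* 1: a single L1 descriptor */
            printf("%u\n", num_l1_ents(16)); /* 256: capped by SIDSIZE    */
            return 0;
    }
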
index 4cd0c29cb585000c0e5899651948ad1dc2ffbf1f..48a39dfa977795deb8271dc5b34a7b3e0be002d0 100644
@@ -37,6 +37,7 @@
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -607,34 +608,10 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
        }
 }
 
-static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
-{
-       struct arm_smmu_domain *smmu_domain = cookie;
-       struct arm_smmu_device *smmu = smmu_domain->smmu;
-       unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-
-       /* Ensure new page tables are visible to the hardware walker */
-       if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
-               dsb(ishst);
-       } else {
-               /*
-                * If the SMMU can't walk tables in the CPU caches, treat them
-                * like non-coherent DMA since we need to flush the new entries
-                * all the way out to memory. There's no possibility of
-                * recursion here as the SMMU table walker will not be wired
-                * through another SMMU.
-                */
-               dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
-                            DMA_TO_DEVICE);
-       }
-}
-
 static struct iommu_gather_ops arm_smmu_gather_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync,
-       .flush_pgtable  = arm_smmu_flush_pgtable,
 };
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -898,6 +875,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                .ias            = ias,
                .oas            = oas,
                .tlb            = &arm_smmu_gather_ops,
+               .iommu_dev      = smmu->dev,
        };
 
        smmu_domain->smmu = smmu;
@@ -1532,6 +1510,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        unsigned long size;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        u32 id;
+       bool cttw_dt, cttw_reg;
 
        dev_notice(smmu->dev, "probing hardware configuration...\n");
        dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
@@ -1571,10 +1550,22 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                dev_notice(smmu->dev, "\taddress translation ops\n");
        }
 
-       if (id & ID0_CTTW) {
+       /*
+        * In order for DMA API calls to work properly, we must defer to what
+        * the DT says about coherency, regardless of what the hardware claims.
+        * Fortunately, this also opens up a workaround for systems where the
+        * ID register value has ended up configured incorrectly.
+        */
+       cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
+       cttw_reg = !!(id & ID0_CTTW);
+       if (cttw_dt)
                smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
-               dev_notice(smmu->dev, "\tcoherent table walk\n");
-       }
+       if (cttw_dt || cttw_reg)
+               dev_notice(smmu->dev, "\t%scoherent table walk\n",
+                          cttw_dt ? "" : "non-");
+       if (cttw_dt != cttw_reg)
+               dev_notice(smmu->dev,
+                          "\t(IDR0.CTTW overridden by dma-coherent property)\n");
 
        if (id & ID0_SMS) {
                u32 smr, sid, mask;
index c9db04d4ef39ae36553279859b6ca0f1c7972db1..8757f8dfc4e57afee580fc68d76b88658467dea5 100644
@@ -1068,7 +1068,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
        if (intel_iommu_enabled)
                iommu->iommu_dev = iommu_device_create(NULL, iommu,
                                                       intel_iommu_groups,
-                                                      iommu->name);
+                                                      "%s", iommu->name);
 
        return 0;
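
The "%s" above closes a classic format-string hole: iommu->name was
previously passed where a format string is expected, so a name containing
'%' would be parsed for conversion specifiers. A generic illustration:

    #include <stdio.h>

    int main(void)
    {
            const char *name = "iommu%s%n"; /* hostile or merely unlucky */

            /* printf(name) would interpret the '%' directives in name;  */
            printf("%s\n", name); /* passing it as an argument is safe   */
            return 0;
    }
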
 
index abeedc9a78c27c4e8ee419b2571d9a88fbb4fa74..2570f2a25dc432606e283d1dc7dd450e6fec3bd3 100644
@@ -41,7 +41,6 @@ struct pamu_isr_data {
 
 static struct paace *ppaact;
 static struct paace *spaact;
-static struct ome *omt __initdata;
 
 /*
  * Table for matching compatible strings, for device tree
@@ -50,7 +49,7 @@ static struct ome *omt __initdata;
  * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
  * string would be used.
  */
-static const struct of_device_id guts_device_ids[] __initconst = {
+static const struct of_device_id guts_device_ids[] = {
        { .compatible = "fsl,qoriq-device-config-1.0", },
        { .compatible = "fsl,qoriq-device-config-2.0", },
        {}
@@ -599,7 +598,7 @@ found_cpu_node:
  * Memory accesses to QMAN and BMAN private memory need not be coherent, so
  * clear the PAACE entry coherency attribute for them.
  */
-static void __init setup_qbman_paace(struct paace *ppaace, int  paace_type)
+static void setup_qbman_paace(struct paace *ppaace, int  paace_type)
 {
        switch (paace_type) {
        case QMAN_PAACE:
@@ -629,7 +628,7 @@ static void __init setup_qbman_paace(struct paace *ppaace, int  paace_type)
  * this table to translate device transaction to appropriate corenet
  * transaction.
  */
-static void __init setup_omt(struct ome *omt)
+static void setup_omt(struct ome *omt)
 {
        struct ome *ome;
 
@@ -666,7 +665,7 @@ static void __init setup_omt(struct ome *omt)
  * Get the maximum number of PAACT table entries
  * and subwindows supported by PAMU
  */
-static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
+static void get_pamu_cap_values(unsigned long pamu_reg_base)
 {
        u32 pc_val;
 
@@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
 }
 
 /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
-static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
-                                phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
-                                phys_addr_t omt_phys)
+static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+                         phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+                         phys_addr_t omt_phys)
 {
        u32 *pc;
        struct pamu_mmap_regs *pamu_regs;
@@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu
 }
 
 /* Enable all device LIODNS */
-static void __init setup_liodns(void)
+static void setup_liodns(void)
 {
        int i, len;
        struct paace *ppaace;
@@ -846,7 +845,7 @@ struct ccsr_law {
 /*
  * Create a coherence subdomain for a given memory block.
  */
-static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
+static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
 {
        struct device_node *np;
        const __be32 *iprop;
@@ -988,7 +987,7 @@ error:
 static const struct {
        u32 svr;
        u32 port_id;
-} port_id_map[] __initconst = {
+} port_id_map[] = {
        {(SVR_P2040 << 8) | 0x10, 0xFF000000},  /* P2040 1.0 */
        {(SVR_P2040 << 8) | 0x11, 0xFF000000},  /* P2040 1.1 */
        {(SVR_P2041 << 8) | 0x10, 0xFF000000},  /* P2041 1.0 */
@@ -1006,7 +1005,7 @@ static const struct {
 
 #define SVR_SECURITY   0x80000 /* The Security (E) bit */
 
-static int __init fsl_pamu_probe(struct platform_device *pdev)
+static int fsl_pamu_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        void __iomem *pamu_regs = NULL;
@@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
        int irq;
        phys_addr_t ppaact_phys;
        phys_addr_t spaact_phys;
+       struct ome *omt;
        phys_addr_t omt_phys;
        size_t mem_size = 0;
        unsigned int order = 0;
@@ -1200,7 +1200,7 @@ error:
        return ret;
 }
 
-static struct platform_driver fsl_of_pamu_driver __initdata = {
+static struct platform_driver fsl_of_pamu_driver = {
        .driver = {
                .name = "fsl-of-pamu",
        },
index 0649b94f59584ca5b885cd0ecad595a84af0d89d..63daf1ba04b7ed5d491a78344589a6bb45abe0c4 100644
@@ -364,7 +364,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
 static struct dmar_domain *si_domain;
 static int hw_pass_through = 1;
 
-/* domain represents a virtual machine, more than one devices
+/*
+ * Domain represents a virtual machine; more than one device
  * across iommus may be owned in one domain, e.g. kvm guest.
  */
 #define DOMAIN_FLAG_VIRTUAL_MACHINE    (1 << 0)
@@ -372,11 +373,21 @@ static int hw_pass_through = 1;
 /* si_domain contains multiple devices */
 #define DOMAIN_FLAG_STATIC_IDENTITY    (1 << 1)
 
+#define for_each_domain_iommu(idx, domain)                     \
+       for (idx = 0; idx < g_num_of_iommus; idx++)             \
+               if (domain->iommu_refcnt[idx])
+
 struct dmar_domain {
-       int     id;                     /* domain id */
        int     nid;                    /* node id */
-       DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
-                                       /* bitmap of iommus this domain uses*/
+
+       unsigned        iommu_refcnt[DMAR_UNITS_SUPPORTED];
+                                       /* Refcount of devices per iommu */
+
+       u16             iommu_did[DMAR_UNITS_SUPPORTED];
+                                       /* Domain ids per IOMMU. Use u16 since
+                                        * domain ids are 16 bits wide according
+                                        * to VT-d spec, section 9.3 */
 
        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */
@@ -395,7 +406,6 @@ struct dmar_domain {
        int             iommu_superpage;/* Level of superpages supported:
                                           0 == 4KiB (no superpages), 1 == 2MiB,
                                           2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
-       spinlock_t      iommu_lock;     /* protect iommu set in domain */
        u64             max_addr;       /* maximum mapped address */
 
        struct iommu_domain domain;     /* generic domain data structure for
@@ -461,10 +471,11 @@ static long list_size;
 
 static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
-                                      struct device *dev);
-static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
-                                          struct device *dev);
+static void dmar_remove_one_dev_info(struct dmar_domain *domain,
+                                    struct device *dev);
+static void __dmar_remove_one_dev_info(struct device_domain_info *info);
+static void domain_context_clear(struct intel_iommu *iommu,
+                                struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu);
 
@@ -564,6 +575,36 @@ __setup("intel_iommu=", intel_iommu_setup);
 static struct kmem_cache *iommu_domain_cache;
 static struct kmem_cache *iommu_devinfo_cache;
 
+static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
+{
+       struct dmar_domain **domains;
+       int idx = did >> 8;
+
+       domains = iommu->domains[idx];
+       if (!domains)
+               return NULL;
+
+       return domains[did & 0xff];
+}
+
+static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
+                            struct dmar_domain *domain)
+{
+       struct dmar_domain **domains;
+       int idx = did >> 8;
+
+       if (!iommu->domains[idx]) {
+               size_t size = 256 * sizeof(struct dmar_domain *);
+               iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
+       }
+
+       domains = iommu->domains[idx];
+       if (WARN_ON(!domains))
+               return;
+       else
+               domains[did & 0xff] = domain;
+}
+
 static inline void *alloc_pgtable_page(int node)
 {
        struct page *page;
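
The new get_iommu_domain()/set_iommu_domain() pair replaces the flat
per-IOMMU domain array with a lazily allocated two-level table; the split
is plain byte arithmetic on the 16-bit domain id:

    #include <stdio.h>

    int main(void)
    {
            unsigned short did = 0x1234; /* example domain id */

            /* high byte picks the second-level array, low byte the slot */
            printf("idx=%d slot=%d\n", did >> 8, did & 0xff);
            return 0; /* prints "idx=18 slot=52" */
    }
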
@@ -605,6 +646,11 @@ static inline int domain_type_is_vm(struct dmar_domain *domain)
        return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
 }
 
+static inline int domain_type_is_si(struct dmar_domain *domain)
+{
+       return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
+}
+
 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
 {
        return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
@@ -659,7 +705,9 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
 
        /* si_domain and vm domain should not get here. */
        BUG_ON(domain_type_is_vm_or_si(domain));
-       iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
+       for_each_domain_iommu(iommu_id, domain)
+               break;
+
        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
                return NULL;
 
@@ -675,7 +723,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 
        domain->iommu_coherency = 1;
 
-       for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
+       for_each_domain_iommu(i, domain) {
                found = true;
                if (!ecap_coherent(g_iommus[i]->ecap)) {
                        domain->iommu_coherency = 0;
@@ -755,6 +803,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
        struct context_entry *context;
        u64 *entry;
 
+       entry = &root->lo;
        if (ecs_enabled(iommu)) {
                if (devfn >= 0x80) {
                        devfn -= 0x80;
@@ -762,7 +811,6 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
                }
                devfn *= 2;
        }
-       entry = &root->lo;
        if (*entry & 1)
                context = phys_to_virt(*entry & VTD_PAGE_MASK);
        else {
@@ -1162,9 +1210,9 @@ next:
 /* We can't just free the pages because the IOMMU may still be walking
    the page tables, and may have cached the intermediate levels. The
    pages can only be freed after the IOTLB flush has been done. */
-struct page *domain_unmap(struct dmar_domain *domain,
-                         unsigned long start_pfn,
-                         unsigned long last_pfn)
+static struct page *domain_unmap(struct dmar_domain *domain,
+                                unsigned long start_pfn,
+                                unsigned long last_pfn)
 {
        struct page *freelist = NULL;
 
@@ -1188,7 +1236,7 @@ struct page *domain_unmap(struct dmar_domain *domain,
        return freelist;
 }
 
-void dma_free_pagelist(struct page *freelist)
+static void dma_free_pagelist(struct page *freelist)
 {
        struct page *pg;
 
@@ -1356,24 +1404,23 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
                         u8 bus, u8 devfn)
 {
        bool found = false;
-       unsigned long flags;
        struct device_domain_info *info;
        struct pci_dev *pdev;
 
+       assert_spin_locked(&device_domain_lock);
+
        if (!ecap_dev_iotlb_support(iommu->ecap))
                return NULL;
 
        if (!iommu->qi)
                return NULL;
 
-       spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                if (info->iommu == iommu && info->bus == bus &&
                    info->devfn == devfn) {
                        found = true;
                        break;
                }
-       spin_unlock_irqrestore(&device_domain_lock, flags);
 
        if (!found || !info->dev || !dev_is_pci(info->dev))
                return NULL;
@@ -1430,11 +1477,14 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
-static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-                                 unsigned long pfn, unsigned int pages, int ih, int map)
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
+                                 struct dmar_domain *domain,
+                                 unsigned long pfn, unsigned int pages,
+                                 int ih, int map)
 {
        unsigned int mask = ilog2(__roundup_pow_of_two(pages));
        uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
+       u16 did = domain->iommu_did[iommu->seq_id];
 
        BUG_ON(pages == 0);
 
@@ -1458,7 +1508,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
         * flush. However, device IOTLB doesn't need to be flushed in this case.
         */
        if (!cap_caching_mode(iommu->cap) || !map)
-               iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
+               iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
+                                     addr, mask);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1513,65 +1564,80 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
 
 static int iommu_init_domains(struct intel_iommu *iommu)
 {
-       unsigned long ndomains;
-       unsigned long nlongs;
+       u32 ndomains, nlongs;
+       size_t size;
 
        ndomains = cap_ndoms(iommu->cap);
-       pr_debug("%s: Number of Domains supported <%ld>\n",
+       pr_debug("%s: Number of Domains supported <%d>\n",
                 iommu->name, ndomains);
        nlongs = BITS_TO_LONGS(ndomains);
 
        spin_lock_init(&iommu->lock);
 
-       /* TBD: there might be 64K domains,
-        * consider other allocation for future chip
-        */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
                pr_err("%s: Allocating domain id array failed\n",
                       iommu->name);
                return -ENOMEM;
        }
-       iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
-                       GFP_KERNEL);
-       if (!iommu->domains) {
+
+       size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
+       iommu->domains = kzalloc(size, GFP_KERNEL);
+
+       if (iommu->domains) {
+               size = 256 * sizeof(struct dmar_domain *);
+               iommu->domains[0] = kzalloc(size, GFP_KERNEL);
+       }
+
+       if (!iommu->domains || !iommu->domains[0]) {
                pr_err("%s: Allocating domain array failed\n",
                       iommu->name);
                kfree(iommu->domain_ids);
+               kfree(iommu->domains);
                iommu->domain_ids = NULL;
+               iommu->domains    = NULL;
                return -ENOMEM;
        }
 
        /*
-        * if Caching mode is set, then invalid translations are tagged
-        * with domainid 0. Hence we need to pre-allocate it.
+        * If Caching mode is set, then invalid translations are tagged
+        * with domain-id 0, hence we need to pre-allocate it. We also
+        * use domain-id 0 as a marker for non-allocated domain-id, so
+        * make sure it is not used for a real domain.
         */
-       if (cap_caching_mode(iommu->cap))
-               set_bit(0, iommu->domain_ids);
+       set_bit(0, iommu->domain_ids);
+
        return 0;
 }
 
 static void disable_dmar_iommu(struct intel_iommu *iommu)
 {
-       struct dmar_domain *domain;
-       int i;
+       struct device_domain_info *info, *tmp;
+       unsigned long flags;
 
-       if ((iommu->domains) && (iommu->domain_ids)) {
-               for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
-                       /*
-                        * Domain id 0 is reserved for invalid translation
-                        * if hardware supports caching mode.
-                        */
-                       if (cap_caching_mode(iommu->cap) && i == 0)
-                               continue;
+       if (!iommu->domains || !iommu->domain_ids)
+               return;
 
-                       domain = iommu->domains[i];
-                       clear_bit(i, iommu->domain_ids);
-                       if (domain_detach_iommu(domain, iommu) == 0 &&
-                           !domain_type_is_vm(domain))
-                               domain_exit(domain);
-               }
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
+               struct dmar_domain *domain;
+
+               if (info->iommu != iommu)
+                       continue;
+
+               if (!info->dev || !info->domain)
+                       continue;
+
+               domain = info->domain;
+
+               dmar_remove_one_dev_info(domain, info->dev);
+
+               if (!domain_type_is_vm_or_si(domain))
+                       domain_exit(domain);
        }
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 
        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);
@@ -1580,6 +1646,11 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 static void free_dmar_iommu(struct intel_iommu *iommu)
 {
        if ((iommu->domains) && (iommu->domain_ids)) {
+               int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
+               int i;
+
+               for (i = 0; i < elems; i++)
+                       kfree(iommu->domains[i]);
                kfree(iommu->domains);
                kfree(iommu->domain_ids);
                iommu->domains = NULL;
@@ -1594,8 +1665,6 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 
 static struct dmar_domain *alloc_domain(int flags)
 {
-       /* domain id for virtual machine, it won't be set in context */
-       static atomic_t vm_domid = ATOMIC_INIT(0);
        struct dmar_domain *domain;
 
        domain = alloc_domain_mem();
@@ -1605,111 +1674,64 @@ static struct dmar_domain *alloc_domain(int flags)
        memset(domain, 0, sizeof(*domain));
        domain->nid = -1;
        domain->flags = flags;
-       spin_lock_init(&domain->iommu_lock);
        INIT_LIST_HEAD(&domain->devices);
-       if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
-               domain->id = atomic_inc_return(&vm_domid);
 
        return domain;
 }
 
-static int __iommu_attach_domain(struct dmar_domain *domain,
-                                struct intel_iommu *iommu)
-{
-       int num;
-       unsigned long ndomains;
-
-       ndomains = cap_ndoms(iommu->cap);
-       num = find_first_zero_bit(iommu->domain_ids, ndomains);
-       if (num < ndomains) {
-               set_bit(num, iommu->domain_ids);
-               iommu->domains[num] = domain;
-       } else {
-               num = -ENOSPC;
-       }
-
-       return num;
-}
-
-static int iommu_attach_domain(struct dmar_domain *domain,
+/* Must be called with iommu->lock */
+static int domain_attach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
 {
-       int num;
-       unsigned long flags;
-
-       spin_lock_irqsave(&iommu->lock, flags);
-       num = __iommu_attach_domain(domain, iommu);
-       spin_unlock_irqrestore(&iommu->lock, flags);
-       if (num < 0)
-               pr_err("%s: No free domain ids\n", iommu->name);
-
-       return num;
-}
-
-static int iommu_attach_vm_domain(struct dmar_domain *domain,
-                                 struct intel_iommu *iommu)
-{
-       int num;
        unsigned long ndomains;
+       int num;
 
-       ndomains = cap_ndoms(iommu->cap);
-       for_each_set_bit(num, iommu->domain_ids, ndomains)
-               if (iommu->domains[num] == domain)
-                       return num;
-
-       return __iommu_attach_domain(domain, iommu);
-}
-
-static void iommu_detach_domain(struct dmar_domain *domain,
-                               struct intel_iommu *iommu)
-{
-       unsigned long flags;
-       int num, ndomains;
+       assert_spin_locked(&device_domain_lock);
+       assert_spin_locked(&iommu->lock);
 
-       spin_lock_irqsave(&iommu->lock, flags);
-       if (domain_type_is_vm_or_si(domain)) {
+       domain->iommu_refcnt[iommu->seq_id] += 1;
+       domain->iommu_count += 1;
+       if (domain->iommu_refcnt[iommu->seq_id] == 1) {
                ndomains = cap_ndoms(iommu->cap);
-               for_each_set_bit(num, iommu->domain_ids, ndomains) {
-                       if (iommu->domains[num] == domain) {
-                               clear_bit(num, iommu->domain_ids);
-                               iommu->domains[num] = NULL;
-                               break;
-                       }
+               num      = find_first_zero_bit(iommu->domain_ids, ndomains);
+
+               if (num >= ndomains) {
+                       pr_err("%s: No free domain ids\n", iommu->name);
+                       domain->iommu_refcnt[iommu->seq_id] -= 1;
+                       domain->iommu_count -= 1;
+                       return -ENOSPC;
                }
-       } else {
-               clear_bit(domain->id, iommu->domain_ids);
-               iommu->domains[domain->id] = NULL;
-       }
-       spin_unlock_irqrestore(&iommu->lock, flags);
-}
 
-static void domain_attach_iommu(struct dmar_domain *domain,
-                              struct intel_iommu *iommu)
-{
-       unsigned long flags;
+               set_bit(num, iommu->domain_ids);
+               set_iommu_domain(iommu, num, domain);
+
+               domain->iommu_did[iommu->seq_id] = num;
+               domain->nid                      = iommu->node;
 
-       spin_lock_irqsave(&domain->iommu_lock, flags);
-       if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
-               domain->iommu_count++;
-               if (domain->iommu_count == 1)
-                       domain->nid = iommu->node;
                domain_update_iommu_cap(domain);
        }
-       spin_unlock_irqrestore(&domain->iommu_lock, flags);
+
+       return 0;
 }
 
 static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
 {
-       unsigned long flags;
-       int count = INT_MAX;
+       int num, count = INT_MAX;
+
+       assert_spin_locked(&device_domain_lock);
+       assert_spin_locked(&iommu->lock);
+
+       domain->iommu_refcnt[iommu->seq_id] -= 1;
+       count = --domain->iommu_count;
+       if (domain->iommu_refcnt[iommu->seq_id] == 0) {
+               num = domain->iommu_did[iommu->seq_id];
+               clear_bit(num, iommu->domain_ids);
+               set_iommu_domain(iommu, num, NULL);
 
-       spin_lock_irqsave(&domain->iommu_lock, flags);
-       if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
-               count = --domain->iommu_count;
                domain_update_iommu_cap(domain);
+               domain->iommu_did[iommu->seq_id] = 0;
        }
-       spin_unlock_irqrestore(&domain->iommu_lock, flags);
 
        return count;
 }
@@ -1776,9 +1798,9 @@ static inline int guestwidth_to_adjustwidth(int gaw)
        return agaw;
 }
 
-static int domain_init(struct dmar_domain *domain, int guest_width)
+static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
+                      int guest_width)
 {
-       struct intel_iommu *iommu;
        int adjust_width, agaw;
        unsigned long sagaw;
 
@@ -1787,7 +1809,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
        domain_reserve_special_ranges(domain);
 
        /* calculate AGAW */
-       iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
@@ -1830,8 +1851,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
 static void domain_exit(struct dmar_domain *domain)
 {
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
        struct page *freelist = NULL;
 
        /* Domain 0 is reserved, so don't process it */
@@ -1842,22 +1861,16 @@ static void domain_exit(struct dmar_domain *domain)
        if (!intel_iommu_strict)
                flush_unmaps_timeout(0);
 
-       /* remove associated devices */
+       /* Remove associated devices and clear attached or cached domains */
+       rcu_read_lock();
        domain_remove_dev_info(domain);
+       rcu_read_unlock();
 
        /* destroy iovas */
        put_iova_domain(&domain->iovad);
 
        freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
 
-       /* clear attached or cached domains */
-       rcu_read_lock();
-       for_each_active_iommu(iommu, drhd)
-               if (domain_type_is_vm(domain) ||
-                   test_bit(iommu->seq_id, domain->iommu_bmp))
-                       iommu_detach_domain(domain, iommu);
-       rcu_read_unlock();
-
        dma_free_pagelist(freelist);
 
        free_domain_mem(domain);
@@ -1865,79 +1878,68 @@ static void domain_exit(struct dmar_domain *domain)
 
 static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct intel_iommu *iommu,
-                                     u8 bus, u8 devfn, int translation)
+                                     u8 bus, u8 devfn)
 {
+       u16 did = domain->iommu_did[iommu->seq_id];
+       int translation = CONTEXT_TT_MULTI_LEVEL;
+       struct device_domain_info *info = NULL;
        struct context_entry *context;
        unsigned long flags;
        struct dma_pte *pgd;
-       int id;
-       int agaw;
-       struct device_domain_info *info = NULL;
+       int ret, agaw;
+
+       WARN_ON(did == 0);
+
+       if (hw_pass_through && domain_type_is_si(domain))
+               translation = CONTEXT_TT_PASS_THROUGH;
 
        pr_debug("Set context mapping for %02x:%02x.%d\n",
                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 
        BUG_ON(!domain->pgd);
-       BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
-              translation != CONTEXT_TT_MULTI_LEVEL);
 
-       spin_lock_irqsave(&iommu->lock, flags);
+       spin_lock_irqsave(&device_domain_lock, flags);
+       spin_lock(&iommu->lock);
+
+       ret = -ENOMEM;
        context = iommu_context_addr(iommu, bus, devfn, 1);
-       spin_unlock_irqrestore(&iommu->lock, flags);
        if (!context)
-               return -ENOMEM;
-       spin_lock_irqsave(&iommu->lock, flags);
-       if (context_present(context)) {
-               spin_unlock_irqrestore(&iommu->lock, flags);
-               return 0;
-       }
+               goto out_unlock;
 
-       context_clear_entry(context);
+       ret = 0;
+       if (context_present(context))
+               goto out_unlock;
 
-       id = domain->id;
        pgd = domain->pgd;
 
-       if (domain_type_is_vm_or_si(domain)) {
-               if (domain_type_is_vm(domain)) {
-                       id = iommu_attach_vm_domain(domain, iommu);
-                       if (id < 0) {
-                               spin_unlock_irqrestore(&iommu->lock, flags);
-                               pr_err("%s: No free domain ids\n", iommu->name);
-                               return -EFAULT;
-                       }
-               }
+       context_clear_entry(context);
+       context_set_domain_id(context, did);
 
-               /* Skip top levels of page tables for
-                * iommu which has less agaw than default.
-                * Unnecessary for PT mode.
-                */
-               if (translation != CONTEXT_TT_PASS_THROUGH) {
-                       for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
-                               pgd = phys_to_virt(dma_pte_addr(pgd));
-                               if (!dma_pte_present(pgd)) {
-                                       spin_unlock_irqrestore(&iommu->lock, flags);
-                                       return -ENOMEM;
-                               }
-                       }
+       /*
+        * Skip top levels of page tables for an iommu that has a smaller
+        * agaw than the default. Unnecessary for PT mode.
+        */
+       if (translation != CONTEXT_TT_PASS_THROUGH) {
+               for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+                       ret = -ENOMEM;
+                       pgd = phys_to_virt(dma_pte_addr(pgd));
+                       if (!dma_pte_present(pgd))
+                               goto out_unlock;
                }
-       }
-
-       context_set_domain_id(context, id);
 
-       if (translation != CONTEXT_TT_PASS_THROUGH) {
                info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
                translation = info ? CONTEXT_TT_DEV_IOTLB :
                                     CONTEXT_TT_MULTI_LEVEL;
-       }
-       /*
-        * In pass through mode, AW must be programmed to indicate the largest
-        * AGAW value supported by hardware. And ASR is ignored by hardware.
-        */
-       if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
-               context_set_address_width(context, iommu->msagaw);
-       else {
+
                context_set_address_root(context, virt_to_phys(pgd));
                context_set_address_width(context, iommu->agaw);
+       } else {
+               /*
+                * In pass through mode, AW must be programmed to
+                * indicate the largest AGAW value supported by
+                * hardware. And ASR is ignored by hardware.
+                */
+               context_set_address_width(context, iommu->msagaw);
        }
 
        context_set_translation_type(context, translation);
@@ -1956,14 +1958,17 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                                           (((u16)bus) << 8) | devfn,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
-               iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
+               iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
        } else {
                iommu_flush_write_buffer(iommu);
        }
        iommu_enable_dev_iotlb(info);
-       spin_unlock_irqrestore(&iommu->lock, flags);
 
-       domain_attach_iommu(domain, iommu);
+       ret = 0;
+
+out_unlock:
+       spin_unlock(&iommu->lock);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 
-       return 0;
+       return ret;
 }
@@ -1971,7 +1976,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 struct domain_context_mapping_data {
        struct dmar_domain *domain;
        struct intel_iommu *iommu;
-       int translation;
 };
 
 static int domain_context_mapping_cb(struct pci_dev *pdev,
@@ -1980,13 +1984,11 @@ static int domain_context_mapping_cb(struct pci_dev *pdev,
        struct domain_context_mapping_data *data = opaque;
 
        return domain_context_mapping_one(data->domain, data->iommu,
-                                         PCI_BUS_NUM(alias), alias & 0xff,
-                                         data->translation);
+                                         PCI_BUS_NUM(alias), alias & 0xff);
 }
 
 static int
-domain_context_mapping(struct dmar_domain *domain, struct device *dev,
-                      int translation)
+domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 {
        struct intel_iommu *iommu;
        u8 bus, devfn;
@@ -1997,12 +1999,10 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev,
                return -ENODEV;
 
        if (!dev_is_pci(dev))
-               return domain_context_mapping_one(domain, iommu, bus, devfn,
-                                                 translation);
+               return domain_context_mapping_one(domain, iommu, bus, devfn);
 
        data.domain = domain;
        data.iommu = iommu;
-       data.translation = translation;
 
        return pci_for_each_dma_alias(to_pci_dev(dev),
                                      &domain_context_mapping_cb, &data);
@@ -2188,7 +2188,7 @@ static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long i
        return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
 }
 
-static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
+static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
        if (!iommu)
                return;
@@ -2214,21 +2214,8 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
        unsigned long flags;
 
        spin_lock_irqsave(&device_domain_lock, flags);
-       list_for_each_entry_safe(info, tmp, &domain->devices, link) {
-               unlink_domain_info(info);
-               spin_unlock_irqrestore(&device_domain_lock, flags);
-
-               iommu_disable_dev_iotlb(info);
-               iommu_detach_dev(info->iommu, info->bus, info->devfn);
-
-               if (domain_type_is_vm(domain)) {
-                       iommu_detach_dependent_devices(info->iommu, info->dev);
-                       domain_detach_iommu(domain, info->iommu);
-               }
-
-               free_devinfo_mem(info);
-               spin_lock_irqsave(&device_domain_lock, flags);
-       }
+       list_for_each_entry_safe(info, tmp, &domain->devices, link)
+               __dmar_remove_one_dev_info(info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
@@ -2260,14 +2247,15 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
        return NULL;
 }
 
-static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
-                                               int bus, int devfn,
-                                               struct device *dev,
-                                               struct dmar_domain *domain)
+static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
+                                                   int bus, int devfn,
+                                                   struct device *dev,
+                                                   struct dmar_domain *domain)
 {
        struct dmar_domain *found = NULL;
        struct device_domain_info *info;
        unsigned long flags;
+       int ret;
 
        info = alloc_devinfo_mem();
        if (!info)
@@ -2282,12 +2270,16 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
        spin_lock_irqsave(&device_domain_lock, flags);
        if (dev)
                found = find_domain(dev);
-       else {
+
+       if (!found) {
                struct device_domain_info *info2;
                info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
-               if (info2)
-                       found = info2->domain;
+               if (info2) {
+                       found      = info2->domain;
+                       info2->dev = dev;
+               }
        }
+
        if (found) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                free_devinfo_mem(info);
@@ -2295,12 +2287,27 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
                return found;
        }
 
+       spin_lock(&iommu->lock);
+       ret = domain_attach_iommu(domain, iommu);
+       spin_unlock(&iommu->lock);
+
+       if (ret) {
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+               return NULL;
+       }
+
        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        if (dev)
                dev->archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
+       if (dev && domain_context_mapping(domain, dev)) {
+               pr_err("Domain context map for %s failed\n", dev_name(dev));
+               dmar_remove_one_dev_info(domain, dev);
+               return NULL;
+       }
+
        return domain;
 }
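Note the lock nesting the new code establishes: device_domain_lock is taken IRQ-safe on the outside, and iommu->lock nests inside it, so a plain spin_lock() is enough for the inner acquisition because interrupts are already disabled. Reduced to the ordering alone, under the same assumptions as the hunk above:

spin_lock_irqsave(&device_domain_lock, flags);	/* outer, IRQ-safe */
spin_lock(&iommu->lock);			/* inner; IRQs already off */
ret = domain_attach_iommu(domain, iommu);
spin_unlock(&iommu->lock);
spin_unlock_irqrestore(&device_domain_lock, flags);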
 
@@ -2313,10 +2320,10 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
 /* domain is initialized */
 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
 {
+       struct device_domain_info *info = NULL;
        struct dmar_domain *domain, *tmp;
        struct intel_iommu *iommu;
-       struct device_domain_info *info;
-       u16 dma_alias;
+       u16 req_id, dma_alias;
        unsigned long flags;
        u8 bus, devfn;
 
@@ -2328,6 +2335,8 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
        if (!iommu)
                return NULL;
 
+       req_id = ((u16)bus << 8) | devfn;
+
        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
 
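The new req_id is a PCI requester ID: the bus number occupies the high byte and devfn (slot << 3 | function) the low byte, which is also what the kernel's PCI_DEVID() macro computes. A worked example:

u16 req_id = ((u16)bus << 8) | devfn;
/* bus 0x00, slot 0x02, function 0 -> devfn 0x10 -> req_id 0x0010 */

The req_id != dma_alias check added below then skips registering a separate alias entry when the device is its own DMA alias.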
@@ -2352,21 +2361,15 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
        domain = alloc_domain(0);
        if (!domain)
                return NULL;
-       domain->id = iommu_attach_domain(domain, iommu);
-       if (domain->id < 0) {
-               free_domain_mem(domain);
-               return NULL;
-       }
-       domain_attach_iommu(domain, iommu);
-       if (domain_init(domain, gaw)) {
+       if (domain_init(domain, iommu, gaw)) {
                domain_exit(domain);
                return NULL;
        }
 
        /* register PCI DMA alias device */
-       if (dev_is_pci(dev)) {
-               tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
-                                          dma_alias & 0xff, NULL, domain);
+       if (req_id != dma_alias && dev_is_pci(dev)) {
+               tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
+                                              dma_alias & 0xff, NULL, domain);
 
                if (!tmp || tmp != domain) {
                        domain_exit(domain);
@@ -2378,7 +2381,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
        }
 
 found_domain:
-       tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+       tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
 
        if (!tmp || tmp != domain) {
                domain_exit(domain);
@@ -2406,8 +2409,7 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
                return -ENOMEM;
        }
 
-       pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
-                start, end, domain->id);
+       pr_debug("Mapping reserved region %llx-%llx\n", start, end);
        /*
         * RMRR range might have overlap with physical memory range,
         * clear it first
@@ -2468,11 +2470,6 @@ static int iommu_prepare_identity_map(struct device *dev,
        if (ret)
                goto error;
 
-       /* context entry init */
-       ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
-       if (ret)
-               goto error;
-
        return 0;
 
  error:
@@ -2518,37 +2515,18 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_init(int hw)
 {
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
        int nid, ret = 0;
-       bool first = true;
 
        si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
        if (!si_domain)
                return -EFAULT;
 
-       for_each_active_iommu(iommu, drhd) {
-               ret = iommu_attach_domain(si_domain, iommu);
-               if (ret < 0) {
-                       domain_exit(si_domain);
-                       return -EFAULT;
-               } else if (first) {
-                       si_domain->id = ret;
-                       first = false;
-               } else if (si_domain->id != ret) {
-                       domain_exit(si_domain);
-                       return -EFAULT;
-               }
-               domain_attach_iommu(si_domain, iommu);
-       }
-
        if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                domain_exit(si_domain);
                return -EFAULT;
        }
 
-       pr_debug("Identity mapping domain is domain %d\n",
-                si_domain->id);
+       pr_debug("Identity mapping domain allocated\n");
 
        if (hw)
                return 0;
@@ -2582,28 +2560,20 @@ static int identity_mapping(struct device *dev)
        return 0;
 }
 
-static int domain_add_dev_info(struct dmar_domain *domain,
-                              struct device *dev, int translation)
+static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
        struct dmar_domain *ndomain;
        struct intel_iommu *iommu;
        u8 bus, devfn;
-       int ret;
 
        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;
 
-       ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+       ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
        if (ndomain != domain)
                return -EBUSY;
 
-       ret = domain_context_mapping(domain, dev, translation);
-       if (ret) {
-               domain_remove_one_dev_info(domain, dev);
-               return ret;
-       }
-
        return 0;
 }
 
@@ -2743,9 +2713,7 @@ static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw
        if (!iommu_should_identity_map(dev, 1))
                return 0;
 
-       ret = domain_add_dev_info(si_domain, dev,
-                                 hw ? CONTEXT_TT_PASS_THROUGH :
-                                      CONTEXT_TT_MULTI_LEVEL);
+       ret = domain_add_dev_info(si_domain, dev);
        if (!ret)
                pr_info("%s identity mapping for device %s\n",
                        hw ? "Hardware" : "Software", dev_name(dev));
@@ -2831,15 +2799,18 @@ static void intel_iommu_init_qi(struct intel_iommu *iommu)
 }
 
 static int copy_context_table(struct intel_iommu *iommu,
-                             struct root_entry *old_re,
+                             struct root_entry __iomem *old_re,
                              struct context_entry **tbl,
                              int bus, bool ext)
 {
-       struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
        int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
+       struct context_entry __iomem *old_ce = NULL;
+       struct context_entry *new_ce = NULL, ce;
+       struct root_entry re;
        phys_addr_t old_ce_phys;
 
        tbl_idx = ext ? bus * 2 : bus;
+       memcpy_fromio(&re, old_re, sizeof(re));
 
        for (devfn = 0; devfn < 256; devfn++) {
                /* First calculate the correct index */
@@ -2859,9 +2830,9 @@ static int copy_context_table(struct intel_iommu *iommu,
 
                        ret = 0;
                        if (devfn < 0x80)
-                               old_ce_phys = root_entry_lctp(old_re);
+                               old_ce_phys = root_entry_lctp(&re);
                        else
-                               old_ce_phys = root_entry_uctp(old_re);
+                               old_ce_phys = root_entry_uctp(&re);
 
                        if (!old_ce_phys) {
                                if (ext && devfn == 0) {
@@ -2886,7 +2857,7 @@ static int copy_context_table(struct intel_iommu *iommu,
                }
 
                /* Now copy the context entry */
-               ce = old_ce[idx];
+               memcpy_fromio(&ce, old_ce + idx, sizeof(ce));
 
                if (!__context_present(&ce))
                        continue;
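The __iomem annotations and memcpy_fromio() calls enforce the MMIO access rule: memory obtained from ioremap() must not be dereferenced through ordinary pointers. A self-contained sketch of the copy-then-use pattern (names illustrative):

struct root_entry re;
struct root_entry __iomem *old_re;

old_re = ioremap(old_rt_phys, sizeof(re));
if (!old_re)
	return -ENOMEM;
memcpy_fromio(&re, old_re, sizeof(re));	/* byte-wise, __iomem-safe copy */
iounmap(old_re);
/* 're' is now a plain kernel copy and may be read directly */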
@@ -2930,8 +2901,8 @@ out:
 
 static int copy_translation_tables(struct intel_iommu *iommu)
 {
+       struct root_entry __iomem *old_rt;
        struct context_entry **ctxt_tbls;
-       struct root_entry *old_rt;
        phys_addr_t old_rt_phys;
        int ctxt_table_entries;
        unsigned long flags;
@@ -3261,7 +3232,6 @@ static struct iova *intel_alloc_iova(struct device *dev,
 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
 {
        struct dmar_domain *domain;
-       int ret;
 
        domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain) {
@@ -3270,16 +3240,6 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
                return NULL;
        }
 
-       /* make sure context mapping is ok */
-       if (unlikely(!domain_context_mapped(dev))) {
-               ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
-               if (ret) {
-                       pr_err("Domain context map for %s failed\n",
-                              dev_name(dev));
-                       return NULL;
-               }
-       }
-
        return domain;
 }
 
@@ -3315,7 +3275,7 @@ static int iommu_no_mapping(struct device *dev)
                         * 32 bit DMA is removed from si_domain and fall back
                         * to non-identity mapping.
                         */
-                       domain_remove_one_dev_info(si_domain, dev);
+                       dmar_remove_one_dev_info(si_domain, dev);
                        pr_info("32bit %s uses non-identity mapping\n",
                                dev_name(dev));
                        return 0;
@@ -3327,10 +3287,7 @@ static int iommu_no_mapping(struct device *dev)
                 */
                if (iommu_should_identity_map(dev, 0)) {
                        int ret;
-                       ret = domain_add_dev_info(si_domain, dev,
-                                                 hw_pass_through ?
-                                                 CONTEXT_TT_PASS_THROUGH :
-                                                 CONTEXT_TT_MULTI_LEVEL);
+                       ret = domain_add_dev_info(si_domain, dev);
                        if (!ret) {
                                pr_info("64bit %s uses identity mapping\n",
                                        dev_name(dev));
@@ -3391,7 +3348,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
        /* it's a non-present to present mapping. Only flush if caching mode */
        if (cap_caching_mode(iommu->cap))
-               iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
+               iommu_flush_iotlb_psi(iommu, domain,
+                                     mm_to_dma_pfn(iova->pfn_lo),
+                                     size, 0, 1);
        else
                iommu_flush_write_buffer(iommu);
 
@@ -3442,7 +3401,7 @@ static void flush_unmaps(void)
 
                        /* On real hardware multiple invalidations are expensive */
                        if (cap_caching_mode(iommu->cap))
-                               iommu_flush_iotlb_psi(iommu, domain->id,
+                               iommu_flush_iotlb_psi(iommu, domain,
                                        iova->pfn_lo, iova_size(iova),
                                        !deferred_flush[i].freelist[j], 0);
                        else {
@@ -3526,7 +3485,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
        freelist = domain_unmap(domain, start_pfn, last_pfn);
 
        if (intel_iommu_strict) {
-               iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+               iommu_flush_iotlb_psi(iommu, domain, start_pfn,
                                      last_pfn - start_pfn + 1, !freelist, 0);
                /* free iova */
                __free_iova(&domain->iovad, iova);
@@ -3684,7 +3643,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 
        /* it's a non-present to present mapping. Only flush if caching mode */
        if (cap_caching_mode(iommu->cap))
-               iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
+               iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
        else
                iommu_flush_write_buffer(iommu);
 
@@ -4161,13 +4120,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
        iommu_enable_translation(iommu);
 
-       if (si_domain) {
-               ret = iommu_attach_domain(si_domain, iommu);
-               if (ret < 0 || si_domain->id != ret)
-                       goto disable_iommu;
-               domain_attach_iommu(si_domain, iommu);
-       }
-
        iommu_disable_protect_mem_regions(iommu);
        return 0;
 
@@ -4329,11 +4281,9 @@ static int device_notifier(struct notifier_block *nb,
        if (!domain)
                return 0;
 
-       down_read(&dmar_global_lock);
-       domain_remove_one_dev_info(domain, dev);
+       dmar_remove_one_dev_info(domain, dev);
        if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
                domain_exit(domain);
-       up_read(&dmar_global_lock);
 
        return 0;
 }
@@ -4390,7 +4340,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 
                        rcu_read_lock();
                        for_each_active_iommu(iommu, drhd)
-                               iommu_flush_iotlb_psi(iommu, si_domain->id,
+                               iommu_flush_iotlb_psi(iommu, si_domain,
                                        iova->pfn_lo, iova_size(iova),
                                        !freelist, 0);
                        rcu_read_unlock();
@@ -4449,11 +4399,32 @@ static ssize_t intel_iommu_show_ecap(struct device *dev,
 }
 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
 
+static ssize_t intel_iommu_show_ndoms(struct device *dev,
+                                     struct device_attribute *attr,
+                                     char *buf)
+{
+       struct intel_iommu *iommu = dev_get_drvdata(dev);
+       return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
+}
+static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
+
+static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
+                                          struct device_attribute *attr,
+                                          char *buf)
+{
+       struct intel_iommu *iommu = dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
+                                                 cap_ndoms(iommu->cap)));
+}
+static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
+
 static struct attribute *intel_iommu_attrs[] = {
        &dev_attr_version.attr,
        &dev_attr_address.attr,
        &dev_attr_cap.attr,
        &dev_attr_ecap.attr,
+       &dev_attr_domains_supported.attr,
+       &dev_attr_domains_used.attr,
        NULL,
 };
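With these attributes registered, each DMAR unit exposes its domain-ID capacity and current usage through sysfs. A hypothetical userspace reader; the dmar0 path is illustrative, actual unit names vary per system:

#include <stdio.h>

int main(void)
{
	unsigned long n;
	FILE *f = fopen("/sys/class/iommu/dmar0/intel-iommu/domains_supported", "r");

	if (f && fscanf(f, "%lu", &n) == 1)
		printf("domain IDs supported: %lu\n", n);
	if (f)
		fclose(f);
	return 0;
}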
 
@@ -4533,7 +4504,7 @@ int __init intel_iommu_init(void)
        for_each_active_iommu(iommu, drhd)
                iommu->iommu_dev = iommu_device_create(NULL, iommu,
                                                       intel_iommu_groups,
-                                                      iommu->name);
+                                                      "%s", iommu->name);
 
        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
        bus_register_notifier(&pci_bus_type, &device_nb);
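The added "%s" closes a classic format-string hole: iommu->name was previously consumed as the format itself, so a '%' in the name would be interpreted as a conversion specifier. The general rule, sketched with dev_set_name(), which likewise takes a printf-style format:

dev_set_name(dev, name);	/* unsafe: '%' in name acts as a conversion */
dev_set_name(dev, "%s", name);	/* safe: name is data, not a format */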
@@ -4553,11 +4524,11 @@ out_free_dmar:
        return ret;
 }
 
-static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
 {
        struct intel_iommu *iommu = opaque;
 
-       iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+       domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
        return 0;
 }
 
@@ -4567,63 +4538,50 @@ static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
  * devices, unbinding the driver from any one of them will possibly leave
  * the others unable to operate.
  */
-static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
-                                          struct device *dev)
+static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
 {
        if (!iommu || !dev || !dev_is_pci(dev))
                return;
 
-       pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
+       pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
 }
 
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
-                                      struct device *dev)
+static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 {
-       struct device_domain_info *info, *tmp;
        struct intel_iommu *iommu;
        unsigned long flags;
-       bool found = false;
-       u8 bus, devfn;
 
-       iommu = device_to_iommu(dev, &bus, &devfn);
-       if (!iommu)
+       assert_spin_locked(&device_domain_lock);
+
+       if (WARN_ON(!info))
                return;
 
-       spin_lock_irqsave(&device_domain_lock, flags);
-       list_for_each_entry_safe(info, tmp, &domain->devices, link) {
-               if (info->iommu == iommu && info->bus == bus &&
-                   info->devfn == devfn) {
-                       unlink_domain_info(info);
-                       spin_unlock_irqrestore(&device_domain_lock, flags);
+       iommu = info->iommu;
 
-                       iommu_disable_dev_iotlb(info);
-                       iommu_detach_dev(iommu, info->bus, info->devfn);
-                       iommu_detach_dependent_devices(iommu, dev);
-                       free_devinfo_mem(info);
+       if (info->dev) {
+               iommu_disable_dev_iotlb(info);
+               domain_context_clear(iommu, info->dev);
+       }
 
-                       spin_lock_irqsave(&device_domain_lock, flags);
+       unlink_domain_info(info);
 
-                       if (found)
-                               break;
-                       else
-                               continue;
-               }
+       spin_lock_irqsave(&iommu->lock, flags);
+       domain_detach_iommu(info->domain, iommu);
+       spin_unlock_irqrestore(&iommu->lock, flags);
 
-               /* if there is no other devices under the same iommu
-                * owned by this domain, clear this iommu in iommu_bmp
-                * update iommu count and coherency
-                */
-               if (info->iommu == iommu)
-                       found = true;
-       }
+       free_devinfo_mem(info);
+}
 
-       spin_unlock_irqrestore(&device_domain_lock, flags);
+static void dmar_remove_one_dev_info(struct dmar_domain *domain,
+                                    struct device *dev)
+{
+       struct device_domain_info *info;
+       unsigned long flags;
 
-       if (found == 0) {
-               domain_detach_iommu(domain, iommu);
-               if (!domain_type_is_vm_or_si(domain))
-                       iommu_detach_domain(domain, iommu);
-       }
+       spin_lock_irqsave(&device_domain_lock, flags);
+       info = dev->archdata.iommu;
+       __dmar_remove_one_dev_info(info);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 }
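The split into dmar_remove_one_dev_info() and a double-underscore helper follows a common kernel pattern: the wrapper owns the lock, and the helper asserts it is held so that misuse trips a warning under CONFIG_DEBUG_SPINLOCK. The skeleton, with illustrative names:

static void __frob_locked(struct frob *f)
{
	assert_spin_locked(&frob_lock);	/* caller must hold frob_lock */
	/* ... mutate f ... */
}

static void frob_one(struct frob *f)
{
	unsigned long flags;

	spin_lock_irqsave(&frob_lock, flags);
	__frob_locked(f);
	spin_unlock_irqrestore(&frob_lock, flags);
}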
 
 static int md_domain_init(struct dmar_domain *domain, int guest_width)
@@ -4704,10 +4662,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 
                old_domain = find_domain(dev);
                if (old_domain) {
-                       if (domain_type_is_vm_or_si(dmar_domain))
-                               domain_remove_one_dev_info(old_domain, dev);
-                       else
-                               domain_remove_dev_info(old_domain);
+                       rcu_read_lock();
+                       dmar_remove_one_dev_info(old_domain, dev);
+                       rcu_read_unlock();
 
                        if (!domain_type_is_vm_or_si(old_domain) &&
                             list_empty(&old_domain->devices))
@@ -4747,13 +4704,13 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
                dmar_domain->agaw--;
        }
 
-       return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
+       return domain_add_dev_info(dmar_domain, dev);
 }
 
 static void intel_iommu_detach_device(struct iommu_domain *domain,
                                      struct device *dev)
 {
-       domain_remove_one_dev_info(to_dmar_domain(domain), dev);
+       dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
 }
 
 static int intel_iommu_map(struct iommu_domain *domain,
@@ -4802,12 +4759,11 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
        struct intel_iommu *iommu;
        unsigned long start_pfn, last_pfn;
        unsigned int npages;
-       int iommu_id, num, ndomains, level = 0;
+       int iommu_id, level = 0;
 
        /* Cope with horrid API which requires us to unmap more than the
           size argument if it happens to be a large-page mapping. */
-       if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
-               BUG();
+       BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
 
        if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
                size = VTD_PAGE_SIZE << level_to_offset_bits(level);
@@ -4819,19 +4775,11 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 
        npages = last_pfn - start_pfn + 1;
 
-       for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
-               iommu = g_iommus[iommu_id];
-
-               /*
-                * find bit position of dmar_domain
-                */
-               ndomains = cap_ndoms(iommu->cap);
-               for_each_set_bit(num, iommu->domain_ids, ndomains) {
-                       if (iommu->domains[num] == dmar_domain)
-                               iommu_flush_iotlb_psi(iommu, num, start_pfn,
-                                                    npages, !freelist, 0);
-              }
+       for_each_domain_iommu(iommu_id, dmar_domain) {
+               iommu = g_iommus[iommu_id];
 
+               iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
+                                     start_pfn, npages, !freelist, 0);
        }
 
        dma_free_pagelist(freelist);
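for_each_domain_iommu() replaces the open-coded scan that matched the domain against every IOMMU's domain-ID bitmap. Its definition is not part of this hunk; a plausible expansion, consistent with how it is used here, would visit only the IOMMUs the domain is attached to (domain_attached_to() below is a hypothetical predicate):

/* assumed shape, for illustration only */
#define for_each_domain_iommu(idx, domain)			\
	for ((idx) = 0; (idx) < g_num_of_iommus; (idx)++)	\
		if (domain_attached_to(domain, idx))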
index f15692a410c7e7064e844be2b7d83feee20ef5c5..9ec4e0d94ffd5bdd0be3be7fe6c83ae9ac37e85f 100644
@@ -384,7 +384,7 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 
 static int iommu_load_old_irte(struct intel_iommu *iommu)
 {
-       struct irte *old_ir_table;
+       struct irte __iomem *old_ir_table;
        phys_addr_t irt_phys;
        unsigned int i;
        size_t size;
@@ -413,7 +413,7 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
                return -ENOMEM;
 
        /* Copy data over */
-       memcpy(iommu->ir_table->base, old_ir_table, size);
+       memcpy_fromio(iommu->ir_table->base, old_ir_table, size);
 
        __iommu_flush_cache(iommu, iommu->ir_table->base, size);
 
@@ -426,6 +426,8 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
                        bitmap_set(iommu->ir_table->bitmap, i, 1);
        }
 
+       iounmap(old_ir_table);
+
        return 0;
 }
 
index 4e460216bd1644e5bb8a26ba9a4c3891d7452393..73c07482f48763c5af3f0d43d73a2f04774bb74d 100644
@@ -26,6 +26,8 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 
+#include <asm/barrier.h>
+
 #include "io-pgtable.h"
 
 #define ARM_LPAE_MAX_ADDR_BITS         48
@@ -200,20 +202,97 @@ typedef u64 arm_lpae_iopte;
 
 static bool selftest_running = false;
 
+static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
+{
+       return phys_to_dma(dev, virt_to_phys(pages));
+}
+
+static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
+                                   struct io_pgtable_cfg *cfg)
+{
+       struct device *dev = cfg->iommu_dev;
+       dma_addr_t dma;
+       void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
+
+       if (!pages)
+               return NULL;
+
+       if (!selftest_running) {
+               dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, dma))
+                       goto out_free;
+               /*
+                * We depend on the IOMMU being able to work with any physical
+                * address directly, so if the DMA layer suggests it can't by
+                * giving us back some translation, that bodes very badly...
+                */
+               if (dma != __arm_lpae_dma_addr(dev, pages))
+                       goto out_unmap;
+       }
+
+       return pages;
+
+out_unmap:
+       dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
+       dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
+out_free:
+       free_pages_exact(pages, size);
+       return NULL;
+}
+
+static void __arm_lpae_free_pages(void *pages, size_t size,
+                                 struct io_pgtable_cfg *cfg)
+{
+       struct device *dev = cfg->iommu_dev;
+
+       if (!selftest_running)
+               dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
+                                size, DMA_TO_DEVICE);
+       free_pages_exact(pages, size);
+}
+
+static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
+                              struct io_pgtable_cfg *cfg)
+{
+       struct device *dev = cfg->iommu_dev;
+
+       *ptep = pte;
+
+       if (!selftest_running)
+               dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
+                                          sizeof(pte), DMA_TO_DEVICE);
+}
+
+static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+                           unsigned long iova, size_t size, int lvl,
+                           arm_lpae_iopte *ptep);
+
 static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
                             unsigned long iova, phys_addr_t paddr,
                             arm_lpae_iopte prot, int lvl,
                             arm_lpae_iopte *ptep)
 {
        arm_lpae_iopte pte = prot;
+       struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
-       /* We require an unmap first */
        if (iopte_leaf(*ptep, lvl)) {
+               /* We require an unmap first */
                WARN_ON(!selftest_running);
                return -EEXIST;
+       } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
+               /*
+                * We need to unmap and free the old table before
+                * overwriting it with a block entry.
+                */
+               arm_lpae_iopte *tblp;
+               size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+               tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
+               if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
+                       return -EINVAL;
        }
 
-       if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+       if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
                pte |= ARM_LPAE_PTE_NS;
 
        if (lvl == ARM_LPAE_MAX_LEVELS - 1)
@@ -224,8 +303,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
        pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
        pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
 
-       *ptep = pte;
-       data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
+       __arm_lpae_set_pte(ptep, pte, cfg);
        return 0;
 }
 
@@ -234,14 +312,14 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
                          int lvl, arm_lpae_iopte *ptep)
 {
        arm_lpae_iopte *cptep, pte;
-       void *cookie = data->iop.cookie;
        size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+       struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
        /* Find our entry at the current level */
        ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 
        /* If we can install a leaf entry at this level, then do so */
-       if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
+       if (size == block_size && (size & cfg->pgsize_bitmap))
                return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
 
        /* We can't allocate tables at the final level */
@@ -251,18 +329,15 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
        /* Grab a pointer to the next level */
        pte = *ptep;
        if (!pte) {
-               cptep = alloc_pages_exact(1UL << data->pg_shift,
-                                        GFP_ATOMIC | __GFP_ZERO);
+               cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
+                                              GFP_ATOMIC, cfg);
                if (!cptep)
                        return -ENOMEM;
 
-               data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
-                                                cookie);
                pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
-               if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+               if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
                        pte |= ARM_LPAE_PTE_NSTABLE;
-               *ptep = pte;
-               data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+               __arm_lpae_set_pte(ptep, pte, cfg);
        } else {
                cptep = iopte_deref(pte, data);
        }
@@ -309,7 +384,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 {
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        arm_lpae_iopte *ptep = data->pgd;
-       int lvl = ARM_LPAE_START_LVL(data);
+       int ret, lvl = ARM_LPAE_START_LVL(data);
        arm_lpae_iopte prot;
 
        /* If no access, then nothing to do */
@@ -317,7 +392,14 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
                return 0;
 
        prot = arm_lpae_prot_to_pte(data, iommu_prot);
-       return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+       ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+       /*
+        * Synchronise all PTE updates for the new mapping before there's
+        * a chance for anything to kick off a table walk for the new iova.
+        */
+       wmb();
+
+       return ret;
 }
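Together with __arm_lpae_set_pte(), this gives the full publication order for a table walker that is not cache-coherent: write the PTE, clean the line out to the device (dma_sync), then a wmb() before the IOVA can be handed out. Condensed, under the same assumptions as the code above:

*ptep = pte;						/* 1: CPU write */
dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
			   sizeof(pte), DMA_TO_DEVICE);	/* 2: visible to device */
wmb();							/* 3: ordered before any walk */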
 
 static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
@@ -347,7 +429,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
                __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
        }
 
-       free_pages_exact(start, table_size);
+       __arm_lpae_free_pages(start, table_size, &data->iop.cfg);
 }
 
 static void arm_lpae_free_pgtable(struct io_pgtable *iop)
@@ -366,8 +448,7 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
        unsigned long blk_start, blk_end;
        phys_addr_t blk_paddr;
        arm_lpae_iopte table = 0;
-       void *cookie = data->iop.cookie;
-       const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+       struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
        blk_start = iova & ~(blk_size - 1);
        blk_end = blk_start + blk_size;
@@ -393,10 +474,9 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
                }
        }
 
-       *ptep = table;
-       tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+       __arm_lpae_set_pte(ptep, table, cfg);
        iova &= ~(blk_size - 1);
-       tlb->tlb_add_flush(iova, blk_size, true, cookie);
+       cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
        return size;
 }
 
@@ -418,13 +498,12 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 
        /* If the size matches this level, we're in the right place */
        if (size == blk_size) {
-               *ptep = 0;
-               tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+               __arm_lpae_set_pte(ptep, 0, &data->iop.cfg);
 
                if (!iopte_leaf(pte, lvl)) {
                        /* Also flush any partial walks */
                        tlb->tlb_add_flush(iova, size, false, cookie);
-                       tlb->tlb_sync(data->iop.cookie);
+                       tlb->tlb_sync(cookie);
                        ptep = iopte_deref(pte, data);
                        __arm_lpae_free_pgtable(data, lvl + 1, ptep);
                } else {
@@ -640,11 +719,12 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
        cfg->arm_lpae_s1_cfg.mair[1] = 0;
 
        /* Looking good; allocate a pgd */
-       data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+       data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
        if (!data->pgd)
                goto out_free_data;
 
-       cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+       /* Ensure the empty pgd is visible before any actual TTBR write */
+       wmb();
 
        /* TTBRs */
        cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
@@ -728,11 +808,12 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
        cfg->arm_lpae_s2_cfg.vtcr = reg;
 
        /* Allocate pgd pages */
-       data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+       data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
        if (!data->pgd)
                goto out_free_data;
 
-       cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+       /* Ensure the empty pgd is visible before any actual TTBR write */
+       wmb();
 
        /* VTTBR */
        cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
@@ -818,16 +899,10 @@ static void dummy_tlb_sync(void *cookie)
        WARN_ON(cookie != cfg_cookie);
 }
 
-static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
-{
-       WARN_ON(cookie != cfg_cookie);
-}
-
 static struct iommu_gather_ops dummy_tlb_ops __initdata = {
        .tlb_flush_all  = dummy_tlb_flush_all,
        .tlb_add_flush  = dummy_tlb_add_flush,
        .tlb_sync       = dummy_tlb_sync,
-       .flush_pgtable  = dummy_flush_pgtable,
 };
 
 static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
index 6436fe24bc2f6fdc0273d6017056a24f964b640e..6f2e319d4f04a58d1174984338c0af9d856e9329 100644
 
 #include "io-pgtable.h"
 
-extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
-
 static const struct io_pgtable_init_fns *
 io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
 {
index 10e32f69c6681368cb04023173a4bcf0b859839a..ac9e2341a633ed82420d3d06bd24cec23240b86b 100644
@@ -17,8 +17,9 @@ enum io_pgtable_fmt {
  *
  * @tlb_flush_all: Synchronously invalidate the entire TLB context.
  * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
- * @tlb_sync:      Ensure any queue TLB invalidation has taken effect.
- * @flush_pgtable: Ensure page table updates are visible to the IOMMU.
+ * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
+ *                 any corresponding page table updates are visible to the
+ *                 IOMMU.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
@@ -28,7 +29,6 @@ struct iommu_gather_ops {
        void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
                              void *cookie);
        void (*tlb_sync)(void *cookie);
-       void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
 };
 
 /**
@@ -41,6 +41,8 @@ struct iommu_gather_ops {
  * @ias:           Input address (iova) size, in bits.
  * @oas:           Output address (paddr) size, in bits.
  * @tlb:           TLB management callbacks for this set of tables.
+ * @iommu_dev:     The device representing the DMA configuration for the
+ *                 page table walker.
  */
 struct io_pgtable_cfg {
        #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0)        /* Set NS bit in PTEs */
@@ -49,6 +51,7 @@ struct io_pgtable_cfg {
        unsigned int                    ias;
        unsigned int                    oas;
        const struct iommu_gather_ops   *tlb;
+       struct device                   *iommu_dev;
 
        /* Low-level data specific to the table format */
        union {
@@ -140,4 +143,9 @@ struct io_pgtable_init_fns {
        void (*free)(struct io_pgtable *iop);
 };
 
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+
 #endif /* __IO_PGTABLE_H */
index 1a67c531a07eb908519e5c130dd22e0ebf377566..8cf605fa9946013642b2a88f500beb285cc55cfc 100644
@@ -283,24 +283,10 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
        /* The hardware doesn't support selective TLB flush. */
 }
 
-static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie)
-{
-       unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
-       struct ipmmu_vmsa_domain *domain = cookie;
-
-       /*
-        * TODO: Add support for coherent walk through CCI with DVM and remove
-        * cache handling.
-        */
-       dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
-                    DMA_TO_DEVICE);
-}
-
 static struct iommu_gather_ops ipmmu_gather_ops = {
        .tlb_flush_all = ipmmu_tlb_flush_all,
        .tlb_add_flush = ipmmu_tlb_add_flush,
        .tlb_sync = ipmmu_tlb_flush_all,
-       .flush_pgtable = ipmmu_flush_pgtable,
 };
 
 /* -----------------------------------------------------------------------------
@@ -327,6 +313,11 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
        domain->cfg.ias = 32;
        domain->cfg.oas = 40;
        domain->cfg.tlb = &ipmmu_gather_ops;
+       /*
+        * TODO: Add support for coherent walk through CCI with DVM and remove
+        * cache handling. For now, delegate it to the io-pgtable code.
+        */
+       domain->cfg.iommu_dev = domain->mmu->dev;
 
        domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
                                           domain);
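For other io-pgtable users, adopting the new field is a one-line addition to the existing configuration. A minimal sketch; the sizes, gather ops, and cookie are illustrative:

struct io_pgtable_cfg cfg = {
	.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
	.ias		= 32,
	.oas		= 40,
	.tlb		= &my_gather_ops,	/* hypothetical gather ops */
	.iommu_dev	= dev,	/* device whose DMA ops map the tables */
};
struct io_pgtable_ops *ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &cfg, cookie);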
index 2d9993062ded6b2d543c89a9c09c3e6a85dfb17f..913455a5fd40e044e21ec20095296027dad3c1e6 100644
@@ -84,7 +84,7 @@ void set_irq_remapping_broken(void)
 bool irq_remapping_cap(enum irq_remap_cap cap)
 {
        if (!remap_ops || disable_irq_post)
-               return 0;
+               return false;
 
        return (remap_ops->capability & (1 << cap));
 }
index 43429ab62228a4d0674dadeab740be812af77ff8..60ba238090d92f3eed93de994d5fd7271a68b8ce 100644
@@ -141,10 +141,12 @@ struct iommu_ops *of_iommu_configure(struct device *dev,
        struct iommu_ops *ops = NULL;
        int idx = 0;
 
-       if (dev_is_pci(dev)) {
-               dev_err(dev, "IOMMU is currently not supported for PCI\n");
+       /*
+        * We can't do much for PCI devices without knowing how
+        * device IDs are wired up from the PCI bus to the IOMMU.
+        */
+       if (dev_is_pci(dev))
                return NULL;
-       }
 
        /*
         * We don't currently walk up the tree looking for a parent IOMMU.
index f3d20a2039d20417093fd70c6192b905e7c3ccf8..0717aa96ce39bd22d1b4fe9e3a82242fa1e5403e 100644
@@ -14,6 +14,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
 #include <linux/debugfs.h>
 #include <linux/platform_data/iommu-omap.h>
 
@@ -29,6 +30,59 @@ static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
        return !obj->domain;
 }
 
+#define pr_reg(name)                                                   \
+       do {                                                            \
+               ssize_t bytes;                                          \
+               const char *str = "%20s: %08x\n";                       \
+               const int maxcol = 32;                                  \
+               bytes = snprintf(p, maxcol, str, __stringify(name),     \
+                                iommu_read_reg(obj, MMU_##name));      \
+               p += bytes;                                             \
+               len -= bytes;                                           \
+               if (len < maxcol)                                       \
+                       goto out;                                       \
+       } while (0)
+
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
+{
+       char *p = buf;
+
+       pr_reg(REVISION);
+       pr_reg(IRQSTATUS);
+       pr_reg(IRQENABLE);
+       pr_reg(WALKING_ST);
+       pr_reg(CNTL);
+       pr_reg(FAULT_AD);
+       pr_reg(TTB);
+       pr_reg(LOCK);
+       pr_reg(LD_TLB);
+       pr_reg(CAM);
+       pr_reg(RAM);
+       pr_reg(GFLUSH);
+       pr_reg(FLUSH_ENTRY);
+       pr_reg(READ_CAM);
+       pr_reg(READ_RAM);
+       pr_reg(EMU_FAULT_AD);
+out:
+       return p - buf;
+}
+
+static ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf,
+                                  ssize_t bytes)
+{
+       if (!obj || !buf)
+               return -EINVAL;
+
+       pm_runtime_get_sync(obj->dev);
+
+       bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
+
+       pm_runtime_put_sync(obj->dev);
+
+       return bytes;
+}
+
 static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
                               size_t count, loff_t *ppos)
 {
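pr_reg() above is a cursor-with-guard pattern for filling a fixed buffer: advance p by what snprintf() wrote, shrink the remaining length, and bail out before the next line could be truncated. Reduced to its core, with illustrative values:

char *p = buf;
ssize_t left = len, n;

n = snprintf(p, 32, "%20s: %08x\n", "REVISION", val);
p += n;
left -= n;
if (left < 32)		/* not enough room for another full line */
	goto out;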
@@ -55,34 +109,71 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
        return bytes;
 }
 
-static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
-                             size_t count, loff_t *ppos)
+static int
+__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
 {
-       struct omap_iommu *obj = file->private_data;
-       char *p, *buf;
-       ssize_t bytes, rest;
+       int i;
+       struct iotlb_lock saved;
+       struct cr_regs tmp;
+       struct cr_regs *p = crs;
+
+       pm_runtime_get_sync(obj->dev);
+       iotlb_lock_get(obj, &saved);
+
+       for_each_iotlb_cr(obj, num, i, tmp) {
+               if (!iotlb_cr_valid(&tmp))
+                       continue;
+               *p++ = tmp;
+       }
+
+       iotlb_lock_set(obj, &saved);
+       pm_runtime_put_sync(obj->dev);
+
+       return  p - crs;
+}
+
+static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
+                            struct seq_file *s)
+{
+       return seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram,
+                         (cr->cam & MMU_CAM_P) ? 1 : 0);
+}
+
+static size_t omap_dump_tlb_entries(struct omap_iommu *obj, struct seq_file *s)
+{
+       int i, num;
+       struct cr_regs *cr;
+
+       num = obj->nr_tlb_entries;
+
+       cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
+       if (!cr)
+               return 0;
+
+       num = __dump_tlb_entries(obj, cr, num);
+       for (i = 0; i < num; i++)
+               iotlb_dump_cr(obj, cr + i, s);
+       kfree(cr);
+
+       return 0;
+}
+
+static int debug_read_tlb(struct seq_file *s, void *data)
+{
+       struct omap_iommu *obj = s->private;
 
        if (is_omap_iommu_detached(obj))
                return -EPERM;
 
-       buf = kmalloc(count, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-       p = buf;
-
        mutex_lock(&iommu_debug_lock);
 
-       p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
-       p += sprintf(p, "-----------------------------------------\n");
-       rest = count - (p - buf);
-       p += omap_dump_tlb_entries(obj, p, rest);
-
-       bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+       seq_printf(s, "%8s %8s\n", "cam:", "ram:");
+       seq_puts(s, "-----------------------------------------\n");
+       omap_dump_tlb_entries(obj, s);
 
        mutex_unlock(&iommu_debug_lock);
-       kfree(buf);
 
-       return bytes;
+       return 0;
 }
 
 static void dump_ioptable(struct seq_file *s)
@@ -154,10 +245,10 @@ static int debug_read_pagetable(struct seq_file *s, void *data)
                .open = simple_open,                                    \
                .read = debug_read_##name,                              \
                .llseek = generic_file_llseek,                          \
-       };
+       }
 
 DEBUG_FOPS_RO(regs);
-DEBUG_FOPS_RO(tlb);
+DEBUG_SEQ_FOPS_RO(tlb);
 DEBUG_SEQ_FOPS_RO(pagetable);
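DEBUG_SEQ_FOPS_RO itself lies outside this hunk; given that debug_read_tlb() is now a seq_file show routine, it presumably wraps single_open(), along these lines (assumed shape, not shown in the patch):

#define DEBUG_SEQ_FOPS_RO(name)						\
	static int name##_open(struct inode *inode, struct file *file)	\
	{								\
		return single_open(file, debug_read_##name,		\
				   inode->i_private);			\
	}								\
									\
	static const struct file_operations debug_##name##_fops = {	\
		.open	 = name##_open,					\
		.read	 = seq_read,					\
		.llseek	 = seq_lseek,					\
		.release = single_release,				\
	}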
 
 #define __DEBUG_ADD_FILE(attr, mode)                                   \
index a22c33d6a486c9dea9bfa820feea18a034623eec..36d0033c2ccbfc554b02b8b4183fdbde4ededffe 100644
@@ -12,7 +12,6 @@
  */
 
 #include <linux/err.h>
-#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #define to_iommu(dev)                                                  \
        ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
 
-#define for_each_iotlb_cr(obj, n, __i, cr)                             \
-       for (__i = 0;                                                   \
-            (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);   \
-            __i++)
-
 /* bitmap of the page sizes currently supported */
 #define OMAP_IOMMU_PGSIZES     (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
 
@@ -72,11 +66,6 @@ struct omap_iommu_domain {
 #define MMU_LOCK_VICT(x)       \
        ((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
 
-struct iotlb_lock {
-       short base;
-       short vict;
-};
-
 static struct platform_driver omap_iommu_driver;
 static struct kmem_cache *iopte_cachep;
 
@@ -213,14 +202,6 @@ static void iommu_disable(struct omap_iommu *obj)
 /*
  *     TLB operations
  */
-static inline int iotlb_cr_valid(struct cr_regs *cr)
-{
-       if (!cr)
-               return -EINVAL;
-
-       return cr->cam & MMU_CAM_V;
-}
-
 static u32 iotlb_cr_to_virt(struct cr_regs *cr)
 {
        u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
@@ -260,7 +241,7 @@ static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
        return status;
 }
 
-static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
+void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
 {
        u32 val;
 
@@ -268,10 +249,9 @@ static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
 
        l->base = MMU_LOCK_BASE(val);
        l->vict = MMU_LOCK_VICT(val);
-
 }
 
-static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
+void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
 {
        u32 val;
 
@@ -297,7 +277,7 @@ static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
 }
 
 /* only used in iotlb iteration for-loop */
-static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
+struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
 {
        struct cr_regs cr;
        struct iotlb_lock l;
@@ -468,129 +448,6 @@ static void flush_iotlb_all(struct omap_iommu *obj)
        pm_runtime_put_sync(obj->dev);
 }
 
-#ifdef CONFIG_OMAP_IOMMU_DEBUG
-
-#define pr_reg(name)                                                   \
-       do {                                                            \
-               ssize_t bytes;                                          \
-               const char *str = "%20s: %08x\n";                       \
-               const int maxcol = 32;                                  \
-               bytes = snprintf(p, maxcol, str, __stringify(name),     \
-                                iommu_read_reg(obj, MMU_##name));      \
-               p += bytes;                                             \
-               len -= bytes;                                           \
-               if (len < maxcol)                                       \
-                       goto out;                                       \
-       } while (0)
-
-static ssize_t
-omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
-{
-       char *p = buf;
-
-       pr_reg(REVISION);
-       pr_reg(IRQSTATUS);
-       pr_reg(IRQENABLE);
-       pr_reg(WALKING_ST);
-       pr_reg(CNTL);
-       pr_reg(FAULT_AD);
-       pr_reg(TTB);
-       pr_reg(LOCK);
-       pr_reg(LD_TLB);
-       pr_reg(CAM);
-       pr_reg(RAM);
-       pr_reg(GFLUSH);
-       pr_reg(FLUSH_ENTRY);
-       pr_reg(READ_CAM);
-       pr_reg(READ_RAM);
-       pr_reg(EMU_FAULT_AD);
-out:
-       return p - buf;
-}
-
-ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
-{
-       if (!obj || !buf)
-               return -EINVAL;
-
-       pm_runtime_get_sync(obj->dev);
-
-       bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
-
-       pm_runtime_put_sync(obj->dev);
-
-       return bytes;
-}
-
-static int
-__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
-{
-       int i;
-       struct iotlb_lock saved;
-       struct cr_regs tmp;
-       struct cr_regs *p = crs;
-
-       pm_runtime_get_sync(obj->dev);
-       iotlb_lock_get(obj, &saved);
-
-       for_each_iotlb_cr(obj, num, i, tmp) {
-               if (!iotlb_cr_valid(&tmp))
-                       continue;
-               *p++ = tmp;
-       }
-
-       iotlb_lock_set(obj, &saved);
-       pm_runtime_put_sync(obj->dev);
-
-       return  p - crs;
-}
-
-/**
- * iotlb_dump_cr - Dump an iommu tlb entry into buf
- * @obj:       target iommu
- * @cr:                contents of cam and ram register
- * @buf:       output buffer
- **/
-static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
-                            char *buf)
-{
-       char *p = buf;
-
-       /* FIXME: Need more detail analysis of cam/ram */
-       p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
-                                       (cr->cam & MMU_CAM_P) ? 1 : 0);
-
-       return p - buf;
-}
-
-/**
- * omap_dump_tlb_entries - dump cr arrays to given buffer
- * @obj:       target iommu
- * @buf:       output buffer
- **/
-size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
-{
-       int i, num;
-       struct cr_regs *cr;
-       char *p = buf;
-
-       num = bytes / sizeof(*cr);
-       num = min(obj->nr_tlb_entries, num);
-
-       cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
-       if (!cr)
-               return 0;
-
-       num = __dump_tlb_entries(obj, cr, num);
-       for (i = 0; i < num; i++)
-               p += iotlb_dump_cr(obj, cr + i, p);
-       kfree(cr);
-
-       return p - buf;
-}
-
-#endif /* CONFIG_OMAP_IOMMU_DEBUG */
-
 /*
  *     H/W pagetable operations
  */
@@ -930,14 +787,14 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
 
        if (!iopgd_is_table(*iopgd)) {
                dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n",
-                               obj->name, errs, da, iopgd, *iopgd);
+                       obj->name, errs, da, iopgd, *iopgd);
                return IRQ_NONE;
        }
 
        iopte = iopte_offset(iopgd, da);
 
        dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
-                       obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
+               obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
 
        return IRQ_NONE;
 }
@@ -963,9 +820,8 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
        struct device *dev;
        struct omap_iommu *obj;
 
-       dev = driver_find_device(&omap_iommu_driver.driver, NULL,
-                               (void *)name,
-                               device_match_by_alias);
+       dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
+                                device_match_by_alias);
        if (!dev)
                return ERR_PTR(-ENODEV);
 
@@ -1089,7 +945,6 @@ static const struct of_device_id omap_iommu_of_match[] = {
        { .compatible = "ti,dra7-iommu" },
        {},
 };
-MODULE_DEVICE_TABLE(of, omap_iommu_of_match);
 
 static struct platform_driver omap_iommu_driver = {
        .probe  = omap_iommu_probe,
@@ -1121,7 +976,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
 }
 
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-                        phys_addr_t pa, size_t bytes, int prot)
+                         phys_addr_t pa, size_t bytes, int prot)
 {
        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
        struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1148,7 +1003,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 }
 
 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-                           size_t size)
+                              size_t size)
 {
        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
        struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1199,7 +1054,7 @@ out:
 }
 
 static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
-                       struct device *dev)
+                                  struct device *dev)
 {
        struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
@@ -1220,7 +1075,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
 }
 
 static void omap_iommu_detach_dev(struct iommu_domain *domain,
-                                struct device *dev)
+                                 struct device *dev)
 {
        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 
@@ -1237,16 +1092,12 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
                return NULL;
 
        omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
-       if (!omap_domain) {
-               pr_err("kzalloc failed\n");
+       if (!omap_domain)
                goto out;
-       }
 
        omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
-       if (!omap_domain->pgtable) {
-               pr_err("kzalloc failed\n");
+       if (!omap_domain->pgtable)
                goto fail_nomem;
-       }
 
        /*
         * should never fail, but please keep this around to ensure
@@ -1285,7 +1136,7 @@ static void omap_iommu_domain_free(struct iommu_domain *domain)
 }
 
 static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
-                                         dma_addr_t da)
+                                          dma_addr_t da)
 {
        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
        struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1302,7 +1153,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
                        ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
                else
                        dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
-                                                       (unsigned long long)da);
+                               (unsigned long long)da);
        } else {
                if (iopgd_is_section(*pgd))
                        ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
@@ -1310,7 +1161,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
                        ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
                else
                        dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
-                                                       (unsigned long long)da);
+                               (unsigned long long)da);
        }
 
        return ret;
@@ -1405,20 +1256,5 @@ static int __init omap_iommu_init(void)
 
        return platform_driver_register(&omap_iommu_driver);
 }
-/* must be ready before omap3isp is probed */
 subsys_initcall(omap_iommu_init);
-
-static void __exit omap_iommu_exit(void)
-{
-       kmem_cache_destroy(iopte_cachep);
-
-       platform_driver_unregister(&omap_iommu_driver);
-
-       omap_iommu_debugfs_exit();
-}
-module_exit(omap_iommu_exit);
-
-MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
-MODULE_ALIAS("platform:omap-iommu");
-MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
-MODULE_LICENSE("GPL v2");
+/* must be ready before omap3isp is probed */
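
The omap-iommu.c hunks above make the driver built-in only: the module exit
path and the MODULE_* boilerplate are gone, while registration stays at
subsys_initcall time so the IOMMU is up before clients such as omap3isp
probe. A minimal sketch of that pattern (hypothetical names, not the
driver's own):

    #include <linux/init.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            return 0;       /* hardware setup would go here */
    }

    static struct platform_driver example_driver = {
            .probe  = example_probe,
            .driver = {
                    .name = "example-iommu",
            },
    };

    static int __init example_init(void)
    {
            return platform_driver_register(&example_driver);
    }
    /* built-in only: registered early, never unregistered */
    subsys_initcall(example_init);
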
index d736630df3c8a16a1a853915be83dec437225df0..a656df2f9e03d27b2ad06cbd6f202aaecf2ee5dc 100644 (file)
 #ifndef _OMAP_IOMMU_H
 #define _OMAP_IOMMU_H
 
+#include <linux/bitops.h>
+
+#define for_each_iotlb_cr(obj, n, __i, cr)                             \
+       for (__i = 0;                                                   \
+            (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);   \
+            __i++)
+
 struct iotlb_entry {
        u32 da;
        u32 pa;
        u32 pgsz, prsvd, valid;
-       union {
-               u16 ap;
-               struct {
-                       u32 endian, elsz, mixed;
-               };
-       };
+       u32 endian, elsz, mixed;
 };
 
 struct omap_iommu {
@@ -49,20 +51,13 @@ struct omap_iommu {
 };
 
 struct cr_regs {
-       union {
-               struct {
-                       u16 cam_l;
-                       u16 cam_h;
-               };
-               u32 cam;
-       };
-       union {
-               struct {
-                       u16 ram_l;
-                       u16 ram_h;
-               };
-               u32 ram;
-       };
+       u32 cam;
+       u32 ram;
+};
+
+struct iotlb_lock {
+       short base;
+       short vict;
 };
 
 /**
@@ -103,11 +98,11 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
  * MMU Register bit definitions
  */
 /* IRQSTATUS & IRQENABLE */
-#define MMU_IRQ_MULTIHITFAULT  (1 << 4)
-#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
-#define MMU_IRQ_EMUMISS                (1 << 2)
-#define MMU_IRQ_TRANSLATIONFAULT       (1 << 1)
-#define MMU_IRQ_TLBMISS                (1 << 0)
+#define MMU_IRQ_MULTIHITFAULT  BIT(4)
+#define MMU_IRQ_TABLEWALKFAULT BIT(3)
+#define MMU_IRQ_EMUMISS                BIT(2)
+#define MMU_IRQ_TRANSLATIONFAULT       BIT(1)
+#define MMU_IRQ_TLBMISS                BIT(0)
 
 #define __MMU_IRQ_FAULT                \
        (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
@@ -119,16 +114,16 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
 /* MMU_CNTL */
 #define MMU_CNTL_SHIFT         1
 #define MMU_CNTL_MASK          (7 << MMU_CNTL_SHIFT)
-#define MMU_CNTL_EML_TLB       (1 << 3)
-#define MMU_CNTL_TWL_EN                (1 << 2)
-#define MMU_CNTL_MMU_EN                (1 << 1)
+#define MMU_CNTL_EML_TLB       BIT(3)
+#define MMU_CNTL_TWL_EN                BIT(2)
+#define MMU_CNTL_MMU_EN                BIT(1)
 
 /* CAM */
 #define MMU_CAM_VATAG_SHIFT    12
 #define MMU_CAM_VATAG_MASK \
        ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
-#define MMU_CAM_P              (1 << 3)
-#define MMU_CAM_V              (1 << 2)
+#define MMU_CAM_P              BIT(3)
+#define MMU_CAM_V              BIT(2)
 #define MMU_CAM_PGSZ_MASK      3
 #define MMU_CAM_PGSZ_1M                (0 << 0)
 #define MMU_CAM_PGSZ_64K       (1 << 0)
@@ -141,9 +136,9 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
        ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
 
 #define MMU_RAM_ENDIAN_SHIFT   9
-#define MMU_RAM_ENDIAN_MASK    (1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_MASK    BIT(MMU_RAM_ENDIAN_SHIFT)
 #define MMU_RAM_ENDIAN_LITTLE  (0 << MMU_RAM_ENDIAN_SHIFT)
-#define MMU_RAM_ENDIAN_BIG     (1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_BIG     BIT(MMU_RAM_ENDIAN_SHIFT)
 
 #define MMU_RAM_ELSZ_SHIFT     7
 #define MMU_RAM_ELSZ_MASK      (3 << MMU_RAM_ELSZ_SHIFT)
@@ -152,7 +147,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
 #define MMU_RAM_ELSZ_32                (2 << MMU_RAM_ELSZ_SHIFT)
 #define MMU_RAM_ELSZ_NONE      (3 << MMU_RAM_ELSZ_SHIFT)
 #define MMU_RAM_MIXED_SHIFT    6
-#define MMU_RAM_MIXED_MASK     (1 << MMU_RAM_MIXED_SHIFT)
+#define MMU_RAM_MIXED_MASK     BIT(MMU_RAM_MIXED_SHIFT)
 #define MMU_RAM_MIXED          MMU_RAM_MIXED_MASK
 
 #define MMU_GP_REG_BUS_ERR_BACK_EN     0x1
@@ -190,12 +185,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
 /*
  * global functions
  */
-#ifdef CONFIG_OMAP_IOMMU_DEBUG
-extern ssize_t
-omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
-extern size_t
-omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
 
+struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n);
+void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l);
+void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l);
+
+#ifdef CONFIG_OMAP_IOMMU_DEBUG
 void omap_iommu_debugfs_init(void);
 void omap_iommu_debugfs_exit(void);
 
@@ -222,4 +217,12 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
        __raw_writel(val, obj->regbase + offs);
 }
 
+static inline int iotlb_cr_valid(struct cr_regs *cr)
+{
+       if (!cr)
+               return -EINVAL;
+
+       return cr->cam & MMU_CAM_V;
+}
+
 #endif /* _OMAP_IOMMU_H */
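
The header changes above move the TLB-walk helpers out where the debugfs
code can reach them. A sketch of how the iteration macro and the validity
check combine (example_dump_tlb is hypothetical; it assumes an initialized
struct omap_iommu *obj and takes the entry count as a parameter):

    #include <linux/printk.h>

    static void example_dump_tlb(struct omap_iommu *obj, int num_entries)
    {
            struct cr_regs cr;
            int i;

            for_each_iotlb_cr(obj, num_entries, i, cr) {
                    /* iotlb_cr_valid() returns MMU_CAM_V when set, 0 when clear */
                    if (iotlb_cr_valid(&cr) <= 0)
                            continue;
                    pr_info("entry %d: cam 0x%08x ram 0x%08x\n", i, cr.cam, cr.ram);
            }
    }
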
index f891683e3f05af915738151a1a7574268fe84b3b..01a315227bf052d03a0c1f72e6e4e48c6121b201 100644 (file)
  * published by the Free Software Foundation.
  */
 
+#ifndef _OMAP_IOPGTABLE_H
+#define _OMAP_IOPGTABLE_H
+
+#include <linux/bitops.h>
+
 /*
  * "L2 table" address mask and size definitions.
  */
 #define IOPGD_SHIFT            20
-#define IOPGD_SIZE             (1UL << IOPGD_SHIFT)
+#define IOPGD_SIZE             BIT(IOPGD_SHIFT)
 #define IOPGD_MASK             (~(IOPGD_SIZE - 1))
 
 /*
  * "section" address mask and size definitions.
  */
 #define IOSECTION_SHIFT                20
-#define IOSECTION_SIZE         (1UL << IOSECTION_SHIFT)
+#define IOSECTION_SIZE         BIT(IOSECTION_SHIFT)
 #define IOSECTION_MASK         (~(IOSECTION_SIZE - 1))
 
 /*
  * "supersection" address mask and size definitions.
  */
 #define IOSUPER_SHIFT          24
-#define IOSUPER_SIZE           (1UL << IOSUPER_SHIFT)
+#define IOSUPER_SIZE           BIT(IOSUPER_SHIFT)
 #define IOSUPER_MASK           (~(IOSUPER_SIZE - 1))
 
 #define PTRS_PER_IOPGD         (1UL << (32 - IOPGD_SHIFT))
 
 /*
  * "small page" address mask and size definitions.
  */
 #define IOPTE_SHIFT            12
-#define IOPTE_SIZE             (1UL << IOPTE_SHIFT)
+#define IOPTE_SIZE             BIT(IOPTE_SHIFT)
 #define IOPTE_MASK             (~(IOPTE_SIZE - 1))
 
 /*
  * "large page" address mask and size definitions.
  */
 #define IOLARGE_SHIFT          16
-#define IOLARGE_SIZE           (1UL << IOLARGE_SHIFT)
+#define IOLARGE_SIZE           BIT(IOLARGE_SHIFT)
 #define IOLARGE_MASK           (~(IOLARGE_SIZE - 1))
 
 #define PTRS_PER_IOPTE         (1UL << (IOPGD_SHIFT - IOPTE_SHIFT))
@@ -69,16 +74,16 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
 /*
  * some descriptor attributes.
  */
-#define IOPGD_TABLE            (1 << 0)
-#define IOPGD_SECTION          (2 << 0)
-#define IOPGD_SUPER            (1 << 18 | 2 << 0)
+#define IOPGD_TABLE            (1)
+#define IOPGD_SECTION          (2)
+#define IOPGD_SUPER            (BIT(18) | IOPGD_SECTION)
 
 #define iopgd_is_table(x)      (((x) & 3) == IOPGD_TABLE)
 #define iopgd_is_section(x)    (((x) & (1 << 18 | 3)) == IOPGD_SECTION)
 #define iopgd_is_super(x)      (((x) & (1 << 18 | 3)) == IOPGD_SUPER)
 
-#define IOPTE_SMALL            (2 << 0)
-#define IOPTE_LARGE            (1 << 0)
+#define IOPTE_SMALL            (2)
+#define IOPTE_LARGE            (1)
 
 #define iopte_is_small(x)      (((x) & 2) == IOPTE_SMALL)
 #define iopte_is_large(x)      (((x) & 3) == IOPTE_LARGE)
@@ -93,3 +98,5 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
 /* to find an entry in the second-level page table. */
 #define iopte_index(da)                (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
 #define iopte_offset(iopgd, da)        (iopgd_page_vaddr(iopgd) + iopte_index(da))
+
+#endif /* _OMAP_IOPGTABLE_H */
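
With the include guard added above, these page-table helpers can safely be
pulled into more than one translation unit. A sketch of a software walk
over the two-level format, reusing omap_iommu_translate() the way the
iova_to_phys hunk earlier in this commit does (example_lookup is
hypothetical and assumes a CPU-visible page directory; the index macros
are the header's existing ones):

    static phys_addr_t example_lookup(u32 *iopgd_base, u32 da)
    {
            u32 *iopgd = iopgd_base + iopgd_index(da);

            if (iopgd_is_section(*iopgd))
                    return omap_iommu_translate(*iopgd, da, IOSECTION_MASK);
            if (iopgd_is_super(*iopgd))
                    return omap_iommu_translate(*iopgd, da, IOSUPER_MASK);

            if (iopgd_is_table(*iopgd)) {
                    u32 *iopte = iopte_offset(iopgd, da);

                    if (iopte_is_small(*iopte))
                            return omap_iommu_translate(*iopte, da, IOPTE_MASK);
                    if (iopte_is_large(*iopte))
                            return omap_iommu_translate(*iopte, da, IOLARGE_MASK);
            }

            return 0;       /* no mapping */
    }
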
index c1f2e521dc52cdb383b528c27d07f0786e4ffc43..9305964250acaf94cef7da5ce7f5a2fb6e78b5ce 100644 (file)
@@ -27,6 +27,7 @@ struct tegra_smmu {
        const struct tegra_smmu_soc *soc;
 
        unsigned long pfn_mask;
+       unsigned long tlb_mask;
 
        unsigned long *asids;
        struct mutex lock;
@@ -40,8 +41,10 @@ struct tegra_smmu_as {
        struct iommu_domain domain;
        struct tegra_smmu *smmu;
        unsigned int use_count;
-       struct page *count;
+       u32 *count;
+       struct page **pts;
        struct page *pd;
+       dma_addr_t pd_dma;
        unsigned id;
        u32 attr;
 };
@@ -68,7 +71,8 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define SMMU_TLB_CONFIG 0x14
 #define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
 #define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
-#define  SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
+#define  SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
+       ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
 
 #define SMMU_PTC_CONFIG 0x18
 #define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
@@ -79,9 +83,9 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define  SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)
 
 #define SMMU_PTB_DATA 0x020
-#define  SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))
+#define  SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))
 
-#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))
+#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))
 
 #define SMMU_TLB_FLUSH 0x030
 #define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
@@ -134,29 +138,49 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define SMMU_PTE_ATTR          (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
                                 SMMU_PTE_NONSECURE)
 
-static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
+static unsigned int iova_pd_index(unsigned long iova)
+{
+       return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
+}
+
+static unsigned int iova_pt_index(unsigned long iova)
+{
+       return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
+}
+
+static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
+{
+       addr >>= 12;
+       return (addr & smmu->pfn_mask) == addr;
+}
+
+static dma_addr_t smmu_pde_to_dma(u32 pde)
+{
+       return pde << 12;
+}
+
+static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
+{
+       smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
+}
+
+static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
                                  unsigned long offset)
 {
-       phys_addr_t phys = page ? page_to_phys(page) : 0;
        u32 value;
 
-       if (page) {
-               offset &= ~(smmu->mc->soc->atom_size - 1);
+       offset &= ~(smmu->mc->soc->atom_size - 1);
 
-               if (smmu->mc->soc->num_address_bits > 32) {
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-                       value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
+       if (smmu->mc->soc->num_address_bits > 32) {
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+               value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
 #else
-                       value = 0;
+               value = 0;
 #endif
-                       smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
-               }
-
-               value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
-       } else {
-               value = SMMU_PTC_FLUSH_TYPE_ALL;
+               smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
        }
 
+       value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
 }
 
@@ -236,8 +260,6 @@ static bool tegra_smmu_capable(enum iommu_cap cap)
 static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 {
        struct tegra_smmu_as *as;
-       unsigned int i;
-       uint32_t *pd;
 
        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;
@@ -248,32 +270,26 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 
        as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
 
-       as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
+       as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
        if (!as->pd) {
                kfree(as);
                return NULL;
        }
 
-       as->count = alloc_page(GFP_KERNEL);
+       as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
        if (!as->count) {
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }
 
-       /* clear PDEs */
-       pd = page_address(as->pd);
-       SetPageReserved(as->pd);
-
-       for (i = 0; i < SMMU_NUM_PDE; i++)
-               pd[i] = 0;
-
-       /* clear PDE usage counters */
-       pd = page_address(as->count);
-       SetPageReserved(as->count);
-
-       for (i = 0; i < SMMU_NUM_PDE; i++)
-               pd[i] = 0;
+       as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
+       if (!as->pts) {
+               kfree(as->count);
+               __free_page(as->pd);
+               kfree(as);
+               return NULL;
+       }
 
        /* setup aperture */
        as->domain.geometry.aperture_start = 0;
@@ -288,7 +304,6 @@ static void tegra_smmu_domain_free(struct iommu_domain *domain)
        struct tegra_smmu_as *as = to_smmu_as(domain);
 
        /* TODO: free page directory and page tables */
-       ClearPageReserved(as->pd);
 
        kfree(as);
 }
@@ -376,16 +391,26 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                return 0;
        }
 
+       as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
+                                 DMA_TO_DEVICE);
+       if (dma_mapping_error(smmu->dev, as->pd_dma))
+               return -ENOMEM;
+
+       /* We can't handle 64-bit DMA addresses */
+       if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
+               err = -ENOMEM;
+               goto err_unmap;
+       }
+
        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
-               return err;
+               goto err_unmap;
 
-       smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
-       smmu_flush_ptc(smmu, as->pd, 0);
+       smmu_flush_ptc(smmu, as->pd_dma, 0);
        smmu_flush_tlb_asid(smmu, as->id);
 
        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
-       value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
+       value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);
        smmu_flush(smmu);
 
@@ -393,6 +418,10 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
        as->use_count++;
 
        return 0;
+
+err_unmap:
+       dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+       return err;
 }
 
 static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
@@ -402,6 +431,9 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                return;
 
        tegra_smmu_free_asid(smmu, as->id);
+
+       dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+
        as->smmu = NULL;
 }
 
@@ -465,96 +497,155 @@ static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *de
        }
 }
 
+static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
+                              u32 value)
+{
+       unsigned int pd_index = iova_pd_index(iova);
+       struct tegra_smmu *smmu = as->smmu;
+       u32 *pd = page_address(as->pd);
+       unsigned long offset = pd_index * sizeof(*pd);
+
+       /* Set the page directory entry first */
+       pd[pd_index] = value;
+
+       /* Then flush the page directory entry from caches */
+       dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
+                                        sizeof(*pd), DMA_TO_DEVICE);
+
+       /* And flush the iommu */
+       smmu_flush_ptc(smmu, as->pd_dma, offset);
+       smmu_flush_tlb_section(smmu, as->id, iova);
+       smmu_flush(smmu);
+}
+
+static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
+{
+       u32 *pt = page_address(pt_page);
+
+       return pt + iova_pt_index(iova);
+}
+
+static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
+                                 dma_addr_t *dmap)
+{
+       unsigned int pd_index = iova_pd_index(iova);
+       struct page *pt_page;
+       u32 *pd;
+
+       pt_page = as->pts[pd_index];
+       if (!pt_page)
+               return NULL;
+
+       pd = page_address(as->pd);
+       *dmap = smmu_pde_to_dma(pd[pd_index]);
+
+       return tegra_smmu_pte_offset(pt_page, iova);
+}
+
 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
-                      struct page **pagep)
+                      dma_addr_t *dmap)
 {
-       u32 *pd = page_address(as->pd), *pt, *count;
-       u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
-       u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
+       unsigned int pde = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
-       struct page *page;
-       unsigned int i;
 
-       if (pd[pde] == 0) {
-               page = alloc_page(GFP_KERNEL | __GFP_DMA);
+       if (!as->pts[pde]) {
+               struct page *page;
+               dma_addr_t dma;
+
+               page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
                if (!page)
                        return NULL;
 
-               pt = page_address(page);
-               SetPageReserved(page);
+               dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
+                                  DMA_TO_DEVICE);
+               if (dma_mapping_error(smmu->dev, dma)) {
+                       __free_page(page);
+                       return NULL;
+               }
 
-               for (i = 0; i < SMMU_NUM_PTE; i++)
-                       pt[i] = 0;
+               if (!smmu_dma_addr_valid(smmu, dma)) {
+                       dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
+                                      DMA_TO_DEVICE);
+                       __free_page(page);
+                       return NULL;
+               }
 
-               smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);
+               as->pts[pde] = page;
 
-               pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
+               tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
+                                                             SMMU_PDE_NEXT));
 
-               smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
-               smmu_flush_ptc(smmu, as->pd, pde << 2);
-               smmu_flush_tlb_section(smmu, as->id, iova);
-               smmu_flush(smmu);
+               *dmap = dma;
        } else {
-               page = pfn_to_page(pd[pde] & smmu->pfn_mask);
-               pt = page_address(page);
+               u32 *pd = page_address(as->pd);
+
+               *dmap = smmu_pde_to_dma(pd[pde]);
        }
 
-       *pagep = page;
+       return tegra_smmu_pte_offset(as->pts[pde], iova);
+}
 
-       /* Keep track of entries in this page table. */
-       count = page_address(as->count);
-       if (pt[pte] == 0)
-               count[pde]++;
+static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
+{
+       unsigned int pd_index = iova_pd_index(iova);
 
-       return &pt[pte];
+       as->count[pd_index]++;
 }
 
-static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
+static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 {
-       u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
-       u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
-       u32 *count = page_address(as->count);
-       u32 *pd = page_address(as->pd), *pt;
-       struct page *page;
-
-       page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
-       pt = page_address(page);
+       unsigned int pde = iova_pd_index(iova);
+       struct page *page = as->pts[pde];
 
        /*
         * When no entries in this page table are used anymore, return the
         * memory page to the system.
         */
-       if (pt[pte] != 0) {
-               if (--count[pde] == 0) {
-                       ClearPageReserved(page);
-                       __free_page(page);
-                       pd[pde] = 0;
-               }
+       if (--as->count[pde] == 0) {
+               struct tegra_smmu *smmu = as->smmu;
+               u32 *pd = page_address(as->pd);
+               dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
+
+               tegra_smmu_set_pde(as, iova, 0);
 
-               pt[pte] = 0;
+               dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
+               __free_page(page);
+               as->pts[pde] = NULL;
        }
 }
 
+static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
+                              u32 *pte, dma_addr_t pte_dma, u32 val)
+{
+       struct tegra_smmu *smmu = as->smmu;
+       unsigned long offset = offset_in_page(pte);
+
+       *pte = val;
+
+       dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
+                                        4, DMA_TO_DEVICE);
+       smmu_flush_ptc(smmu, pte_dma, offset);
+       smmu_flush_tlb_group(smmu, as->id, iova);
+       smmu_flush(smmu);
+}
+
 static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot)
 {
        struct tegra_smmu_as *as = to_smmu_as(domain);
-       struct tegra_smmu *smmu = as->smmu;
-       unsigned long offset;
-       struct page *page;
+       dma_addr_t pte_dma;
        u32 *pte;
 
-       pte = as_get_pte(as, iova, &page);
+       pte = as_get_pte(as, iova, &pte_dma);
        if (!pte)
                return -ENOMEM;
 
-       *pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
-       offset = offset_in_page(pte);
+       /* If we aren't overwriting a pre-existing entry, increment use */
+       if (*pte == 0)
+               tegra_smmu_pte_get_use(as, iova);
 
-       smmu->soc->ops->flush_dcache(page, offset, 4);
-       smmu_flush_ptc(smmu, page, offset);
-       smmu_flush_tlb_group(smmu, as->id, iova);
-       smmu_flush(smmu);
+       tegra_smmu_set_pte(as, iova, pte, pte_dma,
+                          __phys_to_pfn(paddr) | SMMU_PTE_ATTR);
 
        return 0;
 }
@@ -563,22 +654,15 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
 {
        struct tegra_smmu_as *as = to_smmu_as(domain);
-       struct tegra_smmu *smmu = as->smmu;
-       unsigned long offset;
-       struct page *page;
+       dma_addr_t pte_dma;
        u32 *pte;
 
-       pte = as_get_pte(as, iova, &page);
-       if (!pte)
+       pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
+       if (!pte || !*pte)
                return 0;
 
-       offset = offset_in_page(pte);
-       as_put_pte(as, iova);
-
-       smmu->soc->ops->flush_dcache(page, offset, 4);
-       smmu_flush_ptc(smmu, page, offset);
-       smmu_flush_tlb_group(smmu, as->id, iova);
-       smmu_flush(smmu);
+       tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
+       tegra_smmu_pte_put_use(as, iova);
 
        return size;
 }
@@ -587,11 +671,14 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
 {
        struct tegra_smmu_as *as = to_smmu_as(domain);
-       struct page *page;
        unsigned long pfn;
+       dma_addr_t pte_dma;
        u32 *pte;
 
-       pte = as_get_pte(as, iova, &page);
+       pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
+       if (!pte || !*pte)
+               return 0;
+
        pfn = *pte & as->smmu->pfn_mask;
 
        return PFN_PHYS(pfn);
@@ -816,6 +903,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
        smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
        dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
                mc->soc->num_address_bits, smmu->pfn_mask);
+       smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
+       dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
+               smmu->tlb_mask);
 
        value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
 
@@ -825,14 +915,14 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
        smmu_writel(smmu, value, SMMU_PTC_CONFIG);
 
        value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
-               SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
+               SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
 
        if (soc->supports_round_robin_arbitration)
                value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
 
        smmu_writel(smmu, value, SMMU_TLB_CONFIG);
 
-       smmu_flush_ptc(smmu, NULL, 0);
+       smmu_flush_ptc_all(smmu);
        smmu_flush_tlb(smmu);
        smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
        smmu_flush(smmu);
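
Two things are worth noting in the tegra-smmu.c conversion above. First,
page directories and page tables are no longer handed to hardware by raw
physical address: every table page is dma_map_page()'d once, and CPU
updates are published with dma_sync_single_range_for_device() before the
PTC/TLB flushes. Second, the active-lines value now comes from SoC data:
with num_tlb_lines = 32, tlb_mask = (32 << 1) - 1 = 0x3f, so
SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) yields 32 & 0x3f = 32 instead of the
old hard-coded 0x20. A condensed sketch of the mapping pattern
(hypothetical names; error handling trimmed to the essentials):

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    static u32 *example_alloc_table(struct device *dev, size_t size,
                                    dma_addr_t *dmap)
    {
            struct page *page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
            dma_addr_t dma;

            if (!page)
                    return NULL;

            /* hand the page to the IOMMU hardware through the DMA API */
            dma = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, dma)) {
                    __free_page(page);
                    return NULL;
            }

            *dmap = dma;
            return page_address(page);
    }

    static void example_set_entry(struct device *dev, u32 *table,
                                  dma_addr_t dma, unsigned int index, u32 val)
    {
            table[index] = val;
            /* publish the CPU write before the hardware walks the table */
            dma_sync_single_range_for_device(dev, dma, index * sizeof(*table),
                                             sizeof(*table), DMA_TO_DEVICE);
    }
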
index 692fe2bc81979b6b48f984ec6d88c117fbfdebdd..c12bb93334ff99e7a5ddc72d274b00c4eeb9c24c 100644 (file)
@@ -68,7 +68,9 @@ static struct irq_chip crossbar_chip = {
        .irq_mask               = irq_chip_mask_parent,
        .irq_unmask             = irq_chip_unmask_parent,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
-       .irq_set_wake           = irq_chip_set_wake_parent,
+       .irq_set_type           = irq_chip_set_type_parent,
+       .flags                  = IRQCHIP_MASK_ON_SUSPEND |
+                                 IRQCHIP_SKIP_SET_WAKE,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
 #endif
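
The crossbar change above drops the irq_set_wake forward entirely:
IRQCHIP_SKIP_SET_WAKE lets the genirq core accept wake requests without
calling into the chip, and IRQCHIP_MASK_ON_SUSPEND masks all non-wake
interrupts on the way into suspend. From a consumer's point of view
nothing changes (hypothetical sketch):

    #include <linux/interrupt.h>

    static int example_arm_wakeup(unsigned int irq)
    {
            /* succeeds with IRQCHIP_SKIP_SET_WAKE; the core just marks
             * the IRQ as a wake source */
            return enable_irq_wake(irq);
    }
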
index 32814371b8d304539a2eb1cfece077510b4ca394..aa1b41ca40f778dcb4e6c0e393ab4ee33d25d388 100644 (file)
@@ -1471,5 +1471,3 @@ module_exit(mq_exit);
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("mq cache policy");
-
-MODULE_ALIAS("dm-cache-default");
index 48a4a826ae07649419d033b99c564b2adb9da6ea..200366c62231dd5f8f38f74284a42810c8603d19 100644 (file)
@@ -1789,3 +1789,5 @@ module_exit(smq_exit);
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("smq cache policy");
+
+MODULE_ALIAS("dm-cache-default");
index 48dfe3c4d6aa7968bbc1986eafcb9e965fe55950..6ba47cfb1443748ccf092819a6a5ef160bb856fd 100644 (file)
@@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
                return r;
 
        disk_super = dm_block_data(copy);
-       dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
-       dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+       dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
+       dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
        dm_sm_dec_block(pmd->metadata_sm, held_root);
 
        return dm_tm_unlock(pmd->tm, copy);
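
The __release_metadata_snap() fix above matters because the held snapshot
root pins whole btrees, not single blocks: decrementing only the two root
blocks leaked every interior node and leaf beneath them, whereas
dm_btree_del() walks the tree and releases each block it owns. An
illustrative contrast (hypothetical wrapper):

    /* releases the root and everything below it, not just one block */
    static int example_release_tree(struct dm_btree_info *info, dm_block_t root)
    {
            return dm_btree_del(info, root);
    }
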
index bf2b80d5c4707a64210b5e57deb785069dc7d921..8731b6ea026bd9b8cfbe2a21bbc06366e509181c 100644 (file)
@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
 
 extern struct dm_block_validator btree_node_validator;
 
+/*
+ * Value type for upper levels of multi-level btrees.
+ */
+extern void init_le64_type(struct dm_transaction_manager *tm,
+                          struct dm_btree_value_type *vt);
+
 #endif /* DM_BTREE_INTERNAL_H */
index 9ca9eccd512fd99f8116a20ab1b3ed67d821cb6d..4222f774cf369b1eb1b031bd652854c573b224af 100644 (file)
@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
        return r;
 }
 
-static struct dm_btree_value_type le64_type = {
-       .context = NULL,
-       .size = sizeof(__le64),
-       .inc = NULL,
-       .dec = NULL,
-       .equal = NULL
-};
-
 int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
                    uint64_t *keys, dm_block_t *new_root)
 {
@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
        int index = 0, r = 0;
        struct shadow_spine spine;
        struct btree_node *n;
+       struct dm_btree_value_type le64_vt;
 
+       init_le64_type(info->tm, &le64_vt);
        init_shadow_spine(&spine, info);
        for (level = 0; level < info->levels; level++) {
                r = remove_raw(&spine, info,
                               (level == last_level ?
-                               &info->value_type : &le64_type),
+                               &info->value_type : &le64_vt),
                               root, keys[level], (unsigned *)&index);
                if (r < 0)
                        break;
@@ -654,11 +648,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
        int index = 0, r = 0;
        struct shadow_spine spine;
        struct btree_node *n;
+       struct dm_btree_value_type le64_vt;
        uint64_t k;
 
+       init_le64_type(info->tm, &le64_vt);
        init_shadow_spine(&spine, info);
        for (level = 0; level < last_level; level++) {
-               r = remove_raw(&spine, info, &le64_type,
+               r = remove_raw(&spine, info, &le64_vt,
                               root, keys[level], (unsigned *) &index);
                if (r < 0)
                        goto out;
index 1b5e13ec7f96a670ed7a9b5b472a5d2ee95a7dff..0dee514ba4c5f9e8d34d16e9d239ef333395c7d4 100644 (file)
@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
 {
        return s->root;
 }
+
+static void le64_inc(void *context, const void *value_le)
+{
+       struct dm_transaction_manager *tm = context;
+       __le64 v_le;
+
+       memcpy(&v_le, value_le, sizeof(v_le));
+       dm_tm_inc(tm, le64_to_cpu(v_le));
+}
+
+static void le64_dec(void *context, const void *value_le)
+{
+       struct dm_transaction_manager *tm = context;
+       __le64 v_le;
+
+       memcpy(&v_le, value_le, sizeof(v_le));
+       dm_tm_dec(tm, le64_to_cpu(v_le));
+}
+
+static int le64_equal(void *context, const void *value1_le, const void *value2_le)
+{
+       __le64 v1_le, v2_le;
+
+       memcpy(&v1_le, value1_le, sizeof(v1_le));
+       memcpy(&v2_le, value2_le, sizeof(v2_le));
+       return v1_le == v2_le;
+}
+
+void init_le64_type(struct dm_transaction_manager *tm,
+                   struct dm_btree_value_type *vt)
+{
+       vt->context = tm;
+       vt->size = sizeof(__le64);
+       vt->inc = le64_inc;
+       vt->dec = le64_dec;
+       vt->equal = le64_equal;
+}
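
init_le64_type() replaces the all-NULL le64 value type the insert/remove
paths used to share (see the dm-btree-remove.c hunk above and the
dm-btree.c hunk below): upper levels of a multi-level btree store __le64
child block numbers, so copying or shadowing an internal node must adjust
reference counts through the transaction manager. A minimal usage sketch
(assuming a populated struct dm_btree_info *info, as in the callers):

    static void example_init_internal_vt(struct dm_btree_info *info,
                                         struct dm_btree_value_type *vt)
    {
            /* inc/dec end up in dm_tm_inc()/dm_tm_dec() on the child blocks */
            init_le64_type(info->tm, vt);
    }
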
index fdd3793e22f957ef08db71f897607c68ce6eb6a3..c7726cebc4950c24cb6f6f2b7cacfa6465bddeb6 100644 (file)
@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
        struct btree_node *n;
        struct dm_btree_value_type le64_type;
 
-       le64_type.context = NULL;
-       le64_type.size = sizeof(__le64);
-       le64_type.inc = NULL;
-       le64_type.dec = NULL;
-       le64_type.equal = NULL;
-
+       init_le64_type(info->tm, &le64_type);
        init_shadow_spine(&spine, info);
 
        for (level = 0; level < (info->levels - 1); level++) {
index 0d35f5850ff1ea8e9910168f55fe04f05c42600e..5ab90f36a6a687c3c64edd56f09d3f8708dba635 100644 (file)
@@ -240,7 +240,7 @@ config DVB_SI21XX
 
 config DVB_TS2020
        tristate "Montage Tehnology TS2020 based tuners"
-       depends on DVB_CORE
+       depends on DVB_CORE && I2C
        select REGMAP_I2C
        default m if !MEDIA_SUBDRV_AUTOSELECT
        help
index 3be1b2c3c3860ec48c2e9a1e5e5cd41daf56046f..6a1c0089bb6279c6660b6154681c1c7cdeb129d4 100644 (file)
@@ -2,6 +2,7 @@ config VIDEO_COBALT
        tristate "Cisco Cobalt support"
        depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
        depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
+       depends on SND
        select I2C_ALGOBIT
        select VIDEO_ADV7604
        select VIDEO_ADV7511
index dd4bff9cf3390a4f167c6e5d1a9ae4886dae9750..d1f5898d11ba1c046737b2bb2a7ba48fa7547b9d 100644 (file)
@@ -139,7 +139,7 @@ done:
           also know about dropped frames. */
        cb->vb.v4l2_buf.sequence = s->sequence++;
        vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
-                       VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE);
+                       VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
 }
 
 irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
index 1d59c7e039f7d41e989dc986cf6ac09815165747..87990ece5848957baf4af40dd5100f356466b901 100644 (file)
@@ -130,10 +130,11 @@ err:
 
 int mantis_dma_init(struct mantis_pci *mantis)
 {
-       int err = 0;
+       int err;
 
        dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
-       if (mantis_alloc_buffers(mantis) < 0) {
+       err = mantis_alloc_buffers(mantis);
+       if (err < 0) {
                dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
 
                /* Stop RISC Engine */
index 8939ebd7439198161935f573652f25babb438840..84fa6e9b59a1acd9360364fbd30ca0a4e00d3a56 100644 (file)
@@ -184,125 +184,9 @@ out:
        return -EINVAL;
 }
 
-static struct ir_raw_timings_manchester ir_rc5_timings = {
-       .leader                 = RC5_UNIT,
-       .pulse_space_start      = 0,
-       .clock                  = RC5_UNIT,
-       .trailer_space          = RC5_UNIT * 10,
-};
-
-static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
-       {
-               .leader                 = RC5_UNIT,
-               .pulse_space_start      = 0,
-               .clock                  = RC5_UNIT,
-               .trailer_space          = RC5X_SPACE,
-       },
-       {
-               .clock                  = RC5_UNIT,
-               .trailer_space          = RC5_UNIT * 10,
-       },
-};
-
-static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
-       .leader                         = RC5_UNIT,
-       .pulse_space_start              = 0,
-       .clock                          = RC5_UNIT,
-       .trailer_space                  = RC5_UNIT * 10,
-};
-
-static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
-                                 unsigned int important_bits)
-{
-       /* all important bits of scancode should be set in mask */
-       if (~scancode->mask & important_bits)
-               return -EINVAL;
-       /* extra bits in mask should be zero in data */
-       if (scancode->mask & scancode->data & ~important_bits)
-               return -EINVAL;
-       return 0;
-}
-
-/**
- * ir_rc5_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode:  scancode filter describing scancode (helps distinguish between
- *             protocol subtypes when scancode is ambiguous)
- * @events:    array of raw ir events to write into
- * @max:       maximum size of @events
- *
- * Returns:    The number of events written.
- *             -ENOBUFS if there isn't enough space in the array to fit the
- *             encoding. In this case all @max events will have been written.
- *             -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc5_encode(u64 protocols,
-                        const struct rc_scancode_filter *scancode,
-                        struct ir_raw_event *events, unsigned int max)
-{
-       int ret;
-       struct ir_raw_event *e = events;
-       unsigned int data, xdata, command, commandx, system;
-
-       /* Detect protocol and convert scancode to raw data */
-       if (protocols & RC_BIT_RC5 &&
-           !ir_rc5_validate_filter(scancode, 0x1f7f)) {
-               /* decode scancode */
-               command  = (scancode->data & 0x003f) >> 0;
-               commandx = (scancode->data & 0x0040) >> 6;
-               system   = (scancode->data & 0x1f00) >> 8;
-               /* encode data */
-               data = !commandx << 12 | system << 6 | command;
-
-               /* Modulate the data */
-               ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
-                                           data);
-               if (ret < 0)
-                       return ret;
-       } else if (protocols & RC_BIT_RC5X &&
-                  !ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
-               /* decode scancode */
-               xdata    = (scancode->data & 0x00003f) >> 0;
-               command  = (scancode->data & 0x003f00) >> 8;
-               commandx = (scancode->data & 0x004000) >> 14;
-               system   = (scancode->data & 0x1f0000) >> 16;
-               /* commandx and system overlap, bits must match when encoded */
-               if (commandx == (system & 0x1))
-                       return -EINVAL;
-               /* encode data */
-               data = 1 << 18 | system << 12 | command << 6 | xdata;
-
-               /* Modulate the data */
-               ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
-                                       CHECK_RC5X_NBITS,
-                                       data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
-               if (ret < 0)
-                       return ret;
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                       &ir_rc5x_timings[1],
-                                       RC5X_NBITS - CHECK_RC5X_NBITS,
-                                       data);
-               if (ret < 0)
-                       return ret;
-       } else if (protocols & RC_BIT_RC5_SZ &&
-                  !ir_rc5_validate_filter(scancode, 0x2fff)) {
-               /* RC5-SZ scancode is raw enough for Manchester as it is */
-               ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
-                                       RC5_SZ_NBITS, scancode->data & 0x2fff);
-               if (ret < 0)
-                       return ret;
-       } else {
-               return -EINVAL;
-       }
-
-       return e - events;
-}
-
 static struct ir_raw_handler rc5_handler = {
        .protocols      = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
        .decode         = ir_rc5_decode,
-       .encode         = ir_rc5_encode,
 };
 
 static int __init ir_rc5_decode_init(void)
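
The encoder removal above (and the matching ir-rc6-decoder.c hunk below)
also drops the small mask/data consistency check the encoders shared. The
idiom is useful for any scancode filter, so here is a standalone sketch of
it (example_validate_filter is hypothetical):

    #include <linux/errno.h>
    #include <media/rc-core.h>

    static int example_validate_filter(const struct rc_scancode_filter *f,
                                       u32 important_bits)
    {
            /* every bit the protocol cares about must be covered by the mask */
            if (~f->mask & important_bits)
                    return -EINVAL;
            /* data must not set bits outside the protocol's fields */
            if (f->mask & f->data & ~important_bits)
                    return -EINVAL;
            return 0;
    }
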
index f9c70baf6e0cb1f071f0a6f7e2ac9e81cf131817..d16bc67af732251998fb280b86d887835cf39e1b 100644 (file)
@@ -291,133 +291,11 @@ out:
        return -EINVAL;
 }
 
-static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
-       {
-               .leader                 = RC6_PREFIX_PULSE,
-               .pulse_space_start      = 0,
-               .clock                  = RC6_UNIT,
-               .invert                 = 1,
-               .trailer_space          = RC6_PREFIX_SPACE,
-       },
-       {
-               .clock                  = RC6_UNIT,
-               .invert                 = 1,
-       },
-       {
-               .clock                  = RC6_UNIT * 2,
-               .invert                 = 1,
-       },
-       {
-               .clock                  = RC6_UNIT,
-               .invert                 = 1,
-               .trailer_space          = RC6_SUFFIX_SPACE,
-       },
-};
-
-static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
-                                 unsigned int important_bits)
-{
-       /* all important bits of scancode should be set in mask */
-       if (~scancode->mask & important_bits)
-               return -EINVAL;
-       /* extra bits in mask should be zero in data */
-       if (scancode->mask & scancode->data & ~important_bits)
-               return -EINVAL;
-       return 0;
-}
-
-/**
- * ir_rc6_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode:  scancode filter describing scancode (helps distinguish between
- *             protocol subtypes when scancode is ambiguous)
- * @events:    array of raw ir events to write into
- * @max:       maximum size of @events
- *
- * Returns:    The number of events written.
- *             -ENOBUFS if there isn't enough space in the array to fit the
- *             encoding. In this case all @max events will have been written.
- *             -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc6_encode(u64 protocols,
-                        const struct rc_scancode_filter *scancode,
-                        struct ir_raw_event *events, unsigned int max)
-{
-       int ret;
-       struct ir_raw_event *e = events;
-
-       if (protocols & RC_BIT_RC6_0 &&
-           !ir_rc6_validate_filter(scancode, 0xffff)) {
-
-               /* Modulate the preamble */
-               ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
-               if (ret < 0)
-                       return ret;
-
-               /* Modulate the header (Start Bit & Mode-0) */
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                           &ir_rc6_timings[1],
-                                           RC6_HEADER_NBITS, (1 << 3));
-               if (ret < 0)
-                       return ret;
-
-               /* Modulate Trailer Bit */
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                           &ir_rc6_timings[2], 1, 0);
-               if (ret < 0)
-                       return ret;
-
-               /* Modulate rest of the data */
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                           &ir_rc6_timings[3], RC6_0_NBITS,
-                                           scancode->data);
-               if (ret < 0)
-                       return ret;
-
-       } else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
-                               RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
-                  !ir_rc6_validate_filter(scancode, 0x8fffffff)) {
-
-               /* Modulate the preamble */
-               ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
-               if (ret < 0)
-                       return ret;
-
-               /* Modulate the header (Start Bit & Header-version 6 */
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                           &ir_rc6_timings[1],
-                                           RC6_HEADER_NBITS, (1 << 3 | 6));
-               if (ret < 0)
-                       return ret;
-
-               /* Modulate Trailer Bit */
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                           &ir_rc6_timings[2], 1, 0);
-               if (ret < 0)
-                       return ret;
-
-               /* Modulate rest of the data */
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                           &ir_rc6_timings[3],
-                                           fls(scancode->mask),
-                                           scancode->data);
-               if (ret < 0)
-                       return ret;
-
-       } else {
-               return -EINVAL;
-       }
-
-       return e - events;
-}
-
 static struct ir_raw_handler rc6_handler = {
        .protocols      = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
                          RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
                          RC_BIT_RC6_MCE,
        .decode         = ir_rc6_decode,
-       .encode         = ir_rc6_encode,
 };
 
 static int __init ir_rc6_decode_init(void)
index baeb5971fd52cecca1a01f436713f9f3a77c65ca..85af7a8691677a9b3965f469f37bc49b741de6b7 100644 (file)
@@ -526,130 +526,6 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
        return 0;
 }
 
-static int nvt_write_wakeup_codes(struct rc_dev *dev,
-                                 const u8 *wakeup_sample_buf, int count)
-{
-       int i = 0;
-       u8 reg, reg_learn_mode;
-       unsigned long flags;
-       struct nvt_dev *nvt = dev->priv;
-
-       nvt_dbg_wake("writing wakeup samples");
-
-       reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
-       reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
-       reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
-
-       /* Lock the learn area to prevent racing with wake-isr */
-       spin_lock_irqsave(&nvt->nvt_lock, flags);
-
-       /* Enable fifo writes */
-       nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
-
-       /* Clear cir wake rx fifo */
-       nvt_clear_cir_wake_fifo(nvt);
-
-       if (count > WAKE_FIFO_LEN) {
-               nvt_dbg_wake("HW FIFO too small for all wake samples");
-               count = WAKE_FIFO_LEN;
-       }
-
-       if (count)
-               pr_info("Wake samples (%d) =", count);
-       else
-               pr_info("Wake sample fifo cleared");
-
-       /* Write wake samples to fifo */
-       for (i = 0; i < count; i++) {
-               pr_cont(" %02x", wakeup_sample_buf[i]);
-               nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
-                                      CIR_WAKE_WR_FIFO_DATA);
-       }
-       pr_cont("\n");
-
-       /* Switch cir to wakeup mode and disable fifo writing */
-       nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
-
-       /* Set number of bytes needed for wake */
-       nvt_cir_wake_reg_write(nvt, count ? count :
-                              CIR_WAKE_FIFO_CMP_BYTES,
-                              CIR_WAKE_FIFO_CMP_DEEP);
-
-       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
-       return 0;
-}
-
-static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
-                                       struct rc_scancode_filter *sc_filter)
-{
-       u8 *reg_buf;
-       u8 buf_val;
-       int i, ret, count;
-       unsigned int val;
-       struct ir_raw_event *raw;
-       bool complete;
-
-       /* Require both mask and data to be set before actually committing */
-       if (!sc_filter->mask || !sc_filter->data)
-               return 0;
-
-       raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
-       if (!raw)
-               return -ENOMEM;
-
-       ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
-                                    raw, WAKE_FIFO_LEN);
-       complete = (ret != -ENOBUFS);
-       if (!complete)
-               ret = WAKE_FIFO_LEN;
-       else if (ret < 0)
-               goto out_raw;
-
-       reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
-       if (!reg_buf) {
-               ret = -ENOMEM;
-               goto out_raw;
-       }
-
-       /* Inspect the ir samples */
-       for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
-               val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
-
-               /* Split too large values into several smaller ones */
-               while (val > 0 && count < WAKE_FIFO_LEN) {
-
-                       /* Skip last value for better comparison tolerance */
-                       if (complete && i == ret - 1 && val < BUF_LEN_MASK)
-                               break;
-
-                       /* Clamp values to BUF_LEN_MASK at most */
-                       buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
-
-                       reg_buf[count] = buf_val;
-                       val -= buf_val;
-                       if ((raw[i]).pulse)
-                               reg_buf[count] |= BUF_PULSE_BIT;
-                       count++;
-               }
-       }
-
-       ret = nvt_write_wakeup_codes(dev, reg_buf, count);
-
-       kfree(reg_buf);
-out_raw:
-       kfree(raw);
-
-       return ret;
-}
-
-/* Dummy implementation. nuvoton is agnostic to the protocol used */
-static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
-                                            u64 *rc_type)
-{
-       return 0;
-}
-
 /*
  * nvt_tx_ir
  *
@@ -1167,14 +1043,11 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
        /* Set up the rc device */
        rdev->priv = nvt;
        rdev->driver_type = RC_DRIVER_IR_RAW;
-       rdev->encode_wakeup = true;
        rdev->allowed_protocols = RC_BIT_ALL;
        rdev->open = nvt_open;
        rdev->close = nvt_close;
        rdev->tx_ir = nvt_tx_ir;
        rdev->s_tx_carrier = nvt_set_tx_carrier;
-       rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
-       rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
        rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
        rdev->input_phys = "nuvoton/cir0";
        rdev->input_id.bustype = BUS_HOST;
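
One reusable idea from the removed wakeup-filter code above: a hardware
FIFO entry only holds a bounded duration (BUF_LEN_MASK sample periods
here), so a long pulse or space was written as a run of saturated entries.
A standalone sketch of that split-and-clamp loop (hypothetical helper;
max_entry is assumed to fit in a byte):

    /* splits 'val' into FIFO entries of at most 'max_entry' each; returns
     * the number of entries written, stopping when 'room' runs out */
    static unsigned int example_split_duration(unsigned int val,
                                               unsigned int max_entry,
                                               unsigned char *buf,
                                               unsigned int room)
    {
            unsigned int count = 0;

            while (val && count < room) {
                    unsigned char chunk = val > max_entry ? max_entry : val;

                    buf[count++] = chunk;
                    val -= chunk;
            }
            return count;
    }
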
index 9d0e161c2a886c511d3fde3928b154b0cfb7f0db..e1cf23c3875b16ead52464d4e6ad3c479fd1e951 100644 (file)
@@ -63,7 +63,6 @@ static int debug;
  */
 #define TX_BUF_LEN 256
 #define RX_BUF_LEN 32
-#define WAKE_FIFO_LEN 67
 
 struct nvt_dev {
        struct pnp_dev *pdev;
index 4b994aa2f2a7d1f763fd5bb7eccfb95f8ba1b6d6..b68d4f76273448fcbc98fc1e215a6656c9b6c6ec 100644 (file)
@@ -25,8 +25,6 @@ struct ir_raw_handler {
 
        u64 protocols; /* which are handled by this handler */
        int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
-       int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode,
-                     struct ir_raw_event *events, unsigned int max);
 
        /* These two should only be used by the lirc decoder */
        int (*raw_register)(struct rc_dev *dev);
@@ -152,44 +150,10 @@ static inline bool is_timing_event(struct ir_raw_event ev)
 #define TO_US(duration)                        DIV_ROUND_CLOSEST((duration), 1000)
 #define TO_STR(is_pulse)               ((is_pulse) ? "pulse" : "space")
 
-/* functions for IR encoders */
-
-static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
-                                             unsigned int pulse,
-                                             u32 duration)
-{
-       init_ir_raw_event(ev);
-       ev->duration = duration;
-       ev->pulse = pulse;
-}
-
-/**
- * struct ir_raw_timings_manchester - Manchester coding timings
- * @leader:            duration of leader pulse (if any) 0 if continuing
- *                     existing signal (see @pulse_space_start)
- * @pulse_space_start: 1 for starting with pulse (0 for starting with space)
- * @clock:             duration of each pulse/space in ns
- * @invert:            if set clock logic is inverted
- *                     (0 = space + pulse, 1 = pulse + space)
- * @trailer_space:     duration of trailer space in ns
- */
-struct ir_raw_timings_manchester {
-       unsigned int leader;
-       unsigned int pulse_space_start:1;
-       unsigned int clock;
-       unsigned int invert:1;
-       unsigned int trailer_space;
-};
-
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
-                         const struct ir_raw_timings_manchester *timings,
-                         unsigned int n, unsigned int data);
-
 /*
  * Routines from rc-raw.c to be used internally and by decoders
  */
 u64 ir_raw_get_allowed_protocols(void);
-u64 ir_raw_get_encode_protocols(void);
 int ir_raw_event_register(struct rc_dev *dev);
 void ir_raw_event_unregister(struct rc_dev *dev);
 int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
index b9e4645c731c087f3dea99fa652fe09ef19dc8de..b732ac6a26d8065cc26ecb3a648ced999db71367 100644 (file)
@@ -30,7 +30,6 @@ static LIST_HEAD(ir_raw_client_list);
 static DEFINE_MUTEX(ir_raw_handler_lock);
 static LIST_HEAD(ir_raw_handler_list);
 static u64 available_protocols;
-static u64 encode_protocols;
 
 static int ir_raw_event_thread(void *data)
 {
@@ -241,146 +240,12 @@ ir_raw_get_allowed_protocols(void)
        return protocols;
 }
 
-/* used internally by the sysfs interface */
-u64
-ir_raw_get_encode_protocols(void)
-{
-       u64 protocols;
-
-       mutex_lock(&ir_raw_handler_lock);
-       protocols = encode_protocols;
-       mutex_unlock(&ir_raw_handler_lock);
-       return protocols;
-}
-
 static int change_protocol(struct rc_dev *dev, u64 *rc_type)
 {
        /* the caller will update dev->enabled_protocols */
        return 0;
 }
 
-/**
- * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
- * @ev:                Pointer to pointer to next free event. *@ev is incremented for
- *             each raw event filled.
- * @max:       Maximum number of raw events to fill.
- * @timings:   Manchester modulation timings.
- * @n:         Number of bits of data.
- * @data:      Data bits to encode.
- *
- * Encodes the @n least significant bits of @data using Manchester (bi-phase)
- * modulation with the timing characteristics described by @timings, writing up
- * to @max raw IR events using the *@ev pointer.
- *
- * Returns:    0 on success.
- *             -ENOBUFS if there isn't enough space in the array to fit the
- *             full encoded data. In this case all @max events will have been
- *             written.
- */
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
-                         const struct ir_raw_timings_manchester *timings,
-                         unsigned int n, unsigned int data)
-{
-       bool need_pulse;
-       unsigned int i;
-       int ret = -ENOBUFS;
-
-       i = 1 << (n - 1);
-
-       if (timings->leader) {
-               if (!max--)
-                       return ret;
-               if (timings->pulse_space_start) {
-                       init_ir_raw_event_duration((*ev)++, 1, timings->leader);
-
-                       if (!max--)
-                               return ret;
-                       init_ir_raw_event_duration((*ev), 0, timings->leader);
-               } else {
-                       init_ir_raw_event_duration((*ev), 1, timings->leader);
-               }
-               i >>= 1;
-       } else {
-               /* continue existing signal */
-               --(*ev);
-       }
-       /* from here on *ev will point to the last event rather than the next */
-
-       while (n && i > 0) {
-               need_pulse = !(data & i);
-               if (timings->invert)
-                       need_pulse = !need_pulse;
-               if (need_pulse == !!(*ev)->pulse) {
-                       (*ev)->duration += timings->clock;
-               } else {
-                       if (!max--)
-                               goto nobufs;
-                       init_ir_raw_event_duration(++(*ev), need_pulse,
-                                                  timings->clock);
-               }
-
-               if (!max--)
-                       goto nobufs;
-               init_ir_raw_event_duration(++(*ev), !need_pulse,
-                                          timings->clock);
-               i >>= 1;
-       }
-
-       if (timings->trailer_space) {
-               if (!(*ev)->pulse)
-                       (*ev)->duration += timings->trailer_space;
-               else if (!max--)
-                       goto nobufs;
-               else
-                       init_ir_raw_event_duration(++(*ev), 0,
-                                                  timings->trailer_space);
-       }
-
-       ret = 0;
-nobufs:
-       /* point to the next event rather than last event before returning */
-       ++(*ev);
-       return ret;
-}
-EXPORT_SYMBOL(ir_raw_gen_manchester);
-
-/**
- * ir_raw_encode_scancode() - Encode a scancode as raw events
- *
- * @protocols:         permitted protocols
- * @scancode:          scancode filter describing a single scancode
- * @events:            array of raw events to write into
- * @max:               max number of raw events
- *
- * Attempts to encode the scancode as raw events.
- *
- * Returns:    The number of events written.
- *             -ENOBUFS if there isn't enough space in the array to fit the
- *             encoding. In this case all @max events will have been written.
- *             -EINVAL if the scancode is ambiguous or invalid, or if no
- *             compatible encoder was found.
- */
-int ir_raw_encode_scancode(u64 protocols,
-                          const struct rc_scancode_filter *scancode,
-                          struct ir_raw_event *events, unsigned int max)
-{
-       struct ir_raw_handler *handler;
-       int ret = -EINVAL;
-
-       mutex_lock(&ir_raw_handler_lock);
-       list_for_each_entry(handler, &ir_raw_handler_list, list) {
-               if (handler->protocols & protocols && handler->encode) {
-                       ret = handler->encode(protocols, scancode, events, max);
-                       if (ret >= 0 || ret == -ENOBUFS)
-                               break;
-               }
-       }
-       mutex_unlock(&ir_raw_handler_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(ir_raw_encode_scancode);
-
 /*
  * Used to (un)register raw event clients
  */
@@ -463,8 +328,6 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
                list_for_each_entry(raw, &ir_raw_client_list, list)
                        ir_raw_handler->raw_register(raw->dev);
        available_protocols |= ir_raw_handler->protocols;
-       if (ir_raw_handler->encode)
-               encode_protocols |= ir_raw_handler->protocols;
        mutex_unlock(&ir_raw_handler_lock);
 
        return 0;
@@ -481,8 +344,6 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
                list_for_each_entry(raw, &ir_raw_client_list, list)
                        ir_raw_handler->raw_unregister(raw->dev);
        available_protocols &= ~ir_raw_handler->protocols;
-       if (ir_raw_handler->encode)
-               encode_protocols &= ~ir_raw_handler->protocols;
        mutex_unlock(&ir_raw_handler_lock);
 }
 EXPORT_SYMBOL(ir_raw_handler_unregister);
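
The two exports removed above were rc-core's raw-IR encoding API:
ir_raw_gen_manchester() expands bits MSB-first into pulse/space half-bit
pairs (merging adjacent equal levels into one longer event, and reporting
-ENOBUFS with all @max slots written when the caller's buffer fills), and
ir_raw_encode_scancode() walks the registered handlers until one can
encode. A minimal userspace sketch of the Manchester expansion, kept
independent of the rc-core types; the PS/SP notation is purely
illustrative:

    #include <stdio.h>

    /* Expand the low n bits of data, MSB first, into pulse/space
     * half-bit pairs -- the same walk the removed encoder performs
     * with its "i" mask.  A set bit yields space-then-pulse ("SP"),
     * a clear bit pulse-then-space ("PS"); inverted protocols swap
     * the pair, as the timings->invert test did above.
     */
    static void manchester_bits(unsigned int data, unsigned int n, int invert)
    {
            unsigned int i;

            for (i = 1u << (n - 1); i; i >>= 1) {
                    int pulse_first = !(data & i);

                    if (invert)
                            pulse_first = !pulse_first;
                    printf(pulse_first ? "PS " : "SP ");
            }
            printf("\n");
    }

    int main(void)
    {
            manchester_bits(0x5, 4, 0);     /* 0101 -> PS SP PS SP */
            return 0;
    }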
index d8bdf63ce9858ccfc78536fc698698e16c751670..63dace8198b0b3dbcb116108e238351fe9684a9e 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/sched.h>
-#include <linux/slab.h>
 #include <media/rc-core.h>
 
 #define DRIVER_NAME    "rc-loopback"
@@ -177,39 +176,6 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable)
        return 0;
 }
 
-static int loop_set_wakeup_filter(struct rc_dev *dev,
-                                 struct rc_scancode_filter *sc_filter)
-{
-       static const unsigned int max = 512;
-       struct ir_raw_event *raw;
-       int ret;
-       int i;
-
-       /* fine to disable filter */
-       if (!sc_filter->mask)
-               return 0;
-
-       /* encode the specified filter and loop it back */
-       raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
-       ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
-                                    raw, max);
-       /* still loop back the partial raw IR even if it's incomplete */
-       if (ret == -ENOBUFS)
-               ret = max;
-       if (ret >= 0) {
-               /* do the loopback */
-               for (i = 0; i < ret; ++i)
-                       ir_raw_event_store(dev, &raw[i]);
-               ir_raw_event_handle(dev);
-
-               ret = 0;
-       }
-
-       kfree(raw);
-
-       return ret;
-}
-
 static int __init loop_init(void)
 {
        struct rc_dev *rc;
@@ -229,7 +195,6 @@ static int __init loop_init(void)
        rc->map_name            = RC_MAP_EMPTY;
        rc->priv                = &loopdev;
        rc->driver_type         = RC_DRIVER_IR_RAW;
-       rc->encode_wakeup       = true;
        rc->allowed_protocols   = RC_BIT_ALL;
        rc->timeout             = 100 * 1000 * 1000; /* 100 ms */
        rc->min_timeout         = 1;
@@ -244,7 +209,6 @@ static int __init loop_init(void)
        rc->s_idle              = loop_set_idle;
        rc->s_learning_mode     = loop_set_learning_mode;
        rc->s_carrier_report    = loop_set_carrier_report;
-       rc->s_wakeup_filter     = loop_set_wakeup_filter;
 
        loopdev.txmask          = RXMASK_REGULAR;
        loopdev.txcarrier       = 36000;
index 9d015db652808fcb54fc0abfff19e9018dbad488..0ff388a1616876d50ade807ff5bff20204d1b8cd 100644 (file)
@@ -865,8 +865,6 @@ static ssize_t show_protocols(struct device *device,
        } else {
                enabled = dev->enabled_wakeup_protocols;
                allowed = dev->allowed_wakeup_protocols;
-               if (dev->encode_wakeup && !allowed)
-                       allowed = ir_raw_get_encode_protocols();
        }
 
        mutex_unlock(&dev->lock);
@@ -1408,16 +1406,13 @@ int rc_register_device(struct rc_dev *dev)
                path ? path : "N/A");
        kfree(path);
 
-       if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) {
+       if (dev->driver_type == RC_DRIVER_IR_RAW) {
                /* Load raw decoders, if they aren't already */
                if (!raw_init) {
                        IR_dprintk(1, "Loading raw decoders\n");
                        ir_raw_init();
                        raw_init = true;
                }
-       }
-
-       if (dev->driver_type == RC_DRIVER_IR_RAW) {
                /* calls ir_register_device so unlock mutex here */
                mutex_unlock(&dev->lock);
                rc = ir_raw_event_register(dev);
index 93b315459098932381d61d282cdb3f04194b18ae..a14c428f70e992460ed869b492723a5a08f155b9 100644 (file)
@@ -715,6 +715,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
                break;
        case VB2_BUF_STATE_PREPARING:
        case VB2_BUF_STATE_DEQUEUED:
+       case VB2_BUF_STATE_REQUEUEING:
                /* nothing */
                break;
        }
@@ -1182,7 +1183,8 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
 
        if (WARN_ON(state != VB2_BUF_STATE_DONE &&
                    state != VB2_BUF_STATE_ERROR &&
-                   state != VB2_BUF_STATE_QUEUED))
+                   state != VB2_BUF_STATE_QUEUED &&
+                   state != VB2_BUF_STATE_REQUEUEING))
                state = VB2_BUF_STATE_ERROR;
 
 #ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1199,22 +1201,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
        for (plane = 0; plane < vb->num_planes; ++plane)
                call_void_memop(vb, finish, vb->planes[plane].mem_priv);
 
-       /* Add the buffer to the done buffers list */
        spin_lock_irqsave(&q->done_lock, flags);
-       vb->state = state;
-       if (state != VB2_BUF_STATE_QUEUED)
+       if (state == VB2_BUF_STATE_QUEUED ||
+           state == VB2_BUF_STATE_REQUEUEING) {
+               vb->state = VB2_BUF_STATE_QUEUED;
+       } else {
+               /* Add the buffer to the done buffers list */
                list_add_tail(&vb->done_entry, &q->done_list);
+               vb->state = state;
+       }
        atomic_dec(&q->owned_by_drv_count);
        spin_unlock_irqrestore(&q->done_lock, flags);
 
-       if (state == VB2_BUF_STATE_QUEUED) {
+       switch (state) {
+       case VB2_BUF_STATE_QUEUED:
+               return;
+       case VB2_BUF_STATE_REQUEUEING:
                if (q->start_streaming_called)
                        __enqueue_in_driver(vb);
                return;
+       default:
+               /* Inform any processes that may be waiting for buffers */
+               wake_up(&q->done_wq);
+               break;
        }
-
-       /* Inform any processes that may be waiting for buffers */
-       wake_up(&q->done_wq);
 }
 EXPORT_SYMBOL_GPL(vb2_buffer_done);
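
The new VB2_BUF_STATE_REQUEUEING handling above lets a driver hand a
buffer back without waking userspace: vb2_buffer_done() leaves it in
QUEUED state and, once start_streaming() has been called, immediately
re-queues it to the driver via __enqueue_in_driver(). A hedged sketch of
the driver side; "my_dev" and its fields are illustrative stand-ins, not
videobuf2 API:

    #include <linux/interrupt.h>
    #include <media/videobuf2-core.h>

    /* Completion path: a transfer the hardware should retry goes back
     * as REQUEUEING instead of being returned to userspace as DONE.
     */
    static irqreturn_t my_dev_irq(int irq, void *priv)
    {
            struct my_dev *dev = priv;          /* hypothetical state */
            struct vb2_buffer *vb = dev->current_buf;

            vb2_buffer_done(vb, dev->xfer_failed ?
                                VB2_BUF_STATE_REQUEUEING :
                                VB2_BUF_STATE_DONE);
            return IRQ_HANDLED;
    }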
 
@@ -1244,19 +1254,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
 
 static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
 {
-       static bool __check_once __read_mostly;
+       static bool check_once;
 
-       if (__check_once)
+       if (check_once)
                return;
 
-       __check_once = true;
-       __WARN();
+       check_once = true;
+       WARN_ON(1);
 
-       pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+       pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
        if (vb->vb2_queue->allow_zero_bytesused)
-               pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+               pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
        else
-               pr_warn_once("use the actual size instead.\n");
+               pr_warn("use the actual size instead.\n");
 }
 
 /**
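
The bytesused hunk above trades three independent pr_warn_once() gates --
each fires on its own first call, so the lines of the warning need not
appear together -- for one explicit static flag guarding the whole block,
and the private __WARN()/__read_mostly pair for the public WARN_ON(1) and
a plain static. The pattern in miniature, as plain C:

    #include <stdio.h>

    /* One static flag gates the whole multi-line warning, so both
     * lines always print together, exactly once per process.
     */
    static void warn_zero_bytesused(int allow_zero)
    {
            static int warned;

            if (warned)
                    return;
            warned = 1;

            fprintf(stderr, "use of bytesused == 0 is deprecated,\n");
            fprintf(stderr, allow_zero ?
                            "use a decoder stop command instead.\n" :
                            "use the actual size instead.\n");
    }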
index 3a27a84ad3ec376a2543c1ac9568c30e5d7c131b..9426276dbe1402b1445dd7b84da6d7fca38893a6 100644 (file)
@@ -2245,6 +2245,9 @@ void omap3_gpmc_save_context(void)
 {
        int i;
 
+       if (!gpmc_base)
+               return;
+
        gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
        gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
        gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
@@ -2277,6 +2280,9 @@ void omap3_gpmc_restore_context(void)
 {
        int i;
 
+       if (!gpmc_base)
+               return;
+
        gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
        gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
        gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
index 9f579589e8000aaac06333c8d7162bdd5b6041ae..8053f70dbfd19266f9cbbbc220be85d8967d7d11 100644 (file)
@@ -9,8 +9,6 @@
 #include <linux/of.h>
 #include <linux/mm.h>
 
-#include <asm/cacheflush.h>
-
 #include <dt-bindings/memory/tegra114-mc.h>
 
 #include "mc.h"
@@ -914,20 +912,6 @@ static const struct tegra_smmu_swgroup tegra114_swgroups[] = {
        { .name = "tsec",      .swgroup = TEGRA_SWGROUP_TSEC,      .reg = 0x294 },
 };
 
-static void tegra114_flush_dcache(struct page *page, unsigned long offset,
-                                 size_t size)
-{
-       phys_addr_t phys = page_to_phys(page) + offset;
-       void *virt = page_address(page) + offset;
-
-       __cpuc_flush_dcache_area(virt, size);
-       outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra114_smmu_ops = {
-       .flush_dcache = tegra114_flush_dcache,
-};
-
 static const struct tegra_smmu_soc tegra114_smmu_soc = {
        .clients = tegra114_mc_clients,
        .num_clients = ARRAY_SIZE(tegra114_mc_clients),
@@ -935,8 +919,8 @@ static const struct tegra_smmu_soc tegra114_smmu_soc = {
        .num_swgroups = ARRAY_SIZE(tegra114_swgroups),
        .supports_round_robin_arbitration = false,
        .supports_request_limit = false,
+       .num_tlb_lines = 32,
        .num_asids = 4,
-       .ops = &tegra114_smmu_ops,
 };
 
 const struct tegra_mc_soc tegra114_mc_soc = {
index 966e1557e6f414598868a8392b5487cb05e09f61..7d734befe0ed3122cce8032098345c3ff5f40d39 100644 (file)
@@ -9,8 +9,6 @@
 #include <linux/of.h>
 #include <linux/mm.h>
 
-#include <asm/cacheflush.h>
-
 #include <dt-bindings/memory/tegra124-mc.h>
 
 #include "mc.h"
@@ -1002,20 +1000,6 @@ static const struct tegra_smmu_swgroup tegra124_swgroups[] = {
 };
 
 #ifdef CONFIG_ARCH_TEGRA_124_SOC
-static void tegra124_flush_dcache(struct page *page, unsigned long offset,
-                                 size_t size)
-{
-       phys_addr_t phys = page_to_phys(page) + offset;
-       void *virt = page_address(page) + offset;
-
-       __cpuc_flush_dcache_area(virt, size);
-       outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra124_smmu_ops = {
-       .flush_dcache = tegra124_flush_dcache,
-};
-
 static const struct tegra_smmu_soc tegra124_smmu_soc = {
        .clients = tegra124_mc_clients,
        .num_clients = ARRAY_SIZE(tegra124_mc_clients),
@@ -1024,7 +1008,6 @@ static const struct tegra_smmu_soc tegra124_smmu_soc = {
        .supports_round_robin_arbitration = true,
        .supports_request_limit = true,
        .num_asids = 128,
-       .ops = &tegra124_smmu_ops,
 };
 
 const struct tegra_mc_soc tegra124_mc_soc = {
@@ -1039,18 +1022,6 @@ const struct tegra_mc_soc tegra124_mc_soc = {
 #endif /* CONFIG_ARCH_TEGRA_124_SOC */
 
 #ifdef CONFIG_ARCH_TEGRA_132_SOC
-static void tegra132_flush_dcache(struct page *page, unsigned long offset,
-                                 size_t size)
-{
-       void *virt = page_address(page) + offset;
-
-       __flush_dcache_area(virt, size);
-}
-
-static const struct tegra_smmu_ops tegra132_smmu_ops = {
-       .flush_dcache = tegra132_flush_dcache,
-};
-
 static const struct tegra_smmu_soc tegra132_smmu_soc = {
        .clients = tegra124_mc_clients,
        .num_clients = ARRAY_SIZE(tegra124_mc_clients),
@@ -1058,8 +1029,8 @@ static const struct tegra_smmu_soc tegra132_smmu_soc = {
        .num_swgroups = ARRAY_SIZE(tegra124_swgroups),
        .supports_round_robin_arbitration = true,
        .supports_request_limit = true,
+       .num_tlb_lines = 32,
        .num_asids = 128,
-       .ops = &tegra132_smmu_ops,
 };
 
 const struct tegra_mc_soc tegra132_mc_soc = {
index 1abcd8f6f3ba60ed6cdabcc28478123061af0b63..7e0694d80edb3a3a5d63a543f15ab516b87ac0b1 100644 (file)
@@ -9,8 +9,6 @@
 #include <linux/of.h>
 #include <linux/mm.h>
 
-#include <asm/cacheflush.h>
-
 #include <dt-bindings/memory/tegra30-mc.h>
 
 #include "mc.h"
@@ -936,20 +934,6 @@ static const struct tegra_smmu_swgroup tegra30_swgroups[] = {
        { .name = "isp",  .swgroup = TEGRA_SWGROUP_ISP,  .reg = 0x258 },
 };
 
-static void tegra30_flush_dcache(struct page *page, unsigned long offset,
-                                size_t size)
-{
-       phys_addr_t phys = page_to_phys(page) + offset;
-       void *virt = page_address(page) + offset;
-
-       __cpuc_flush_dcache_area(virt, size);
-       outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra30_smmu_ops = {
-       .flush_dcache = tegra30_flush_dcache,
-};
-
 static const struct tegra_smmu_soc tegra30_smmu_soc = {
        .clients = tegra30_mc_clients,
        .num_clients = ARRAY_SIZE(tegra30_mc_clients),
@@ -957,8 +941,8 @@ static const struct tegra_smmu_soc tegra30_smmu_soc = {
        .num_swgroups = ARRAY_SIZE(tegra30_swgroups),
        .supports_round_robin_arbitration = false,
        .supports_request_limit = false,
+       .num_tlb_lines = 16,
        .num_asids = 4,
-       .ops = &tegra30_smmu_ops,
 };
 
 const struct tegra_mc_soc tegra30_mc_soc = {
index 653815950aa2416b277718df69213545573aa557..3f68dd251ce89304bf044960568c58c11aca8fdd 100644 (file)
@@ -115,7 +115,7 @@ config MFD_CROS_EC_I2C
 
 config MFD_CROS_EC_SPI
        tristate "ChromeOS Embedded Controller (SPI)"
-       depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF
+       depends on MFD_CROS_EC && CROS_EC_PROTO && SPI
 
        ---help---
          If you say Y here, you get support for talking to the ChromeOS EC
index bebf58a06a6b2932d57798c0b5b81a31a12e9b7a..a72ddb2950784cf044fbfb5156ebd68866bbea48 100644 (file)
@@ -651,7 +651,7 @@ static int arizona_runtime_suspend(struct device *dev)
 
                arizona->has_fully_powered_off = true;
 
-               disable_irq(arizona->irq);
+               disable_irq_nosync(arizona->irq);
                arizona_enable_reset(arizona);
                regulator_bulk_disable(arizona->num_core_supplies,
                                       arizona->core_supplies);
@@ -1141,10 +1141,6 @@ int arizona_dev_init(struct arizona *arizona)
                             arizona->pdata.gpio_defaults[i]);
        }
 
-       pm_runtime_set_autosuspend_delay(arizona->dev, 100);
-       pm_runtime_use_autosuspend(arizona->dev);
-       pm_runtime_enable(arizona->dev);
-
        /* Chip default */
        if (!arizona->pdata.clk32k_src)
                arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
@@ -1245,11 +1241,17 @@ int arizona_dev_init(struct arizona *arizona)
                                           arizona->pdata.spk_fmt[i]);
        }
 
+       pm_runtime_set_active(arizona->dev);
+       pm_runtime_enable(arizona->dev);
+
        /* Set up for interrupts */
        ret = arizona_irq_init(arizona);
        if (ret != 0)
                goto err_reset;
 
+       pm_runtime_set_autosuspend_delay(arizona->dev, 100);
+       pm_runtime_use_autosuspend(arizona->dev);
+
        arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
                            arizona_clkgen_err, arizona);
        arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
@@ -1278,10 +1280,6 @@ int arizona_dev_init(struct arizona *arizona)
                goto err_irq;
        }
 
-#ifdef CONFIG_PM
-       regulator_disable(arizona->dcvdd);
-#endif
-
        return 0;
 
 err_irq:
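
The arizona reordering above follows the usual runtime-PM bring-up: mark
the device active before pm_runtime_enable() so the PM core issues no
spurious resume, and arm autosuspend only once the IRQ path that can wake
the device exists. A sketch of that ordering under those assumptions;
irq_init stands in for arizona_irq_init() and the 100 ms delay mirrors
the value above:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int probe_pm_order(struct device *dev,
                              int (*irq_init)(struct device *))
    {
            int ret;

            pm_runtime_set_active(dev);     /* hardware is already up */
            pm_runtime_enable(dev);

            ret = irq_init(dev);            /* may now runtime-resume */
            if (ret)
                    return ret;

            pm_runtime_set_autosuspend_delay(dev, 100);     /* ms */
            pm_runtime_use_autosuspend(dev);
            return 0;
    }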
index e1ccefce9a9de629505344f9fc22042ab09b9684..a98dd4f1b0e33126ca04d1a7cecb8ad1e41d8d72 100644 (file)
@@ -786,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
                   slave ? slave->dev->name : "NULL");
 
        if (!slave || !bond->send_peer_notif ||
+           !netif_carrier_ok(bond->dev) ||
            test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
                return false;
 
index 2d1ce3c5d0dd34c9fabb1399a32c49be744afdd1..753887d02b46abc66663a24a2ea3e4e396d6b881 100644 (file)
@@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev)
                        vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
                }
                if (i != RX_RING_SIZE) {
-                       int j;
                        pr_emerg("%s: no memory for rx ring\n", dev->name);
-                       for (j = 0; j < i; j++) {
-                               if (vp->rx_skbuff[j]) {
-                                       dev_kfree_skb(vp->rx_skbuff[j]);
-                                       vp->rx_skbuff[j] = NULL;
-                               }
-                       }
                        retval = -ENOMEM;
-                       goto err_free_irq;
+                       goto err_free_skb;
                }
                /* Wrap the ring. */
                vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev)
        if (!retval)
                goto out;
 
-err_free_irq:
+err_free_skb:
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               if (vp->rx_skbuff[i]) {
+                       dev_kfree_skb(vp->rx_skbuff[i]);
+                       vp->rx_skbuff[i] = NULL;
+               }
+       }
        free_irq(dev->irq, dev);
 err:
        if (vortex_debug > 1)
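
The vortex_open() rework above replaces an inline partial-cleanup loop
with a single err_free_skb label that sweeps the whole ring; slots left
NULL by the failed fill are simply skipped. The idiom in miniature:

    #include <stdlib.h>

    #define RING_SIZE 8

    static void *ring[RING_SIZE];

    /* On a partial allocation failure, jump to one label that frees
     * every slot; free(NULL) is a no-op, so unfilled slots are
     * harmless and no second, partial cleanup loop is needed.
     */
    static int ring_fill(void)
    {
            int i;

            for (i = 0; i < RING_SIZE; i++) {
                    ring[i] = malloc(64);
                    if (!ring[i])
                            goto err_free;
            }
            return 0;

    err_free:
            for (i = 0; i < RING_SIZE; i++) {
                    free(ring[i]);
                    ring[i] = NULL;
            }
            return -1;
    }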
index a90d7364334f9dfa3687dc813e068508a342861c..f7fbdc9d132511b72df0db2ce0b92e4df0774c44 100644 (file)
@@ -262,9 +262,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
        if (likely(skb)) {
                (*pkts_compl)++;
                (*bytes_compl) += skb->len;
+               dev_kfree_skb_any(skb);
        }
 
-       dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;
 
index 76b9052a961c517978494199d74398264583508c..5907c821d131eed6fa5b6b0aa7a93dc41cca0d66 100644 (file)
@@ -1718,6 +1718,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
                offset += sizeof(u32);
                data_buf += sizeof(u32);
                written_so_far += sizeof(u32);
+
+               /* At the end of each 4Kb page, release the nvram lock to
+                * allow the MFW a chance to take it for its own use.
+                */
+               if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
+                   (written_so_far < buf_size)) {
+                       DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+                          "Releasing NVM lock after offset 0x%x\n",
+                          (u32)(offset - sizeof(u32)));
+                       bnx2x_release_nvram_lock(bp);
+                       usleep_range(1000, 2000);
+                       rc = bnx2x_acquire_nvram_lock(bp);
+                       if (rc)
+                               return rc;
+               }
+
                cmd_flags = 0;
        }
 
index 0612b19f6313bd3e6ffa205be2b543585262e31e..506047c386071db9472d60218faa004fe94849a8 100644 (file)
@@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
                        if (!next_cmpl->valid)
                                break;
                }
+               packets++;
 
                /* TODO: BNA_CQ_EF_LOCAL ? */
                if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
                else
                        bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
 
-               packets++;
                rcb->rxq->rx_packets++;
                rcb->rxq->rx_bytes += totlen;
                ccb->bytes_per_intr += totlen;
index c4d6bbe9458dbfe9c726fdff1e9b6c4c64b8a33f..02e23e6f142487ddc8cc2a8d4e2c512cb7e6db62 100644 (file)
@@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM
 config THUNDER_NIC_PF
        tristate "Thunder Physical function driver"
        depends on 64BIT
-       default ARCH_THUNDER
        select THUNDER_NIC_BGX
        ---help---
          This driver supports Thunder's NIC physical function.
@@ -29,14 +28,12 @@ config THUNDER_NIC_PF
 config THUNDER_NIC_VF
        tristate "Thunder Virtual function driver"
        depends on 64BIT
-       default ARCH_THUNDER
        ---help---
          This driver supports Thunder's NIC virtual function
 
 config THUNDER_NIC_BGX
        tristate "Thunder MAC interface driver (BGX)"
        depends on 64BIT
-       default ARCH_THUNDER
        ---help---
          This driver supports programming and controlling of MAC
          interface from NIC physical function driver.
index a11485fbb33f2b7bcd6c973324ea41601dbaf575..c3c7db41819dfad26b521008c6c70148711ee019 100644 (file)
@@ -2332,10 +2332,11 @@ int t4_setup_debugfs(struct adapter *adap)
                                        EXT_MEM1_SIZE_G(size));
                }
        } else {
-               if (i & EXT_MEM_ENABLE_F)
+               if (i & EXT_MEM_ENABLE_F) {
                        size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
                        add_debugfs_mem(adap, "mc", MEM_MC,
                                        EXT_MEM_SIZE_G(size));
+               }
        }
 
        de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
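
The one-line cxgb4 fix above is the classic missing-braces bug: the
indentation suggested both statements were guarded, but only the first
was, so add_debugfs_mem() ran even with EXT_MEM_ENABLE_F clear. In
miniature:

    #include <stdio.h>

    #define ENABLE_FLAG 0x1

    static int read_size(void) { return 42; }
    static void register_region(int size) { printf("region %d\n", size); }

    static void buggy(int flags)
    {
            int size = 0;

            if (flags & ENABLE_FLAG)
                    size = read_size();
                    register_region(size); /* BUG: runs unconditionally */
    }

    static void fixed(int flags)
    {
            if (flags & ENABLE_FLAG) {
                    int size = read_size();
                    register_region(size);
            }
    }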
index 2716e6f30d9a0949633b40dc9864196c7465fa3a..00e3a6b6b82254269aa7e0bf5d165f13d358594f 100644 (file)
@@ -620,6 +620,11 @@ enum be_if_flags {
                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |\
                                         BE_IF_FLAGS_MCAST_PROMISCUOUS)
 
+#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
+                       BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
+
+#define BE_IF_ALL_FILT_FLAGS   (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
+
 /* An RX interface is an object with one or more MAC addresses and
  * filtering capabilities. */
 struct be_cmd_req_if_create {
index 6f642426308c67399eac3abdb20ae6160ce41d2a..6ca693b03f33abbbdb6097544f2f4ee7f3928720 100644 (file)
@@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;
 
+       /* if device is not running, copy MAC to netdev->dev_addr */
+       if (!netif_running(netdev))
+               goto done;
+
        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
@@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
                status = -EPERM;
                goto err;
        }
-
-       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-       dev_info(dev, "MAC address changed to %pM\n", mac);
+done:
+       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+       dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
        return 0;
 err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
@@ -2447,10 +2451,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
        be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
 }
 
-static void be_rx_cq_clean(struct be_rx_obj *rxo)
+/* Free posted rx buffers that were not used */
+static void be_rxq_clean(struct be_rx_obj *rxo)
 {
-       struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
+       struct be_rx_page_info *page_info;
+
+       while (atomic_read(&rxq->used) > 0) {
+               page_info = get_rx_page_info(rxo);
+               put_page(page_info->page);
+               memset(page_info, 0, sizeof(*page_info));
+       }
+       BUG_ON(atomic_read(&rxq->used));
+       rxq->tail = 0;
+       rxq->head = 0;
+}
+
+static void be_rx_cq_clean(struct be_rx_obj *rxo)
+{
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        struct be_adapter *adapter = rxo->adapter;
@@ -2487,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
 
        /* After cleanup, leave the CQ in unarmed state */
        be_cq_notify(adapter, rx_cq->id, false, 0);
-
-       /* Then free posted rx buffers that were not used */
-       while (atomic_read(&rxq->used) > 0) {
-               page_info = get_rx_page_info(rxo);
-               put_page(page_info->page);
-               memset(page_info, 0, sizeof(*page_info));
-       }
-       BUG_ON(atomic_read(&rxq->used));
-       rxq->tail = 0;
-       rxq->head = 0;
 }
 
 static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2576,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
                        be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
                        napi_hash_del(&eqo->napi);
                        netif_napi_del(&eqo->napi);
+                       free_cpumask_var(eqo->affinity_mask);
                }
-               free_cpumask_var(eqo->affinity_mask);
                be_queue_free(adapter, &eqo->q);
        }
 }
@@ -2594,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 
        for_all_evt_queues(adapter, eqo, i) {
                int numa_node = dev_to_node(&adapter->pdev->dev);
-               if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
-                       return -ENOMEM;
-               cpumask_set_cpu(cpumask_local_spread(i, numa_node),
-                               eqo->affinity_mask);
-               netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
-                              BE_NAPI_WEIGHT);
-               napi_hash_add(&eqo->napi);
+
                aic = &adapter->aic_obj[i];
                eqo->adapter = adapter;
                eqo->idx = i;
@@ -2616,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
                rc = be_cmd_eq_create(adapter, eqo);
                if (rc)
                        return rc;
+
+               if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
+                       return -ENOMEM;
+               cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+                               eqo->affinity_mask);
+               netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+                              BE_NAPI_WEIGHT);
+               napi_hash_add(&eqo->napi);
        }
        return 0;
 }
@@ -3354,13 +3364,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
+                       /* If RXQs are destroyed while in an "out of buffer"
+                        * state, there is a possibility of an HW stall on
+                        * Lancer. So, post 64 buffers to each queue to relieve
+                        * the "out of buffer" condition.
+                        * Make sure there's space in the RXQ before posting.
+                        */
+                       if (lancer_chip(adapter)) {
+                               be_rx_cq_clean(rxo);
+                               if (atomic_read(&q->used) == 0)
+                                       be_post_rx_frags(rxo, GFP_KERNEL,
+                                                        MAX_RX_POST);
+                       }
+
                        be_cmd_rxq_destroy(adapter, q);
                        be_rx_cq_clean(rxo);
+                       be_rxq_clean(rxo);
                }
                be_queue_free(adapter, q);
        }
 }
 
+static void be_disable_if_filters(struct be_adapter *adapter)
+{
+       be_cmd_pmac_del(adapter, adapter->if_handle,
+                       adapter->pmac_id[0], 0);
+
+       be_clear_uc_list(adapter);
+
+       /* The IFACE flags are enabled in the open path and cleared
+        * in the close path. When a VF gets detached from the host and
+        * assigned to a VM the following happens:
+        *      - VF's IFACE flags get cleared in the detach path
+        *      - IFACE create is issued by the VF in the attach path
+        * Due to a bug in the BE3/Skyhawk-R FW
+        * (Lancer FW doesn't have the bug), the IFACE capability flags
+        * specified along with the IFACE create cmd issued by a VF are not
+        * honoured by FW.  As a consequence, if a *new* driver
+        * (that enables/disables IFACE flags in open/close)
+        * is loaded in the host and an *old* driver is used by a VM/VF,
+        * the IFACE gets created *without* the needed flags.
+        * To avoid this, disable RX-filter flags only for Lancer.
+        */
+       if (lancer_chip(adapter)) {
+               be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
+               adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
+       }
+}
+
 static int be_close(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
@@ -3373,6 +3424,8 @@ static int be_close(struct net_device *netdev)
        if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
                return 0;
 
+       be_disable_if_filters(adapter);
+
        be_roce_dev_close(adapter);
 
        if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3392,7 +3445,6 @@ static int be_close(struct net_device *netdev)
        be_tx_compl_clean(adapter);
 
        be_rx_qs_destroy(adapter);
-       be_clear_uc_list(adapter);
 
        for_all_evt_queues(adapter, eqo, i) {
                if (msix_enabled(adapter))
@@ -3477,6 +3529,31 @@ static int be_rx_qs_create(struct be_adapter *adapter)
        return 0;
 }
 
+static int be_enable_if_filters(struct be_adapter *adapter)
+{
+       int status;
+
+       status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
+       if (status)
+               return status;
+
+       /* For BE3 VFs, the PF programs the initial MAC address */
+       if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+               status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
+                                        adapter->if_handle,
+                                        &adapter->pmac_id[0], 0);
+               if (status)
+                       return status;
+       }
+
+       if (adapter->vlans_added)
+               be_vid_config(adapter);
+
+       be_set_rx_mode(adapter->netdev);
+
+       return 0;
+}
+
 static int be_open(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
@@ -3490,6 +3567,10 @@ static int be_open(struct net_device *netdev)
        if (status)
                goto err;
 
+       status = be_enable_if_filters(adapter);
+       if (status)
+               goto err;
+
        status = be_irq_register(adapter);
        if (status)
                goto err;
@@ -3686,16 +3767,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
        }
 }
 
-static void be_mac_clear(struct be_adapter *adapter)
-{
-       if (adapter->pmac_id) {
-               be_cmd_pmac_del(adapter, adapter->if_handle,
-                               adapter->pmac_id[0], 0);
-               kfree(adapter->pmac_id);
-               adapter->pmac_id = NULL;
-       }
-}
-
 #ifdef CONFIG_BE2NET_VXLAN
 static void be_disable_vxlan_offloads(struct be_adapter *adapter)
 {
@@ -3770,8 +3841,8 @@ static int be_clear(struct be_adapter *adapter)
 #ifdef CONFIG_BE2NET_VXLAN
        be_disable_vxlan_offloads(adapter);
 #endif
-       /* delete the primary mac along with the uc-mac list */
-       be_mac_clear(adapter);
+       kfree(adapter->pmac_id);
+       adapter->pmac_id = NULL;
 
        be_cmd_if_destroy(adapter, adapter->if_handle,  0);
 
@@ -3782,25 +3853,11 @@ static int be_clear(struct be_adapter *adapter)
        return 0;
 }
 
-static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
-                       u32 cap_flags, u32 vf)
-{
-       u32 en_flags;
-
-       en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
-                  BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
-                  BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
-
-       en_flags &= cap_flags;
-
-       return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
-}
-
 static int be_vfs_if_create(struct be_adapter *adapter)
 {
        struct be_resources res = {0};
+       u32 cap_flags, en_flags, vf;
        struct be_vf_cfg *vf_cfg;
-       u32 cap_flags, vf;
        int status;
 
        /* If a FW profile exists, then cap_flags are updated */
@@ -3821,8 +3878,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
                        }
                }
 
-               status = be_if_create(adapter, &vf_cfg->if_handle,
-                                     cap_flags, vf + 1);
+               en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+                                       BE_IF_FLAGS_BROADCAST |
+                                       BE_IF_FLAGS_MULTICAST |
+                                       BE_IF_FLAGS_PASS_L3L4_ERRORS);
+               status = be_cmd_if_create(adapter, cap_flags, en_flags,
+                                         &vf_cfg->if_handle, vf + 1);
                if (status)
                        return status;
        }
@@ -4194,15 +4255,8 @@ static int be_mac_setup(struct be_adapter *adapter)
 
                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
-       } else {
-               /* Maybe the HW was reset; dev_addr must be re-programmed */
-               memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
        }
 
-       /* For BE3-R VFs, the PF programs the initial MAC address */
-       if (!(BEx_chip(adapter) && be_virtfn(adapter)))
-               be_cmd_pmac_add(adapter, mac, adapter->if_handle,
-                               &adapter->pmac_id[0], 0);
        return 0;
 }
 
@@ -4342,6 +4396,7 @@ static int be_func_init(struct be_adapter *adapter)
 static int be_setup(struct be_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
+       u32 en_flags;
        int status;
 
        status = be_func_init(adapter);
@@ -4364,8 +4419,11 @@ static int be_setup(struct be_adapter *adapter)
        if (status)
                goto err;
 
-       status = be_if_create(adapter, &adapter->if_handle,
-                             be_if_cap_flags(adapter), 0);
+       /* will enable all the needed filter flags in be_open() */
+       en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
+       en_flags = en_flags & be_if_cap_flags(adapter);
+       status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+                                 &adapter->if_handle, 0);
        if (status)
                goto err;
 
@@ -4391,11 +4449,6 @@ static int be_setup(struct be_adapter *adapter)
                dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
        }
 
-       if (adapter->vlans_added)
-               be_vid_config(adapter);
-
-       be_set_rx_mode(adapter->netdev);
-
        status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
                                         adapter->rx_fc);
        if (status)
@@ -5121,7 +5174,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
        struct device *dev = &adapter->pdev->dev;
        int status;
 
-       if (lancer_chip(adapter) || BEx_chip(adapter))
+       if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
                return;
 
        if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5168,7 +5221,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
-       if (lancer_chip(adapter) || BEx_chip(adapter))
+       if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
                return;
 
        if (adapter->vxlan_port != port)
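
Taken together, the be2net hunks move interface-filter programming out of
setup and into the open/close pair: be_open() enables the RX-filter
flags, programs the primary MAC and any VLANs, while be_close() deletes
the MAC and, on Lancer, clears the filter flags before the queues are
torn down. A hedged sketch of that pairing; the two filter helpers are
stand-ins, not the driver's real functions:

    #include <linux/netdevice.h>

    /* Stand-ins for be_enable_if_filters()/be_disable_if_filters(). */
    static int sketch_enable_filters(struct net_device *netdev) { return 0; }
    static void sketch_disable_filters(struct net_device *netdev) { }

    /* Filters exist only while the interface is up; a MAC change on a
     * down device just updates dev_addr for the next open to program.
     */
    static int sketch_open(struct net_device *netdev)
    {
            int ret = sketch_enable_filters(netdev);

            if (ret)
                    return ret;
            netif_start_queue(netdev);
            return 0;
    }

    static int sketch_close(struct net_device *netdev)
    {
            netif_stop_queue(netdev);
            sketch_disable_filters(netdev); /* mirror of open */
            return 0;
    }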
index 32e3807c650ea7256b09f0c9e406a8219cb2bb78..271bb5862346ede352473f0837fb76e0a5ac7163 100644 (file)
@@ -3433,6 +3433,7 @@ fec_probe(struct platform_device *pdev)
 
        pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
 
index 56316db6c5a674fd1d17ef20e8db3747950cf71e..cf8e54652df95266e24a5d6d64ed67c00336f67b 100644 (file)
@@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        frag = skb_shinfo(skb)->frags;
        while (nr_frags) {
                CBDC_SC(bdp,
-                       BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+                       BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
+                       BD_ENET_TX_TC);
                CBDS_SC(bdp, BD_ENET_TX_READY);
 
                if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
index b34214e2df5f6e55bdbb29e6a8016c139e74345c..016743e355de31984d57904e802d69e6703ea5e4 100644 (file)
@@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
 }
 
 #define FEC_NAPI_RX_EVENT_MSK  (FEC_ENET_RXF | FEC_ENET_RXB)
-#define FEC_NAPI_TX_EVENT_MSK  (FEC_ENET_TXF | FEC_ENET_TXB)
+#define FEC_NAPI_TX_EVENT_MSK  (FEC_ENET_TXF)
 #define FEC_RX_EVENT           (FEC_ENET_RXF)
 #define FEC_TX_EVENT           (FEC_ENET_TXF)
 #define FEC_ERR_EVENT_MSK      (FEC_ENET_HBERR | FEC_ENET_BABR | \
index 2b7610f341b09f4ff293f9916235030487fb2a80..10b3bbbbac8e10e89c47b2d50bb661cc80ad616b 100644 (file)
@@ -2102,6 +2102,11 @@ int startup_gfar(struct net_device *ndev)
        /* Start Rx/Tx DMA and enable the interrupts */
        gfar_start(priv);
 
+       /* force link state update after mac reset */
+       priv->oldlink = 0;
+       priv->oldspeed = 0;
+       priv->oldduplex = -1;
+
        phy_start(priv->phydev);
 
        enable_napi(priv);
index 3c0a8f825b630148c29b4cda6ca46b9797a668be..5b90fcf96265aec4a6daa1f28135ef2fa06a0752 100644 (file)
@@ -900,27 +900,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
        return 0;
 }
 
-static int gfar_comp_asc(const void *a, const void *b)
-{
-       return memcmp(a, b, 4);
-}
-
-static int gfar_comp_desc(const void *a, const void *b)
-{
-       return -memcmp(a, b, 4);
-}
-
-static void gfar_swap(void *a, void *b, int size)
-{
-       u32 *_a = a;
-       u32 *_b = b;
-
-       swap(_a[0], _b[0]);
-       swap(_a[1], _b[1]);
-       swap(_a[2], _b[2]);
-       swap(_a[3], _b[3]);
-}
-
 /* Write a mask to filer cache */
 static void gfar_set_mask(u32 mask, struct filer_table *tab)
 {
@@ -1270,310 +1249,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
        return 0;
 }
 
-/* Copy size filer entries */
-static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
-                                   struct gfar_filer_entry src[0], s32 size)
-{
-       while (size > 0) {
-               size--;
-               dst[size].ctrl = src[size].ctrl;
-               dst[size].prop = src[size].prop;
-       }
-}
-
-/* Delete the contents of the filer-table between start and end
- * and collapse them
- */
-static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
-{
-       int length;
-
-       if (end > MAX_FILER_CACHE_IDX || end < begin)
-               return -EINVAL;
-
-       end++;
-       length = end - begin;
-
-       /* Copy */
-       while (end < tab->index) {
-               tab->fe[begin].ctrl = tab->fe[end].ctrl;
-               tab->fe[begin++].prop = tab->fe[end++].prop;
-
-       }
-       /* Fill up with don't cares */
-       while (begin < tab->index) {
-               tab->fe[begin].ctrl = 0x60;
-               tab->fe[begin].prop = 0xFFFFFFFF;
-               begin++;
-       }
-
-       tab->index -= length;
-       return 0;
-}
-
-/* Make space on the wanted location */
-static int gfar_expand_filer_entries(u32 begin, u32 length,
-                                    struct filer_table *tab)
-{
-       if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
-           begin > MAX_FILER_CACHE_IDX)
-               return -EINVAL;
-
-       gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
-                               tab->index - length + 1);
-
-       tab->index += length;
-       return 0;
-}
-
-static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
-{
-       for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
-            start++) {
-               if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
-                   (RQFCR_AND | RQFCR_CLE))
-                       return start;
-       }
-       return -1;
-}
-
-static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
-{
-       for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
-            start++) {
-               if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
-                   (RQFCR_CLE))
-                       return start;
-       }
-       return -1;
-}
-
-/* Uses hardware's clustering option to reduce
- * the number of filer table entries
- */
-static void gfar_cluster_filer(struct filer_table *tab)
-{
-       s32 i = -1, j, iend, jend;
-
-       while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
-               j = i;
-               while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
-                       /* The cluster entries self and the previous one
-                        * (a mask) must be identical!
-                        */
-                       if (tab->fe[i].ctrl != tab->fe[j].ctrl)
-                               break;
-                       if (tab->fe[i].prop != tab->fe[j].prop)
-                               break;
-                       if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
-                               break;
-                       if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
-                               break;
-                       iend = gfar_get_next_cluster_end(i, tab);
-                       jend = gfar_get_next_cluster_end(j, tab);
-                       if (jend == -1 || iend == -1)
-                               break;
-
-                       /* First we make some free space, where our cluster
-                        * element should be. Then we copy it there and finally
-                        * delete it from its old location.
-                        */
-                       if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
-                           -EINVAL)
-                               break;
-
-                       gfar_copy_filer_entries(&(tab->fe[iend + 1]),
-                                               &(tab->fe[jend + 1]), jend - j);
-
-                       if (gfar_trim_filer_entries(jend - 1,
-                                                   jend + (jend - j),
-                                                   tab) == -EINVAL)
-                               return;
-
-                       /* Mask out cluster bit */
-                       tab->fe[iend].ctrl &= ~(RQFCR_CLE);
-               }
-       }
-}
-
-/* Swaps the masked bits of a1<>a2 and b1<>b2 */
-static void gfar_swap_bits(struct gfar_filer_entry *a1,
-                          struct gfar_filer_entry *a2,
-                          struct gfar_filer_entry *b1,
-                          struct gfar_filer_entry *b2, u32 mask)
-{
-       u32 temp[4];
-       temp[0] = a1->ctrl & mask;
-       temp[1] = a2->ctrl & mask;
-       temp[2] = b1->ctrl & mask;
-       temp[3] = b2->ctrl & mask;
-
-       a1->ctrl &= ~mask;
-       a2->ctrl &= ~mask;
-       b1->ctrl &= ~mask;
-       b2->ctrl &= ~mask;
-
-       a1->ctrl |= temp[1];
-       a2->ctrl |= temp[0];
-       b1->ctrl |= temp[3];
-       b2->ctrl |= temp[2];
-}
-
-/* Generate a list consisting of masks values with their start and
- * end of validity and block as indicator for parts belonging
- * together (glued by ANDs) in mask_table
- */
-static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
-                                   struct filer_table *tab)
-{
-       u32 i, and_index = 0, block_index = 1;
-
-       for (i = 0; i < tab->index; i++) {
-
-               /* LSByte of control = 0 sets a mask */
-               if (!(tab->fe[i].ctrl & 0xF)) {
-                       mask_table[and_index].mask = tab->fe[i].prop;
-                       mask_table[and_index].start = i;
-                       mask_table[and_index].block = block_index;
-                       if (and_index >= 1)
-                               mask_table[and_index - 1].end = i - 1;
-                       and_index++;
-               }
-               /* cluster starts and ends will be separated because they should
-                * hold their position
-                */
-               if (tab->fe[i].ctrl & RQFCR_CLE)
-                       block_index++;
-               /* An unset AND indicates the end of a dependent block */
-               if (!(tab->fe[i].ctrl & RQFCR_AND))
-                       block_index++;
-       }
-
-       mask_table[and_index - 1].end = i - 1;
-
-       return and_index;
-}
-
-/* Sorts the entries of mask_table by the values of the masks.
- * Important: The 0xFF80 flags of the first and last entry of a
- * block must hold their position (which queue, CLusterEnable, ReJEct,
- * AND)
- */
-static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
-                                struct filer_table *temp_table, u32 and_index)
-{
-       /* Pointer to compare function (_asc or _desc) */
-       int (*gfar_comp)(const void *, const void *);
-
-       u32 i, size = 0, start = 0, prev = 1;
-       u32 old_first, old_last, new_first, new_last;
-
-       gfar_comp = &gfar_comp_desc;
-
-       for (i = 0; i < and_index; i++) {
-               if (prev != mask_table[i].block) {
-                       old_first = mask_table[start].start + 1;
-                       old_last = mask_table[i - 1].end;
-                       sort(mask_table + start, size,
-                            sizeof(struct gfar_mask_entry),
-                            gfar_comp, &gfar_swap);
-
-                       /* Toggle order for every block. This makes the
-                        * thing more efficient!
-                        */
-                       if (gfar_comp == gfar_comp_desc)
-                               gfar_comp = &gfar_comp_asc;
-                       else
-                               gfar_comp = &gfar_comp_desc;
-
-                       new_first = mask_table[start].start + 1;
-                       new_last = mask_table[i - 1].end;
-
-                       gfar_swap_bits(&temp_table->fe[new_first],
-                                      &temp_table->fe[old_first],
-                                      &temp_table->fe[new_last],
-                                      &temp_table->fe[old_last],
-                                      RQFCR_QUEUE | RQFCR_CLE |
-                                      RQFCR_RJE | RQFCR_AND);
-
-                       start = i;
-                       size = 0;
-               }
-               size++;
-               prev = mask_table[i].block;
-       }
-}
-
-/* Reduces the number of masks needed in the filer table to save entries
- * This is done by sorting the masks of a dependent block. A dependent block is
- * identified by gluing ANDs or CLE. The sorting order toggles after every
- * block. Of course entries in scope of a mask must change their location with
- * it.
- */
-static int gfar_optimize_filer_masks(struct filer_table *tab)
-{
-       struct filer_table *temp_table;
-       struct gfar_mask_entry *mask_table;
-
-       u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
-       s32 ret = 0;
-
-       /* We need a copy of the filer table because
-        * we want to change its order
-        */
-       temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
-       if (temp_table == NULL)
-               return -ENOMEM;
-
-       mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
-                            sizeof(struct gfar_mask_entry), GFP_KERNEL);
-
-       if (mask_table == NULL) {
-               ret = -ENOMEM;
-               goto end;
-       }
-
-       and_index = gfar_generate_mask_table(mask_table, tab);
-
-       gfar_sort_mask_table(mask_table, temp_table, and_index);
-
-       /* Now we can copy the data from our duplicated filer table to
-        * the real one in the order the mask table says
-        */
-       for (i = 0; i < and_index; i++) {
-               size = mask_table[i].end - mask_table[i].start + 1;
-               gfar_copy_filer_entries(&(tab->fe[j]),
-                               &(temp_table->fe[mask_table[i].start]), size);
-               j += size;
-       }
-
-       /* And finally we just have to check for duplicated masks and drop the
-        * second ones
-        */
-       for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
-               if (tab->fe[i].ctrl == 0x80) {
-                       previous_mask = i++;
-                       break;
-               }
-       }
-       for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
-               if (tab->fe[i].ctrl == 0x80) {
-                       if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
-                               /* Two identical ones found!
-                                * So drop the second one!
-                                */
-                               gfar_trim_filer_entries(i, i, tab);
-                       } else
-                               /* Not identical! */
-                               previous_mask = i;
-               }
-       }
-
-       kfree(mask_table);
-end:   kfree(temp_table);
-       return ret;
-}
-
 /* Write the bit-pattern from software's buffer to hardware registers */
 static int gfar_write_filer_table(struct gfar_private *priv,
                                  struct filer_table *tab)
@@ -1583,11 +1258,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
                return -EBUSY;
 
        /* Fill regular entries */
-       for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
-            i++)
+       for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
                gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
        /* Fill the rest with fall-throughs */
-       for (; i < MAX_FILER_IDX - 1; i++)
+       for (; i < MAX_FILER_IDX; i++)
                gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
        /* Last entry must be default accept
         * because that's what people expect
@@ -1621,7 +1295,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
 {
        struct ethtool_flow_spec_container *j;
        struct filer_table *tab;
-       s32 i = 0;
        s32 ret = 0;
 
        /* So index is set to zero, too! */
@@ -1646,17 +1319,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
                }
        }
 
-       i = tab->index;
-
-       /* Optimizations to save entries */
-       gfar_cluster_filer(tab);
-       gfar_optimize_filer_masks(tab);
-
-       pr_debug("\tSummary:\n"
-                "\tData on hardware: %d\n"
-                "\tCompression rate: %d%%\n",
-                tab->index, 100 - (100 * tab->index) / i);
-
        /* Write everything to hardware */
        ret = gfar_write_filer_table(priv, tab);
        if (ret == -EBUSY) {
@@ -1722,13 +1384,14 @@ static int gfar_add_cls(struct gfar_private *priv,
        }
 
 process:
+       priv->rx_list.count++;
        ret = gfar_process_filer_changes(priv);
        if (ret)
                goto clean_list;
-       priv->rx_list.count++;
        return ret;
 
 clean_list:
+       priv->rx_list.count--;
        list_del(&temp->list);
 clean_mem:
        kfree(temp);
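
With the clustering and mask-sorting optimizer deleted above, the filer
table is now written to hardware in plain rule order, and the off-by-one
fix in gfar_write_filer_table() programs the final entry like the rest.
For reference, the removed optimizer drove the kernel's sort() with
memcmp comparators and a 16-byte swap callback; the minimal shape of that
call, with a struct loosely mirroring the old gfar_mask_entry:

    #include <linux/sort.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct mask_ent {
            u32 w[4];       /* mask, start, end, block */
    };

    static int mask_cmp(const void *a, const void *b)
    {
            return memcmp(a, b, 4); /* ascending by first word only */
    }

    static void sort_masks(struct mask_ent *e, size_t n)
    {
            /* NULL swap_func: sort() uses its generic element swap */
            sort(e, n, sizeof(*e), mask_cmp, NULL);
    }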
index 982fdcdc795b4752a2fbe65e71ed76ee197ad5de..b5b2925103ec10c1e835429c2ab7d2efee093838 100644 (file)
@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
 
 static inline bool fm10k_page_is_reserved(struct page *page)
 {
-       return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+       return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
 static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
index 2f70a9b152bd1789349d9c4d995852e95be1e70d..830466c49987cfbf1ad25f6d9bc01e4f7f0d0454 100644 (file)
@@ -6566,7 +6566,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
 
 static inline bool igb_page_is_reserved(struct page *page)
 {
-       return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+       return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
index 9aa6104e34ea8e2bd93994e4d8291763014162b8..ae21e0b06c3ad40a54093c8966fcfa711f188cfb 100644 (file)
@@ -1832,7 +1832,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 
 static inline bool ixgbe_page_is_reserved(struct page *page)
 {
-       return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+       return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
 /**
index e71cdde9cb017aecab834d2f2d9c5d4821c3d42e..1d7b00b038a2ea8c3bdd9394ad3d4988ccee04c8 100644 (file)
@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
 
 static inline bool ixgbevf_page_is_reserved(struct page *page)
 {
-       return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+       return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
 /**
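
The four Intel-driver hunks above make the same substitution: the
open-coded page->pfmemalloc read becomes the page_is_pfmemalloc()
accessor, which hides how the flag is actually stored inside struct page.
The shared predicate, written once; "reserved" here means a page the RX
path must not recycle:

    #include <linux/mm.h>
    #include <linux/topology.h>

    /* An RX page is not reusable if it sits on a remote NUMA node or
     * was served from the pfmemalloc emergency reserves.
     */
    static bool rx_page_is_reserved(struct page *page)
    {
            return page_to_nid(page) != numa_mem_id() ||
                   page_is_pfmemalloc(page);
    }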
index 3e8b1bfb1f2e316212bd9b60fa06522ca4dc68db..d9884fd15b453e2486177d58b7fc40bcd5aaf7cc 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/of_address.h>
 #include <linux/phy.h>
 #include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
 #include <uapi/linux/ppp_defs.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 
 /* Coalescing */
 #define MVPP2_TXDONE_COAL_PKTS_THRESH  15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
 #define MVPP2_RX_COAL_PKTS             32
 #define MVPP2_RX_COAL_USEC             100
 
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
        u64     tx_bytes;
 };
 
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+       struct hrtimer tx_done_timer;
+       bool timer_scheduled;
+       /* Tasklet for egress finalization */
+       struct tasklet_struct tx_done_tasklet;
+};
+
 struct mvpp2_port {
        u8 id;
 
@@ -679,6 +690,9 @@ struct mvpp2_port {
        u32 pending_cause_rx;
        struct napi_struct napi;
 
+       /* Per-CPU port control */
+       struct mvpp2_port_pcpu __percpu *pcpu;
+
        /* Flags */
        unsigned long flags;
 
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
        /* Array of transmitted skb */
        struct sk_buff **tx_skb;
 
+       /* Array of transmitted buffers' physical addresses */
+       dma_addr_t *tx_buffs;
+
        /* Index of last TX DMA descriptor that was inserted */
        int txq_put_index;
 
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
        /* Occupied buffers indicator */
        atomic_t in_use;
        int in_use_thresh;
-
-       spinlock_t lock;
 };
 
 struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
 }
 
 static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
-                             struct sk_buff *skb)
+                             struct sk_buff *skb,
+                             struct mvpp2_tx_desc *tx_desc)
 {
        txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+       if (skb)
+               txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
+                                                        tx_desc->buf_phys_addr;
        txq_pcpu->txq_put_index++;
        if (txq_pcpu->txq_put_index == txq_pcpu->size)
                txq_pcpu->txq_put_index = 0;
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
        bm_pool->pkt_size = 0;
        bm_pool->buf_num = 0;
        atomic_set(&bm_pool->in_use, 0);
-       spin_lock_init(&bm_pool->lock);
 
        return 0;
 }
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
                  int pkt_size)
 {
-       unsigned long flags = 0;
        struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
        int num;
 
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
                return NULL;
        }
 
-       spin_lock_irqsave(&new_pool->lock, flags);
-
        if (new_pool->type == MVPP2_BM_FREE)
                new_pool->type = type;
 
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
                if (num != pkts_num) {
                        WARN(1, "pool %d: %d of %d allocated\n",
                             new_pool->id, num, pkts_num);
-                       /* We need to undo the bufs_add() allocations */
-                       spin_unlock_irqrestore(&new_pool->lock, flags);
                        return NULL;
                }
        }
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
        mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
                                  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
 
-       spin_unlock_irqrestore(&new_pool->lock, flags);
-
        return new_pool;
 }
 
 /* Initialize pools for swf */
 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
 {
-       unsigned long flags = 0;
        int rxq;
 
        if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
                if (!port->pool_long)
                        return -ENOMEM;
 
-               spin_lock_irqsave(&port->pool_long->lock, flags);
                port->pool_long->port_map |= (1 << port->id);
-               spin_unlock_irqrestore(&port->pool_long->lock, flags);
 
                for (rxq = 0; rxq < rxq_number; rxq++)
                        mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
                if (!port->pool_short)
                        return -ENOMEM;
 
-               spin_lock_irqsave(&port->pool_short->lock, flags);
                port->pool_short->port_map |= (1 << port->id);
-               spin_unlock_irqrestore(&port->pool_short->lock, flags);
 
                for (rxq = 0; rxq < rxq_number; rxq++)
                        mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
 
        mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
                    (MVPP2_CAUSE_MISC_SUM_MASK |
-                    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
                     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
 }
 
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
        rxq->time_coal = usec;
 }
 
-/* Set threshold for TX_DONE pkts coalescing */
-static void mvpp2_tx_done_pkts_coal_set(void *arg)
-{
-       struct mvpp2_port *port = arg;
-       int queue;
-       u32 val;
-
-       for (queue = 0; queue < txq_number; queue++) {
-               struct mvpp2_tx_queue *txq = port->txqs[queue];
-
-               val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
-                      MVPP2_TRANSMITTED_THRESH_MASK;
-               mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-               mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
-       }
-}
-
 /* Free Tx queue skbuffs */
 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
                                struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
        int i;
 
        for (i = 0; i < num; i++) {
-               struct mvpp2_tx_desc *tx_desc = txq->descs +
-                                                       txq_pcpu->txq_get_index;
+               dma_addr_t buf_phys_addr =
+                                   txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
                struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
 
                mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
                if (!skb)
                        continue;
 
-               dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
-                                tx_desc->data_size, DMA_TO_DEVICE);
+               dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
+                                skb_headlen(skb), DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        }
 }
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
                                                        u32 cause)
 {
-       int queue = fls(cause >> 16) - 1;
+       int queue = fls(cause) - 1;
 
        return port->txqs[queue];
 }
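[Annotation] Note that the cause argument to mvpp2_get_tx_queue() is now a plain bitmap of TX queues (bit n set for log_id n) rather than the hardware ISR layout, hence the dropped >> 16. The fls()-based walk in mvpp2_tx_done() below consumes it highest-bit-first; a small worked example:

    u32 cause = 0x5;                /* queues 0 and 2 have work pending */
    int queue = fls(cause) - 1;     /* fls(0x5) == 3, so queue 2 first */

    cause &= ~(1 << queue);         /* cause == 0x1; queue 0 on the next pass */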
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
                        netif_tx_wake_queue(nq);
 }
 
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
+{
+       struct mvpp2_tx_queue *txq;
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       unsigned int tx_todo = 0;
+
+       while (cause) {
+               txq = mvpp2_get_tx_queue(port, cause);
+               if (!txq)
+                       break;
+
+               txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+               if (txq_pcpu->count) {
+                       mvpp2_txq_done(port, txq, txq_pcpu);
+                       tx_todo += txq_pcpu->count;
+               }
+
+               cause &= ~(1 << txq->log_id);
+       }
+       return tx_todo;
+}
+
 /* Rx/Tx queue initialization/cleanup methods */
 
 /* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
                txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
                                           sizeof(*txq_pcpu->tx_skb),
                                           GFP_KERNEL);
-               if (!txq_pcpu->tx_skb) {
-                       dma_free_coherent(port->dev->dev.parent,
-                                         txq->size * MVPP2_DESC_ALIGNED_SIZE,
-                                         txq->descs, txq->descs_phys);
-                       return -ENOMEM;
-               }
+               if (!txq_pcpu->tx_skb)
+                       goto error;
+
+               txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
+                                            sizeof(dma_addr_t), GFP_KERNEL);
+               if (!txq_pcpu->tx_buffs)
+                       goto error;
 
                txq_pcpu->count = 0;
                txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
        }
 
        return 0;
+
+error:
+       for_each_present_cpu(cpu) {
+               txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+               kfree(txq_pcpu->tx_skb);
+               kfree(txq_pcpu->tx_buffs);
+       }
+
+       dma_free_coherent(port->dev->dev.parent,
+                         txq->size * MVPP2_DESC_ALIGNED_SIZE,
+                         txq->descs, txq->descs_phys);
+
+       return -ENOMEM;
 }
 
 /* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
        for_each_present_cpu(cpu) {
                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
                kfree(txq_pcpu->tx_skb);
+               kfree(txq_pcpu->tx_buffs);
        }
 
        if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
                        goto err_cleanup;
        }
 
-       on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
        on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
        return 0;
 
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
        }
 }
 
+static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+{
+       ktime_t interval;
+
+       if (!port_pcpu->timer_scheduled) {
+               port_pcpu->timer_scheduled = true;
+               interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+               hrtimer_start(&port_pcpu->tx_done_timer, interval,
+                             HRTIMER_MODE_REL_PINNED);
+       }
+}
+
+static void mvpp2_tx_proc_cb(unsigned long data)
+{
+       struct net_device *dev = (struct net_device *)data;
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+       unsigned int tx_todo, cause;
+
+       if (!netif_running(dev))
+               return;
+       port_pcpu->timer_scheduled = false;
+
+       /* Process all the Tx queues */
+       cause = (1 << txq_number) - 1;
+       tx_todo = mvpp2_tx_done(port, cause);
+
+       /* Set the timer in case not all the packets were processed */
+       if (tx_todo)
+               mvpp2_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+       struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+                                                        struct mvpp2_port_pcpu,
+                                                        tx_done_timer);
+
+       tasklet_schedule(&port_pcpu->tx_done_tasklet);
+
+       return HRTIMER_NORESTART;
+}
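[Annotation] Together, the per-CPU hrtimer and tasklet replace the TX-done coalescing interrupt that the mvpp2_interrupts_unmask() hunk above stops enabling: the hrtimer callback runs in hard-IRQ context and only schedules the tasklet, while mvpp2_tx_proc_cb() does the completion work in softirq context and re-arms the timer if packets remain. A condensed sketch of the lifecycle, using names from this patch (error handling omitted):

    /* probe, per CPU */
    hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
                 HRTIMER_MODE_REL_PINNED);
    port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
    tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
                 (unsigned long)dev);

    /* xmit path, while completions are outstanding */
    hrtimer_start(&port_pcpu->tx_done_timer,
                  ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS),   /* 1 ms */
                  HRTIMER_MODE_REL_PINNED);

    /* ndo_stop, per CPU */
    hrtimer_cancel(&port_pcpu->tx_done_timer);
    tasklet_kill(&port_pcpu->tx_done_tasklet);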
+
 /* Main RX/TX processing routines */
 
 /* Display more error info */
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
                if (i == (skb_shinfo(skb)->nr_frags - 1)) {
                        /* Last descriptor */
                        tx_desc->command = MVPP2_TXD_L_DESC;
-                       mvpp2_txq_inc_put(txq_pcpu, skb);
+                       mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
                } else {
                        /* Descriptor in the middle: Not First, Not Last */
                        tx_desc->command = 0;
-                       mvpp2_txq_inc_put(txq_pcpu, NULL);
+                       mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
                }
        }
 
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
                /* First and Last descriptor */
                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
                tx_desc->command = tx_cmd;
-               mvpp2_txq_inc_put(txq_pcpu, skb);
+               mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
        } else {
                /* First but not Last */
                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
                tx_desc->command = tx_cmd;
-               mvpp2_txq_inc_put(txq_pcpu, NULL);
+               mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
 
                /* Continue with other skb fragments */
                if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ out:
                dev_kfree_skb_any(skb);
        }
 
+       /* Finalize TX processing */
+       if (txq_pcpu->count >= txq->done_pkts_coal)
+               mvpp2_txq_done(port, txq, txq_pcpu);
+
+       /* Set the timer in case not all frags were processed */
+       if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
+               struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+               mvpp2_timer_set(port_pcpu);
+       }
+
        return NETDEV_TX_OK;
 }
 
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
                netdev_err(dev, "tx fifo underrun error\n");
 }
 
-static void mvpp2_txq_done_percpu(void *arg)
+static int mvpp2_poll(struct napi_struct *napi, int budget)
 {
-       struct mvpp2_port *port = arg;
-       u32 cause_rx_tx, cause_tx, cause_misc;
+       u32 cause_rx_tx, cause_rx, cause_misc;
+       int rx_done = 0;
+       struct mvpp2_port *port = netdev_priv(napi->dev);
 
        /* Rx/Tx cause register
         *
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
         */
        cause_rx_tx = mvpp2_read(port->priv,
                                 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
-       cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+       cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
        cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
 
        if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
                            cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
        }
 
-       /* Release TX descriptors */
-       if (cause_tx) {
-               struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
-               struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
-
-               if (txq_pcpu->count)
-                       mvpp2_txq_done(port, txq, txq_pcpu);
-       }
-}
-
-static int mvpp2_poll(struct napi_struct *napi, int budget)
-{
-       u32 cause_rx_tx, cause_rx;
-       int rx_done = 0;
-       struct mvpp2_port *port = netdev_priv(napi->dev);
-
-       on_each_cpu(mvpp2_txq_done_percpu, port, 1);
-
-       cause_rx_tx = mvpp2_read(port->priv,
-                                MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
        cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
 
        /* Process RX packets */
@@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
 static int mvpp2_stop(struct net_device *dev)
 {
        struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_port_pcpu *port_pcpu;
+       int cpu;
 
        mvpp2_stop_dev(port);
        mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
        on_each_cpu(mvpp2_interrupts_mask, port, 1);
 
        free_irq(port->irq, port);
+       for_each_present_cpu(cpu) {
+               port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+               hrtimer_cancel(&port_pcpu->tx_done_timer);
+               port_pcpu->timer_scheduled = false;
+               tasklet_kill(&port_pcpu->tx_done_tasklet);
+       }
        mvpp2_cleanup_rxqs(port);
        mvpp2_cleanup_txqs(port);
 
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
                txq->done_pkts_coal = c->tx_max_coalesced_frames;
        }
 
-       on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
        return 0;
 }
 
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 {
        struct device_node *phy_node;
        struct mvpp2_port *port;
+       struct mvpp2_port_pcpu *port_pcpu;
        struct net_device *dev;
        struct resource *res;
        const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        int features;
        int phy_mode;
        int priv_common_regs_num = 2;
-       int err, i;
+       int err, i, cpu;
 
        dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
                                 rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        }
        mvpp2_port_power_up(port);
 
+       port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+       if (!port->pcpu) {
+               err = -ENOMEM;
+               goto err_free_txq_pcpu;
+       }
+
+       for_each_present_cpu(cpu) {
+               port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+               hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+                            HRTIMER_MODE_REL_PINNED);
+               port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+               port_pcpu->timer_scheduled = false;
+
+               tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
+                            (unsigned long)dev);
+       }
+
        netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
        features = NETIF_F_SG | NETIF_F_IP_CSUM;
        dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        err = register_netdev(dev);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register netdev\n");
-               goto err_free_txq_pcpu;
+               goto err_free_port_pcpu;
        }
        netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
 
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        priv->port_list[id] = port;
        return 0;
 
+err_free_port_pcpu:
+       free_percpu(port->pcpu);
 err_free_txq_pcpu:
        for (i = 0; i < txq_number; i++)
                free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
        int i;
 
        unregister_netdev(port->dev);
+       free_percpu(port->pcpu);
        free_percpu(port->stats);
        for (i = 0; i < txq_number; i++)
                free_percpu(port->txqs[i]->pcpu);
index afad529838de748efc9f9253c6fde42abbe954a3..06e3e1e54c35d6078321d792d1e0e537ec95a207 100644 (file)
@@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        /* disable cmdif checksum */
        MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
 
+       MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
+
        err = set_caps(dev, set_ctx, set_sz);
 
 query_ex:
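[Annotation] The new log_uar_page_sz line is what makes UAR mapping work on kernels with pages larger than 4 KiB: the field appears to encode the page size as a log2 offset from a 4 KiB baseline (so the previous implicit value of 0 meant 4 KiB). Worked values under that assumption:

    /* log_uar_page_sz = PAGE_SHIFT - 12 = log2(PAGE_SIZE / 4096) */
    PAGE_SHIFT 12 (4 KiB, x86-64)         -> 12 - 12 = 0
    PAGE_SHIFT 14 (16 KiB, arm64 16K)     -> 14 - 12 = 2
    PAGE_SHIFT 16 (64 KiB, arm64/ppc64)   -> 16 - 12 = 4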
index f78909a00f150edfd76065b2b993d9660edaebdd..09d2e16fd6b00bfdd0c20fc10c64abd03c29a935 100644 (file)
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
 
        sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
                tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
-       err = dma_mapping_error(adapter->dev,
-               sg_dma_address(&tx_ctl->sg));
-       if (err) {
+       if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+               err = -ENOMEM;
                sg_dma_address(&tx_ctl->sg) = 0;
                goto err;
        }
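[Annotation] dma_map_single() does not return NULL or a negative errno on failure, so neither the old err-from-return idiom here nor the !dma_addr test fixed in the netcp hunk further down is a valid check; dma_mapping_error() is the only portable test, and -ENOMEM is the conventional error to report. The canonical pattern, as a sketch:

    dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

    if (dma_mapping_error(dev, dma)) {
            /* dma may be a platform-specific poison value; never test !dma */
            return -ENOMEM;
    }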
index 3df51faf18ae3ba8ce6bb7f49e6f51e4da1be738..f790f61ea78a2b4f1008da82eca29132ff5bdcc0 100644 (file)
@@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_46:
        case RTL_GIGA_MAC_VER_47:
        case RTL_GIGA_MAC_VER_48:
+               RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+               break;
        case RTL_GIGA_MAC_VER_49:
        case RTL_GIGA_MAC_VER_50:
        case RTL_GIGA_MAC_VER_51:
-               RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+               RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
                break;
        default:
                RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
index 2d8578cade03790782af7e97a1f59f9848301ae6..2e7f9a2834be320eef4a7bed44d8c787f72f3e65 100644 (file)
@@ -4821,6 +4821,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
                rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
                                   ROCKER_OP_FLAG_REMOVE);
                unregister_netdev(rocker_port->dev);
+               free_netdev(rocker_port->dev);
        }
        kfree(rocker->ports);
 }
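[Annotation] unregister_netdev() only detaches the device from the stack; the net_device from alloc_etherdev() is still allocated, so before this fix every rocker port leaked its net_device on removal. The usual teardown pairing:

    unregister_netdev(dev);   /* stop the stack from using the device */
    free_netdev(dev);         /* release the alloc_etherdev() allocation */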
index 7e3129e7f143a9990c89780a8e6638f8182e4892..f0e4bb4e3ec59f9695957dee9dec55a02d450da8 100644 (file)
@@ -42,7 +42,7 @@
 #define NSS_COMMON_CLK_DIV_MASK                        0x7f
 
 #define NSS_COMMON_CLK_SRC_CTRL                        0x14
-#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)      (1 << x)
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)      (x)
 /* Mode is coded on 1 bit but is different depending on the MAC ID:
  * MAC0: QSGMII=0 RGMII=1
  * MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -291,7 +291,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
 
        /* Configure the clock src according to the mode */
        regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
-       val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+       val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
        switch (gmac->phy_mode) {
        case PHY_INTERFACE_MODE_RGMII:
                val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
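[Annotation] The old NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) returned a mask, (1 << x), but was also used as a shift count when programming the mode bit, so the mode was shifted by (1 << id) bits instead of id and never lined up with the bit the mask cleared. Redefining the macro as the plain bit offset and shifting explicitly at the mask site makes both uses agree. Worked out for gmac->id == 1 (assuming one control bit per MAC):

    /* old macro: OFFSET(1) == (1 << 1) == 2 -> mode written to bit 2,
     * while the mask cleared bit 1: set and clear never matched */
    val |= mode << NSS_COMMON_CLK_SRC_CTRL_OFFSET(1);

    /* new macro: OFFSET(1) == 1 -> mask and set both hit bit 1 */
    val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(1));
    val |= mode << NSS_COMMON_CLK_SRC_CTRL_OFFSET(1);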
index a8a730641bbb14e723f841b2b3c13454b53a5491..bb1bb72121c0474b8c1898540a28d21264479d6a 100644 (file)
@@ -85,7 +85,6 @@ struct netcp_intf {
        struct list_head        rxhook_list_head;
        unsigned int            rx_queue_id;
        void                    *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
-       u32                     rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
        struct napi_struct      rx_napi;
        struct napi_struct      tx_napi;
 
index 9749dfd78c434992f4041effc3c9a7314bdace3d..4755838c6137b462d6de262b38beadacdf099e98 100644 (file)
@@ -34,6 +34,7 @@
 #define NETCP_SOP_OFFSET       (NET_IP_ALIGN + NET_SKB_PAD)
 #define NETCP_NAPI_WEIGHT      64
 #define NETCP_TX_TIMEOUT       (5 * HZ)
+#define NETCP_PACKET_SIZE      (ETH_FRAME_LEN + ETH_FCS_LEN)
 #define NETCP_MIN_PACKET_SIZE  ETH_ZLEN
 #define NETCP_MAX_MCAST_ADDR   16
 
@@ -804,30 +805,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
        if (likely(fdq == 0)) {
                unsigned int primary_buf_len;
                /* Allocate a primary receive queue entry */
-               buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
+               buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
                primary_buf_len = SKB_DATA_ALIGN(buf_len) +
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-               if (primary_buf_len <= PAGE_SIZE) {
-                       bufptr = netdev_alloc_frag(primary_buf_len);
-                       pad[1] = primary_buf_len;
-               } else {
-                       bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
-                                        GFP_DMA32 | __GFP_COLD);
-                       pad[1] = 0;
-               }
+               bufptr = netdev_alloc_frag(primary_buf_len);
+               pad[1] = primary_buf_len;
 
                if (unlikely(!bufptr)) {
-                       dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
+                       dev_warn_ratelimited(netcp->ndev_dev,
+                                            "Primary RX buffer alloc failed\n");
                        goto fail;
                }
                dma = dma_map_single(netcp->dev, bufptr, buf_len,
                                     DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(netcp->dev, dma)))
+                       goto fail;
+
                pad[0] = (u32)bufptr;
 
        } else {
                /* Allocate a secondary receive queue entry */
-               page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
+               page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
                if (unlikely(!page)) {
                        dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
                        goto fail;
@@ -1010,7 +1009,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
 
        /* Map the linear buffer */
        dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
-       if (unlikely(!dma_addr)) {
+       if (unlikely(dma_mapping_error(dev, dma_addr))) {
                dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
                return NULL;
        }
@@ -1546,8 +1545,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
        knav_queue_disable_notify(netcp->rx_queue);
 
        /* open Rx FDQs */
-       for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
-            netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
+       for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
+            ++i) {
                snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
                netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
                if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1941,14 +1940,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
                netcp->rx_queue_depths[0] = 128;
        }
 
-       ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
-                                        netcp->rx_buffer_sizes,
-                                        KNAV_DMA_FDQ_PER_CHAN);
-       if (ret) {
-               dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
-               netcp->rx_buffer_sizes[0] = 1536;
-       }
-
        ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
        if (ret < 0) {
                dev_err(dev, "missing \"rx-pool\" parameter\n");
index 2ffbf13471d09ad4c27d8c70fbb4dd3145befa75..216bfd350169a9da723035876d152e89b17c8432 100644 (file)
@@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty)
        dev->type = ARPHRD_AX25;
 
        /* Perform the low-level AX25 initialization. */
-       if ((err = ax_open(ax->dev))) {
+       err = ax_open(ax->dev);
+       if (err)
                goto out_free_netdev;
-       }
 
-       if (register_netdev(dev))
+       err = register_netdev(dev);
+       if (err)
                goto out_free_buffers;
 
        /* after register_netdev() - because else printk smashes the kernel */
index 3cc316cb7e6be792b06dfc2c520eae9809f1008b..d8757bf9ad755ed6a3114d9d0a9664e59618744a 100644 (file)
@@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
 
        netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
 
+       if (len < 0) {
+               ndev->stats.rx_errors++;
+               ndev->stats.rx_length_errors++;
+               goto enqueue_again;
+       }
+
        skb_put(skb, len);
        skb->protocol = eth_type_trans(skb, ndev);
        skb->ip_summed = CHECKSUM_NONE;
@@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
                return;
        }
 
+enqueue_again:
        rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
        if (rc) {
                dev_kfree_skb(skb);
@@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev)
 
                rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
                                              ndev->mtu + ETH_HLEN);
-               if (rc == -EINVAL) {
+               if (rc) {
                        dev_kfree_skb(skb);
                        goto err;
                }
index b2197b506acbe86f3540d5ae1d8334129c2bbe57..1e1fbb049ec63b79c1d8255c517364f739a8d38b 100644 (file)
@@ -811,6 +811,7 @@ void phy_state_machine(struct work_struct *work)
        bool needs_aneg = false, do_suspend = false;
        enum phy_state old_state;
        int err = 0;
+       int old_link;
 
        mutex_lock(&phydev->lock);
 
@@ -896,11 +897,18 @@ void phy_state_machine(struct work_struct *work)
                phydev->adjust_link(phydev->attached_dev);
                break;
        case PHY_RUNNING:
-               /* Only register a CHANGE if we are
-                * polling or ignoring interrupts
+               /* Only register a CHANGE if we are polling or ignoring
+                * interrupts, and the link has changed since the last check.
                 */
-               if (!phy_interrupt_is_valid(phydev))
-                       phydev->state = PHY_CHANGELINK;
+               if (!phy_interrupt_is_valid(phydev)) {
+                       old_link = phydev->link;
+                       err = phy_read_status(phydev);
+                       if (err)
+                               break;
+
+                       if (old_link != phydev->link)
+                               phydev->state = PHY_CHANGELINK;
+               }
                break;
        case PHY_CHANGELINK:
                err = phy_read_status(phydev);
index c0f6479e19d48e51fd76c3bbce161ffdd7846842..70b08958763a129fff47ad00a1db130c1334f254 100644 (file)
@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
 }
 
 /*
- * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
- * other in order to set the ENERGYON bit and exit EDPD mode.  If a link partner
- * does send the pulses within this interval, the PHY will remained powered
- * down.
- *
- * This workaround will manually toggle the PHY on/off upon calls to read_status
- * in order to generate link test pulses if the link is down.  If a link partner
- * is present, it will respond to the pulses, which will cause the ENERGYON bit
- * to be set and will cause the EDPD mode to be exited.
+ * The LAN87xx suffers from a rare absence of the ENERGYON bit when the
+ * Ethernet cable is plugged in while the LAN87xx is in Energy Detect
+ * Power-Down mode, which makes cable-plug detection unreliable.
+ * This workaround disables Energy Detect Power-Down mode and waits for a
+ * response to link pulses to detect the presence of a plugged-in cable.
+ * Energy Detect Power-Down mode is re-enabled at the end of the procedure
+ * to save approximately 220 mW of power if the cable is unplugged.
  */
 static int lan87xx_read_status(struct phy_device *phydev)
 {
        int err = genphy_read_status(phydev);
+       int i;
 
        if (!phydev->link) {
                /* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
                if (rc < 0)
                        return rc;
 
-               /* Sleep 64 ms to allow ~5 link test pulses to be sent */
-               msleep(64);
+               /* Wait max 640 ms to detect energy */
+               for (i = 0; i < 64; i++) {
+                       /* Sleep to allow link test pulses to be sent */
+                       msleep(10);
+                       rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+                       if (rc < 0)
+                               return rc;
+                       if (rc & MII_LAN83C185_ENERGYON)
+                               break;
+               }
 
                /* Re-enable EDPD */
                rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
@@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
 
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
+       .read_status    = lan87xx_read_status,
        .config_init    = smsc_phy_config_init,
        .soft_reset     = smsc_phy_reset,
 
index 9d15566521a719b525a28a009f1d999c91a00da2..fa8f5046afe90627242f6d2d523178da653c9427 100644 (file)
@@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
 static void ppp_ccp_closed(struct ppp *ppp);
 static struct compressor *find_compressor(int type);
 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+                                       struct file *file, int *retp);
 static void init_ppp_file(struct ppp_file *pf, int kind);
-static void ppp_shutdown_interface(struct ppp *ppp);
 static void ppp_destroy_interface(struct ppp *ppp);
 static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
 static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file)
                file->private_data = NULL;
                if (pf->kind == INTERFACE) {
                        ppp = PF_TO_PPP(pf);
+                       rtnl_lock();
                        if (file == ppp->owner)
-                               ppp_shutdown_interface(ppp);
+                               unregister_netdevice(ppp->dev);
+                       rtnl_unlock();
                }
                if (atomic_dec_and_test(&pf->refcnt)) {
                        switch (pf->kind) {
@@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                mutex_lock(&ppp_mutex);
                if (pf->kind == INTERFACE) {
                        ppp = PF_TO_PPP(pf);
+                       rtnl_lock();
                        if (file == ppp->owner)
-                               ppp_shutdown_interface(ppp);
+                               unregister_netdevice(ppp->dev);
+                       rtnl_unlock();
                }
                if (atomic_long_read(&file->f_count) < 2) {
                        ppp_release(NULL, file);
@@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
                /* Create a new ppp unit */
                if (get_user(unit, p))
                        break;
-               ppp = ppp_create_interface(net, unit, &err);
+               ppp = ppp_create_interface(net, unit, file, &err);
                if (!ppp)
                        break;
                file->private_data = &ppp->file;
-               ppp->owner = file;
                err = -EFAULT;
                if (put_user(ppp->file.index, p))
                        break;
@@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net)
 static __net_exit void ppp_exit_net(struct net *net)
 {
        struct ppp_net *pn = net_generic(net, ppp_net_id);
+       struct ppp *ppp;
+       LIST_HEAD(list);
+       int id;
+
+       rtnl_lock();
+       idr_for_each_entry(&pn->units_idr, ppp, id)
+               unregister_netdevice_queue(ppp->dev, &list);
+
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
 
        idr_destroy(&pn->units_idr);
 }
@@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev)
        return 0;
 }
 
+static void ppp_dev_uninit(struct net_device *dev)
+{
+       struct ppp *ppp = netdev_priv(dev);
+       struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+
+       ppp_lock(ppp);
+       ppp->closing = 1;
+       ppp_unlock(ppp);
+
+       mutex_lock(&pn->all_ppp_mutex);
+       unit_put(&pn->units_idr, ppp->file.index);
+       mutex_unlock(&pn->all_ppp_mutex);
+
+       ppp->owner = NULL;
+
+       ppp->file.dead = 1;
+       wake_up_interruptible(&ppp->file.rwait);
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
        .ndo_init        = ppp_dev_init,
+       .ndo_uninit      = ppp_dev_uninit,
        .ndo_start_xmit  = ppp_start_xmit,
        .ndo_do_ioctl    = ppp_net_ioctl,
        .ndo_get_stats64 = ppp_get_stats64,
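[Annotation] Routing teardown through .ndo_uninit means every path that unregisters the netdev (file release, the ioctl detach path, or net-namespace exit via unregister_netdevice_queue()) runs the unit cleanup exactly once, serialized by RTNL, since the core calls ndo_uninit during unregistration with RTNL held. That is what allows ppp_shutdown_interface() to be deleted below; the call sites shrink to:

    rtnl_lock();
    unregister_netdevice(ppp->dev);   /* core invokes ppp_dev_uninit() */
    rtnl_unlock();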
@@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
  * or if there is already a unit with the requested number.
  * unit == -1 means allocate a new number.
  */
-static struct ppp *
-ppp_create_interface(struct net *net, int unit, int *retp)
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+                                       struct file *file, int *retp)
 {
        struct ppp *ppp;
        struct ppp_net *pn;
@@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
        ppp->mru = PPP_MRU;
        init_ppp_file(&ppp->file, INTERFACE);
        ppp->file.hdrlen = PPP_HDRLEN - 2;      /* don't count proto bytes */
+       ppp->owner = file;
        for (i = 0; i < NUM_NP; ++i)
                ppp->npmode[i] = NPMODE_PASS;
        INIT_LIST_HEAD(&ppp->channels);
@@ -2775,34 +2809,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
        init_waitqueue_head(&pf->rwait);
 }
 
-/*
- * Take down a ppp interface unit - called when the owning file
- * (the one that created the unit) is closed or detached.
- */
-static void ppp_shutdown_interface(struct ppp *ppp)
-{
-       struct ppp_net *pn;
-
-       pn = ppp_pernet(ppp->ppp_net);
-       mutex_lock(&pn->all_ppp_mutex);
-
-       /* This will call dev_close() for us. */
-       ppp_lock(ppp);
-       if (!ppp->closing) {
-               ppp->closing = 1;
-               ppp_unlock(ppp);
-               unregister_netdev(ppp->dev);
-               unit_put(&pn->units_idr, ppp->file.index);
-       } else
-               ppp_unlock(ppp);
-
-       ppp->file.dead = 1;
-       ppp->owner = NULL;
-       wake_up_interruptible(&ppp->file.rwait);
-
-       mutex_unlock(&pn->all_ppp_mutex);
-}
-
 /*
  * Free the memory used by a ppp unit.  This is only called once
  * there are no channels connected to the unit and no file structs
index 9d43460ce3c71f0b54c69b84fa5a0ec8b7341d5f..64a60afbe50cc4ca0ff12b65ad6331a5fcc5e3a7 100644 (file)
@@ -785,6 +785,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a8, 8)},    /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x03f0, 0x581d, 4)},    /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 
        /* 4. Gobi 1000 devices */
index 7fbca37a1adffe5d46d3603e9fd44d4dbd16d331..237f8e5e493ddaae958684e8ed411c6f7f2363d6 100644 (file)
@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
        /* Do we support "hardware" checksums? */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
-               dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+               dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
                if (csum)
-                       dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+                       dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 
                if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
index 7193b7304fdd3ed4b69c0125732d4a024d4a4b36..848ea6a399f236b14cc5d9a79dd38038e0331aec 100644 (file)
@@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma)
                chan->netdev->base_addr = chan->cosa->datareg;
                chan->netdev->irq = chan->cosa->irq;
                chan->netdev->dma = chan->cosa->dma;
-               if (register_hdlc_device(chan->netdev)) {
+               err = register_hdlc_device(chan->netdev);
+               if (err) {
                        netdev_warn(chan->netdev,
                                    "register_hdlc_device() failed\n");
                        free_netdev(chan->netdev);
index 25d1cbd34306e03ea4827e09509c345d11b5a1df..b2f0d245bcf3a0e71fb96797c47f71ad0ca736db 100644 (file)
@@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
                switch (phy->rev) {
                case 6:
                case 5:
-                       if (sprom->fem.ghz5.extpa_gain == 3)
+                       if (sprom->fem.ghz2.extpa_gain == 3)
                                return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
                        /* fall through */
                case 4:
index 5000bfcded617f32ca177ae7a0711f13a3ce65d3..5514ad6d4e54373d2a48564ec790489dfe1808dc 100644 (file)
@@ -1023,7 +1023,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
        cmd->scan_priority =
                iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 
-       if (iwl_mvm_scan_total_iterations(params) == 0)
+       if (iwl_mvm_scan_total_iterations(params) == 1)
                cmd->ooc_priority =
                        iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
        else
index 6203c4ad9bba5d8ce3bb2f46f832c3bb4ebe85fc..9e144e71da0b5980264702a6210684cfa34edab5 100644 (file)
@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
                if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
                        iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                          APMG_PCIDEV_STT_VAL_WAKE_ME);
-               else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+                       iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                                   CSR_RESET_LINK_PWR_MGMT_DISABLED);
                        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                                    CSR_HW_IF_CONFIG_REG_PREPARE |
                                    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+                       mdelay(1);
+                       iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                                     CSR_RESET_LINK_PWR_MGMT_DISABLED);
+               }
                mdelay(5);
        }
 
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
        if (ret >= 0)
                return 0;
 
+       iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                   CSR_RESET_LINK_PWR_MGMT_DISABLED);
+       msleep(1);
+
        for (iter = 0; iter < 10; iter++) {
                /* If HW is not ready, prepare the conditions to check again */
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
                do {
                        ret = iwl_pcie_set_hw_ready(trans);
-                       if (ret >= 0)
-                               return 0;
+                       if (ret >= 0) {
+                               ret = 0;
+                               goto out;
+                       }
 
                        usleep_range(200, 1000);
                        t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
        IWL_ERR(trans, "Couldn't prepare the card\n");
 
+out:
+       iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                     CSR_RESET_LINK_PWR_MGMT_DISABLED);
+
        return ret;
 }
 
index 2b86c2135de36f627b397add88628bc47aa37271..607acb53c847558793d47a528d0830f9c79c8cbf 100644 (file)
@@ -1875,8 +1875,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
        /* start timer if queue currently empty */
        if (q->read_ptr == q->write_ptr) {
-               if (txq->wd_timeout)
-                       mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+               if (txq->wd_timeout) {
+                       /*
+                        * If the TXQ is active, arm the timer now; if not,
+                        * stash the timeout in frozen_expiry_remainder so
+                        * the timer is armed with the right value once the
+                        * station wakes up.
+                        */
+                       if (!txq->frozen)
+                               mod_timer(&txq->stuck_timer,
+                                         jiffies + txq->wd_timeout);
+                       else
+                               txq->frozen_expiry_remainder = txq->wd_timeout;
+               }
                IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
                iwl_trans_pcie_ref(trans);
        }
index b6cc9ff47fc2e59b3cc92fe4002f05e59d981e56..1c6788aecc62658fe2cc7c4961415ef4715c8dde 100644 (file)
@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                (struct rsi_91x_sdiodev *)adapter->rsi_dev;
        u32 len;
        u32 num_blocks;
+       const u8 *fw;
        const struct firmware *fw_entry = NULL;
        u32 block_size = dev->tx_blk_size;
        int status = 0;
@@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                return status;
        }
 
+       /* Copy firmware into DMA-accessible memory */
+       fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+       if (!fw)
+               return -ENOMEM;
        len = fw_entry->size;
 
        if (len % 4)
@@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
        rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
        rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 
-       status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
+       status = rsi_copy_to_card(common, fw, len, num_blocks);
+       kfree(fw);
        release_firmware(fw_entry);
        return status;
 }
index 1106ce76707e1095fd523c5c541fa3740e9c9496..30c2cf7fa93b0c6015d22236c3a9189b462cb064 100644 (file)
@@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                return status;
        }
 
+       /* Copy firmware into DMA-accessible memory */
        fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+       if (!fw)
+               return -ENOMEM;
        len = fw_entry->size;
 
        if (len % 4)
@@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
        rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 
        status = rsi_copy_to_card(common, fw, len, num_blocks);
+       kfree(fw);
        release_firmware(fw_entry);
        return status;
 }
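[Annotation] Both the SDIO hunk above and this USB hunk apply the same fix: the data handed back by request_firmware() is const and may live in memory that is not DMA-safe, so the image is duplicated with kmemdup() into a kmalloc'd buffer and freed after the transfer. The shape of the pattern, sketched with hypothetical FIRMWARE_NAME and do_dma_load() stand-ins (the sketch also releases the firmware on the allocation-failure path):

    const struct firmware *fw_entry;
    const u8 *fw;
    int status;

    if (request_firmware(&fw_entry, FIRMWARE_NAME, dev))
            return -ENOENT;

    fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);  /* DMA-safe copy */
    if (!fw) {
            release_firmware(fw_entry);
            return -ENOMEM;
    }

    status = do_dma_load(dev, fw, fw_entry->size);

    kfree(fw);
    release_firmware(fw_entry);
    return status;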
index 3b3a88b53b119909112a806ee71ab4d4bfa67a79..585d0883c7e58760eed503de64ced513c8331c82 100644 (file)
@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+       struct rtl_tcb_desc tcb_desc;
 
-       if (skb)
-               rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
+       if (skb) {
+               memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+               rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+       }
 }
 
 static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
index 1017f02d7bf7520868b25330493822e08f7826a0..7bf88d9dcdc3fc4732170c121ef78d5a85ca3fa9 100644 (file)
@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
 module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
 module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
                   bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
index 1a83e190fc15e4158b5e441cc267ad149d465abe..28577a31549d1569032d63457464fe11fdf44d32 100644 (file)
@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
 {
        atomic_dec(&queue->inflight_packets);
+
+       /* Wake the dealloc thread _after_ decrementing inflight_packets so
+        * that if kthread_stop() has already been called, the dealloc thread
+        * does not wait forever with nothing to wake it.
+        */
+       wake_up(&queue->dealloc_wq);
 }
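[Annotation] Moving the wake_up() here, after the atomic_dec(), closes a shutdown race: the dealloc kthread's wait condition looks at inflight_packets, so a wakeup issued before the decrement, or only from the zerocopy callback (removed further down), could leave kthread_stop() blocked forever. The consumer presumably follows the usual kthread wait loop, roughly:

    while (!kthread_should_stop()) {
            wait_event_interruptible(queue->dealloc_wq,
                                     dealloc_work_todo(queue) ||
                                     kthread_should_stop());
            /* ... unmap and return completed slots ... */
    }

(dealloc_work_todo() stands in for whatever predicate the real thread checks.)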
 
 int xenvif_schedulable(struct xenvif *vif)
index 7d50711476fe1e88debca95beb790d770261f036..3f44b522b8311a2c64eba48e6a9b7217ea0cb3a7 100644 (file)
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
                                                        struct sk_buff *skb,
                                                        struct xen_netif_tx_request *txp,
-                                                       struct gnttab_map_grant_ref *gop)
+                                                       struct gnttab_map_grant_ref *gop,
+                                                       unsigned int frag_overflow,
+                                                       struct sk_buff *nskb)
 {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        skb_frag_t *frags = shinfo->frags;
        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
        int start;
        pending_ring_idx_t index;
-       unsigned int nr_slots, frag_overflow = 0;
+       unsigned int nr_slots;
 
-       /* At this point shinfo->nr_frags is in fact the number of
-        * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
-        */
-       if (shinfo->nr_frags > MAX_SKB_FRAGS) {
-               frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
-               BUG_ON(frag_overflow > MAX_SKB_FRAGS);
-               shinfo->nr_frags = MAX_SKB_FRAGS;
-       }
        nr_slots = shinfo->nr_frags;
 
        /* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
        }
 
        if (frag_overflow) {
-               struct sk_buff *nskb = xenvif_alloc_skb(0);
-               if (unlikely(nskb == NULL)) {
-                       if (net_ratelimit())
-                               netdev_err(queue->vif->dev,
-                                          "Can't allocate the frag_list skb.\n");
-                       return NULL;
-               }
 
                shinfo = skb_shinfo(nskb);
                frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                     unsigned *copy_ops,
                                     unsigned *map_ops)
 {
-       struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
-       struct sk_buff *skb;
+       struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+       struct sk_buff *skb, *nskb;
        int ret;
+       unsigned int frag_overflow;
 
        while (skb_queue_len(&queue->tx_queue) < budget) {
                struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        break;
                }
 
+               skb_shinfo(skb)->nr_frags = ret;
+               if (data_len < txreq.size)
+                       skb_shinfo(skb)->nr_frags++;
+               /* At this point shinfo->nr_frags is in fact the number of
+                * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+                */
+               frag_overflow = 0;
+               nskb = NULL;
+               if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+                       frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+                       BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+                       skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+                       nskb = xenvif_alloc_skb(0);
+                       if (unlikely(nskb == NULL)) {
+                               kfree_skb(skb);
+                               xenvif_tx_err(queue, &txreq, idx);
+                               if (net_ratelimit())
+                                       netdev_err(queue->vif->dev,
+                                                  "Can't allocate the frag_list skb.\n");
+                               break;
+                       }
+               }
+
                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
                                /* Failure in xenvif_set_skb_gso is fatal. */
                                kfree_skb(skb);
+                               kfree_skb(nskb);
                                break;
                        }
                }
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                (*copy_ops)++;
 
-               skb_shinfo(skb)->nr_frags = ret;
                if (data_len < txreq.size) {
-                       skb_shinfo(skb)->nr_frags++;
                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                             pending_idx);
                        xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                queue->pending_cons++;
 
-               request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
-               if (request_gop == NULL) {
-                       kfree_skb(skb);
-                       xenvif_tx_err(queue, &txreq, idx);
-                       break;
-               }
-               gop = request_gop;
+               gop = xenvif_get_requests(queue, skb, txfrags, gop,
+                                         frag_overflow, nskb);
 
                __skb_queue_tail(&queue->tx_queue, skb);
 
@@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
                smp_wmb();
                queue->dealloc_prod++;
        } while (ubuf);
-       wake_up(&queue->dealloc_wq);
        spin_unlock_irqrestore(&queue->callback_lock, flags);
 
        if (likely(zerocopy_success))
index 23435f2a5486f806ee8f686fdb898d0815a68013..2e2530743831a19f2ca4db3b313d1f8b2db44ebb 100644 (file)
@@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb)
        ntb->dev.bus = &ntb_bus;
        ntb->dev.parent = &ntb->pdev->dev;
        ntb->dev.release = ntb_dev_release;
-       dev_set_name(&ntb->dev, pci_name(ntb->pdev));
+       dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
 
        ntb->ctx = NULL;
        ntb->ctx_ops = NULL;
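[Annotation] The added "%s" is a format-string hardening fix: dev_set_name() takes a printf-style format, so an externally derived name must be passed as data, never as the format itself. Generic shape of the bug and the fix:

    char *name = get_external_name();   /* hypothetical: may contain '%' */

    dev_set_name(dev, name);            /* wrong: name parsed as a format */
    dev_set_name(dev, "%s", name);      /* right: name treated as plain data */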
index efe3ad4122f2ee1094da78c1bb31d86642b5b3a6..1c6386d5f79c742737e4ee1a8a2b99df686ffaa0 100644 (file)
@@ -142,10 +142,11 @@ struct ntb_transport_qp {
 
        void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len);
+       struct list_head rx_post_q;
        struct list_head rx_pend_q;
        struct list_head rx_free_q;
-       spinlock_t ntb_rx_pend_q_lock;
-       spinlock_t ntb_rx_free_q_lock;
+       /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+       spinlock_t ntb_rx_q_lock;
        void *rx_buff;
        unsigned int rx_index;
        unsigned int rx_max_entry;
@@ -211,6 +212,8 @@ struct ntb_transport_ctx {
        bool link_is_up;
        struct delayed_work link_work;
        struct work_struct link_cleanup;
+
+       struct dentry *debugfs_node_dir;
 };
 
 enum {
@@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
        char *buf;
        ssize_t ret, out_offset, out_count;
 
+       qp = filp->private_data;
+
+       if (!qp || !qp->link_is_up)
+               return 0;
+
        out_count = 1000;
 
        buf = kmalloc(out_count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
-       qp = filp->private_data;
        out_offset = 0;
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "NTB QP stats\n");
@@ -534,6 +541,27 @@ out:
        return entry;
 }
 
+static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
+                                          struct list_head *list,
+                                          struct list_head *to_list)
+{
+       struct ntb_queue_entry *entry;
+       unsigned long flags;
+
+       spin_lock_irqsave(lock, flags);
+
+       if (list_empty(list)) {
+               entry = NULL;
+       } else {
+               entry = list_first_entry(list, struct ntb_queue_entry, entry);
+               list_move_tail(&entry->entry, to_list);
+       }
+
+       spin_unlock_irqrestore(lock, flags);
+
+       return entry;
+}
+
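[Annotation] ntb_list_mv() rounds out the existing list helpers: under the now-unified ntb_rx_q_lock it detaches the head of one list and appends it to the tail of another in a single critical section, so RX entries can migrate rx_pend_q -> rx_post_q without a window in which they sit on neither list. Presumable use on the RX completion path (not shown in this excerpt):

    entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
    if (!entry) {
            /* no pending receive buffer posted; count the overrun */
            return -ENOMEM;
    }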
 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
                                     unsigned int qp_num)
 {
@@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
 }
 
 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
-                     unsigned int size)
+                     resource_size_t size)
 {
        struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
        struct pci_dev *pdev = nt->ndev->pdev;
-       unsigned int xlat_size, buff_size;
+       size_t xlat_size, buff_size;
        int rc;
 
+       if (!size)
+               return -EINVAL;
+
        xlat_size = round_up(size, mw->xlat_align_size);
        buff_size = round_up(size, mw->xlat_align);
 
@@ -627,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
        if (!mw->virt_addr) {
                mw->xlat_size = 0;
                mw->buff_size = 0;
-               dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+               dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
                        buff_size);
                return -ENOMEM;
        }
@@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work)
 
                if (qp->event_handler)
                        qp->event_handler(qp->cb_data, qp->link_is_up);
+
+               tasklet_schedule(&qp->rxc_db_work);
        } else if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
        qp->tx_max_frame = min(transport_mtu, tx_size / 2);
        qp->tx_max_entry = tx_size / qp->tx_max_frame;
 
-       if (nt_debugfs_dir) {
+       if (nt->debugfs_node_dir) {
                char debugfs_name[4];
 
                snprintf(debugfs_name, 4, "qp%d", qp_num);
                qp->debugfs_dir = debugfs_create_dir(debugfs_name,
-                                                    nt_debugfs_dir);
+                                                    nt->debugfs_node_dir);
 
                qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
                                                        qp->debugfs_dir, qp,
@@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
        INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
        INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
 
-       spin_lock_init(&qp->ntb_rx_pend_q_lock);
-       spin_lock_init(&qp->ntb_rx_free_q_lock);
+       spin_lock_init(&qp->ntb_rx_q_lock);
        spin_lock_init(&qp->ntb_tx_free_q_lock);
 
+       INIT_LIST_HEAD(&qp->rx_post_q);
        INIT_LIST_HEAD(&qp->rx_pend_q);
        INIT_LIST_HEAD(&qp->rx_free_q);
        INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
                goto err2;
        }
 
+       if (nt_debugfs_dir) {
+               nt->debugfs_node_dir =
+                       debugfs_create_dir(pci_name(ndev->pdev),
+                                          nt_debugfs_dir);
+       }
+
        for (i = 0; i < qp_count; i++) {
                rc = ntb_transport_init_queue(nt, i);
                if (rc)
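
With the hunk above, every NTB device gets its own debugfs subdirectory named after its PCI device, and the per-queue directories hang off that node instead of the global root. A sketch of the resulting layout, with an invented PCI name and error handling elided:

#include <linux/debugfs.h>

static struct dentry *ntb_make_qp_dir(struct dentry *nt_debugfs_root)
{
	struct dentry *node, *qpdir;

	/* one directory per device, named after pci_name() */
	node = debugfs_create_dir("0000:01:00.0", nt_debugfs_root);
	/* per-queue directory that will hold the "stats" file */
	qpdir = debugfs_create_dir("qp0", node);
	return qpdir;
}
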
@@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
        kfree(nt);
 }
 
-static void ntb_rx_copy_callback(void *data)
+static void ntb_complete_rxc(struct ntb_transport_qp *qp)
 {
-       struct ntb_queue_entry *entry = data;
-       struct ntb_transport_qp *qp = entry->qp;
-       void *cb_data = entry->cb_data;
-       unsigned int len = entry->len;
-       struct ntb_payload_header *hdr = entry->rx_hdr;
+       struct ntb_queue_entry *entry;
+       void *cb_data;
+       unsigned int len;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+
+       while (!list_empty(&qp->rx_post_q)) {
+               entry = list_first_entry(&qp->rx_post_q,
+                                        struct ntb_queue_entry, entry);
+               if (!(entry->flags & DESC_DONE_FLAG))
+                       break;
+
+               entry->rx_hdr->flags = 0;
+               iowrite32(entry->index, &qp->rx_info->entry);
 
-       hdr->flags = 0;
+               cb_data = entry->cb_data;
+               len = entry->len;
 
-       iowrite32(entry->index, &qp->rx_info->entry);
+               list_move_tail(&entry->entry, &qp->rx_free_q);
 
-       ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+               spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
 
-       if (qp->rx_handler && qp->client_ready)
-               qp->rx_handler(qp, qp->cb_data, cb_data, len);
+               if (qp->rx_handler && qp->client_ready)
+                       qp->rx_handler(qp, qp->cb_data, cb_data, len);
+
+               spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+       }
+
+       spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+}
+
+static void ntb_rx_copy_callback(void *data)
+{
+       struct ntb_queue_entry *entry = data;
+
+       entry->flags |= DESC_DONE_FLAG;
+
+       ntb_complete_rxc(entry->qp);
 }
 
 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
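
ntb_complete_rxc() retires receive entries strictly in FIFO order: DMA callbacks may finish out of order and only mark their entry with DESC_DONE_FLAG, while the sweep loop pops completed entries from the head of rx_post_q and drops the lock around the client callback. The same shape reduced to a sketch with hypothetical names:

spin_lock_irqsave(&q->lock, flags);
while (!list_empty(&q->posted)) {
	e = list_first_entry(&q->posted, struct entry, node);
	if (!(e->flags & DONE))			/* head not finished yet */
		break;
	list_move_tail(&e->node, &q->free);	/* retire in order */
	spin_unlock_irqrestore(&q->lock, flags);
	callback(e);				/* runs without the lock held */
	spin_lock_irqsave(&q->lock, flags);
}
spin_unlock_irqrestore(&q->lock, flags);
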
@@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
        ntb_rx_copy_callback(entry);
 }
 
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
-                        size_t len)
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 {
        struct dma_async_tx_descriptor *txd;
        struct ntb_transport_qp *qp = entry->qp;
        struct dma_chan *chan = qp->dma_chan;
        struct dma_device *device;
-       size_t pay_off, buff_off;
+       size_t pay_off, buff_off, len;
        struct dmaengine_unmap_data *unmap;
        dma_cookie_t cookie;
        void *buf = entry->buf;
 
-       entry->len = len;
+       len = entry->len;
 
        if (!chan)
                goto err;
@@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
        struct ntb_payload_header *hdr;
        struct ntb_queue_entry *entry;
        void *offset;
-       int rc;
 
        offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
        hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,65 +1317,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
                return -EIO;
        }
 
-       entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+       entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
        if (!entry) {
                dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
                qp->rx_err_no_buf++;
-
-               rc = -ENOMEM;
-               goto err;
+               return -EAGAIN;
        }
 
+       entry->rx_hdr = hdr;
+       entry->index = qp->rx_index;
+
        if (hdr->len > entry->len) {
                dev_dbg(&qp->ndev->pdev->dev,
                        "receive buffer overflow! Wanted %d got %d\n",
                        hdr->len, entry->len);
                qp->rx_err_oflow++;
 
-               rc = -EIO;
-               goto err;
-       }
+               entry->len = -EIO;
+               entry->flags |= DESC_DONE_FLAG;
 
-       dev_dbg(&qp->ndev->pdev->dev,
-               "RX OK index %u ver %u size %d into buf size %d\n",
-               qp->rx_index, hdr->ver, hdr->len, entry->len);
+               ntb_complete_rxc(qp);
+       } else {
+               dev_dbg(&qp->ndev->pdev->dev,
+                       "RX OK index %u ver %u size %d into buf size %d\n",
+                       qp->rx_index, hdr->ver, hdr->len, entry->len);
 
-       qp->rx_bytes += hdr->len;
-       qp->rx_pkts++;
+               qp->rx_bytes += hdr->len;
+               qp->rx_pkts++;
 
-       entry->index = qp->rx_index;
-       entry->rx_hdr = hdr;
+               entry->len = hdr->len;
 
-       ntb_async_rx(entry, offset, hdr->len);
+               ntb_async_rx(entry, offset);
+       }
 
        qp->rx_index++;
        qp->rx_index %= qp->rx_max_entry;
 
        return 0;
-
-err:
-       /* FIXME: if this synchronous update of the rx_index gets ahead of
-        * asynchronous ntb_rx_copy_callback of previous entry, there are three
-        * scenarios:
-        *
-        * 1) The peer might miss this update, but observe the update
-        * from the memcpy completion callback.  In this case, the buffer will
-        * not be freed on the peer to be reused for a different packet.  The
-        * successful rx of a later packet would clear the condition, but the
-        * condition could persist if several rx fail in a row.
-        *
-        * 2) The peer may observe this update before the asynchronous copy of
-        * prior packets is completed.  The peer may overwrite the buffers of
-        * the prior packets before they are copied.
-        *
-        * 3) Both: the peer may observe the update, and then observe the index
-        * decrement by the asynchronous completion callback.  Who knows what
-        * badness that will cause.
-        */
-       hdr->flags = 0;
-       iowrite32(qp->rx_index, &qp->rx_info->entry);
-
-       return rc;
 }
 
 static void ntb_transport_rxc_db(unsigned long data)
@@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data)
                        break;
        }
 
-       if (qp->dma_chan)
+       if (i && qp->dma_chan)
                dma_async_issue_pending(qp->dma_chan);
 
        if (i == qp->rx_max_entry) {
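
The added `i &&` test skips ringing the DMA engine when the loop submitted nothing. The general dmaengine idiom is to submit any number of descriptors and then issue the whole batch exactly once; a hedged sketch, with the producer helpers invented:

int submitted = 0;

while (have_rx_work(qp)) {			/* hypothetical */
	if (submit_one_descriptor(qp->dma_chan) < 0)
		break;
	submitted++;
}
if (submitted && qp->dma_chan)
	dma_async_issue_pending(qp->dma_chan);	/* one doorbell per batch */
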
@@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
                        goto err1;
 
                entry->qp = qp;
-               ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+               ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
                             &qp->rx_free_q);
        }
 
@@ -1634,7 +1674,7 @@ err2:
        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);
 err1:
-       while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
                kfree(entry);
        if (qp->dma_chan)
                dma_release_channel(qp->dma_chan);
@@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
  */
 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 {
-       struct ntb_transport_ctx *nt = qp->transport;
        struct pci_dev *pdev;
        struct ntb_queue_entry *entry;
        u64 qp_bit;
@@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
        qp->tx_handler = NULL;
        qp->event_handler = NULL;
 
-       while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
                kfree(entry);
 
-       while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
-               dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+               dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
+               kfree(entry);
+       }
+
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+               dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
                kfree(entry);
        }
 
        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);
 
-       nt->qp_bitmap_free |= qp_bit;
+       qp->transport->qp_bitmap_free |= qp_bit;
 
        dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
 }
@@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
        if (!qp || qp->client_ready)
                return NULL;
 
-       entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+       entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
        if (!entry)
                return NULL;
 
        buf = entry->cb_data;
        *len = entry->len;
 
-       ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+       ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
 
        return buf;
 }
@@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
        if (!qp)
                return -EINVAL;
 
-       entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+       entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
        if (!entry)
                return -ENOMEM;
 
        entry->cb_data = cb;
        entry->buf = data;
        entry->len = len;
+       entry->flags = 0;
+
+       ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
 
-       ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+       tasklet_schedule(&qp->rxc_db_work);
 
        return 0;
 }
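
For a client, posting a receive buffer is now also what kicks processing: the new tasklet_schedule() means data that arrived while no buffer was available is drained as soon as one is enqueued. Usage sketch with an invented context pointer and buffer:

int rc;

rc = ntb_transport_rx_enqueue(qp, my_ctx, rx_buf, rx_buf_len);
if (rc == -ENOMEM) {
	/* every queue entry is already in flight; re-post the buffer
	 * from the rx_handler when an entry completes */
}
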
index 73de4efcbe6edc85c8f3fb54e0c925a7ab30492b..944f50015ed07b41b2de0da464812910c411cc1a 100644 (file)
@@ -2,7 +2,7 @@
 # PCI configuration
 #
 config PCI_BUS_ADDR_T_64BIT
-       def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
+       def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
        depends on PCI
 
 config PCI_MSI
index cefd636681b6418ce75376879dc26fe3891bc47d..b978bbfe044c163be9cf1a0a4f35bb1edbbab783 100644 (file)
@@ -997,7 +997,12 @@ void set_pcie_port_type(struct pci_dev *pdev)
        else if (type == PCI_EXP_TYPE_UPSTREAM ||
                 type == PCI_EXP_TYPE_DOWNSTREAM) {
                parent = pci_upstream_bridge(pdev);
-               if (!parent->has_secondary_link)
+
+               /*
+                * Usually there's an upstream device (Root Port or Switch
+                * Downstream Port), but we can't assume one exists.
+                */
+               if (parent && !parent->has_secondary_link)
                        pdev->has_secondary_link = 1;
        }
 }
index cb13299195271ffed4854ed6b0b593c5e8487019..3271cd1abe7c0e6c5d2e813171aa5df494f9d977 100644 (file)
@@ -4,7 +4,6 @@
 
 menuconfig CHROME_PLATFORMS
        bool "Platform support for Chrome hardware"
-       depends on X86 || ARM
        ---help---
          Say Y here to get to see options for platform support for
          various Chromebooks and Chromeboxes. This option alone does
index 26270c351624f229cf85b6069ffe53ee26d50bef..ce129e595b55b663a11600e46571a1dba0d2ccf7 100644 (file)
@@ -39,7 +39,7 @@
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.6.0.17"
+#define DRV_VERSION            "1.6.0.17a"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
index 155b286f1a9d3cc8b366a6a5b7610ae562337f62..25436cd2860cc18a63ac599ed58e3c871c264d8e 100644 (file)
@@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        unsigned long ptr;
        struct fc_rport_priv *rdata;
        spinlock_t *io_lock = NULL;
+       int io_lock_acquired = 0;
 
        if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
                return SCSI_MLQUEUE_HOST_BUSY;
@@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        spin_lock_irqsave(io_lock, flags);
 
        /* initialize rest of io_req */
+       io_lock_acquired = 1;
        io_req->port_id = rport->port_id;
        io_req->start_time = jiffies;
        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
@@ -571,7 +573,7 @@ out:
                  (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
 
        /* we will hold the io lock only if we issued the IO */
-       if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
+       if (io_lock_acquired)
                spin_unlock_irqrestore(io_lock, flags);
 
        atomic_dec(&fnic->in_flight);
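
The fnic change stops inferring lock ownership from FNIC_IO_INITIALIZED and instead records it in a local flag at the moment the lock is taken, which stays correct on every early-exit path. The idiom in isolation (names are illustrative):

unsigned long flags;
int locked = 0;

if (need_io_lock) {			/* hypothetical condition */
	spin_lock_irqsave(io_lock, flags);
	locked = 1;
}
/* ... error paths jump to out ... */
out:
	if (locked)
		spin_unlock_irqrestore(io_lock, flags);
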
index 1b3a094734522803c7f3fecd39da5c297a1feb43..30f9ef0c0d4f8cea52b182f370a04ed1ba4a9908 100644 (file)
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
        if (resp) {
                resp(sp, fp, arg);
                res = true;
-       } else if (!IS_ERR(fp)) {
-               fc_frame_free(fp);
        }
 
        spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
         * If new exch resp handler is valid then call that
         * first.
         */
-       fc_invoke_resp(ep, sp, fp);
+       if (!fc_invoke_resp(ep, sp, fp))
+               fc_frame_free(fp);
 
        fc_exch_release(ep);
        return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
        fc_exch_hold(ep);
        if (!rc)
                fc_exch_delete(ep);
-       fc_invoke_resp(ep, sp, fp);
+       if (!fc_invoke_resp(ep, sp, fp))
+               fc_frame_free(fp);
        if (has_rec)
                fc_exch_timer_set(ep, ep->r_a_tov);
        fc_exch_release(ep);
index c6795941b45d98579cd6117cc5b72f8c5c4d0bf9..2d5909c4685ca63375f5041d960effada171f5fc 100644 (file)
@@ -1039,11 +1039,26 @@ restart:
                fc_fcp_pkt_hold(fsp);
                spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
-               if (!fc_fcp_lock_pkt(fsp)) {
+               spin_lock_bh(&fsp->scsi_pkt_lock);
+               if (!(fsp->state & FC_SRB_COMPL)) {
+                       fsp->state |= FC_SRB_COMPL;
+                       /*
+                        * TODO: dropping scsi_pkt_lock and then reacquiring
+                        * it around fc_fcp_cleanup_cmd() is required, since
+                        * fc_fcp_cleanup_cmd() calls into fc_seq_set_resp(),
+                        * which may sleep via schedule(). Perhaps the
+                        * schedule-based code should be removed instead of
+                        * unlocking here, to avoid a scheduling-while-atomic
+                        * bug.
+                        */
+                       spin_unlock_bh(&fsp->scsi_pkt_lock);
+
                        fc_fcp_cleanup_cmd(fsp, error);
+
+                       spin_lock_bh(&fsp->scsi_pkt_lock);
                        fc_io_compl(fsp);
-                       fc_fcp_unlock_pkt(fsp);
                }
+               spin_unlock_bh(&fsp->scsi_pkt_lock);
 
                fc_fcp_pkt_release(fsp);
                spin_lock_irqsave(&si->scsi_queue_lock, flags);
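
The hunk above open-codes fc_fcp_lock_pkt() so the packet spinlock can be released around fc_fcp_cleanup_cmd(), which may end up scheduling. The workaround's general shape: claim the object under the lock, drop the lock for the sleeping call, retake it to finish (sketch, hypothetical names):

spin_lock_bh(&pkt->lock);
if (!(pkt->state & COMPLETED)) {
	pkt->state |= COMPLETED;	/* claim while locked */
	spin_unlock_bh(&pkt->lock);
	cleanup(pkt);			/* may sleep: no spinlock held */
	spin_lock_bh(&pkt->lock);
	complete_io(pkt);
}
spin_unlock_bh(&pkt->lock);
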
index 8053f24f03499335112721cd50c2816da40393a6..98d9bb6ff725ff46621a408bdf1f208175e21daf 100644 (file)
@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
-       unsigned long flags;
 
        del_timer_sync(&conn->transport_timer);
 
+       mutex_lock(&session->eh_mutex);
        spin_lock_bh(&session->frwd_lock);
        conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
        if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        }
        spin_unlock_bh(&session->frwd_lock);
 
-       /*
-        * Block until all in-progress commands for this connection
-        * time out or fail.
-        */
-       for (;;) {
-               spin_lock_irqsave(session->host->host_lock, flags);
-               if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
-                       spin_unlock_irqrestore(session->host->host_lock, flags);
-                       break;
-               }
-               spin_unlock_irqrestore(session->host->host_lock, flags);
-               msleep_interruptible(500);
-               iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
-                                 "host_busy %d host_failed %d\n",
-                                 atomic_read(&session->host->host_busy),
-                                 session->host->host_failed);
-               /*
-                * force eh_abort() to unblock
-                */
-               wake_up(&conn->ehwait);
-       }
-
        /* flush queued up work because we free the connection below */
        iscsi_suspend_tx(conn);
 
@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        if (session->leadconn == conn)
                session->leadconn = NULL;
        spin_unlock_bh(&session->frwd_lock);
+       mutex_unlock(&session->eh_mutex);
 
        iscsi_destroy_conn(cls_conn);
 }
index cfadccef045c5f91d9efb5f575c98d1d9c68b090..6457a8a0db9c37ed8892c28effe768a533f2e7f3 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/jiffies.h>
-#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -2523,33 +2522,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
        }
 }
 EXPORT_SYMBOL(scsi_build_sense_buffer);
-
-/**
- * scsi_set_sense_information - set the information field in a
- *             formatted sense data buffer
- * @buf:       Where to build sense data
- * @info:      64-bit information value to be set
- *
- **/
-void scsi_set_sense_information(u8 *buf, u64 info)
-{
-       if ((buf[0] & 0x7f) == 0x72) {
-               u8 *ucp, len;
-
-               len = buf[7];
-               ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
-               if (!ucp) {
-                       buf[7] = len + 0xa;
-                       ucp = buf + 8 + len;
-               }
-               ucp[0] = 0;
-               ucp[1] = 0xa;
-               ucp[2] = 0x80; /* Valid bit */
-               ucp[3] = 0;
-               put_unaligned_be64(info, &ucp[4]);
-       } else if ((buf[0] & 0x7f) == 0x70) {
-               buf[0] |= 0x80;
-               put_unaligned_be64(info, &buf[3]);
-       }
-}
-EXPORT_SYMBOL(scsi_set_sense_information);
index 9e43ae1d2163dacaae769a816dc6973cfabbdca0..e4b7998379485454d26e39f6a5a91917a155ffbb 100644 (file)
@@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev)
 {
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        struct scsi_device *sdev = to_scsi_device(dev);
-       int err;
+       int err = 0;
 
-       err = blk_pre_runtime_suspend(sdev->request_queue);
-       if (err)
-               return err;
-       if (pm && pm->runtime_suspend)
+       if (pm && pm->runtime_suspend) {
+               err = blk_pre_runtime_suspend(sdev->request_queue);
+               if (err)
+                       return err;
                err = pm->runtime_suspend(dev);
-       blk_post_runtime_suspend(sdev->request_queue, err);
-
+               blk_post_runtime_suspend(sdev->request_queue, err);
+       }
        return err;
 }
 
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err = 0;
 
-       blk_pre_runtime_resume(sdev->request_queue);
-       if (pm && pm->runtime_resume)
+       if (pm && pm->runtime_resume) {
+               blk_pre_runtime_resume(sdev->request_queue);
                err = pm->runtime_resume(dev);
-       blk_post_runtime_resume(sdev->request_queue, err);
-
+               blk_post_runtime_resume(sdev->request_queue, err);
+       }
        return err;
 }
 
index 3b2fcb4fada0491c4500b42555fdef542b4399d2..a20da8c25b4f960224fb4d772aafea38c57e1656 100644 (file)
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
        max_xfer = sdkp->max_xfer_blocks;
        max_xfer <<= ilog2(sdp->sector_size) - 9;
 
-       max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
-                               max_xfer);
-       blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
+       sdkp->disk->queue->limits.max_sectors =
+               min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+
        set_capacity(disk, sdkp->capacity);
        sd_config_write_same(sdkp);
        kfree(buffer);
index cd77a064c772f1bbe897482d2372fe5bc6434328..fd092909a4577a7c4a708516bf3344fee331ad84 100644 (file)
@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
 
        conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
-       if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+       if (hdr->flags & ISCSI_FLAG_CMD_READ)
                cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
-       } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+       else
                cmd->targ_xfer_tag = 0xFFFFFFFF;
        cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
        cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
index c2e9fea90b4a4bc16a0384d79fa9684c9f4176e0..860e840461778271191ba4ee8f1a4011dba5e78c 100644 (file)
@@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
                if (!strcmp(t->tf_ops->name, fo->name)) {
                        BUG_ON(atomic_read(&t->tf_access_cnt));
                        list_del(&t->tf_list);
+                       mutex_unlock(&g_tf_lock);
+                       /*
+                        * Wait for any outstanding fabric se_deve_entry->rcu_head
+                        * callbacks to complete post kfree_rcu(), before allowing
+                        * fabric driver unload of TFO->module to proceed.
+                        */
+                       rcu_barrier();
                        kfree(t);
-                       break;
+                       return;
                }
        }
        mutex_unlock(&g_tf_lock);
index 62ea4e8e70a8935398f2a0e86fc44627dfa3368e..be9cefc07407e80ef5dd7dfcbd8a0d025faf97f6 100644 (file)
@@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
        list_for_each_entry(tb, &backend_list, list) {
                if (tb->ops == ops) {
                        list_del(&tb->list);
+                       mutex_unlock(&backend_mutex);
+                       /*
+                        * Wait for any outstanding backend driver ->rcu_head
+                        * callbacks to complete post TBO->free_device() ->
+                        * call_rcu(), before allowing backend driver module
+                        * unload of target_backend_ops->owner to proceed.
+                        */
+                       rcu_barrier();
                        kfree(tb);
-                       break;
+                       return;
                }
        }
        mutex_unlock(&backend_mutex);
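
Both target hunks apply the same unload-safety rule: unlink the object, drop the registration lock, then let rcu_barrier() flush every pending call_rcu()/kfree_rcu() callback before the object (and the module owning the callbacks) can go away. In outline:

mutex_lock(&registry_lock);
list_del(&obj->list);
mutex_unlock(&registry_lock);	/* dropped first, as in the hunks above */

rcu_barrier();			/* all queued RCU callbacks have run */
kfree(obj);
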
index b5ba1ec3c35476361103d7dca47a1934cdd3289f..f87d4cef6d398c072e953e7eaa6b5d9d5b469d70 100644 (file)
@@ -1203,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
        struct se_dev_entry *deve;
        struct se_session *sess = cmd->se_sess;
        struct se_node_acl *nacl;
+       struct scsi_lun slun;
        unsigned char *buf;
        u32 lun_count = 0, offset = 8;
-
-       if (cmd->data_length < 16) {
-               pr_warn("REPORT LUNS allocation length %u too small\n",
-                       cmd->data_length);
-               return TCM_INVALID_CDB_FIELD;
-       }
+       __be32 len;
 
        buf = transport_kmap_data_sg(cmd);
-       if (!buf)
+       if (cmd->data_length && !buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        /*
@@ -1221,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
         * coming via a target_core_mod PASSTHROUGH op, and not through
         * a $FABRIC_MOD.  In that case, report LUN=0 only.
         */
-       if (!sess) {
-               int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
-               lun_count = 1;
+       if (!sess)
                goto done;
-       }
+
        nacl = sess->se_node_acl;
 
        rcu_read_lock();
@@ -1236,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
                 * See SPC2-R20 7.19.
                 */
                lun_count++;
-               if ((offset + 8) > cmd->data_length)
+               if (offset >= cmd->data_length)
                        continue;
 
-               int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+               int_to_scsilun(deve->mapped_lun, &slun);
+               memcpy(buf + offset, &slun,
+                      min(8u, cmd->data_length - offset));
                offset += 8;
        }
        rcu_read_unlock();
@@ -1248,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
         * See SPC3 r07, page 159.
         */
 done:
-       lun_count *= 8;
-       buf[0] = ((lun_count >> 24) & 0xff);
-       buf[1] = ((lun_count >> 16) & 0xff);
-       buf[2] = ((lun_count >> 8) & 0xff);
-       buf[3] = (lun_count & 0xff);
-       transport_kunmap_data_sg(cmd);
+       /*
+        * If no LUNs are accessible, report virtual LUN 0.
+        */
+       if (lun_count == 0) {
+               int_to_scsilun(0, &slun);
+               if (cmd->data_length > 8)
+                       memcpy(buf + offset, &slun,
+                              min(8u, cmd->data_length - offset));
+               lun_count = 1;
+       }
+
+       if (buf) {
+               len = cpu_to_be32(lun_count * 8);
+               memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
+               transport_kunmap_data_sg(cmd);
+       }
 
        target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
        return 0;
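
REPORT LUNS now honours short allocation lengths instead of rejecting them: every mapped LUN is still counted, but each 8-byte LUN entry is copied with an explicit clamp against the CDB allocation length. The clamped copy in isolation:

if (offset < cmd->data_length)
	memcpy(buf + offset, &slun,
	       min(8u, cmd->data_length - offset));	/* partial copy is fine */
offset += 8;	/* the reported LUN count is never truncated */
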
index 6509c61b96484993333a4198138945f43154fdfd..620dcd405ff6eec9ae65b0af8e93c25daae15d1f 100644 (file)
@@ -68,7 +68,7 @@ struct power_table {
  *     registered cooling device.
  * @cpufreq_state: integer value representing the current state of cpufreq
  *     cooling devices.
- * @cpufreq_val: integer value representing the absolute value of the clipped
+ * @clipped_freq: integer value representing the absolute value of the clipped
  *     frequency.
  * @max_level: maximum cooling level. One less than total number of valid
  *     cpufreq frequencies.
@@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
        int id;
        struct thermal_cooling_device *cool_dev;
        unsigned int cpufreq_state;
-       unsigned int cpufreq_val;
+       unsigned int clipped_freq;
        unsigned int max_level;
        unsigned int *freq_table;       /* In descending order */
        struct cpumask allowed_cpus;
@@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
+static unsigned int cpufreq_dev_count;
+
+static DEFINE_MUTEX(cooling_list_lock);
 static LIST_HEAD(cpufreq_dev_list);
 
 /**
@@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
 {
        struct cpufreq_cooling_device *cpufreq_dev;
 
-       mutex_lock(&cooling_cpufreq_lock);
+       mutex_lock(&cooling_list_lock);
        list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
                if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
-                       mutex_unlock(&cooling_cpufreq_lock);
+                       mutex_unlock(&cooling_list_lock);
                        return get_level(cpufreq_dev, freq);
                }
        }
-       mutex_unlock(&cooling_cpufreq_lock);
+       mutex_unlock(&cooling_list_lock);
 
        pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
        return THERMAL_CSTATE_INVALID;
@@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
                                    unsigned long event, void *data)
 {
        struct cpufreq_policy *policy = data;
-       unsigned long max_freq = 0;
+       unsigned long clipped_freq;
        struct cpufreq_cooling_device *cpufreq_dev;
 
-       switch (event) {
+       if (event != CPUFREQ_ADJUST)
+               return NOTIFY_DONE;
 
-       case CPUFREQ_ADJUST:
-               mutex_lock(&cooling_cpufreq_lock);
-               list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
-                       if (!cpumask_test_cpu(policy->cpu,
-                                             &cpufreq_dev->allowed_cpus))
-                               continue;
+       mutex_lock(&cooling_list_lock);
+       list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+               if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+                       continue;
 
-                       max_freq = cpufreq_dev->cpufreq_val;
+               /*
+                * policy->max is the maximum allowed frequency defined by user
+                * and clipped_freq is the maximum that thermal constraints
+                * allow.
+                *
+                * If clipped_freq is lower than policy->max, then we need to
+                * readjust policy->max.
+                *
+                * But, if clipped_freq is greater than policy->max, we don't
+                * need to do anything.
+                */
+               clipped_freq = cpufreq_dev->clipped_freq;
 
-                       if (policy->max != max_freq)
-                               cpufreq_verify_within_limits(policy, 0,
-                                                            max_freq);
-               }
-               mutex_unlock(&cooling_cpufreq_lock);
+               if (policy->max > clipped_freq)
+                       cpufreq_verify_within_limits(policy, 0, clipped_freq);
                break;
-       default:
-               return NOTIFY_DONE;
        }
+       mutex_unlock(&cooling_list_lock);
 
        return NOTIFY_OK;
 }
@@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 
        clip_freq = cpufreq_device->freq_table[state];
        cpufreq_device->cpufreq_state = state;
-       cpufreq_device->cpufreq_val = clip_freq;
+       cpufreq_device->clipped_freq = clip_freq;
 
        cpufreq_update_policy(cpu);
 
@@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
                        pr_debug("%s: freq:%u KHz\n", __func__, freq);
        }
 
-       cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
+       cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
        cpufreq_dev->cool_dev = cool_dev;
 
        mutex_lock(&cooling_cpufreq_lock);
 
+       mutex_lock(&cooling_list_lock);
+       list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+       mutex_unlock(&cooling_list_lock);
+
        /* Register the notifier for first cpufreq cooling device */
-       if (list_empty(&cpufreq_dev_list))
+       if (!cpufreq_dev_count++)
                cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                          CPUFREQ_POLICY_NOTIFIER);
-       list_add(&cpufreq_dev->node, &cpufreq_dev_list);
-
        mutex_unlock(&cooling_cpufreq_lock);
 
        return cool_dev;
@@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
                return;
 
        cpufreq_dev = cdev->devdata;
-       mutex_lock(&cooling_cpufreq_lock);
-       list_del(&cpufreq_dev->node);
 
        /* Unregister the notifier for the last cpufreq cooling device */
-       if (list_empty(&cpufreq_dev_list))
+       mutex_lock(&cooling_cpufreq_lock);
+       if (!--cpufreq_dev_count)
                cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);
+
+       mutex_lock(&cooling_list_lock);
+       list_del(&cpufreq_dev->node);
+       mutex_unlock(&cooling_list_lock);
+
        mutex_unlock(&cooling_cpufreq_lock);
 
        thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
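
Registration now keys off an explicit device count rather than list emptiness, since the list is guarded by its own lock and can be observed mid-update. The first-user/last-user idiom used by the two hunks:

/* register side */
mutex_lock(&cooling_cpufreq_lock);
if (!cpufreq_dev_count++)
	cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
				  CPUFREQ_POLICY_NOTIFIER);
mutex_unlock(&cooling_cpufreq_lock);

/* unregister side */
mutex_lock(&cooling_cpufreq_lock);
if (!--cpufreq_dev_count)
	cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
				    CPUFREQ_POLICY_NOTIFIER);
mutex_unlock(&cooling_cpufreq_lock);
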
index 63a448f9d93b66eedec7fde829983588fe6251f0..7006860f2f3693b04ee44996c5085e2f94ceae44 100644 (file)
@@ -334,7 +334,7 @@ static int allocate_power(struct thermal_zone_device *tz,
                                      max_allocatable_power, current_temp,
                                      (s32)control_temp - (s32)current_temp);
 
-       devm_kfree(&tz->device, req_power);
+       kfree(req_power);
 unlock:
        mutex_unlock(&tz->lock);
 
@@ -426,7 +426,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
                return -EINVAL;
        }
 
-       params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
+       params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (!params)
                return -ENOMEM;
 
@@ -468,14 +468,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
        return 0;
 
 free:
-       devm_kfree(&tz->device, params);
+       kfree(params);
        return ret;
 }
 
 static void power_allocator_unbind(struct thermal_zone_device *tz)
 {
        dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
-       devm_kfree(&tz->device, tz->governor_data);
+       kfree(tz->governor_data);
        tz->governor_data = NULL;
 }
 
index 658c34bb9076f813058dfb03a47907ca48c6eb0a..1aaf89300621abc811f57f549c25b9a540a21d99 100644 (file)
@@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
        int y;
        int c = scr_readw((u16 *) vc->vc_pos);
 
+       ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+
        if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
                return;
 
-       ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
        if (vc->vc_cursor_type & 0x10)
                fbcon_del_cursor_timer(info);
        else
index 2d98de535e0f7374804474c58de752ffb2848aa4..f888561568d91735a3a765a772630e8b2be0a54a 100644 (file)
@@ -298,7 +298,7 @@ config FB_ARMCLCD
 
 # Helper logic selected only by the ARM Versatile platform family.
 config PLAT_VERSATILE_CLCD
-       def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
+       def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
        depends on ARM
        depends on FB_ARMCLCD && FB=y
 
index 928ee639c0c19ba3a2346737c41b44f38e82c0b5..bf407b6ba15ca0002166a0704124eeda0fb2052e 100644 (file)
@@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent,
                        }
                        prev = port;
                } while (of_node_cmp(port->name, "port") != 0);
+
+               of_node_put(ports);
        }
 
        return port;
@@ -94,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
        if (!port)
                return NULL;
 
-       np = of_get_next_parent(port);
+       np = of_get_parent(port);
 
        for (i = 0; i < 2 && np; ++i) {
                struct property *prop;
index 86bd457d039d2ad9a85e82d3f26a3630e3134da7..50bce45e7f3d47d78163058eff366b32d4f56180 100644 (file)
@@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
                goto err_free_dma;
        }
 
-       ret = clk_enable(priv->clk);
+       ret = clk_prepare_enable(priv->clk);
        if (ret < 0) {
                dev_err(dev, "failed to enable clock\n");
                goto err_misc_deregister;
@@ -685,7 +685,7 @@ err_misc_deregister:
        misc_deregister(&priv->misc_dev);
 
 err_disable_clk:
-       clk_disable(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        return ret;
 }
index 111c2d1911d32ea38e86b11c0af753133ccfab05..b5102aa6090d111e25727f78422c8cbc183f086a 100644 (file)
@@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm,
                index = disp->native_mode;
 
        ret = videomode_from_timings(disp, vm, index);
-       if (ret)
-               return ret;
 
        display_timings_release(disp);
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(of_get_videomode);
index 1495eccb161762f482db77121f5102eb1caf5ff9..96093ae369a5613938962b4970decda66c19d342 100644 (file)
@@ -452,12 +452,10 @@ static void xen_free_irq(unsigned irq)
        irq_free_desc(irq);
 }
 
-static void xen_evtchn_close(unsigned int port, unsigned int cpu)
+static void xen_evtchn_close(unsigned int port)
 {
        struct evtchn_close close;
 
-       xen_evtchn_op_close(port, cpu);
-
        close.port = port;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();
@@ -546,7 +544,7 @@ out:
 
 err:
        pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
-       xen_evtchn_close(evtchn, NR_CPUS);
+       xen_evtchn_close(evtchn);
        return 0;
 }
 
@@ -567,7 +565,7 @@ static void shutdown_pirq(struct irq_data *data)
                return;
 
        mask_evtchn(evtchn);
-       xen_evtchn_close(evtchn, cpu_from_evtchn(evtchn));
+       xen_evtchn_close(evtchn);
        xen_irq_info_cleanup(info);
 }
 
@@ -611,7 +609,7 @@ static void __unbind_from_irq(unsigned int irq)
        if (VALID_EVTCHN(evtchn)) {
                unsigned int cpu = cpu_from_irq(irq);
 
-               xen_evtchn_close(evtchn, cpu);
+               xen_evtchn_close(evtchn);
 
                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
index 6df8aac966b909b2fd5db9d9e4950c448f1621c9..ed673e1acd6159a3ca34dc10238fef8936e43249 100644 (file)
@@ -255,12 +255,6 @@ static void evtchn_fifo_unmask(unsigned port)
        }
 }
 
-static bool evtchn_fifo_is_linked(unsigned port)
-{
-       event_word_t *word = event_word_from_port(port);
-       return sync_test_bit(EVTCHN_FIFO_BIT(LINKED, word), BM(word));
-}
-
 static uint32_t clear_linked(volatile event_word_t *word)
 {
        event_word_t new, old, w;
@@ -287,8 +281,7 @@ static void handle_irq_for_port(unsigned port)
 
 static void consume_one_event(unsigned cpu,
                              struct evtchn_fifo_control_block *control_block,
-                             unsigned priority, unsigned long *ready,
-                             bool drop)
+                             unsigned priority, unsigned long *ready)
 {
        struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
        uint32_t head;
@@ -320,15 +313,13 @@ static void consume_one_event(unsigned cpu,
        if (head == 0)
                clear_bit(priority, ready);
 
-       if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
-               if (likely(!drop))
-                       handle_irq_for_port(port);
-       }
+       if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
+               handle_irq_for_port(port);
 
        q->head[priority] = head;
 }
 
-static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
+static void evtchn_fifo_handle_events(unsigned cpu)
 {
        struct evtchn_fifo_control_block *control_block;
        unsigned long ready;
@@ -340,16 +331,11 @@ static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
 
        while (ready) {
                q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
-               consume_one_event(cpu, control_block, q, &ready, drop);
+               consume_one_event(cpu, control_block, q, &ready);
                ready |= xchg(&control_block->ready, 0);
        }
 }
 
-static void evtchn_fifo_handle_events(unsigned cpu)
-{
-       __evtchn_fifo_handle_events(cpu, false);
-}
-
 static void evtchn_fifo_resume(void)
 {
        unsigned cpu;
@@ -385,26 +371,6 @@ static void evtchn_fifo_resume(void)
        event_array_pages = 0;
 }
 
-static void evtchn_fifo_close(unsigned port, unsigned int cpu)
-{
-       if (cpu == NR_CPUS)
-               return;
-
-       get_online_cpus();
-       if (cpu_online(cpu)) {
-               if (WARN_ON(irqs_disabled()))
-                       goto out;
-
-               while (evtchn_fifo_is_linked(port))
-                       cpu_relax();
-       } else {
-               __evtchn_fifo_handle_events(cpu, true);
-       }
-
-out:
-       put_online_cpus();
-}
-
 static const struct evtchn_ops evtchn_ops_fifo = {
        .max_channels      = evtchn_fifo_max_channels,
        .nr_channels       = evtchn_fifo_nr_channels,
@@ -418,7 +384,6 @@ static const struct evtchn_ops evtchn_ops_fifo = {
        .unmask            = evtchn_fifo_unmask,
        .handle_events     = evtchn_fifo_handle_events,
        .resume            = evtchn_fifo_resume,
-       .close             = evtchn_fifo_close,
 };
 
 static int evtchn_fifo_alloc_control_block(unsigned cpu)
index d18e12315ec0832899fea654dee742298842356a..50c2050a1e32026901217a10029b4b344054e389 100644 (file)
@@ -68,7 +68,6 @@ struct evtchn_ops {
        bool (*test_and_set_mask)(unsigned port);
        void (*mask)(unsigned port);
        void (*unmask)(unsigned port);
-       void (*close)(unsigned port, unsigned cpu);
 
        void (*handle_events)(unsigned cpu);
        void (*resume)(void);
@@ -146,12 +145,6 @@ static inline void xen_evtchn_resume(void)
                evtchn_ops->resume();
 }
 
-static inline void xen_evtchn_op_close(unsigned port, unsigned cpu)
-{
-       if (evtchn_ops->close)
-               return evtchn_ops->close(port, cpu);
-}
-
 void xen_evtchn_2l_init(void);
 int xen_evtchn_fifo_init(void);
 
index 9ad327238ba931243967455b5790916dc6b184f1..e30353575d5da11f75e8c927ba53945a23b73d76 100644 (file)
@@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
 
        rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
                               addrs);
-       if (!rv)
+       if (!rv) {
                vunmap(vaddr);
+               free_xenballooned_pages(node->nr_handles, node->hvm.pages);
+       }
        else
                WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
                     node->nr_handles);
index 80cc1b35d46043c16bc456e0cadf61e76c281d52..ebb5e37455a07acd86f5fbf1b76d474e99b937fb 100644 (file)
@@ -2246,7 +2246,15 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
 
                        err = -EINVAL;
                        if (old) {
-                               struct fuse_dev *fud = fuse_get_dev(old);
+                               struct fuse_dev *fud = NULL;
+
+                               /*
+                                * Check against file->f_op because CUSE
+                                * uses the same ioctl handler.
+                                */
+                               if (old->f_op == file->f_op &&
+                                   old->f_cred->user_ns == file->f_cred->user_ns)
+                                       fud = fuse_get_dev(old);
 
                                if (fud) {
                                        mutex_lock(&fuse_mutex);
index fbbcf0993312eb9c278dc1343ea2db0fe4edf959..1c2105ed20c5ef4fb390878fb5442943ceec29ae 100644 (file)
@@ -879,7 +879,7 @@ static inline int may_follow_link(struct nameidata *nd)
                return 0;
 
        /* Allowed if parent directory not sticky and world-writable. */
-       parent = nd->path.dentry->d_inode;
+       parent = nd->inode;
        if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
                return 0;
 
index 57ca8cc383a615344498202384b1b814911bc766..3b4d8a4a23fb760867fc7d59ede2a3459eac2375 100644 (file)
@@ -743,8 +743,6 @@ struct drm_connector {
        uint8_t num_h_tile, num_v_tile;
        uint8_t tile_h_loc, tile_v_loc;
        uint16_t tile_h_size, tile_v_size;
-
-       struct list_head destroy_list;
 };
 
 /**
index 799050198323e852c7cbb45e3c1236cc67d386ac..53c53c459b15c8207997da61234d0ab9ea2805ae 100644 (file)
@@ -347,6 +347,25 @@ static inline int drm_eld_mnl(const uint8_t *eld)
        return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
 }
 
+/**
+ * drm_eld_sad - Get ELD SAD structures.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
+{
+       unsigned int ver, mnl;
+
+       ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
+       if (ver != 2 && ver != 31)
+               return NULL;
+
+       mnl = drm_eld_mnl(eld);
+       if (mnl > 16)
+               return NULL;
+
+       return eld + DRM_ELD_CEA_SAD(mnl, 0);
+}
+
 /**
  * drm_eld_sad_count - Get ELD SAD count.
  * @eld: pointer to an eld memory structure with sad_count set
index 45c39a37f9249562761dc9615ffecf12ec194846..8bc073d297db2a233cf389d6c0656dec78c0445a 100644 (file)
        {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
index 6c78956aa47092440edb3a73e7b9389ac3a57558..d2992bfa17063a052a08a106b9105284ce4bfa4a 100644 (file)
@@ -385,8 +385,6 @@ enum {
        SATA_SSP                = 0x06, /* Software Settings Preservation */
        SATA_DEVSLP             = 0x09, /* Device Sleep */
 
-       SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
-
        /* feature values for SET_MAX */
        ATA_SET_MAX_ADDR        = 0x00,
        ATA_SET_MAX_PASSWD      = 0x01,
@@ -530,8 +528,6 @@ struct ata_bmdma_prd {
 #define ata_id_cdb_intr(id)    (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
 #define ata_id_has_da(id)      ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
 #define ata_id_has_devslp(id)  ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
-                               ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
 
 static inline bool ata_id_has_hipm(const u16 *id)
 {
@@ -720,20 +716,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
        return false;
 }
 
-static inline bool ata_id_has_sense_reporting(const u16 *id)
-{
-       if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
-               return false;
-       return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
-}
-
-static inline bool ata_id_sense_reporting_enabled(const u16 *id)
-{
-       if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
-               return false;
-       return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
-}
-
 /**
  *     ata_id_major_version    -       get ATA level of drive
  *     @id: Identify data
index d9a366d24e3bb8736cc7acdaa4f541c9720747a6..6240063bdcac4c42da3f108e9b81c5800085c29b 100644 (file)
@@ -344,7 +344,7 @@ struct intel_iommu {
 
 #ifdef CONFIG_INTEL_IOMMU
        unsigned long   *domain_ids; /* bitmap of domains */
-       struct dmar_domain **domains; /* ptr to domains */
+       struct dmar_domain ***domains; /* ptr to domains */
        spinlock_t      lock; /* protect context, domain ids */
        struct root_entry *root_entry; /* virtual address */
 
index 92188b0225bb31f33eba9deacc4b9b88036c1d8e..51744bcf74eec7bee678b9e1f63c3a10f247350e 100644 (file)
@@ -484,6 +484,7 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
 extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
 extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
                                             void *vcpu_info);
+extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
 #endif
 
 /* Handling of unhandled and spurious interrupts: */
index 2e872f92dbac0cecc2c5b3a65fbe7ff8678e48d5..bf6f117fcf4d80cb7de6147e86c6ba19fa13febd 100644 (file)
@@ -1002,6 +1002,34 @@ static inline int page_mapped(struct page *page)
        return atomic_read(&(page)->_mapcount) >= 0;
 }
 
+/*
+ * Return true only if the page has been allocated with
+ * ALLOC_NO_WATERMARKS and the low watermark was not
+ * met implying that the system is under some pressure.
+ */
+static inline bool page_is_pfmemalloc(struct page *page)
+{
+       /*
+        * Page index cannot be this large so this must be
+        * a pfmemalloc page.
+        */
+       return page->index == -1UL;
+}
+
+/*
+ * Only to be called by the page allocator on a freshly allocated
+ * page.
+ */
+static inline void set_page_pfmemalloc(struct page *page)
+{
+       page->index = -1UL;
+}
+
+static inline void clear_page_pfmemalloc(struct page *page)
+{
+       page->index = 0;
+}
+
 /*
  * Different kinds of faults, as returned by handle_mm_fault().
  * Used to decide whether a process gets delivered SIGBUS or
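
With pfmemalloc folded into page->index, only the three accessors above may touch the flag. Expected call sites, sketched (the allocator hook is simplified):

/* page allocator, on a freshly allocated page */
if (alloc_flags & ALLOC_NO_WATERMARKS)	/* emergency allocation */
	set_page_pfmemalloc(page);
else
	clear_page_pfmemalloc(page);

/* consumer, e.g. the networking stack */
if (page_is_pfmemalloc(compound_head(page)))
	skb->pfmemalloc = true;
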
index 0038ac7466fd26e7562a026af68632573e3bb0ea..15549578d55998e5497c5da58a50fa1531e132d8 100644 (file)
@@ -63,15 +63,6 @@ struct page {
                union {
                        pgoff_t index;          /* Our offset within mapping. */
                        void *freelist;         /* sl[aou]b first free object */
-                       bool pfmemalloc;        /* If set by the page allocator,
-                                                * ALLOC_NO_WATERMARKS was set
-                                                * and the low watermark was not
-                                                * met implying that the system
-                                                * is under some pressure. The
-                                                * caller should try ensure
-                                                * this page is only used to
-                                                * free other pages.
-                                                */
                };
 
                union {
index d6cdd6e87d53bcd1b4f390f61f73b1c91b076bdd..9b88536487e667b8727414e4131188db869711d4 100644 (file)
@@ -1602,20 +1602,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
        /*
-        * Propagate page->pfmemalloc to the skb if we can. The problem is
-        * that not all callers have unique ownership of the page. If
-        * pfmemalloc is set, we check the mapping as a mapping implies
-        * page->index is set (index and pfmemalloc share space).
-        * If it's a valid mapping, we cannot use page->pfmemalloc but we
-        * do not lose pfmemalloc information as the pages would not be
-        * allocated using __GFP_MEMALLOC.
+        * Propagate page pfmemalloc to the skb if we can. The problem is
+        * that not all callers have unique ownership of the page but rely
+        * on page_is_pfmemalloc doing the right thing(tm).
         */
        frag->page.p              = page;
        frag->page_offset         = off;
        skb_frag_size_set(frag, size);
 
        page = compound_head(page);
-       if (page->pfmemalloc && !page->mapping)
+       if (page_is_pfmemalloc(page))
                skb->pfmemalloc = true;
 }
 
@@ -2263,7 +2259,7 @@ static inline struct page *dev_alloc_page(void)
 static inline void skb_propagate_pfmemalloc(struct page *page,
                                             struct sk_buff *skb)
 {
-       if (page && page->pfmemalloc)
+       if (page_is_pfmemalloc(page))
                skb->pfmemalloc = true;
 }
 
@@ -2884,11 +2880,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
  *
  * PHY drivers may accept clones of transmitted packets for
  * timestamping via their phy_driver.txtstamp method. These drivers
- * must call this function to return the skb back to the stack, with
- * or without a timestamp.
+ * must call this function to return the skb back to the stack with a
+ * timestamp.
  *
  * @skb: clone of the original outgoing packet
- * @hwtstamps: hardware time stamps, may be NULL if not available
+ * @hwtstamps: hardware time stamps
  *
  */
 void skb_complete_tx_timestamp(struct sk_buff *skb,
index 45534da57759a30d4a8c281bda094c8d4921dba3..644bdc61c387581c29a29d20c0cf1c3fc5085830 100644 (file)
@@ -74,8 +74,6 @@ enum rc_filter_type {
  * @input_dev: the input child device used to communicate events to userspace
  * @driver_type: specifies if protocol decoding is done in hardware or software
  * @idle: used to keep track of RX state
- * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
- *     wakeup protocols is the set of all raw encoders
  * @allowed_protocols: bitmask with the supported RC_BIT_* protocols
  * @enabled_protocols: bitmask with the enabled RC_BIT_* protocols
  * @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols
@@ -136,7 +134,6 @@ struct rc_dev {
        struct input_dev                *input_dev;
        enum rc_driver_type             driver_type;
        bool                            idle;
-       bool                            encode_wakeup;
        u64                             allowed_protocols;
        u64                             enabled_protocols;
        u64                             allowed_wakeup_protocols;
@@ -246,7 +243,6 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev)
 #define US_TO_NS(usec)         ((usec) * 1000)
 #define MS_TO_US(msec)         ((msec) * 1000)
 #define MS_TO_NS(msec)         ((msec) * 1000 * 1000)
-#define NS_TO_US(nsec)         DIV_ROUND_UP(nsec, 1000L)
 
 void ir_raw_event_handle(struct rc_dev *dev);
 int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
@@ -254,9 +250,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type);
 int ir_raw_event_store_with_filter(struct rc_dev *dev,
                                struct ir_raw_event *ev);
 void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
-int ir_raw_encode_scancode(u64 protocols,
-                          const struct rc_scancode_filter *scancode,
-                          struct ir_raw_event *events, unsigned int max);
 
 static inline void ir_raw_event_reset(struct rc_dev *dev)
 {
index 22a44c2f596380856acb1bccf95f14d0c2144994..c192e1b46cdc27372dc56ba704aa630ff113dfd6 100644 (file)
@@ -139,6 +139,7 @@ enum vb2_io_modes {
  * @VB2_BUF_STATE_PREPARING:   buffer is being prepared in videobuf
  * @VB2_BUF_STATE_PREPARED:    buffer prepared in videobuf and by the driver
  * @VB2_BUF_STATE_QUEUED:      buffer queued in videobuf, but not in driver
+ * @VB2_BUF_STATE_REQUEUEING:  re-queue a buffer to the driver
  * @VB2_BUF_STATE_ACTIVE:      buffer queued in driver and possibly used
  *                             in a hardware operation
  * @VB2_BUF_STATE_DONE:                buffer returned from driver to videobuf, but
@@ -152,6 +153,7 @@ enum vb2_buffer_state {
        VB2_BUF_STATE_PREPARING,
        VB2_BUF_STATE_PREPARED,
        VB2_BUF_STATE_QUEUED,
+       VB2_BUF_STATE_REQUEUEING,
        VB2_BUF_STATE_ACTIVE,
        VB2_BUF_STATE_DONE,
        VB2_BUF_STATE_ERROR,
index 4942710ef720ea5716e8cc6ebf0df941e22500ba..8d1d7fa67ec48bad6872be07258066f9410eec6e 100644 (file)
@@ -28,7 +28,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
                                   u64 * info_out);
 
 extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-extern void scsi_set_sense_information(u8 *buf, u64 info);
 
 extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
 
index 1ab2813273cd1cf7d0116ca3d64b08de18013304..8cb3a7ecd6f89a51871e830e6ca3d8e7dbc79587 100644 (file)
@@ -51,11 +51,6 @@ struct tegra_smmu_swgroup {
        unsigned int reg;
 };
 
-struct tegra_smmu_ops {
-       void (*flush_dcache)(struct page *page, unsigned long offset,
-                            size_t size);
-};
-
 struct tegra_smmu_soc {
        const struct tegra_mc_client *clients;
        unsigned int num_clients;
@@ -66,9 +61,8 @@ struct tegra_smmu_soc {
        bool supports_round_robin_arbitration;
        bool supports_request_limit;
 
+       unsigned int num_tlb_lines;
        unsigned int num_asids;
-
-       const struct tegra_smmu_ops *ops;
 };
 
 struct tegra_mc;
index 865a141b118b15874e27b0e56473bac8854c4823..427bc41df3aef3a3f931bd94830f027bafea2661 100644 (file)
@@ -141,6 +141,8 @@ struct snd_soc_tplg_ops {
        int io_ops_count;
 };
 
+#ifdef CONFIG_SND_SOC_TOPOLOGY
+
 /* gets a pointer to data from the firmware block header */
 static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
 {
@@ -165,4 +167,14 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
        const struct snd_soc_tplg_widget_events *events, int num_events,
        u16 event_type);
 
+#else
+
+static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
+                                               u32 index)
+{
+       return 0;
+}
+
+#endif
+
 #endif
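The #else branch above is the usual Kconfig stub pattern: when the option is disabled, callers still compile and link against no-op inlines. A generic sketch (CONFIG_FOO and all names below are hypothetical):

	#ifdef CONFIG_FOO
	int foo_setup(struct foo_dev *dev);
	#else
	static inline int foo_setup(struct foo_dev *dev)
	{
		return 0;	/* nothing to do when CONFIG_FOO is off */
	}
	#endif
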
index 51b8066a223b504ebf14ec287da3e56109a0e674..247c50bd60f0d067ad8884dbe4574adf0bfcf596 100644 (file)
 #include <linux/types.h>
 #include <sound/asound.h>
 
+#ifndef __KERNEL__
+#error This API is an early revision and not enabled in the current
+#error kernel release; it will be enabled in a future kernel version
+#error with incompatible changes to what is here.
+#endif
+
 /*
  * Maximum number of channels topology kcontrol can represent.
  */
index bc3d530cb23efacb2e5695ad85a9bd3898524fa2..b471e5a3863ddbca70f2bf4dee22f40df0345fbe 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -252,6 +252,16 @@ static void sem_rcu_free(struct rcu_head *head)
        ipc_rcu_free(head);
 }
 
+/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers; they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), so the control barrier alone is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()      smp_rmb()
+
 /*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
                sem = sma->sem_base + i;
                spin_unlock_wait(&sem->lock);
        }
+       ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
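In miniature, the pairing the new helper documents (a sketch with a generic lock, not the sem.c code; complex_count stands in for any datum published before the unlock):

	/* writer: publish data, then release the lock */
	complex_count++;
	spin_unlock(&lock);

	/* lock-free reader: the control barrier alone is not enough */
	if (!spin_is_locked(&lock)) {
		ipc_smp_acquire__after_spin_is_unlocked();
		/* the writer's complex_count++ is now guaranteed visible */
	}
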
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                /* Then check that the global lock is free */
                if (!spin_is_locked(&sma->sem_perm.lock)) {
                        /*
-                        * The ipc object lock check must be visible on all
-                        * cores before rechecking the complex count.  Otherwise
-                        * we can race with  another thread that does:
+                        * We need a memory barrier with acquire semantics,
+                        * otherwise we can race with another thread that does:
                         *      complex_count++;
                         *      spin_unlock(sem_perm.lock);
                         */
-                       smp_rmb();
+                       ipc_smp_acquire__after_spin_is_unlocked();
 
                        /*
                         * Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
                rcu_read_lock();
                un = list_entry_rcu(ulp->list_proc.next,
                                    struct sem_undo, list_proc);
-               if (&un->list_proc == &ulp->list_proc)
-                       semid = -1;
-                else
-                       semid = un->semid;
+               if (&un->list_proc == &ulp->list_proc) {
+                       /*
+                        * We must wait for freeary() before freeing this ulp,
+                        * in case we raced with the last sem_undo. There is a
+                        * small window where we exit while freeary() has not
+                        * yet finished unlocking sem_undo_list.
+                        */
+                       spin_unlock_wait(&ulp->lock);
+                       rcu_read_unlock();
+                       break;
+               }
+               spin_lock(&ulp->lock);
+               semid = un->semid;
+               spin_unlock(&ulp->lock);
 
+               /* exit_sem raced with IPC_RMID, nothing to do */
                if (semid == -1) {
                        rcu_read_unlock();
-                       break;
+                       continue;
                }
 
-               sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+               sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
                /* exit_sem raced with IPC_RMID, nothing to do */
                if (IS_ERR(sma)) {
                        rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
                ipc_assert_locked_object(&sma->sem_perm);
                list_del(&un->list_id);
 
-               spin_lock(&ulp->lock);
+               /* We are the last process using this ulp; acquiring ulp->lock
+                * isn't required. Besides that, we are also protected against
+                * IPC_RMID, as we hold the sma->sem_perm lock now.
+                */
                list_del_rcu(&un->list_proc);
-               spin_unlock(&ulp->lock);
 
                /* perform adjustments registered in un */
                for (i = 0; i < sma->sem_nsems; i++) {
index ee14e3a35a2994399edf176e7775e778c395e592..f0acff0f66c91380412dcbc1c899c94b1d3236b0 100644 (file)
@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
        spin_unlock_irq(&callback_lock);
 
        /* use trialcs->mems_allowed as a temp variable */
-       update_nodemasks_hier(cs, &cs->mems_allowed);
+       update_nodemasks_hier(cs, &trialcs->mems_allowed);
 done:
        return retval;
 }
index d3dae3419b99566c127f1682b29f39bb184bbdb1..e6feb51141340a99a248fea0ad1dc17402b0dbdb 100644 (file)
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
 
        perf_pmu_disable(event->pmu);
 
-       event->tstamp_running += tstamp - event->tstamp_stopped;
-
        perf_set_shadow_time(event, ctx, tstamp);
 
        perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
                goto out;
        }
 
+       event->tstamp_running += tstamp - event->tstamp_stopped;
+
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        if (!ctx->nr_active++)
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
                perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-       struct perf_event_context *ctx = event->ctx;
-       int ret = 0, active;
+struct period_event {
+       struct perf_event *event;
        u64 value;
+};
 
-       if (!is_sampling_event(event))
-               return -EINVAL;
-
-       if (copy_from_user(&value, arg, sizeof(value)))
-               return -EFAULT;
-
-       if (!value)
-               return -EINVAL;
+static int __perf_event_period(void *info)
+{
+       struct period_event *pe = info;
+       struct perf_event *event = pe->event;
+       struct perf_event_context *ctx = event->ctx;
+       u64 value = pe->value;
+       bool active;
 
-       raw_spin_lock_irq(&ctx->lock);
+       raw_spin_lock(&ctx->lock);
        if (event->attr.freq) {
-               if (value > sysctl_perf_event_sample_rate) {
-                       ret = -EINVAL;
-                       goto unlock;
-               }
-
                event->attr.sample_freq = value;
        } else {
                event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
                event->pmu->start(event, PERF_EF_RELOAD);
                perf_pmu_enable(ctx->pmu);
        }
+       raw_spin_unlock(&ctx->lock);
 
-unlock:
+       return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+       struct period_event pe = { .event = event, };
+       struct perf_event_context *ctx = event->ctx;
+       struct task_struct *task;
+       u64 value;
+
+       if (!is_sampling_event(event))
+               return -EINVAL;
+
+       if (copy_from_user(&value, arg, sizeof(value)))
+               return -EFAULT;
+
+       if (!value)
+               return -EINVAL;
+
+       if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+               return -EINVAL;
+
+       task = ctx->task;
+       pe.value = value;
+
+       if (!task) {
+               cpu_function_call(event->cpu, __perf_event_period, &pe);
+               return 0;
+       }
+
+retry:
+       if (!task_function_call(task, __perf_event_period, &pe))
+               return 0;
+
+       raw_spin_lock_irq(&ctx->lock);
+       if (ctx->is_active) {
+               raw_spin_unlock_irq(&ctx->lock);
+               task = ctx->task;
+               goto retry;
+       }
+
+       __perf_event_period(&pe);
        raw_spin_unlock_irq(&ctx->lock);
 
-       return ret;
+       return 0;
 }
 
 static const struct file_operations perf_fops;
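The rework above funnels the period update through a cross-CPU function call so it executes where the event is active, with IRQs off. A generic analogue using the public cross-call API (the perf code itself uses file-local wrappers with retry logic; the names below are hypothetical):

	#include <linux/smp.h>

	static void demo_remote_update(void *info)
	{
		/* runs on the target CPU with IRQs disabled, so a plain
		 * raw_spin_lock() is sufficient in here */
	}

	static void demo_update_on(int cpu, void *info)
	{
		smp_call_function_single(cpu, demo_remote_update, info, 1);
	}
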
@@ -4740,12 +4775,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+       /* only the parent has fasync state */
+       if (event->parent)
+               event = event->parent;
+       return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
        ring_buffer_wakeup(event);
 
        if (event->pending_kill) {
-               kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+               kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
                event->pending_kill = 0;
        }
 }
@@ -6124,7 +6167,7 @@ static int __perf_event_overflow(struct perf_event *event,
        else
                perf_event_output(event, data, regs);
 
-       if (event->fasync && event->pending_kill) {
+       if (*perf_event_fasync(event) && event->pending_kill) {
                event->pending_wakeup = 1;
                irq_work_queue(&event->pending);
        }
index b2be01b1aa9dcb7a70792fa381c264b229a106d0..c8aa3f75bc4db8ad7a2242aae6406bfd6f86f8c5 100644 (file)
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
                rb->aux_priv = NULL;
        }
 
-       for (pg = 0; pg < rb->aux_nr_pages; pg++)
-               rb_free_aux_page(rb, pg);
+       if (rb->aux_nr_pages) {
+               for (pg = 0; pg < rb->aux_nr_pages; pg++)
+                       rb_free_aux_page(rb, pg);
 
-       kfree(rb->aux_pages);
-       rb->aux_nr_pages = 0;
+               kfree(rb->aux_pages);
+               rb->aux_nr_pages = 0;
+       }
 }
 
 void rb_free_aux(struct ring_buffer *rb)
index 27f4332c7f84ea8b3d7ec8bb3466a64f225a8487..ae216824e8ca9224c4b76f225ef58d664a3e1726 100644 (file)
@@ -984,6 +984,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
        return -ENOSYS;
 }
 
+/**
+ * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
+ * @data:      Pointer to interrupt specific data
+ * @type:      IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
+ *
+ * Conditional, as the underlying parent chip might not implement it.
+ */
+int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
+{
+       data = data->parent_data;
+
+       if (data->chip->irq_set_type)
+               return data->chip->irq_set_type(data, type);
+
+       return -ENOSYS;
+}
+
 /**
  * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
  * @data:      Pointer to interrupt specific data
@@ -997,7 +1014,7 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data)
                if (data->chip && data->chip->irq_retrigger)
                        return data->chip->irq_retrigger(data);
 
-       return -ENOSYS;
+       return 0;
 }
 
 /**
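A hedged usage sketch for the new irq_chip_set_type_parent() helper (chip name hypothetical): a hierarchical chip that implements no .irq_set_type of its own simply delegates to its parent, like the existing mask/unmask parent helpers:

	static struct irq_chip demo_wakeupgen_chip = {
		.name		= "DEMO-WUGEN",
		.irq_mask	= irq_chip_mask_parent,
		.irq_unmask	= irq_chip_unmask_parent,
		.irq_set_type	= irq_chip_set_type_parent,
	};
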
index 04ab18151cc8fa174a5859124ee07c144f33d505..df19ae4debd09c134d438b57e4ead7c71462c2b6 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/hash.h>
 #include <linux/bootmem.h>
+#include <linux/debug_locks.h>
 
 /*
  * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
        struct __qspinlock *l = (void *)lock;
        struct pv_node *node;
+       u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
 
        /*
         * We must not unlock if SLOW, because in that case we must first
         * unhash. Otherwise it would be possible to have multiple @lock
         * entries, which would be BAD.
         */
-       if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+       if (likely(lockval == _Q_LOCKED_VAL))
                return;
 
+       if (unlikely(lockval != _Q_SLOW_VAL)) {
+               if (debug_locks_silent)
+                       return;
+               WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+               return;
+       }
+
        /*
         * Since the above failed to release, this must be the SLOW path.
         * Therefore start by looking up the blocked node and unhashing it.
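The fix reads the cmpxchg() result once and makes both the decision and the diagnostic from that single observation, instead of re-reading a field that may have changed under it. The same shape in a generic, hypothetical helper:

	#include <linux/atomic.h>
	#include <linux/bug.h>

	static void demo_release(atomic_t *state)
	{
		int old = atomic_cmpxchg(state, 1 /* LOCKED */, 0);

		if (old == 1)
			return;		/* fast path: we released it */
		/* neither LOCKED nor SLOW: warn on the value we acted on */
		WARN(old != 2 /* SLOW */, "demo: corrupted state %d\n", old);
	}
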
index 5e097fa9faf7016470b8283931023a15d20ed97d..84190f02b521c9fc77cee6e6a6722000939ee470 100644 (file)
@@ -807,8 +807,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
-                       timer->flags &= ~TIMER_BASEMASK;
-                       timer->flags |= base->cpu;
+                       WRITE_ONCE(timer->flags,
+                                  (timer->flags & ~TIMER_BASEMASK) | base->cpu);
                }
        }
 
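Since timer->flags is read without holding base->lock elsewhere, the writer must publish both bit updates in a single store; WRITE_ONCE() pairs with a READ_ONCE() on the lockless reader side. A sketch, with field names as in this tree:

	/* writer, while migrating the timer to the new base */
	WRITE_ONCE(timer->flags,
		   (timer->flags & ~TIMER_BASEMASK) | base->cpu);

	/* lockless reader: one atomic load of the whole word */
	u32 flags = READ_ONCE(timer->flags);
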
index 1132d733556dbc330d32eda5460f55e6e067b627..17c75a4246c8bbab8b56fe4d562cd85ea670a21f 100644 (file)
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,7 +16,7 @@ struct cma {
 extern struct cma cma_areas[MAX_CMA_AREAS];
 extern unsigned cma_area_count;
 
-static unsigned long cma_bitmap_maxno(struct cma *cma)
+static inline unsigned long cma_bitmap_maxno(struct cma *cma)
 {
        return cma->count >> cma->order_per_bit;
 }
index 6c513a63ea84c3c7ffd41201b7a419ff7b6dfd5d..7b28e9cdf1c7686428fe49802fced44088043555 100644 (file)
@@ -2,7 +2,7 @@
  * This file contains shadow memory manipulation code.
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  *
 * Some of the code is borrowed from https://github.com/xairy/linux by
  *        Andrey Konovalov <adech.fo@gmail.com>
index 680ceedf810ab4c9cd08c9929f5d445de9f5aa6a..e07c94fbd0ac5a141ecf95ab7d39d046fea13e67 100644 (file)
@@ -2,7 +2,7 @@
  * This file contains error reporting code.
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  *
 * Some of the code is borrowed from https://github.com/xairy/linux by
  *        Andrey Konovalov <adech.fo@gmail.com>
index ea5a936594887c43506fefe82a61a4ad6fcf4274..1f4446a90cef07c67ee1082b83f0ca87ebfefea1 100644 (file)
@@ -1146,8 +1146,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
        }
 
        if (!PageHuge(p) && PageTransHuge(hpage)) {
-               if (unlikely(split_huge_page(hpage))) {
-                       pr_err("MCE: %#lx: thp split failed\n", pfn);
+               if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
+                       if (!PageAnon(hpage))
+                               pr_err("MCE: %#lx: non anonymous thp\n", pfn);
+                       else
+                               pr_err("MCE: %#lx: thp split failed\n", pfn);
                        if (TestClearPageHWPoison(p))
                                atomic_long_sub(nr_pages, &num_poisoned_pages);
                        put_page(p);
@@ -1538,6 +1541,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
                 */
                ret = __get_any_page(page, pfn, 0);
                if (!PageLRU(page)) {
+                       /* Drop the page reference taken by __get_any_page() */
+                       put_page(page);
                        pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
                                pfn, page->flags);
                        return -EIO;
@@ -1567,13 +1572,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
        unlock_page(hpage);
 
        ret = isolate_huge_page(hpage, &pagelist);
-       if (ret) {
-               /*
-                * get_any_page() and isolate_huge_page() takes a refcount each,
-                * so need to drop one here.
-                */
-               put_page(hpage);
-       } else {
+       /*
+        * get_any_page() and isolate_huge_page() take a refcount each,
+        * so we need to drop one here.
+        */
+       put_page(hpage);
+       if (!ret) {
                pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
                return -EBUSY;
        }
index 003dbe4b060d914dae4e93997c372ddd3e1a8e92..6da82bcb0a8b66b7326c1a021a7eac3b476cd85e 100644 (file)
@@ -1277,6 +1277,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
 
        /* create new memmap entry */
        firmware_map_add_hotplug(start, start + size, "System RAM");
+       memblock_add_node(start, size, nid);
 
        goto out;
 
@@ -2013,6 +2014,8 @@ void __ref remove_memory(int nid, u64 start, u64 size)
 
        /* remove memmap entry */
        firmware_map_remove(start, start + size, "System RAM");
+       memblock_free(start, size);
+       memblock_remove(start, size);
 
        arch_remove_memory(start, size);
 
index beda4171080232f37e779a3658045e1f813a1cff..5b5240b7f642de179efa3552245fbf9326d7a206 100644 (file)
@@ -1343,12 +1343,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
        set_page_owner(page, order, gfp_flags);
 
        /*
-        * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
+        * the page is marked pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
         * allocate the page. The expectation is that the caller is taking
         * steps that will free more memory. The caller should avoid the page
         * being used for !PFMEMALLOC purposes.
         */
-       page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+       if (alloc_flags & ALLOC_NO_WATERMARKS)
+               set_page_pfmemalloc(page);
+       else
+               clear_page_pfmemalloc(page);
 
        return 0;
 }
@@ -3345,7 +3348,7 @@ refill:
                atomic_add(size - 1, &page->_count);
 
                /* reset page count bias and offset to start of new frag */
-               nc->pfmemalloc = page->pfmemalloc;
+               nc->pfmemalloc = page_is_pfmemalloc(page);
                nc->pagecnt_bias = size;
                nc->offset = size;
        }
@@ -5060,6 +5063,10 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
 {
        unsigned long zone_start_pfn, zone_end_pfn;
 
+       /* When hot-adding a new node, the node should be empty */
+       if (!node_start_pfn && !node_end_pfn)
+               return 0;
+
        /* Get the start and end of the zone */
        zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
        zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
@@ -5123,6 +5130,10 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
        unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
        unsigned long zone_start_pfn, zone_end_pfn;
 
+       /* When hot-adding a new node, the node should be empty */
+       if (!node_start_pfn && !node_end_pfn)
+               return 0;
+
        zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
        zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
 
index 200e22412a161fc7a2232bcb923f07bc117362b8..bbd0b47dc6a97eecea7650ce6b351e88d5a17295 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1603,7 +1603,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
        }
 
        /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
-       if (unlikely(page->pfmemalloc))
+       if (page_is_pfmemalloc(page))
                pfmemalloc_active = true;
 
        nr_pages = (1 << cachep->gfporder);
@@ -1614,7 +1614,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
                add_zone_page_state(page_zone(page),
                        NR_SLAB_UNRECLAIMABLE, nr_pages);
        __SetPageSlab(page);
-       if (page->pfmemalloc)
+       if (page_is_pfmemalloc(page))
                SetPageSlabPfmemalloc(page);
 
        if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
index 816df0016555ad8a5cf03c8020e0b39b75b0a498..f68c0e50f3c083abe295a1dcd60668321a8f9232 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
        inc_slabs_node(s, page_to_nid(page), page->objects);
        page->slab_cache = s;
        __SetPageSlab(page);
-       if (page->pfmemalloc)
+       if (page_is_pfmemalloc(page))
                SetPageSlabPfmemalloc(page);
 
        start = page_address(page);
index 498454b3c06c3ddf0e8c3989e23bef9360587544..ea79ee9a73489f43a88e57c2eb529fc11d894e5f 100644 (file)
@@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
        struct p9_client *clnt = fid->clnt;
        struct p9_req_t *req;
        int total = 0;
+       *err = 0;
 
        p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
                   fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
@@ -1620,6 +1621,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
        struct p9_client *clnt = fid->clnt;
        struct p9_req_t *req;
        int total = 0;
+       *err = 0;
 
        p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
                                fid->fid, (unsigned long long) offset,
index fb54e6aed096edd267fc211e4cd2a0139fe71f8a..6d0b471eede8639f55b33c5c4d434c8fddef6ee5 100644 (file)
@@ -1138,6 +1138,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  * @hdr_size: size of the encapsulation header
+ *
+ * Returns true if the packet was snooped and consumed by DAT, false if the
+ * packet has to be delivered to the interface.
  */
 bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
                                         struct sk_buff *skb, int hdr_size)
@@ -1145,7 +1148,7 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
        uint16_t type;
        __be32 ip_src, ip_dst;
        uint8_t *hw_src, *hw_dst;
-       bool ret = false;
+       bool dropped = false;
        unsigned short vid;
 
        if (!atomic_read(&bat_priv->distributed_arp_table))
@@ -1174,12 +1177,17 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
        /* if this REPLY is directed to a client of mine, let's deliver the
         * packet to the interface
         */
-       ret = !batadv_is_my_client(bat_priv, hw_dst, vid);
+       dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
+
+       /* if this REPLY is sent on behalf of a client of mine, let's drop the
+        * packet because the client will reply by itself
+        */
+       dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
 out:
-       if (ret)
+       if (dropped)
                kfree_skb(skb);
-       /* if ret == false -> packet has to be delivered to the interface */
-       return ret;
+       /* if dropped == false -> deliver to the interface */
+       return dropped;
 }
 
 /**
index bb01586206289929f8c5e43153b75f9c279b9f85..cffa92dd98778bf2ffdf11107243fcc0f60cab6b 100644 (file)
@@ -439,6 +439,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 
        INIT_HLIST_NODE(&gw_node->list);
        gw_node->orig_node = orig_node;
+       gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
+       gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
        atomic_set(&gw_node->refcount, 1);
 
        spin_lock_bh(&bat_priv->gw.list_lock);
index c002961da75d655deb813990f5706cf37fbd6d7d..a2fc843c22432e790980fa15653cf95e6c60b384 100644 (file)
@@ -479,6 +479,9 @@ out:
  */
 void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
 {
+       if (!vlan)
+               return;
+
        if (atomic_dec_and_test(&vlan->refcount)) {
                spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
                hlist_del_rcu(&vlan->list);
index b4824951010ba6b42bf7d7c7eb62c529ed340158..5809b39c1922320e8dbb353de212b472199c796d 100644 (file)
@@ -594,6 +594,12 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 
        /* increase the refcounter of the related vlan */
        vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
+                addr, BATADV_PRINT_VID(vid))) {
+               kfree(tt_local);
+               tt_local = NULL;
+               goto out;
+       }
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@ -1034,6 +1040,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
        struct batadv_tt_local_entry *tt_local_entry;
        uint16_t flags, curr_flags = BATADV_NO_FLAGS;
        struct batadv_softif_vlan *vlan;
+       void *tt_entry_exists;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
@@ -1061,11 +1068,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
         * immediately purge it
         */
        batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
-       hlist_del_rcu(&tt_local_entry->common.hash_entry);
+
+       tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+                                            batadv_compare_tt,
+                                            batadv_choose_tt,
+                                            &tt_local_entry->common);
+       if (!tt_entry_exists)
+               goto out;
+
+       /* extra call to free the local tt entry */
        batadv_tt_local_entry_free_ref(tt_local_entry);
 
        /* decrease the reference held for this vlan */
        vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (!vlan)
+               goto out;
+
        batadv_softif_vlan_free_ref(vlan);
        batadv_softif_vlan_free_ref(vlan);
 
@@ -1166,8 +1184,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
                        /* decrease the reference held for this vlan */
                        vlan = batadv_softif_vlan_get(bat_priv,
                                                      tt_common_entry->vid);
-                       batadv_softif_vlan_free_ref(vlan);
-                       batadv_softif_vlan_free_ref(vlan);
+                       if (vlan) {
+                               batadv_softif_vlan_free_ref(vlan);
+                               batadv_softif_vlan_free_ref(vlan);
+                       }
 
                        batadv_tt_local_entry_free_ref(tt_local);
                }
@@ -3207,8 +3227,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 
                        /* decrease the reference held for this vlan */
                        vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
-                       batadv_softif_vlan_free_ref(vlan);
-                       batadv_softif_vlan_free_ref(vlan);
+                       if (vlan) {
+                               batadv_softif_vlan_free_ref(vlan);
+                               batadv_softif_vlan_free_ref(vlan);
+                       }
 
                        batadv_tt_local_entry_free_ref(tt_local);
                }
index 7998fb27916568da087b2734a017355158044a75..92720f3fe57370137f22ae3d2b76b390da09e9a1 100644 (file)
@@ -7820,7 +7820,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
        /* Make sure we copy only the significant bytes based on the
         * encryption key size, and set the rest of the value to zeroes.
         */
-       memcpy(ev.key.val, key->val, sizeof(key->enc_size));
+       memcpy(ev.key.val, key->val, key->enc_size);
        memset(ev.key.val + key->enc_size, 0,
               sizeof(ev.key.val) - key->enc_size);
 
index 0b39dcc65b94f0aa571dc22dc0c97afbf4e3d744..1285eaf5dc222e7cf75f4da796f2075f098105ae 100644 (file)
@@ -1591,7 +1591,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
                break;
        }
 
-       if (skb_trimmed)
+       if (skb_trimmed && skb_trimmed != skb)
                kfree_skb(skb_trimmed);
 
        return err;
@@ -1636,7 +1636,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
                break;
        }
 
-       if (skb_trimmed)
+       if (skb_trimmed && skb_trimmed != skb)
                kfree_skb(skb_trimmed);
 
        return err;
index 3da5525eb8a2dc21e5f64638881a351a72a0d300..4d74a0639c4ccd040b6371665e6a74f8dca6f800 100644 (file)
@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
                + nla_total_size(1)     /* IFLA_BRPORT_FAST_LEAVE */
                + nla_total_size(1)     /* IFLA_BRPORT_LEARNING */
                + nla_total_size(1)     /* IFLA_BRPORT_UNICAST_FLOOD */
+               + nla_total_size(1)     /* IFLA_BRPORT_PROXYARP */
+               + nla_total_size(1)     /* IFLA_BRPORT_PROXYARP_WIFI */
                + 0;
 }
 
@@ -506,6 +508,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
        [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
        [IFLA_BRPORT_LEARNING]  = { .type = NLA_U8 },
        [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
+       [IFLA_BRPORT_PROXYARP]  = { .type = NLA_U8 },
+       [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
 };
 
 /* Change the state of the port and notify spanning tree */
index 4967262b27076af66347d20eca54b65a2e61d789..617088aee21d41ba98d4ef5ebee5d6c002efe029 100644 (file)
@@ -131,12 +131,12 @@ out_noerr:
        goto out;
 }
 
-static int skb_set_peeked(struct sk_buff *skb)
+static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
 {
        struct sk_buff *nskb;
 
        if (skb->peeked)
-               return 0;
+               return skb;
 
        /* We have to unshare an skb before modifying it. */
        if (!skb_shared(skb))
@@ -144,7 +144,7 @@ static int skb_set_peeked(struct sk_buff *skb)
 
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        skb->prev->next = nskb;
        skb->next->prev = nskb;
@@ -157,7 +157,7 @@ static int skb_set_peeked(struct sk_buff *skb)
 done:
        skb->peeked = 1;
 
-       return 0;
+       return skb;
 }
 
 /**
@@ -229,8 +229,9 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                                        continue;
                                }
 
-                               error = skb_set_peeked(skb);
-                               if (error)
+                               skb = skb_set_peeked(skb);
+                               error = PTR_ERR(skb);
+                               if (IS_ERR(skb))
                                        goto unlock_err;
 
                                atomic_inc(&skb->users);
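Returning the (possibly replaced) object or an ERR_PTR keeps the caller from holding a stale skb pointer after the list splice; the convention in a small hypothetical helper:

	#include <linux/err.h>
	#include <linux/skbuff.h>

	static struct sk_buff *demo_maybe_clone(struct sk_buff *skb)
	{
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		return nskb ? nskb : ERR_PTR(-ENOMEM);
	}

	/* caller: the variable is reassigned, so an error cannot leave a
	 * half-replaced pointer in use */
	skb = demo_maybe_clone(skb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
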
index 1ebdf1c0d1188c309d854bc9145c9b2f5b7b58a4..1cbd209192eacd6b7ec9a13f8180a5138760fc7e 100644 (file)
@@ -3514,8 +3514,6 @@ static int pktgen_thread_worker(void *arg)
 
        set_freezable();
 
-       __set_current_state(TASK_RUNNING);
-
        while (!kthread_should_stop()) {
                pkt_dev = next_to_run(t);
 
@@ -3560,7 +3558,6 @@ static int pktgen_thread_worker(void *arg)
 
                try_to_freeze();
        }
-       set_current_state(TASK_INTERRUPTIBLE);
 
        pr_debug("%s stopping all device\n", t->tsk->comm);
        pktgen_stop(t);
index 87b22c0bc08c2f33fa31948b8b2604f48b8009bc..b42f0e26f89e4cf2e37a8329da549eb5cd1200c5 100644 (file)
@@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
                        spin_lock_bh(&queue->syn_wait_lock);
                        while ((req = lopt->syn_table[i]) != NULL) {
                                lopt->syn_table[i] = req->dl_next;
+                               /* Because of the following del_timer_sync(),
+                                * we must release the spinlock here
+                                * or risk a deadlock.
+                                */
+                               spin_unlock_bh(&queue->syn_wait_lock);
                                atomic_inc(&lopt->qlen_dec);
-                               if (del_timer(&req->rsk_timer))
+                               if (del_timer_sync(&req->rsk_timer))
                                        reqsk_put(req);
                                reqsk_put(req);
+                               spin_lock_bh(&queue->syn_wait_lock);
                        }
                        spin_unlock_bh(&queue->syn_wait_lock);
                }
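The rule the new comment states, in isolation: never wait for a timer handler while holding a lock that the handler itself may take (lock and timer names below are hypothetical):

	spin_lock_bh(&demo_lock);
	/* ... detach the entry from the list ... */
	spin_unlock_bh(&demo_lock);	/* drop the lock first ...         */
	del_timer_sync(&demo_timer);	/* ... then wait for the handler   */
	spin_lock_bh(&demo_lock);	/* retake it to continue iterating */
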
index b6a19ca0f99e49c7406f1fedb6c59433bbd7fd38..7b84330e5d30693cf63fe2d04b7f64f2b8893362 100644 (file)
@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
 
        if (skb && frag_size) {
                skb->head_frag = 1;
-               if (virt_to_head_page(data)->pfmemalloc)
+               if (page_is_pfmemalloc(virt_to_head_page(data)))
                        skb->pfmemalloc = 1;
        }
        return skb;
@@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup);
  * Otherwise returns the provided skb. Returns NULL in error cases
  * (e.g. transport_len exceeds skb length or out-of-memory).
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
                                               unsigned int transport_len)
@@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
        unsigned int len = skb_transport_offset(skb) + transport_len;
        int ret;
 
-       if (skb->len < len) {
-               kfree_skb(skb);
+       if (skb->len < len)
                return NULL;
-       } else if (skb->len == len) {
+       else if (skb->len == len)
                return skb;
-       }
 
        skb_chk = skb_clone(skb, GFP_ATOMIC);
-       kfree_skb(skb);
-
        if (!skb_chk)
                return NULL;
 
@@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
  * If the skb has data beyond the given transport length, then a
  * trimmed & cloned skb is checked and returned.
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
                                     unsigned int transport_len,
@@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
 
        skb_chk = skb_checksum_maybe_trim(skb, transport_len);
        if (!skb_chk)
-               return NULL;
+               goto err;
 
-       if (!pskb_may_pull(skb_chk, offset)) {
-               kfree_skb(skb_chk);
-               return NULL;
-       }
+       if (!pskb_may_pull(skb_chk, offset))
+               goto err;
 
        __skb_pull(skb_chk, offset);
        ret = skb_chkf(skb_chk);
        __skb_push(skb_chk, offset);
 
-       if (ret) {
-               kfree_skb(skb_chk);
-               return NULL;
-       }
+       if (ret)
+               goto err;
 
        return skb_chk;
+
+err:
+       if (skb_chk && skb_chk != skb)
+               kfree_skb(skb_chk);
+
+       return NULL;
+
 }
 EXPORT_SYMBOL(skb_checksum_trimmed);
 
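Under the new ownership rule, a caller only frees what it was handed back if that is a different skb; a sketch of the intended call pattern (the validation callback name is hypothetical):

	skb_chk = skb_checksum_trimmed(skb, transport_len,
				       demo_validate_checksum);
	if (!skb_chk)
		return -EINVAL;

	/* ... inspect skb_chk ... */

	if (skb_chk != skb)
		kfree_skb(skb_chk);	/* free only the clone */
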
index 0917123790eaf09b001c97a733039185fdb0a800..35c47ddd04f0ee3eb965fd99b06bab2ee4670774 100644 (file)
@@ -756,7 +756,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
                return -ENODEV;
 
        /* Use already configured phy mode */
-       p->phy_interface = p->phy->interface;
+       if (p->phy_interface == PHY_INTERFACE_MODE_NA)
+               p->phy_interface = p->phy->interface;
        phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
                           p->phy_interface);
 
index 37c4bb89a7082bbe36b40d928f7fd1d95bfe8252..b0c6258ffb79a7cbcaaf1296e4842db52876b5b2 100644 (file)
@@ -2465,7 +2465,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
                key = l->key + 1;
                iter->pos++;
 
-               if (pos-- <= 0)
+               if (--pos <= 0)
                        break;
 
                l = NULL;
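The off-by-one, reduced: to stop on the pos-th visited entry, the counter must be decremented before the comparison. A sketch (the iteration helper is hypothetical; the bound check is the point):

	while ((l = demo_next_leaf(iter)) != NULL) {
		key = l->key + 1;
		iter->pos++;
		if (--pos <= 0)		/* stop on the pos-th entry ...      */
			break;		/* ... old `pos-- <= 0` ran one long */
	}
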
index 651cdf648ec4728bff6e709b0324b7d52ffd65ed..9fdfd9deac11dde85bc62803068fbe50e45837b8 100644 (file)
@@ -1435,33 +1435,35 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
        struct sk_buff *skb_chk;
        unsigned int transport_len;
        unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
-       int ret;
+       int ret = -EINVAL;
 
        transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
 
-       skb_get(skb);
        skb_chk = skb_checksum_trimmed(skb, transport_len,
                                       ip_mc_validate_checksum);
        if (!skb_chk)
-               return -EINVAL;
+               goto err;
 
-       if (!pskb_may_pull(skb_chk, len)) {
-               kfree_skb(skb_chk);
-               return -EINVAL;
-       }
+       if (!pskb_may_pull(skb_chk, len))
+               goto err;
 
        ret = ip_mc_check_igmp_msg(skb_chk);
-       if (ret) {
-               kfree_skb(skb_chk);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        if (skb_trimmed)
                *skb_trimmed = skb_chk;
-       else
+       /* free the now-unneeded clone */
+       else if (skb_chk != skb)
                kfree_skb(skb_chk);
 
-       return 0;
+       ret = 0;
+
+err:
+       if (ret && skb_chk && skb_chk != skb)
+               kfree_skb(skb_chk);
+
+       return ret;
 }
 
 /**
@@ -1470,7 +1472,7 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
  *
  * Checks whether an IPv4 packet is a valid IGMP packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  *  standard
@@ -1485,7 +1487,8 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
  */
 int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {
index 60021d0d9326ac691dcef21e1f9c20de5f8fe7c6..134957159c27eb9180e08b73360fe891574b4742 100644 (file)
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
        }
 
        spin_unlock(&queue->syn_wait_lock);
-       if (del_timer(&req->rsk_timer))
+       if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
                reqsk_put(req);
        return found;
 }
index fe8cc183411e052f6e0ba4afefbeaef1e77313cd..95ea633e8356eb9b419e4027f9954810194aa23c 100644 (file)
@@ -226,7 +226,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+                         niph, nth, tcp_hdr_size);
 }
 
 static bool
index 433231ccfb17fc6d01179247d1d81226803d18df..0330ab2e2b6329ced120cd9b7100a5a34f50e82b 100644 (file)
@@ -41,8 +41,6 @@ static int tcp_syn_retries_min = 1;
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
-static int min_sndbuf = SOCK_MIN_SNDBUF;
-static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
@@ -530,7 +528,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_tcp_wmem),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_sndbuf,
+               .extra1         = &one,
        },
        {
                .procname       = "tcp_notsent_lowat",
@@ -545,7 +543,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_tcp_rmem),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_rcvbuf,
+               .extra1         = &one,
        },
        {
                .procname       = "tcp_app_win",
@@ -758,7 +756,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_udp_rmem_min),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_rcvbuf,
+               .extra1         = &one
        },
        {
                .procname       = "udp_wmem_min",
@@ -766,7 +764,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_udp_wmem_min),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_sndbuf,
+               .extra1         = &one
        },
        { }
 };
index d7d4c2b79cf2f516f9e3f62c6fe4415e9bc137a0..0ea2e1c5d395ac979e9a301d006867d49b866ecb 100644 (file)
@@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
        req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
        if (req) {
                nsk = tcp_check_req(sk, skb, req, false);
-               if (!nsk)
+               if (!nsk || nsk == sk)
                        reqsk_put(req);
                return nsk;
        }
index 83aa604f9273c332c5a0e5399253d961ef92eb9a..1b8c5ba7d5f732ea2220ada929049c5c7cfd5e83 100644 (file)
@@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb)
 
        skb->sk = sk;
        skb->destructor = sock_efree;
-       dst = sk->sk_rx_dst;
+       dst = READ_ONCE(sk->sk_rx_dst);
 
        if (dst)
                dst = dst_check(dst, 0);
-       if (dst)
-               skb_dst_set_noref(skb, dst);
+       if (dst) {
+               /* DST_NOCACHE cannot be used without taking a reference */
+               if (dst->flags & DST_NOCACHE) {
+                       if (likely(atomic_inc_not_zero(&dst->__refcnt)))
+                               skb_dst_set(skb, dst);
+               } else {
+                       skb_dst_set_noref(skb, dst);
+               }
+       }
 }
 
 int udp_rcv(struct sk_buff *skb)
index 55d19861ab20f4a91b6b289be7ca3b0250df4531..548c6237b1e706f8ef72575a6a7dbad544b60fcc 100644 (file)
@@ -172,6 +172,8 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
                        *ppcpu_rt = NULL;
                }
        }
+
+       non_pcpu_rt->rt6i_pcpu = NULL;
 }
 
 static void rt6_release(struct rt6_info *rt)
index df8afe5ab31e4b8e75bf2fbf844f8b3e798edbba..9405b04eecc64f478960329da93f6e01d437954e 100644 (file)
@@ -143,34 +143,36 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
        struct sk_buff *skb_chk = NULL;
        unsigned int transport_len;
        unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
-       int ret;
+       int ret = -EINVAL;
 
        transport_len = ntohs(ipv6_hdr(skb)->payload_len);
        transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
 
-       skb_get(skb);
        skb_chk = skb_checksum_trimmed(skb, transport_len,
                                       ipv6_mc_validate_checksum);
        if (!skb_chk)
-               return -EINVAL;
+               goto err;
 
-       if (!pskb_may_pull(skb_chk, len)) {
-               kfree_skb(skb_chk);
-               return -EINVAL;
-       }
+       if (!pskb_may_pull(skb_chk, len))
+               goto err;
 
        ret = ipv6_mc_check_mld_msg(skb_chk);
-       if (ret) {
-               kfree_skb(skb_chk);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        if (skb_trimmed)
                *skb_trimmed = skb_chk;
-       else
+       /* free the now-unneeded clone */
+       else if (skb_chk != skb)
                kfree_skb(skb_chk);
 
-       return 0;
+       ret = 0;
+
+err:
+       if (ret && skb_chk && skb_chk != skb)
+               kfree_skb(skb_chk);
+
+       return ret;
 }
 
 /**
@@ -179,7 +181,7 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
  *
  * Checks whether an IPv6 packet is a valid MLD packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  *  standard
@@ -194,7 +196,8 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
  */
 int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {
index 6edb7b106de769728357174d0657c644f83e41e8..ebbb754c2111b73c87fe85aa58b3124e0a3032ef 100644 (file)
@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
 }
 
 static void
-synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+synproxy_send_tcp(const struct synproxy_net *snet,
+                 const struct sk_buff *skb, struct sk_buff *nskb,
                  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
                  struct ipv6hdr *niph, struct tcphdr *nth,
                  unsigned int tcp_hdr_size)
 {
-       struct net *net = nf_ct_net((struct nf_conn *)nfct);
+       struct net *net = nf_ct_net(snet->tmpl);
        struct dst_entry *dst;
        struct flowi6 fl6;
 
@@ -83,7 +84,8 @@ free_nskb:
 }
 
 static void
-synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+synproxy_send_client_synack(const struct synproxy_net *snet,
+                           const struct sk_buff *skb, const struct tcphdr *th,
                            const struct synproxy_options *opts)
 {
        struct sk_buff *nskb;
@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+       synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
                          niph, nth, tcp_hdr_size);
 }
 
@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+       synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
                          niph, nth, tcp_hdr_size);
 }
 
@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
 }
 
 static void
@@ -241,7 +243,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+                         niph, nth, tcp_hdr_size);
 }
 
 static bool
@@ -301,7 +304,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
                                          XT_SYNPROXY_OPT_SACK_PERM |
                                          XT_SYNPROXY_OPT_ECN);
 
-               synproxy_send_client_synack(skb, th, &opts);
+               synproxy_send_client_synack(snet, skb, th, &opts);
                return NF_DROP;
 
        } else if (th->ack && !(th->fin || th->rst || th->syn)) {
index 6090969937f8b6809f74c3d03f29a0703089eff1..d15586490cecaedcc29bc821163cd6c85544b0b8 100644 (file)
@@ -318,8 +318,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
 /* allocate dst with ip6_dst_ops */
 static struct rt6_info *__ip6_dst_alloc(struct net *net,
                                        struct net_device *dev,
-                                       int flags,
-                                       struct fib6_table *table)
+                                       int flags)
 {
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -336,10 +335,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
 
 static struct rt6_info *ip6_dst_alloc(struct net *net,
                                      struct net_device *dev,
-                                     int flags,
-                                     struct fib6_table *table)
+                                     int flags)
 {
-       struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+       struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
        if (rt) {
                rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
@@ -950,8 +948,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
        if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
                ort = (struct rt6_info *)ort->dst.from;
 
-       rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
-                            0, ort->rt6i_table);
+       rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
 
        if (!rt)
                return NULL;
@@ -983,8 +980,7 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
        struct rt6_info *pcpu_rt;
 
        pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
-                                 rt->dst.dev, rt->dst.flags,
-                                 rt->rt6i_table);
+                                 rt->dst.dev, rt->dst.flags);
 
        if (!pcpu_rt)
                return NULL;
@@ -997,32 +993,53 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
 /* It should be called with read_lock_bh(&tb6_lock) acquired */
 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
 {
-       struct rt6_info *pcpu_rt, *prev, **p;
+       struct rt6_info *pcpu_rt, **p;
 
        p = this_cpu_ptr(rt->rt6i_pcpu);
        pcpu_rt = *p;
 
-       if (pcpu_rt)
-               goto done;
+       if (pcpu_rt) {
+               dst_hold(&pcpu_rt->dst);
+               rt6_dst_from_metrics_check(pcpu_rt);
+       }
+       return pcpu_rt;
+}
+
+static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
+{
+       struct fib6_table *table = rt->rt6i_table;
+       struct rt6_info *pcpu_rt, *prev, **p;
 
        pcpu_rt = ip6_rt_pcpu_alloc(rt);
        if (!pcpu_rt) {
                struct net *net = dev_net(rt->dst.dev);
 
-               pcpu_rt = net->ipv6.ip6_null_entry;
-               goto done;
+               dst_hold(&net->ipv6.ip6_null_entry->dst);
+               return net->ipv6.ip6_null_entry;
        }
 
-       prev = cmpxchg(p, NULL, pcpu_rt);
-       if (prev) {
-               /* If someone did it before us, return prev instead */
+       read_lock_bh(&table->tb6_lock);
+       if (rt->rt6i_pcpu) {
+               p = this_cpu_ptr(rt->rt6i_pcpu);
+               prev = cmpxchg(p, NULL, pcpu_rt);
+               if (prev) {
+                       /* If someone did it before us, return prev instead */
+                       dst_destroy(&pcpu_rt->dst);
+                       pcpu_rt = prev;
+               }
+       } else {
+               /* rt has been removed from the fib6 tree
+                * before we have a chance to acquire the read_lock.
+                * In this case, don't bother to create a pcpu rt
+                * since rt is going away anyway.  The next
+                * dst_check() will trigger a re-lookup.
+                */
                dst_destroy(&pcpu_rt->dst);
-               pcpu_rt = prev;
+               pcpu_rt = rt;
        }
-
-done:
        dst_hold(&pcpu_rt->dst);
        rt6_dst_from_metrics_check(pcpu_rt);
+       read_unlock_bh(&table->tb6_lock);
        return pcpu_rt;
 }
 
@@ -1097,9 +1114,22 @@ redo_rt6_select:
                rt->dst.lastuse = jiffies;
                rt->dst.__use++;
                pcpu_rt = rt6_get_pcpu_route(rt);
-               read_unlock_bh(&table->tb6_lock);
+
+               if (pcpu_rt) {
+                       read_unlock_bh(&table->tb6_lock);
+               } else {
+                       /* We have to do the read_unlock first
+                        * because rt6_make_pcpu_route() may trigger
+                        * ip6_dst_gc(), which will take the write_lock.
+                        */
+                       dst_hold(&rt->dst);
+                       read_unlock_bh(&table->tb6_lock);
+                       pcpu_rt = rt6_make_pcpu_route(rt);
+                       dst_release(&rt->dst);
+               }
 
                return pcpu_rt;
        }
 }
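
The dst_hold()/read_unlock/rt6_make_pcpu_route()/dst_release() sequence above is the standard cure for this lock-ordering hazard: pin the object with a reference, drop the read lock, and only then enter the path that may take the write lock. A compilable sketch with POSIX rwlocks, where slow_make() stands in for any write-lock-taking path (all names illustrative):

    #include <pthread.h>
    #include <stdatomic.h>

    struct obj { atomic_int refcnt; };

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

    static void hold(struct obj *o) { atomic_fetch_add(&o->refcnt, 1); }
    static void put(struct obj *o)  { atomic_fetch_sub(&o->refcnt, 1); }

    /* Stands in for rt6_make_pcpu_route(): needs the write lock, so it
     * must never be entered with the read lock still held. */
    static struct obj *slow_make(struct obj *o)
    {
        pthread_rwlock_wrlock(&lock);
        pthread_rwlock_unlock(&lock);
        return o;
    }

    static struct obj *lookup_or_make(struct obj *o)
    {
        struct obj *res;

        pthread_rwlock_rdlock(&lock);
        hold(o);                          /* pin o across the unlock */
        pthread_rwlock_unlock(&lock);     /* now slow_make() cannot deadlock */
        res = slow_make(o);
        put(o);
        return res;
    }

    int main(void)
    {
        struct obj o = { 0 };

        return lookup_or_make(&o) ? 0 : 1;
    }
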
 
@@ -1555,7 +1585,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        if (unlikely(!idev))
                return ERR_PTR(-ENODEV);
 
-       rt = ip6_dst_alloc(net, dev, 0, NULL);
+       rt = ip6_dst_alloc(net, dev, 0);
        if (unlikely(!rt)) {
                in6_dev_put(idev);
                dst = ERR_PTR(-ENOMEM);
@@ -1742,7 +1772,8 @@ int ip6_route_add(struct fib6_config *cfg)
        if (!table)
                goto out;
 
-       rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+       rt = ip6_dst_alloc(net, NULL,
+                          (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
 
        if (!rt) {
                err = -ENOMEM;
@@ -1831,6 +1862,7 @@ int ip6_route_add(struct fib6_config *cfg)
                int gwa_type;
 
                gw_addr = &cfg->fc_gateway;
+               gwa_type = ipv6_addr_type(gw_addr);
 
                /* if gw_addr is local we will fail to detect this in case
                 * address is still TENTATIVE (DAD in progress). rt6_lookup()
@@ -1838,11 +1870,12 @@ int ip6_route_add(struct fib6_config *cfg)
                 * prefix route was assigned to, which might be non-loopback.
                 */
                err = -EINVAL;
-               if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0))
+               if (ipv6_chk_addr_and_flags(net, gw_addr,
+                                           gwa_type & IPV6_ADDR_LINKLOCAL ?
+                                           dev : NULL, 0, 0))
                        goto out;
 
                rt->rt6i_gateway = *gw_addr;
-               gwa_type = ipv6_addr_type(gw_addr);
 
                if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
                        struct rt6_info *grt;
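
The reworked check reflects IPv6 address scoping: a link-local gateway (fe80::/10) is unique only per link, so the "is this address already ours?" test must be confined to the egress device, while other addresses are checked host-wide. A self-contained sketch of the classification behind that decision (illustrative code, not the kernel's):

    #include <arpa/inet.h>
    #include <stdio.h>

    /* fe80::/10: first byte 0xfe, top two bits of the second byte 10. */
    static int is_link_local(const struct in6_addr *a)
    {
        return a->s6_addr[0] == 0xfe && (a->s6_addr[1] & 0xc0) == 0x80;
    }

    int main(void)
    {
        struct in6_addr a;

        inet_pton(AF_INET6, "fe80::1", &a);
        printf("fe80::1     link-local: %d\n", is_link_local(&a));
        inet_pton(AF_INET6, "2001:db8::1", &a);
        printf("2001:db8::1 link-local: %d\n", is_link_local(&a));
        return 0;
    }
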
@@ -2397,7 +2430,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 {
        struct net *net = dev_net(idev->dev);
        struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
-                                           DST_NOCOUNT, NULL);
+                                           DST_NOCOUNT);
        if (!rt)
                return ERR_PTR(-ENOMEM);
 
index 6748c4277affad71cd721e3a985af10c31c047ad..7a6cea5e427414062f408cfa66f57808d48382d7 100644 (file)
@@ -943,7 +943,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
                                   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
        if (req) {
                nsk = tcp_check_req(sk, skb, req, false);
-               if (!nsk)
+               if (!nsk || nsk == sk)
                        reqsk_put(req);
                return nsk;
        }
index 247552a7f6c2f23a1e4bc89b647d8d37680bf2c3..3ece7d1034c81ae8749cada074fbebecbe06d57f 100644 (file)
@@ -92,14 +92,15 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
 static inline void
 minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
 {
-       int j = MAX_THR_RATES;
-       struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats;
+       int j;
+       struct minstrel_rate_stats *tmp_mrs;
        struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
 
-       while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) >
-              minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
-               j--;
+       for (j = MAX_THR_RATES; j > 0; --j) {
                tmp_mrs = &mi->r[tp_list[j - 1]].stats;
+               if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
+                   minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+                       break;
        }
 
        if (j < MAX_THR_RATES - 1)
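
The rewritten loop is an insertion-position scan over tp_list, which is kept sorted by descending throughput: walk from the tail and stop at the first entry at least as good as the candidate. It also fixes the old first iteration, which indexed mi->r[j - 1] directly instead of going through tp_list. A standalone sketch of the scan, with plain integers standing in for minstrel_get_tp_avg() (names illustrative):

    #include <stdio.h>

    #define NRATES 4    /* length of the sorted index list */

    static int find_slot(const int *tp, const int *list, int cand)
    {
        int j;

        /* Walk the descending list from the tail; stop at the first
         * entry whose throughput is at least the candidate's. */
        for (j = NRATES; j > 0; --j)
            if (tp[cand] <= tp[list[j - 1]])
                break;
        return j;
    }

    int main(void)
    {
        int tp[]   = { 90, 40, 70, 60, 65 };  /* throughput per rate index */
        int list[] = { 0, 2, 3, 1 };          /* sorted by tp: 90,70,60,40 */

        printf("rate 4 (tp 65) would enter at slot %d\n",
               find_slot(tp, list, 4));       /* prints slot 2 */
        return 0;
    }
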
index 651039ad1681db0434cff21275f1bcbe3f8464bc..3c20d02aee738c5293a5b449f28ebff596c7232d 100644 (file)
@@ -292,7 +292,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
 {
        struct nf_conn *tmpl;
 
-       tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL);
+       tmpl = kzalloc(sizeof(*tmpl), flags);
        if (tmpl == NULL)
                return NULL;
 
@@ -303,7 +303,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
        if (zone) {
                struct nf_conntrack_zone *nf_ct_zone;
 
-               nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC);
+               nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
                if (!nf_ct_zone)
                        goto out_free;
                nf_ct_zone->id = zone;
@@ -1544,10 +1544,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
        sz = nr_slots * sizeof(struct hlist_nulls_head);
        hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                                        get_order(sz));
-       if (!hash) {
-               printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
+       if (!hash)
                hash = vzalloc(sz);
-       }
 
        if (hash && nulls)
                for (i = 0; i < nr_slots; i++)
index 71f1e9fdfa18fb9b1f2f2730ca21af42dad98eea..d7f1685279034b5e3b62748284d24c52cc1f8907 100644 (file)
@@ -353,10 +353,8 @@ static int __net_init synproxy_net_init(struct net *net)
        int err = -ENOMEM;
 
        ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
-       if (IS_ERR(ct)) {
-               err = PTR_ERR(ct);
+       if (!ct)
                goto err1;
-       }
 
        if (!nfct_seqadj_ext_add(ct))
                goto err2;
index c6630030c9121c7af27a3052ad776cd6646eb601..43ddeee404e91f97908fb9228c1e873931b75bcc 100644 (file)
@@ -202,9 +202,10 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
                goto err1;
 
        ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
-       ret = PTR_ERR(ct);
-       if (IS_ERR(ct))
+       if (!ct) {
+               ret = -ENOMEM;
                goto err2;
+       }
 
        ret = 0;
        if ((info->ct_events || info->exp_events) &&
index d8e2e3918ce2fd95637c4cba8bfc4886feb91ea6..67d2104778636c62e7d3fa94c73ce5fb2cd4cb7d 100644 (file)
@@ -1096,6 +1096,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
 
        err = __netlink_insert(table, sk);
        if (err) {
+               /* If the hashtable backend returns -EBUSY here,
+                * it must not escape to the caller.
+                */
+               if (unlikely(err == -EBUSY))
+                       err = -EOVERFLOW;
                if (err == -EEXIST)
                        err = -EADDRINUSE;
                nlk_sk(sk)->portid = 0;
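
The added remapping enforces a common rule at API boundaries: transient, backend-internal error codes (here -EBUSY from a hashtable that is resizing) must be translated into errors the interface is documented to return. A sketch of that translation step, with a hypothetical helper name:

    #include <errno.h>

    /* Translate backend-internal codes at the API boundary. */
    static int fixup_insert_error(int err)
    {
        if (err == -EBUSY)        /* backend is resizing: transient */
            return -EOVERFLOW;
        if (err == -EEXIST)       /* portid already bound */
            return -EADDRINUSE;
        return err;
    }

    int main(void)
    {
        return fixup_insert_error(-EBUSY) == -EOVERFLOW ? 0 : 1;
    }
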
index 8a8c0b8b4f63a4bd8e5ff776250189558e6fcb1e..ee34f474ad1465087da8fd506410559abe47d81e 100644 (file)
@@ -273,28 +273,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
        return 0;
 }
 
-static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
-                       __be32 *addr, __be32 new_addr)
+static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
+                                 __be32 addr, __be32 new_addr)
 {
        int transport_len = skb->len - skb_transport_offset(skb);
 
+       if (nh->frag_off & htons(IP_OFFSET))
+               return;
+
        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
-                                                *addr, new_addr, 1);
+                                                addr, new_addr, 1);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);
 
                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
-                                                        *addr, new_addr, 1);
+                                                        addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }
+}
 
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+                       __be32 *addr, __be32 new_addr)
+{
+       update_ip_l4_checksum(skb, nh, *addr, new_addr);
        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
        *addr = new_addr;
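
The early return on a non-zero fragment offset exists because only the first IPv4 fragment carries the TCP/UDP header; in later fragments the bytes at the transport offset are payload, and rewriting them as a checksum would corrupt data. A self-contained sketch of the fragment test (IP_OFFSET is the 13-bit fragment-offset mask, as in the kernel):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    #define IP_OFFSET 0x1FFF    /* fragment offset mask, host byte order */

    static int is_first_fragment(uint16_t frag_off_net)
    {
        /* A non-zero offset means a later fragment: no L4 header. */
        return (ntohs(frag_off_net) & IP_OFFSET) == 0;
    }

    int main(void)
    {
        printf("offset 0:   first=%d\n", is_first_fragment(htons(0)));
        printf("offset 185: first=%d\n", is_first_fragment(htons(185)));
        return 0;
    }
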
index 9a6b4f66187cf3e5ab533cd01344c9856834ebb7..140a44a5f7b7f1c08b3f329707b72fc75a9a81fe 100644 (file)
@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
 
        /* check for all kinds of wrapping and the like */
        start = (unsigned long)optval;
-       if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
+       if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
                ret = -EINVAL;
                goto out;
        }
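
The new bound works because it rejects, before the addition is ever performed, exactly those lengths for which "len + PAGE_SIZE - 1" would overflow a signed int. A minimal sketch, assuming 4 KiB pages for illustration:

    #include <limits.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096    /* assumption: 4 KiB pages */

    static int len_ok(int len)
    {
        /* Rejects exactly the values for which "len + PAGE_SIZE - 1"
         * would overflow an int, without performing that addition. */
        return len >= 0 && len <= INT_MAX - PAGE_SIZE + 1;
    }

    int main(void)
    {
        printf("%d %d\n", len_ok(INT_MAX), len_ok(4096));    /* 0 1 */
        return 0;
    }
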
index a42a3b257226178eb5af04054a17813c04368613..268545050ddbd67245ead8394a82f44503657ea5 100644 (file)
@@ -98,6 +98,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                        return ret;
                ret = ACT_P_CREATED;
        } else {
+               if (bind)
+                       return 0;
                if (!ovr) {
                        tcf_hash_release(a, bind);
                        return -EEXIST;
index 21ca33c9f0368b21cdb00fbdbbca4851c2ad87a2..a9ba030435a2a5c2ca4bea21d62c6d631c6168f4 100644 (file)
@@ -288,10 +288,26 @@ begin:
 
 static void fq_codel_reset(struct Qdisc *sch)
 {
-       struct sk_buff *skb;
+       struct fq_codel_sched_data *q = qdisc_priv(sch);
+       int i;
 
-       while ((skb = fq_codel_dequeue(sch)) != NULL)
-               kfree_skb(skb);
+       INIT_LIST_HEAD(&q->new_flows);
+       INIT_LIST_HEAD(&q->old_flows);
+       for (i = 0; i < q->flows_cnt; i++) {
+               struct fq_codel_flow *flow = q->flows + i;
+
+               while (flow->head) {
+                       struct sk_buff *skb = dequeue_head(flow);
+
+                       qdisc_qstats_backlog_dec(sch, skb);
+                       kfree_skb(skb);
+               }
+
+               INIT_LIST_HEAD(&flow->flowchain);
+               codel_vars_init(&flow->cvars);
+       }
+       memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
+       sch->q.qlen = 0;
 }
 
 static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
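
Draining each flow directly, instead of looping on the full dequeue path, lets the reset also reinitialize the per-flow CoDel state and keep the backlog and queue-length counters exact. A reduced sketch of the per-flow drain with simplified types (illustrative, not the qdisc code):

    #include <stdlib.h>

    struct pkt  { struct pkt *next; unsigned int len; };
    struct flow { struct pkt *head; };

    static void drain_flow(struct flow *f, unsigned int *backlog,
                           unsigned int *qlen)
    {
        while (f->head) {
            struct pkt *p = f->head;

            f->head = p->next;     /* pop from the head */
            *backlog -= p->len;    /* mirrors qdisc_qstats_backlog_dec() */
            (*qlen)--;
            free(p);
        }
    }

    int main(void)
    {
        unsigned int backlog = 300, qlen = 2;
        struct pkt *b = calloc(1, sizeof(*b)), *a = calloc(1, sizeof(*a));
        struct flow f;

        if (!a || !b)
            return 1;
        b->len = 200;
        a->len = 100;
        a->next = b;
        f.head = a;

        drain_flow(&f, &backlog, &qlen);
        return (backlog == 0 && qlen == 0) ? 0 : 1;
    }
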
index 9cb8522d8d22a826caaec968e82c77516339ca01..f3d3fb42b8735e76aa54bd4c433574f7afc43b0e 100755 (executable)
@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
 my $kconfig = $ARGV[1];
 my $lsmod_file = $ENV{'LSMOD'};
 
-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
+my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
 chomp @makefiles;
 
 my %depends;
index 0b9847affbeccbc4b1a807b642120a3feaa397d0..374ea53288ca25ff6093467012afd2607192f505 100644 (file)
@@ -5190,6 +5190,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+       SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5291,6 +5292,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
index 2ae9619443d15dbeaf128cab385347db6bf26369..1d651b8a89570404cd306f239b17e939b1d5fa81 100644 (file)
@@ -30,6 +30,9 @@ config SND_SOC_GENERIC_DMAENGINE_PCM
        bool
        select SND_DMAENGINE_PCM
 
+config SND_SOC_TOPOLOGY
+       bool
+
 # All the supported SoCs
 source "sound/soc/adi/Kconfig"
 source "sound/soc/atmel/Kconfig"
index e189903fabf42958eff143e487999f0f013b85d7..669648b41d3027adf29ead27eda5f72a6ed0aaf1 100644 (file)
@@ -1,6 +1,9 @@
 snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
 snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o
+
+ifneq ($(CONFIG_SND_SOC_TOPOLOGY),)
 snd-soc-core-objs += soc-topology.o
+endif
 
 ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
 snd-soc-core-objs += soc-generic-dmaengine-pcm.o
index 1fab9778807a0015f2578f0504306ed852eb4932..0450593980fd3525a65feca17dccea0e9940506e 100644 (file)
@@ -638,7 +638,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
        int err = -ENODEV;
 
        down_read(&chip->shutdown_rwsem);
-       if (chip->probing && chip->in_pm)
+       if (chip->probing || chip->in_pm)
                err = 0;
        else if (!chip->shutdown)
                err = usb_autopm_get_interface(chip->pm_intf);
index de165a1b92402ac7a6267bd0a0c5aa30a0053c92..20b56eb987f89f21fe20e53decf8ee1fbe275a2e 100644 (file)
@@ -521,6 +521,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                goto out_child;
        }
 
+       /*
+        * Normally perf_session__new would do this, but it doesn't have the
+        * evlist.
+        */
+       if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
+               pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
+               rec->tool.ordered_events = false;
+       }
+
        if (!rec->evlist->nr_groups)
                perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
 
@@ -965,9 +974,11 @@ static struct record record = {
        .tool = {
                .sample         = process_sample_event,
                .fork           = perf_event__process_fork,
+               .exit           = perf_event__process_exit,
                .comm           = perf_event__process_comm,
                .mmap           = perf_event__process_mmap,
                .mmap2          = perf_event__process_mmap2,
+               .ordered_events = true,
        },
 };
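
The two hunks above form a runtime capability fallback: ordered processing is now the default, but a feature that depends on per-sample IDs is disabled up front, with a single warning, rather than failing midway through a session. A minimal sketch, where have_sample_id_all() is a hypothetical stand-in for perf_evlist__sample_id_all():

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical probe standing in for perf_evlist__sample_id_all(). */
    static bool have_sample_id_all(void)
    {
        return false;    /* pretend the kernel lacks sample_id_all */
    }

    int main(void)
    {
        bool ordered_events = true;

        if (ordered_events && !have_sample_id_all()) {
            fprintf(stderr, "WARNING: No sample_id_all support, "
                            "falling back to unordered processing\n");
            ordered_events = false;
        }
        return 0;
    }
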
 
index ecf319728f25d649768e33b3e1f274d04432f3fc..6135cc07213cb0d3379d58033ea87aa8dc580878 100644 (file)
@@ -601,8 +601,8 @@ static void display_sig(int sig __maybe_unused)
 
 static void display_setup_sig(void)
 {
-       signal(SIGSEGV, display_sig);
-       signal(SIGFPE,  display_sig);
+       signal(SIGSEGV, sighandler_dump_stack);
+       signal(SIGFPE, sighandler_dump_stack);
        signal(SIGINT,  display_sig);
        signal(SIGQUIT, display_sig);
        signal(SIGTERM, display_sig);
index 094ddaee104c73d7caae22d851d79629c4715cd3..d31fac19c30b2d298ab2cf3a710b9b27c5764144 100644 (file)
@@ -638,7 +638,7 @@ ifndef DESTDIR
 prefix ?= $(HOME)
 endif
 bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(abspath $(prefix)/$(bindir_relative))
 mandir = share/man
 infodir = share/info
 perfexecdir = libexec/perf-core
index 7ff682770fdb16e71b368af16efad0edd9443d87..f1a4c833121e306a364851328f6409cb22d779eb 100644 (file)
@@ -1387,6 +1387,24 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
                                                        event->fork.ptid);
        int err = 0;
 
+       if (dump_trace)
+               perf_event__fprintf_task(event, stdout);
+
+       /*
+        * There may be an existing thread that is not actually the parent,
+        * either because we are processing events out of order, or because the
+        * (fork) event that would have removed the thread was lost. Assume the
+        * latter case and continue on as best we can.
+        */
+       if (parent->pid_ != (pid_t)event->fork.ppid) {
+               dump_printf("removing erroneous parent thread %d/%d\n",
+                           parent->pid_, parent->tid);
+               machine__remove_thread(machine, parent);
+               thread__put(parent);
+               parent = machine__findnew_thread(machine, event->fork.ppid,
+                                                event->fork.ptid);
+       }
+
        /* if a thread currently exists for the thread id remove it */
        if (thread != NULL) {
                machine__remove_thread(machine, thread);
@@ -1395,8 +1413,6 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
 
        thread = machine__findnew_thread(machine, event->fork.pid,
                                         event->fork.tid);
-       if (dump_trace)
-               perf_event__fprintf_task(event, stdout);
 
        if (thread == NULL || parent == NULL ||
            thread__fork(thread, parent, sample->time) < 0) {
index 53e8bb7bc8521a09f1347d48de2a0dd1eeb5e0bc..2a5d8d7698aedb8c82bdf8488f1fb62ded5b438a 100644 (file)
@@ -85,7 +85,7 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
        else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
                update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
-               update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+               update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, TRANSACTION_START))
                update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, ELISION_START))
@@ -398,20 +398,18 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
                                " #   %5.2f%% aborted cycles         ",
                                100.0 * ((total2-avg) / total));
        } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
-                  avg > 0 &&
                   runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
 
-               if (total)
+               if (avg)
                        ratio = total / avg;
 
                fprintf(out, " # %8.0f cycles / transaction   ", ratio);
        } else if (perf_stat_evsel__is(evsel, ELISION_START) &&
-                  avg > 0 &&
                   runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
 
-               if (total)
+               if (avg)
                        ratio = total / avg;
 
                fprintf(out, " # %8.0f cycles / elision       ", ratio);
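
The guard change matters because the divisor in "total / avg" is avg, so avg is what must be tested before dividing; guarding on total left a division by zero reachable. A minimal sketch of the corrected guard:

    #include <stdio.h>

    /* Test the divisor, not the numerator. */
    static double safe_ratio(double total, double avg)
    {
        return avg ? total / avg : 0.0;
    }

    int main(void)
    {
        printf("%.0f %.0f\n", safe_ratio(100.0, 4.0),    /* 25 */
                              safe_ratio(100.0, 0.0));   /* 0  */
        return 0;
    }
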
index 28c4b746baa19bef9830814c4fe7c69f1be0b06b..0a9ae8014729c085ffd2872e2bc13871f79c9908 100644 (file)
@@ -191,6 +191,12 @@ static int thread__clone_map_groups(struct thread *thread,
        if (thread->pid_ == parent->pid_)
                return 0;
 
+       if (thread->mg == parent->mg) {
+               pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
+                        thread->pid_, thread->tid, parent->pid_, parent->tid);
+               return 0;
+       }
+
        /* But this one is new process, copy maps. */
        for (i = 0; i < MAP__NR_TYPES; ++i)
                if (map_groups__clone(thread->mg, parent->mg, i) < 0)