git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branches 'arm/omap', 'arm/msm', 'arm/smmu', 'arm/tegra', 'x86/vt-d', 'x86/amd...
author Joerg Roedel <jroedel@suse.de>
Tue, 25 Aug 2015 09:39:50 +0000 (11:39 +0200)
committer Joerg Roedel <jroedel@suse.de>
Tue, 25 Aug 2015 09:39:50 +0000 (11:39 +0200)
448 files changed:
.get_maintainer.ignore [new file with mode: 0644]
.mailmap
Documentation/Intel-IOMMU.txt
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/iommu/arm,smmu.txt
Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
Documentation/devicetree/bindings/phy/ti-phy.txt
Documentation/input/alps.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/atomic.h
arch/arc/include/asm/ptrace.h
arch/arc/include/asm/spinlock.h
arch/arc/include/asm/spinlock_types.h
arch/arc/include/uapi/asm/ptrace.h
arch/arc/kernel/setup.c
arch/arc/kernel/time.c
arch/arc/lib/memcpy-archs.S
arch/arc/lib/memset-archs.S
arch/arc/plat-axs10x/axs10x.c
arch/arm/Makefile
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/k2e.dtsi
arch/arm/boot/dts/k2hk.dtsi
arch/arm/boot/dts/k2l.dtsi
arch/arm/boot/dts/keystone.dtsi
arch/arm/boot/dts/omap2430.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/ste-dbx5x0.dtsi
arch/arm/kernel/entry-common.S
arch/arm/kernel/head.S
arch/arm/kernel/vdso.c
arch/arm/lib/uaccess_with_memcpy.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-omap2/omap-wakeupgen.c
arch/arm/vdso/Makefile
arch/arm64/kernel/signal32.c
arch/arm64/kernel/vdso.c
arch/mips/Kconfig
arch/mips/ath79/setup.c
arch/mips/cavium-octeon/smp.c
arch/mips/include/asm/mach-bcm63xx/dma-coherence.h [deleted file]
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/smp.h
arch/mips/include/asm/stackframe.h
arch/mips/kernel/genex.S
arch/mips/kernel/mips-mt-fpaff.c
arch/mips/kernel/prom.c
arch/mips/kernel/relocate_kernel.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/signal32.c
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/kernel/unaligned.c
arch/mips/lantiq/irq.c
arch/mips/loongson64/loongson-3/smp.c
arch/mips/mm/cache.c
arch/mips/mm/fault.c
arch/mips/mti-malta/malta-int.c
arch/mips/mti-malta/malta-time.c
arch/mips/mti-sead3/sead3-time.c
arch/mips/netlogic/common/smp.c
arch/mips/paravirt/paravirt-smp.c
arch/mips/pistachio/time.c
arch/mips/pmcs-msp71xx/msp_smp.c
arch/mips/ralink/irq.c
arch/mips/sgi-ip27/ip27-irq.c
arch/mips/sibyte/bcm1480/smp.c
arch/mips/sibyte/sb1250/smp.c
arch/powerpc/kernel/signal_32.c
arch/s390/kvm/kvm-s390.c
arch/sparc/include/asm/visasm.h
arch/sparc/lib/NG4memcpy.S
arch/sparc/lib/VISsave.S
arch/sparc/lib/ksyms.c
arch/tile/kernel/compat_signal.c
arch/x86/entry/entry_64_compat.S
arch/x86/include/asm/sigcontext.h
arch/x86/include/asm/switch_to.h
arch/x86/include/uapi/asm/sigcontext.h
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_cqm.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/init.c
arch/x86/kernel/process.c
arch/x86/kernel/signal.c
arch/x86/kernel/step.c
arch/x86/kvm/mtrr.c
arch/x86/kvm/x86.c
arch/x86/math-emu/fpu_entry.c
arch/x86/math-emu/fpu_system.h
arch/x86/math-emu/get_address.c
arch/x86/xen/Kconfig
arch/x86/xen/Makefile
arch/x86/xen/xen-ops.h
block/blk-settings.c
crypto/authencesn.c
drivers/acpi/video_detect.c
drivers/ata/ahci_brcmstb.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata.h
drivers/ata/sata_sx4.c
drivers/base/regmap/regcache-rbtree.c
drivers/block/rbd.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkfront.c
drivers/block/zram/zram_drv.c
drivers/char/hw_random/core.c
drivers/clk/pxa/clk-pxa3xx.c
drivers/clocksource/sh_cmt.c
drivers/clocksource/timer-imx-gpt.c
drivers/cpufreq/exynos-cpufreq.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/crypto/caam/caamhash.c
drivers/crypto/ixp4xx_crypto.c
drivers/crypto/nx/nx-sha256.c
drivers/crypto/nx/nx-sha512.c
drivers/crypto/qat/qat_common/qat_algs.c
drivers/dma/dmaengine.c
drivers/edac/ppc4xx_edac.c
drivers/extcon/extcon-palmas.c
drivers/extcon/extcon.c
drivers/firmware/broadcom/bcm47xx_nvram.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/hid/hid-input.c
drivers/hid/hid-uclogic.c
drivers/hid/wacom_sys.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/g762.c
drivers/hwmon/nct7904.c
drivers/i2c/busses/i2c-bfin-twi.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-slave-eeprom.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/input/joystick/turbografx.c
drivers/input/keyboard/gpio_keys_polled.c
drivers/input/misc/axp20x-pek.c
drivers/input/misc/twl4030-vibra.c
drivers/input/mouse/alps.c
drivers/input/mouse/elantech.c
drivers/input/mouse/elantech.h
drivers/iommu/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_v2.c
drivers/iommu/arm-smmu-v3.c
drivers/iommu/arm-smmu.c
drivers/iommu/dmar.c
drivers/iommu/fsl_pamu.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/io-pgtable.c
drivers/iommu/io-pgtable.h
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/irq_remapping.c
drivers/iommu/msm_iommu.c
drivers/iommu/omap-iommu-debug.c
drivers/iommu/omap-iommu.c
drivers/iommu/omap-iommu.h
drivers/iommu/omap-iopgtable.h
drivers/iommu/tegra-smmu.c
drivers/irqchip/irq-crossbar.c
drivers/irqchip/irq-mips-gic.c
drivers/md/dm-cache-policy-mq.c
drivers/md/dm-cache-policy-smq.c
drivers/md/dm-thin-metadata.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/persistent-data/dm-btree-internal.h
drivers/md/persistent-data/dm-btree-remove.c
drivers/md/persistent-data/dm-btree-spine.c
drivers/md/persistent-data/dm-btree.c
drivers/md/raid1.c
drivers/md/raid5.c
drivers/media/dvb-frontends/Kconfig
drivers/media/pci/cobalt/Kconfig
drivers/media/pci/cobalt/cobalt-irq.c
drivers/media/pci/mantis/mantis_dma.c
drivers/media/rc/ir-rc5-decoder.c
drivers/media/rc/ir-rc6-decoder.c
drivers/media/rc/nuvoton-cir.c
drivers/media/rc/nuvoton-cir.h
drivers/media/rc/rc-core-priv.h
drivers/media/rc/rc-ir-raw.c
drivers/media/rc/rc-loopback.c
drivers/media/rc/rc-main.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/memory/omap-gpmc.c
drivers/memory/tegra/tegra114.c
drivers/memory/tegra/tegra124.c
drivers/memory/tegra/tegra30.c
drivers/mfd/Kconfig
drivers/mfd/arizona-core.c
drivers/misc/eeprom/at24.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/fs_enet/mac-fec.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/micrel/ks8842.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
drivers/net/ethernet/ti/netcp.h
drivers/net/ethernet/ti/netcp_core.c
drivers/net/hamradio/mkiss.c
drivers/net/ntb_netdev.c
drivers/net/phy/phy.c
drivers/net/phy/smsc.c
drivers/net/ppp/ppp_generic.c
drivers/net/usb/qmi_wwan.c
drivers/net/virtio_net.c
drivers/net/wan/cosa.c
drivers/net/wireless/b43/tables_nphy.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
drivers/net/wireless/rsi/rsi_91x_usb_ops.c
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/rtl8723be/sw.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/ntb/ntb.c
drivers/ntb/ntb_transport.c
drivers/pci/Kconfig
drivers/pci/probe.c
drivers/phy/phy-sun4i-usb.c
drivers/phy/phy-ti-pipe3.c
drivers/platform/chrome/Kconfig
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libiscsi.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_pm.c
drivers/scsi/sd.c
drivers/staging/comedi/drivers/das1800.c
drivers/staging/lustre/lustre/obdclass/debug.c
drivers/staging/vt6655/device_main.c
drivers/target/iscsi/iscsi_target.c
drivers/target/target_core_configfs.c
drivers/target/target_core_hba.c
drivers/target/target_core_spc.c
drivers/thermal/cpu_cooling.c
drivers/thermal/hisi_thermal.c
drivers/thermal/power_allocator.c
drivers/thermal/samsung/Kconfig
drivers/thermal/samsung/exynos_tmu.c
drivers/thermal/thermal_core.c
drivers/usb/chipidea/core.c
drivers/usb/chipidea/host.c
drivers/usb/chipidea/host.h
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/function/f_printer.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/udc/bdc/bdc_ep.c
drivers/usb/gadget/udc/udc-core.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/sierra.c
drivers/video/console/fbcon.c
drivers/video/fbdev/Kconfig
drivers/video/fbdev/omap2/dss/dss-of.c
drivers/video/fbdev/pxa3xx-gcu.c
drivers/video/of_videomode.c
drivers/virtio/virtio_input.c
drivers/xen/balloon.c
drivers/xen/gntdev.c
drivers/xen/xenbus/xenbus_client.c
fs/btrfs/qgroup.c
fs/ceph/caps.c
fs/ceph/locks.c
fs/ceph/super.h
fs/dcache.c
fs/file_table.c
fs/fuse/dev.c
fs/hugetlbfs/inode.c
fs/namei.c
fs/nfsd/nfs4layouts.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/notify/mark.c
fs/ocfs2/aops.c
fs/ocfs2/dlmglue.c
fs/signalfd.c
include/drm/drmP.h
include/drm/drm_crtc.h
include/drm/drm_crtc_helper.h
include/drm/drm_edid.h
include/drm/drm_pciids.h
include/linux/ata.h
include/linux/fs.h
include/linux/intel-iommu.h
include/linux/irq.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/page-flags.h
include/linux/skbuff.h
include/media/rc-core.h
include/media/videobuf2-core.h
include/scsi/scsi_eh.h
include/soc/tegra/mc.h
include/sound/soc-topology.h
include/uapi/linux/pci_regs.h
include/uapi/sound/asoc.h
init/main.c
ipc/mqueue.c
ipc/sem.c
ipc/shm.c
kernel/cpuset.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/irq/chip.c
kernel/kthread.c
kernel/locking/qspinlock_paravirt.h
kernel/module.c
kernel/signal.c
kernel/time/timer.c
lib/iommu-common.c
mm/cma.h
mm/huge_memory.c
mm/kasan/kasan.c
mm/kasan/report.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/migrate.c
mm/page-writeback.c
mm/page_alloc.c
mm/shmem.c
mm/slab.c
mm/slab_common.c
mm/slub.c
mm/vmscan.c
net/9p/client.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/gateway_client.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/bluetooth/mgmt.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/core/datagram.c
net/core/pktgen.c
net/core/request_sock.c
net/core/skbuff.c
net/dsa/slave.c
net/ipv4/fib_trie.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv6/ip6_fib.c
net/ipv6/mcast_snoop.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/mac80211/rc80211_minstrel.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_synproxy_core.c
net/netfilter/xt_CT.c
net/netlink/af_netlink.c
net/openvswitch/actions.c
net/rds/info.c
net/sched/act_mirred.c
net/sched/sch_fq_codel.c
scripts/kconfig/streamline_config.pl
security/yama/yama_lsm.c
sound/firewire/amdtp.c
sound/firewire/amdtp.h
sound/firewire/fireworks/fireworks.c
sound/firewire/fireworks/fireworks.h
sound/firewire/fireworks/fireworks_stream.c
sound/hda/ext/hdac_ext_controller.c
sound/hda/ext/hdac_ext_stream.c
sound/pci/hda/patch_realtek.c
sound/pci/oxygen/oxygen_mixer.c
sound/soc/Kconfig
sound/soc/Makefile
sound/soc/codecs/cs4265.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5645.h
sound/soc/intel/baytrail/sst-baytrail-ipc.c
sound/soc/intel/haswell/sst-haswell-ipc.c
sound/soc/soc-topology.c
sound/usb/card.c
tools/perf/builtin-record.c
tools/perf/builtin-top.c
tools/perf/config/Makefile
tools/perf/util/machine.c
tools/perf/util/stat-shadow.c
tools/perf/util/thread.c

diff --git a/.get_maintainer.ignore b/.get_maintainer.ignore
new file mode 100644 (file)
index 0000000..cca6d87
--- /dev/null
@@ -0,0 +1 @@
+Christoph Hellwig <hch@lst.de>
index b4091b7a78fe11ccd0e5f44f0703ace69dc09707..4b31af54ccd5864359c0810f9733f3026181a631 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -17,6 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
 Al Viro <viro@ftp.linux.org.uk>
 Al Viro <viro@zenIV.linux.org.uk>
 Andreas Herrmann <aherrman@de.ibm.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
index cf9431db873150d38a3d2477280d481ff42bfa83..7b57fc087088f49756eeb8eaabf403bfbbd92b93 100644 (file)
@@ -10,7 +10,7 @@ This guide gives a quick cheat sheet for some basic understanding.
 Some Keywords
 
 DMAR - DMA remapping
-DRHD - DMA Engine Reporting Structure
+DRHD - DMA Remapping Hardware Unit Definition
 RMRR - Reserved memory Region Reporting Structure
 ZLR  - Zero length reads from PCI devices
 IOVA - IO Virtual address.
index d6b794cef0b8b9907ab5a055a6502180b4350148..91e6e5c478d006245c5a88e7ae7e304d6fa7f097 100644 (file)
@@ -199,6 +199,7 @@ nodes to be present and contain the properties described below.
                            "qcom,kpss-acc-v1"
                            "qcom,kpss-acc-v2"
                            "rockchip,rk3066-smp"
+                           "ste,dbx500-smp"
 
        - cpu-release-addr
                Usage: required for systems that have an "enable-method"
index 06760503a819f5fbc15e747626ce3f734b2888ff..718074501fcbc97b5c7b5c33cab2736e433f5220 100644 (file)
@@ -43,6 +43,12 @@ conditions.
 
 ** System MMU optional properties:
 
+- dma-coherent  : Present if page table walks made by the SMMU are
+                  cache coherent with the CPU.
+
+                  NOTE: this only applies to the SMMU itself, not
+                  masters connected upstream of the SMMU.
+
 - calxeda,smmu-secure-config-access : Enable proper handling of buggy
                   implementations that always use secure access to
                   SMMU configuration registers. In this case non-secure
index 42531dc387aa6babaed955c1a821bf6a3eb77b84..869699925fd599e3d34c39155dfbb027d2262dca 100644 (file)
@@ -8,6 +8,11 @@ Required properties:
 - ti,hwmods  : Name of the hwmod associated with the IOMMU instance
 - reg        : Address space for the configuration registers
 - interrupts : Interrupt specifier for the IOMMU instance
+- #iommu-cells : Should be 0. OMAP IOMMUs are all "single-master" devices,
+                 and needs no additional data in the pargs specifier. Please
+                 also refer to the generic bindings document for more info
+                 on this property,
+                     Documentation/devicetree/bindings/iommu/iommu.txt
 
 Optional properties:
 - ti,#tlb-entries : Number of entries in the translation look-aside buffer.
@@ -18,6 +23,7 @@ Optional properties:
 Example:
        /* OMAP3 ISP MMU */
        mmu_isp: mmu@480bd400 {
+               #iommu-cells = <0>;
                compatible = "ti,omap2-iommu";
                reg = <0x480bd400 0x80>;
                interrupts = <24>;
index 305e3df3d9b1eb9a994c845eb28959275d2f20ed..9cf9446eaf2eac41d57251cb5853037e2b31e7c2 100644 (file)
@@ -82,6 +82,9 @@ Optional properties:
  - id: If there are multiple instance of the same type, in order to
    differentiate between each instance "id" can be used (e.g., multi-lane PCIe
    PHY). If "id" is not provided, it is set to default value of '1'.
+ - syscon-pllreset: Handle to system control region that contains the
+   CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0
+   register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy.
 
 This is usually a subnode of ocp2scp to which it is connected.
 
@@ -100,3 +103,16 @@ usb3phy@4a084400 {
                        "sysclk",
                        "refclk";
 };
+
+sata_phy: phy@4A096000 {
+       compatible = "ti,phy-pipe3-sata";
+       reg = <0x4A096000 0x80>, /* phy_rx */
+             <0x4A096400 0x64>, /* phy_tx */
+             <0x4A096800 0x40>; /* pll_ctrl */
+       reg-names = "phy_rx", "phy_tx", "pll_ctrl";
+       ctrl-module = <&omap_control_sata>;
+       clocks = <&sys_clkin1>, <&sata_ref_clk>;
+       clock-names = "sysclk", "refclk";
+       syscon-pllreset = <&scm_conf 0x3fc>;
+       #phy-cells = <0>;
+};
index c86f2f1ae4f6aa2d9af3e3987e8be06fd237dbef..1fec1135791d98c987105872c63b5e96589633d3 100644 (file)
@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
  byte 5:  0   z6   z5   z4   z3   z2   z1   z0
 
 Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
-the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
-buttons get reported separately in the PSM, PSR and PSL bits.
+the DualPoint Stick. The M, R and L bits signal the combined status of both
+the pointingstick and touchpad buttons, except for Dell dualpoint devices
+where the pointingstick buttons get reported separately in the PSM, PSR
+and PSL bits.
 
 Dualpoint device -- interleaved packet format
 ---------------------------------------------
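
As a concrete illustration of the byte-5 layout shown above, a driver-side decode might look like this (a hypothetical C sketch, not the actual alps.c code; 'packet' is assumed to be the received 6-byte PS/2 packet):

    /* byte 5 is "0 z6 z5 z4 z3 z2 z1 z0": drop the fixed top bit */
    unsigned int z = packet[5] & 0x7f;  /* 7-bit pressure value */
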
index a9ae6c105520011994801168a7841b4d713b716e..569568f6644f2092211b7bb2690c7defe49977dd 100644 (file)
@@ -3587,6 +3587,15 @@ S:       Maintained
 F:     drivers/gpu/drm/rockchip/
 F:     Documentation/devicetree/bindings/video/rockchip*
 
+DRM DRIVERS FOR STI
+M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M:     Vincent Abriou <vincent.abriou@st.com>
+L:     dri-devel@lists.freedesktop.org
+T:     git http://git.linaro.org/people/benjamin.gaignard/kernel.git
+S:     Maintained
+F:     drivers/gpu/drm/sti
+F:     Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+
 DSBR100 USB FM RADIO DRIVER
 M:     Alexey Klimov <klimov.linux@gmail.com>
 L:     linux-media@vger.kernel.org
index e79448d90f194be05c2ac9e7f969cb458643c2cc..246053f04fb5cecf72e477cd94257e052955d21f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc8
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
@@ -597,6 +597,11 @@ endif # $(dot-config)
 # Defaults to vmlinux, but the arch makefile usually adds further targets
 all: vmlinux
 
+# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
+# values of the respective KBUILD_* variables
+ARCH_CPPFLAGS :=
+ARCH_AFLAGS :=
+ARCH_CFLAGS :=
 include arch/$(SRCARCH)/Makefile
 
 KBUILD_CFLAGS  += $(call cc-option,-fno-delete-null-pointer-checks,)
@@ -848,10 +853,10 @@ export mod_strip_cmd
 mod_compress_cmd = true
 ifdef CONFIG_MODULE_COMPRESS
   ifdef CONFIG_MODULE_COMPRESS_GZIP
-    mod_compress_cmd = gzip -n
+    mod_compress_cmd = gzip -n -f
   endif # CONFIG_MODULE_COMPRESS_GZIP
   ifdef CONFIG_MODULE_COMPRESS_XZ
-    mod_compress_cmd = xz
+    mod_compress_cmd = xz -f
   endif # CONFIG_MODULE_COMPRESS_XZ
 endif # CONFIG_MODULE_COMPRESS
 export mod_compress_cmd
index 91cf4055acab0439e564a96056012befd5fb4c36..bd4670d1b89bcabf043f13015c01b59397b73427 100644 (file)
@@ -313,11 +313,11 @@ config ARC_PAGE_SIZE_8K
 
 config ARC_PAGE_SIZE_16K
        bool "16KB"
-       depends on ARC_MMU_V3
+       depends on ARC_MMU_V3 || ARC_MMU_V4
 
 config ARC_PAGE_SIZE_4K
        bool "4KB"
-       depends on ARC_MMU_V3
+       depends on ARC_MMU_V3 || ARC_MMU_V4
 
 endchoice
 
@@ -365,6 +365,11 @@ config ARC_HAS_LLSC
        default y
        depends on !ARC_CANT_LLSC
 
+config ARC_STAR_9000923308
+       bool "Workaround for llock/scond livelock"
+       default y
+       depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
+
 config ARC_HAS_SWAPE
        bool "Insn: SWAPE (endian-swap)"
        default y
@@ -379,6 +384,10 @@ config ARC_HAS_LL64
          dest operands with 2 possible source operands.
        default y
 
+config ARC_HAS_DIV_REM
+       bool "Insn: div, divu, rem, remu"
+       default y
+
 config ARC_HAS_RTC
        bool "Local 64-bit r/o cycle counter"
        default n
index 46d87310220dadaf96be4ff08c42b240d2eb4916..8a27a48304a4c0127d97996d73c7d7dc0515d8a3 100644 (file)
@@ -36,8 +36,16 @@ cflags-$(atleast_gcc44)                      += -fsection-anchors
 cflags-$(CONFIG_ARC_HAS_LLSC)          += -mlock
 cflags-$(CONFIG_ARC_HAS_SWAPE)         += -mswape
 
+ifdef CONFIG_ISA_ARCV2
+
 ifndef CONFIG_ARC_HAS_LL64
-cflags-$(CONFIG_ISA_ARCV2)             += -mno-ll64
+cflags-y                               += -mno-ll64
+endif
+
+ifndef CONFIG_ARC_HAS_DIV_REM
+cflags-y                               += -mno-div-rem
+endif
+
 endif
 
 cflags-$(CONFIG_ARC_DW2_UNWIND)                += -fasynchronous-unwind-tables
index 070f58827a5c12c2e19469ff4280f7c69e0f36a3..c8f57b8449dcf6a36aa61cd3589b90ebba42d7ea 100644 (file)
 #define ECR_C_BIT_DTLB_LD_MISS         8
 #define ECR_C_BIT_DTLB_ST_MISS         9
 
-
 /* Auxiliary registers */
 #define AUX_IDENTITY           4
 #define AUX_INTR_VEC_BASE      0x25
-
+#define AUX_NON_VOL            0x5e
 
 /*
  * Floating Pt Registers
@@ -240,9 +239,9 @@ struct bcr_extn_xymem {
 
 struct bcr_perip {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-       unsigned int start:8, pad2:8, sz:8, pad:8;
+       unsigned int start:8, pad2:8, sz:8, ver:8;
 #else
-       unsigned int pad:8, sz:8, pad2:8, start:8;
+       unsigned int ver:8, sz:8, pad2:8, start:8;
 #endif
 };
 
index 03484cb4d16d2eb4fada0095ee427726c23bd2e1..87d18ae53115596f7b64a56a4a07a572d54c3cbd 100644 (file)
 
 #define atomic_set(v, i) (((v)->counter) = (i))
 
-#ifdef CONFIG_ISA_ARCV2
-#define PREFETCHW      "       prefetchw   [%1]        \n"
-#else
-#define PREFETCHW
+#ifdef CONFIG_ARC_STAR_9000923308
+
+#define SCOND_FAIL_RETRY_VAR_DEF                                               \
+       unsigned int delay = 1, tmp;                                            \
+
+#define SCOND_FAIL_RETRY_ASM                                                   \
+       "       bz      4f                      \n"                             \
+       "   ; --- scond fail delay ---          \n"                             \
+       "       mov     %[tmp], %[delay]        \n"     /* tmp = delay */       \
+       "2:     brne.d  %[tmp], 0, 2b           \n"     /* while (tmp != 0) */  \
+       "       sub     %[tmp], %[tmp], 1       \n"     /* tmp-- */             \
+       "       rol     %[delay], %[delay]      \n"     /* delay *= 2 */        \
+       "       b       1b                      \n"     /* start over */        \
+       "4: ; --- success ---                   \n"                             \
+
+#define SCOND_FAIL_RETRY_VARS                                                  \
+         ,[delay] "+&r" (delay),[tmp] "=&r"    (tmp)                           \
+
+#else  /* !CONFIG_ARC_STAR_9000923308 */
+
+#define SCOND_FAIL_RETRY_VAR_DEF
+
+#define SCOND_FAIL_RETRY_ASM                                                   \
+       "       bnz     1b                      \n"                             \
+
+#define SCOND_FAIL_RETRY_VARS
+
 #endif
 
 #define ATOMIC_OP(op, c_op, asm_op)                                    \
 static inline void atomic_##op(int i, atomic_t *v)                     \
 {                                                                      \
-       unsigned int temp;                                              \
+       unsigned int val;                                               \
+       SCOND_FAIL_RETRY_VAR_DEF                                        \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:                             \n"                             \
-       PREFETCHW                                                       \
-       "       llock   %0, [%1]        \n"                             \
-       "       " #asm_op " %0, %0, %2  \n"                             \
-       "       scond   %0, [%1]        \n"                             \
-       "       bnz     1b              \n"                             \
-       : "=&r"(temp)   /* Early clobber, to prevent reg reuse */       \
-       : "r"(&v->counter), "ir"(i)                                     \
+       "1:     llock   %[val], [%[ctr]]                \n"             \
+       "       " #asm_op " %[val], %[val], %[i]        \n"             \
+       "       scond   %[val], [%[ctr]]                \n"             \
+       "                                               \n"             \
+       SCOND_FAIL_RETRY_ASM                                            \
+                                                                       \
+       : [val] "=&r"   (val) /* Early clobber to prevent reg reuse */  \
+         SCOND_FAIL_RETRY_VARS                                         \
+       : [ctr] "r"     (&v->counter), /* Not "m": llock only supports reg direct addr mode */  \
+         [i]   "ir"    (i)                                             \
        : "cc");                                                        \
 }                                                                      \
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
 static inline int atomic_##op##_return(int i, atomic_t *v)             \
 {                                                                      \
-       unsigned int temp;                                              \
+       unsigned int val;                                               \
+       SCOND_FAIL_RETRY_VAR_DEF                                        \
                                                                        \
        /*                                                              \
         * Explicit full memory barrier needed before/after as          \
@@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
        smp_mb();                                                       \
                                                                        \
        __asm__ __volatile__(                                           \
-       "1:                             \n"                             \
-       PREFETCHW                                                       \
-       "       llock   %0, [%1]        \n"                             \
-       "       " #asm_op " %0, %0, %2  \n"                             \
-       "       scond   %0, [%1]        \n"                             \
-       "       bnz     1b              \n"                             \
-       : "=&r"(temp)                                                   \
-       : "r"(&v->counter), "ir"(i)                                     \
+       "1:     llock   %[val], [%[ctr]]                \n"             \
+       "       " #asm_op " %[val], %[val], %[i]        \n"             \
+       "       scond   %[val], [%[ctr]]                \n"             \
+       "                                               \n"             \
+       SCOND_FAIL_RETRY_ASM                                            \
+                                                                       \
+       : [val] "=&r"   (val)                                           \
+         SCOND_FAIL_RETRY_VARS                                         \
+       : [ctr] "r"     (&v->counter),                                  \
+         [i]   "ir"    (i)                                             \
        : "cc");                                                        \
                                                                        \
        smp_mb();                                                       \
                                                                        \
-       return temp;                                                    \
+       return val;                                                     \
 }
 
 #else  /* !CONFIG_ARC_HAS_LLSC */
@@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and)
 #undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
 
 /**
  * __atomic_add_unless - add unless the number is a given value
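
The effect of SCOND_FAIL_RETRY_ASM above is easier to follow in C. A rough equivalent of one atomic op under the CONFIG_ARC_STAR_9000923308 workaround (a sketch only: llock()/scond() stand in for the LLOCK/SCOND instructions and are not real kernel helpers):

    static inline void atomic_add_sketch(int i, atomic_t *v)
    {
            unsigned int delay = 1, tmp;

            for (;;) {
                    unsigned int val = llock(&v->counter);  /* hypothetical: LLOCK */
                    val += i;
                    if (scond(&v->counter, val))            /* hypothetical: SCOND, true on success */
                            return;
                    /* exclusive store failed: busy-wait 'delay' iterations, then double it */
                    for (tmp = delay; tmp != 0; tmp--)
                            ;                               /* matches the brne.d countdown */
                    delay *= 2;                             /* the 'rol %[delay]' above */
            }
    }
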
index 91694ec1ce959498fd5b4431962b03bbdf4119b7..69095da1fcfd1e35f16234aaf473896194064d38 100644 (file)
 struct pt_regs {
 
        /* Real registers */
-       long bta;       /* bta_l1, bta_l2, erbta */
+       unsigned long bta;      /* bta_l1, bta_l2, erbta */
 
-       long lp_start, lp_end, lp_count;
+       unsigned long lp_start, lp_end, lp_count;
 
-       long status32;  /* status32_l1, status32_l2, erstatus */
-       long ret;       /* ilink1, ilink2 or eret */
-       long blink;
-       long fp;
-       long r26;       /* gp */
+       unsigned long status32; /* status32_l1, status32_l2, erstatus */
+       unsigned long ret;      /* ilink1, ilink2 or eret */
+       unsigned long blink;
+       unsigned long fp;
+       unsigned long r26;      /* gp */
 
-       long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+       unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
 
-       long sp;        /* user/kernel sp depending on where we came from  */
-       long orig_r0;
+       unsigned long sp;       /* User/Kernel depending on where we came from */
+       unsigned long orig_r0;
 
        /*
         * To distinguish bet excp, syscall, irq
@@ -55,13 +55,13 @@ struct pt_regs {
                unsigned long event;
        };
 
-       long user_r25;
+       unsigned long user_r25;
 };
 #else
 
 struct pt_regs {
 
-       long orig_r0;
+       unsigned long orig_r0;
 
        union {
                struct {
@@ -76,26 +76,26 @@ struct pt_regs {
                unsigned long event;
        };
 
-       long bta;       /* bta_l1, bta_l2, erbta */
+       unsigned long bta;      /* bta_l1, bta_l2, erbta */
 
-       long user_r25;
+       unsigned long user_r25;
 
-       long r26;       /* gp */
-       long fp;
-       long sp;        /* user/kernel sp depending on where we came from  */
+       unsigned long r26;      /* gp */
+       unsigned long fp;
+       unsigned long sp;       /* user/kernel sp depending on where we came from  */
 
-       long r12;
+       unsigned long r12;
 
        /*------- Below list auto saved by h/w -----------*/
-       long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
+       unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
 
-       long blink;
-       long lp_end, lp_start, lp_count;
+       unsigned long blink;
+       unsigned long lp_end, lp_start, lp_count;
 
-       long ei, ldi, jli;
+       unsigned long ei, ldi, jli;
 
-       long ret;
-       long status32;
+       unsigned long ret;
+       unsigned long status32;
 };
 
 #endif
@@ -103,10 +103,10 @@ struct pt_regs {
 /* Callee saved registers - need to be saved only when you are scheduled out */
 
 struct callee_regs {
-       long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+       unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };
 
-#define instruction_pointer(regs)      (unsigned long)((regs)->ret)
+#define instruction_pointer(regs)      ((regs)->ret)
 #define profile_pc(regs)               instruction_pointer(regs)
 
 /* return 1 if user mode or 0 if kernel mode */
@@ -142,7 +142,7 @@ struct callee_regs {
 
 static inline long regs_return_value(struct pt_regs *regs)
 {
-       return regs->r0;
+       return (long)regs->r0;
 }
 
 #endif /* !__ASSEMBLY__ */
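
A plausible motivation for the blanket long -> unsigned long conversion above (an inference from the diff, not stated in it): on 32-bit ARC a kernel address has bit 31 set, so holding it in a signed long makes ordinary comparisons misfire, which is also why instruction_pointer() previously needed its (unsigned long) cast. A standalone illustration, assuming 32-bit long as on ARC:

    #include <stdio.h>

    int main(void)
    {
            long pc = (long)0x80840000;     /* a typical kernel text address */
            /* signed compare: 0x80840000 is negative in a 32-bit long, so
             * this prints 1 even though the address is "above" 0x40000000 */
            printf("%d\n", pc < 0x40000000L);
            return 0;
    }
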
index e1651df6a93d5bc8ab0af3a833c7c6ffd23acacc..db8c59d1eaeb760798c287a15720573ed58b9e4a 100644 (file)
 #define arch_spin_unlock_wait(x) \
        do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
 
+#ifdef CONFIG_ARC_HAS_LLSC
+
+/*
+ * A normal LLOCK/SCOND based system, w/o need for livelock workaround
+ */
+#ifndef CONFIG_ARC_STAR_9000923308
+
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-       unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+       unsigned int val;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[slock]]      \n"
+       "       breq    %[val], %[LOCKED], 1b   \n"     /* spin while LOCKED */
+       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [slock]       "r"     (&(lock->slock)),
+         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+       unsigned int val, got_it = 0;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[slock]]      \n"
+       "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
+       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
+       "       bnz     1b                      \n"
+       "       mov     %[got_it], 1            \n"
+       "4:                                     \n"
+       "                                       \n"
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+       : [slock]       "r"     (&(lock->slock)),
+         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+       smp_mb();
+
+       lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+       smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+
+       smp_mb();
+
+       /*
+        * zero means writer holds the lock exclusively, deny Reader.
+        * Otherwise grant lock to first/subseq reader
+        *
+        *      if (rw->counter > 0) {
+        *              rw->counter--;
+        *              ret = 1;
+        *      }
+        */
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brls    %[val], %[WR_LOCKED], 1b\n"     /* <= 0: spin while write locked */
+       "       sub     %[val], %[val], 1       \n"     /* reader lock */
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+       unsigned int val, got_it = 0;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
+       "       sub     %[val], %[val], 1       \n"     /* counter-- */
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"     /* retry if collided with someone */
+       "       mov     %[got_it], 1            \n"
+       "                                       \n"
+       "4: ; --- done ---                      \n"
+
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+
+       smp_mb();
+
+       /*
+        * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+        * deny writer. Otherwise if unlocked grant to writer
+        * Hence the claim that Linux rwlocks are unfair to writers.
+        * (can be starved for an indefinite time by readers).
+        *
+        *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+        *              rw->counter = 0;
+        *              ret = 1;
+        *      }
+        */
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brne    %[val], %[UNLOCKED], 1b \n"     /* while !UNLOCKED spin */
+       "       mov     %[val], %[WR_LOCKED]    \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+       unsigned int val, got_it = 0;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
+       "       mov     %[val], %[WR_LOCKED]    \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"     /* retry if collided with someone */
+       "       mov     %[got_it], 1            \n"
+       "                                       \n"
+       "4: ; --- done ---                      \n"
+
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+
+       smp_mb();
+
+       /*
+        * rw->counter++;
+        */
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       add     %[val], %[val], 1       \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [rwlock]      "r"     (&(rw->counter))
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+       smp_mb();
+
+       rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+
+       smp_mb();
+}
+
+#else  /* CONFIG_ARC_STAR_9000923308 */
+
+/*
+ * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
+ * coherency transactions in the SCU. The exclusive line state keeps rotating
+ * among contenting cores leading to a never ending cycle. So break the cycle
+ * by deferring the retry of failed exclusive access (SCOND). The actual delay
+ * needed is function of number of contending cores as well as the unrelated
+ * coherency traffic from other cores. To keep the code simple, start off with
+ * small delay of 1 which would suffice most cases and in case of contention
+ * double the delay. Eventually the delay is sufficient such that the coherency
+ * pipeline is drained, thus a subsequent exclusive access would succeed.
+ */
+
+#define SCOND_FAIL_RETRY_VAR_DEF                                               \
+       unsigned int delay, tmp;                                                \
+
+#define SCOND_FAIL_RETRY_ASM                                                   \
+       "   ; --- scond fail delay ---          \n"                             \
+       "       mov     %[tmp], %[delay]        \n"     /* tmp = delay */       \
+       "2:     brne.d  %[tmp], 0, 2b           \n"     /* while (tmp != 0) */  \
+       "       sub     %[tmp], %[tmp], 1       \n"     /* tmp-- */             \
+       "       rol     %[delay], %[delay]      \n"     /* delay *= 2 */        \
+       "       b       1b                      \n"     /* start over */        \
+       "                                       \n"                             \
+       "4: ; --- done ---                      \n"                             \
+
+#define SCOND_FAIL_RETRY_VARS                                                  \
+         ,[delay] "=&r" (delay), [tmp] "=&r"   (tmp)                           \
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+       unsigned int val;
+       SCOND_FAIL_RETRY_VAR_DEF;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "0:     mov     %[delay], 1             \n"
+       "1:     llock   %[val], [%[slock]]      \n"
+       "       breq    %[val], %[LOCKED], 0b   \n"     /* spin while LOCKED */
+       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
+       "       bz      4f                      \n"     /* done */
+       "                                       \n"
+       SCOND_FAIL_RETRY_ASM
+
+       : [val]         "=&r"   (val)
+         SCOND_FAIL_RETRY_VARS
+       : [slock]       "r"     (&(lock->slock)),
+         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+       unsigned int val, got_it = 0;
+       SCOND_FAIL_RETRY_VAR_DEF;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "0:     mov     %[delay], 1             \n"
+       "1:     llock   %[val], [%[slock]]      \n"
+       "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
+       "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
+       "       bz.d    4f                      \n"
+       "       mov.z   %[got_it], 1            \n"     /* got it */
+       "                                       \n"
+       SCOND_FAIL_RETRY_ASM
+
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+         SCOND_FAIL_RETRY_VARS
+       : [slock]       "r"     (&(lock->slock)),
+         [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+       smp_mb();
+
+       lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+       smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+       SCOND_FAIL_RETRY_VAR_DEF;
+
+       smp_mb();
+
+       /*
+        * zero means writer holds the lock exclusively, deny Reader.
+        * Otherwise grant lock to first/subseq reader
+        *
+        *      if (rw->counter > 0) {
+        *              rw->counter--;
+        *              ret = 1;
+        *      }
+        */
+
+       __asm__ __volatile__(
+       "0:     mov     %[delay], 1             \n"
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brls    %[val], %[WR_LOCKED], 0b\n"     /* <= 0: spin while write locked */
+       "       sub     %[val], %[val], 1       \n"     /* reader lock */
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bz      4f                      \n"     /* done */
+       "                                       \n"
+       SCOND_FAIL_RETRY_ASM
+
+       : [val]         "=&r"   (val)
+         SCOND_FAIL_RETRY_VARS
+       : [rwlock]      "r"     (&(rw->counter)),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+       unsigned int val, got_it = 0;
+       SCOND_FAIL_RETRY_VAR_DEF;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "0:     mov     %[delay], 1             \n"
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
+       "       sub     %[val], %[val], 1       \n"     /* counter-- */
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bz.d    4f                      \n"
+       "       mov.z   %[got_it], 1            \n"     /* got it */
+       "                                       \n"
+       SCOND_FAIL_RETRY_ASM
+
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+         SCOND_FAIL_RETRY_VARS
+       : [rwlock]      "r"     (&(rw->counter)),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+       SCOND_FAIL_RETRY_VAR_DEF;
+
+       smp_mb();
+
+       /*
+        * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+        * deny writer. Otherwise if unlocked grant to writer
+        * Hence the claim that Linux rwlocks are unfair to writers.
+        * (can be starved for an indefinite time by readers).
+        *
+        *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+        *              rw->counter = 0;
+        *              ret = 1;
+        *      }
+        */
+
+       __asm__ __volatile__(
+       "0:     mov     %[delay], 1             \n"
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brne    %[val], %[UNLOCKED], 0b \n"     /* while !UNLOCKED spin */
+       "       mov     %[val], %[WR_LOCKED]    \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bz      4f                      \n"
+       "                                       \n"
+       SCOND_FAIL_RETRY_ASM
+
+       : [val]         "=&r"   (val)
+         SCOND_FAIL_RETRY_VARS
+       : [rwlock]      "r"     (&(rw->counter)),
+         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+       unsigned int val, got_it = 0;
+       SCOND_FAIL_RETRY_VAR_DEF;
+
+       smp_mb();
+
+       __asm__ __volatile__(
+       "0:     mov     %[delay], 1             \n"
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
+       "       mov     %[val], %[WR_LOCKED]    \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bz.d    4f                      \n"
+       "       mov.z   %[got_it], 1            \n"     /* got it */
+       "                                       \n"
+       SCOND_FAIL_RETRY_ASM
+
+       : [val]         "=&r"   (val),
+         [got_it]      "+&r"   (got_it)
+         SCOND_FAIL_RETRY_VARS
+       : [rwlock]      "r"     (&(rw->counter)),
+         [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
+         [WR_LOCKED]   "ir"    (0)
+       : "memory", "cc");
+
+       smp_mb();
+
+       return got_it;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+
+       smp_mb();
+
+       /*
+        * rw->counter++;
+        */
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       add     %[val], %[val], 1       \n"
+       "       scond   %[val], [%[rwlock]]     \n"
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [rwlock]      "r"     (&(rw->counter))
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+       unsigned int val;
+
+       smp_mb();
+
+       /*
+        * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+        */
+       __asm__ __volatile__(
+       "1:     llock   %[val], [%[rwlock]]     \n"
+       "       scond   %[UNLOCKED], [%[rwlock]]\n"
+       "       bnz     1b                      \n"
+       "                                       \n"
+       : [val]         "=&r"   (val)
+       : [rwlock]      "r"     (&(rw->counter)),
+         [UNLOCKED]    "r"     (__ARCH_RW_LOCK_UNLOCKED__)
+       : "memory", "cc");
+
+       smp_mb();
+}
+
+#undef SCOND_FAIL_RETRY_VAR_DEF
+#undef SCOND_FAIL_RETRY_ASM
+#undef SCOND_FAIL_RETRY_VARS
+
+#endif /* CONFIG_ARC_STAR_9000923308 */
+
+#else  /* !CONFIG_ARC_HAS_LLSC */
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+       unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
 
        /*
         * This smp_mb() is technically superfluous, we only need the one
@@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        "       breq  %0, %2, 1b        \n"
-       : "+&r" (tmp)
+       : "+&r" (val)
        : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
        : "memory");
 
@@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        smp_mb();
 }
 
+/* 1 - lock taken successfully */
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
-       unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+       unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
 
        smp_mb();
 
        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
-       : "+r" (tmp)
+       : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");
 
        smp_mb();
 
-       return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+       return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-       unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+       unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
 
        /*
         * RELEASE barrier: given the instructions avail on ARCv2, full barrier
@@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
        __asm__ __volatile__(
        "       ex  %0, [%1]            \n"
-       : "+r" (tmp)
+       : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");
 
@@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
  *
  * The spinlock itself is contained in @counter and access to it is
  * serialized with @lock_mutex.
- *
- * Unfair locking as Writers could be starved indefinitely by Reader(s)
  */
 
-/* Would read_trylock() succeed? */
-#define arch_read_can_lock(x)  ((x)->counter > 0)
-
-/* Would write_trylock() succeed? */
-#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
-
 /* 1 - lock taken successfully */
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
@@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
        arch_spin_unlock(&(rw->lock_mutex));
 }
 
+#endif
+
+#define arch_read_can_lock(x)  ((x)->counter > 0)
+#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
 #define arch_read_lock_flags(lock, flags)      arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags)     arch_write_lock(lock)
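
For reference, the counter protocol that all of the llock/scond sequences above implement can be written out in plain C (an illustrative model only, ignoring atomicity and the retry/backoff; the pseudo-code comments inside the diff describe the same scheme):

    /* counter == __ARCH_RW_LOCK_UNLOCKED__ (0x01000000): lock is free
     * 0 < counter < __ARCH_RW_LOCK_UNLOCKED__: (UNLOCKED - counter) readers hold it
     * counter == 0: held exclusively by a writer */

    static int read_trylock_model(arch_rwlock_t *rw)
    {
            if (rw->counter > 0) {          /* no writer in */
                    rw->counter--;          /* consume one reader slot */
                    return 1;
            }
            return 0;                       /* write-locked: bail or retry */
    }

    static int write_trylock_model(arch_rwlock_t *rw)
    {
            if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { /* no readers, no writer */
                    rw->counter = 0;        /* mark write-locked */
                    return 1;
            }
            return 0;
    }
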
 
index 662627ced4f23a966c85feffb9f9d38a4f7df10a..4e1ef5f650c6f2fc74ee1fbb09957d8d23e7b7da 100644 (file)
@@ -26,7 +26,9 @@ typedef struct {
  */
 typedef struct {
        volatile unsigned int   counter;
+#ifndef CONFIG_ARC_HAS_LLSC
        arch_spinlock_t         lock_mutex;
+#endif
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED__      0x01000000
index 76a7739aab1c5173f397c0f8a5a79c5169489f41..0b3ef63d4a03b3ef2ff119535ee3c020641e1888 100644 (file)
 */
 struct user_regs_struct {
 
-       long pad;
+       unsigned long pad;
        struct {
-               long bta, lp_start, lp_end, lp_count;
-               long status32, ret, blink, fp, gp;
-               long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
-               long sp;
+               unsigned long bta, lp_start, lp_end, lp_count;
+               unsigned long status32, ret, blink, fp, gp;
+               unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+               unsigned long sp;
        } scratch;
-       long pad2;
+       unsigned long pad2;
        struct {
-               long r25, r24, r23, r22, r21, r20;
-               long r19, r18, r17, r16, r15, r14, r13;
+               unsigned long r25, r24, r23, r22, r21, r20;
+               unsigned long r19, r18, r17, r16, r15, r14, r13;
        } callee;
-       long efa;       /* break pt addr, for break points in delay slots */
-       long stop_pc;   /* give dbg stop_pc after ensuring brkpt trap */
+       unsigned long efa;      /* break pt addr, for break points in delay slots */
+       unsigned long stop_pc;  /* give dbg stop_pc after ensuring brkpt trap */
 };
 #endif /* !__ASSEMBLY__ */
 
index 18cc01591c96e64186a8b13c1aef5b8011091b12..cabde9dc0696479cc3a4d3074fd526cf89c85182 100644 (file)
@@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void)
        struct bcr_perip uncached_space;
        struct bcr_generic bcr;
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+       unsigned long perip_space;
        FIX_PTR(cpu);
 
        READ_BCR(AUX_IDENTITY, cpu->core);
@@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void)
        cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
 
        READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
-       BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE);
+        if (uncached_space.ver < 3)
+               perip_space = uncached_space.start << 24;
+       else
+               perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
+
+       BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE);
 
        READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
 
@@ -330,6 +336,10 @@ static void arc_chk_core_config(void)
                pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
        else if (!cpu->extn.fpu_dp && fpu_enabled)
                panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
+
+       if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
+           !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
+               panic("llock/scond livelock workaround missing\n");
 }
 
 /*
index 3364d2bbc515471bba6478b8b34a417251ffde56..4294761a2b3e7ad3b36f5eca5bc26490e31ed61f 100644 (file)
@@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta,
        return 0;
 }
 
-static void arc_clkevent_set_mode(enum clock_event_mode mode,
-                                 struct clock_event_device *dev)
+static int arc_clkevent_set_periodic(struct clock_event_device *dev)
 {
-       switch (mode) {
-       case CLOCK_EVT_MODE_PERIODIC:
-                /*
-                 * At X Hz, 1 sec = 1000ms -> X cycles;
-                 *                    10ms -> X / 100 cycles
-                 */
-               arc_timer_event_setup(arc_get_core_freq() / HZ);
-               break;
-       case CLOCK_EVT_MODE_ONESHOT:
-               break;
-       default:
-               break;
-       }
-
-       return;
+       /*
+        * At X Hz, 1 sec = 1000ms -> X cycles;
+        *                    10ms -> X / 100 cycles
+        */
+       arc_timer_event_setup(arc_get_core_freq() / HZ);
+       return 0;
 }
 
 static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
-       .name           = "ARC Timer0",
-       .features       = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
-       .mode           = CLOCK_EVT_MODE_UNUSED,
-       .rating         = 300,
-       .irq            = TIMER0_IRQ,   /* hardwired, no need for resources */
-       .set_next_event = arc_clkevent_set_next_event,
-       .set_mode       = arc_clkevent_set_mode,
+       .name                   = "ARC Timer0",
+       .features               = CLOCK_EVT_FEAT_ONESHOT |
+                                 CLOCK_EVT_FEAT_PERIODIC,
+       .rating                 = 300,
+       .irq                    = TIMER0_IRQ,   /* hardwired, no need for resources */
+       .set_next_event         = arc_clkevent_set_next_event,
+       .set_state_periodic     = arc_clkevent_set_periodic,
 };
 
 static irqreturn_t timer_irq_handler(int irq, void *dev_id)
@@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
         * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
         */
        struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
-       int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC;
+       int irq_reenable = clockevent_state_periodic(evt);
 
        /*
         * Any write to CTRL reg ACks the interrupt, we rewrite the
index 1b2b3acfed52df6f1fb0aad58fa834d3d689482d..0cab0b8a57c5665e6686e9bef843fbfa51f141fd 100644 (file)
@@ -206,7 +206,7 @@ unalignedOffby3:
        ld.ab   r6, [r1, 4]
        prefetch [r1, 28]       ;Prefetch the next read location
        ld.ab   r8, [r1,4]
-       prefetch [r3, 32]       ;Prefetch the next write location
+       prefetchw [r3, 32]      ;Prefetch the next write location
 
        SHIFT_1 (r7, r6, 8)
        or      r7, r7, r5
index 92d573c734b5b3d52dec2d8fcf6eb67cc96d16f6..365b183648154c70de1726955b9242e88d3cc60c 100644 (file)
 
 #undef PREALLOC_NOT_AVAIL
 
-#ifdef PREALLOC_NOT_AVAIL
-#define PREWRITE(A,B)  prefetchw [(A),(B)]
-#else
-#define PREWRITE(A,B)  prealloc [(A),(B)]
-#endif
-
 ENTRY(memset)
        prefetchw [r0]          ; Prefetch the write location
        mov.f   0, r2
@@ -51,9 +45,15 @@ ENTRY(memset)
 
 ;;; Convert len to Dwords, unfold x8
        lsr.f   lp_count, lp_count, 6
+
        lpnz    @.Lset64bytes
        ;; LOOP START
-       PREWRITE(r3, 64)        ;Prefetch the next write location
+#ifdef PREALLOC_NOT_AVAIL
+       prefetchw [r3, 64]      ;Prefetch the next write location
+#else
+       prealloc  [r3, 64]
+#endif
+#ifdef CONFIG_ARC_HAS_LL64
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
@@ -62,16 +62,45 @@ ENTRY(memset)
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
+#else
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+#endif
 .Lset64bytes:
 
        lsr.f   lp_count, r2, 5 ;Last remaining  max 124 bytes
        lpnz    .Lset32bytes
        ;; LOOP START
        prefetchw   [r3, 32]    ;Prefetch the next write location
+#ifdef CONFIG_ARC_HAS_LL64
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
        std.ab  r4, [r3, 8]
+#else
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+       st.ab   r4, [r3, 4]
+#endif
 .Lset32bytes:
 
        and.f   lp_count, r2, 0x1F ;Last remaining 31 bytes
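
The CONFIG_ARC_HAS_LL64 split keeps the bytes per loop iteration identical:
eight 8-byte std.ab stores cover the same 64 bytes as sixteen 4-byte st.ab
stores (and four vs. eight in the 32-byte loop). A C analogue of the two
inner loops (a sketch, not the kernel code):

    #include <stdint.h>

    static void fill64_wide(uint64_t *p, uint64_t v)    /* LL64 path */
    {
            for (int i = 0; i < 8; i++)                 /* 8 x 8 bytes */
                    *p++ = v;
    }

    static void fill64_narrow(uint32_t *p, uint32_t v)  /* !LL64 path */
    {
            for (int i = 0; i < 16; i++)                /* 16 x 4 bytes */
                    *p++ = v;
    }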
index 99f7da513a48462031a58815d48428509bad0848..e7769c3ab5f2b7793aff703ca5e926983b45ec29 100644 (file)
@@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od)
 
 static void __init axs103_early_init(void)
 {
+       /*
+        * The AXS103 SMP and QUAD configurations share a device tree which
+        * defaults to 90 MHz. However, recent failures of the Quad config
+        * revealed P&R timing violations, so clamp it down to a safe 50 MHz.
+        * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack
+        *
+        * This hack is really hacky as of now. Fix it properly by getting the
+        * number of cores as the return value of the platform's early SMP callback.
+        */
+#ifdef CONFIG_ARC_MCIP
+       unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
+       if (num_cores > 2)
+               arc_set_core_freq(50 * 1000000);
+#endif
+
        switch (arc_get_core_freq()/1000000) {
        case 33:
                axs103_set_freq(1, 1, 1);
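
On the MCIP read above: the build configuration register packs the core count
into bits [21:16], hence the shift-and-mask. A sketch of the extraction (the
register value is hypothetical):

    unsigned int bcr = 0x00040000;                /* hypothetical MCIP BCR value */
    unsigned int num_cores = (bcr >> 16) & 0x3F;  /* -> 4, so clamp to 50 MHz    */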
index 07ab3d203916732337f909ec5f903db4c7bb1294..7451b447cc2d2cb8cc68a9bf59f125f2dd2ce347 100644 (file)
@@ -312,6 +312,9 @@ INSTALL_TARGETS     = zinstall uinstall install
 
 PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
 
+bootpImage uImage: zImage
+zImage: Image
+
 $(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
index 8f1e25bcecbd76273f62671e8b9afa8f193ea261..1e29ccf77ea24f56fd16f8960d2b585ccc7ce9fc 100644 (file)
                                ranges = <0 0x2000 0x2000>;
 
                                scm_conf: scm_conf@0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon", "simple-bus";
                                        reg = <0x0 0x1400>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
                                ctrl-module = <&omap_control_sata>;
                                clocks = <&sys_clkin1>, <&sata_ref_clk>;
                                clock-names = "sysclk", "refclk";
+                               syscon-pllreset = <&scm_conf 0x3fc>;
                                #phy-cells = <0>;
                        };
 
index e6d13592080d7c701056c2f6a73326aa11e715b5..b57033e8c633187a5f52c367a788f46196967fdc 100644 (file)
                        interrupt-names = "msi";
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
-                       interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
-                                       <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                                       <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
                                 <&clks IMX6QDL_CLK_LVDS1_GATE>,
                                 <&clks IMX6QDL_CLK_PCIE_REF_125M>;
index 1b6494fbdb91b9301c607652efbd9a9fb34a9f36..675fb8e492c6aa0478a6d5df01b30fbe1e281b7d 100644 (file)
                                        <GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
                        };
                };
+
+               mdio: mdio@24200f00 {
+                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x24200f00 0x100>;
+                       status = "disabled";
+                       clocks = <&clkcpgmac>;
+                       clock-names = "fck";
+                       bus_freq        = <2500000>;
+               };
                /include/ "k2e-netcp.dtsi"
        };
 };
-
-&mdio {
-       reg = <0x24200f00 0x100>;
-};
index ae6472407b2277012096d733bb80951592555d03..d0810a5f296857394397c7f1c60bffa0011bb6e1 100644 (file)
                        #gpio-cells = <2>;
                        gpio,syscon-dev = <&devctrl 0x25c>;
                };
+
+               mdio: mdio@02090300 {
+                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x02090300 0x100>;
+                       status = "disabled";
+                       clocks = <&clkcpgmac>;
+                       clock-names = "fck";
+                       bus_freq        = <2500000>;
+               };
                /include/ "k2hk-netcp.dtsi"
        };
 };
index 0e007483615e4f097bb747a2d882b2e2d3a030aa..49fd414f680c93ab50cf0dae72d2e9261181da21 100644 (file)
@@ -29,7 +29,6 @@
        };
 
        soc {
-
                /include/ "k2l-clocks.dtsi"
 
                uart2: serial@02348400 {
                        #gpio-cells = <2>;
                        gpio,syscon-dev = <&devctrl 0x24c>;
                };
+
+               mdio: mdio@26200f00 {
+                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <0x26200f00 0x100>;
+                       status = "disabled";
+                       clocks = <&clkcpgmac>;
+                       clock-names = "fck";
+                       bus_freq        = <2500000>;
+               };
                /include/ "k2l-netcp.dtsi"
        };
 };
        /* Pin muxed. Enabled and configured by Bootloader */
        status = "disabled";
 };
-
-&mdio {
-       reg = <0x26200f00 0x100>;
-};
index e7a6f6deabb6c0d89d4ca1e2c2ae63639249d010..72816d65f7ec3fcf5d7c47ce792ae57db369754b 100644 (file)
                                  1 0 0x21000A00 0x00000100>;
                };
 
-               mdio: mdio@02090300 {
-                       compatible      = "ti,keystone_mdio", "ti,davinci_mdio";
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       reg             = <0x02090300 0x100>;
-                       status = "disabled";
-                       clocks = <&clkpa>;
-                       clock-names = "fck";
-                       bus_freq        = <2500000>;
-               };
-
                kirq0: keystone_irq@26202a0 {
                        compatible = "ti,keystone-irq";
                        interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
index 11a7963be0035a002fa77c2bec6809b34444e584..2390f387c27163bb76e918bb73e26966bee7fb48 100644 (file)
@@ -51,7 +51,8 @@
                                };
 
                                scm_conf: scm_conf@270 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x270 0x240>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index 7d31c6ff246f47b14afd5eeb332d01a955faef35..abc4473e6f8a17e51d5e66416be089ab24d7b472 100644 (file)
                                };
 
                                omap4_padconf_global: omap4_padconf_global@5a0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x5a0 0x170>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index c8fd648a7108515def0e9492936fd3760f156579..b1a1263e600168291091a963f9f56aefc87fd59e 100644 (file)
                                };
 
                                omap5_padconf_global: omap5_padconf_global@5a0 {
-                                       compatible = "syscon";
+                                       compatible = "syscon",
+                                                    "simple-bus";
                                        reg = <0x5a0 0xec>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
index a75f3289e653ab2973e2d7dd1cb12c8a12724451..b8f81fb418ce60039ad4e8e04f2892ca34d26bc8 100644 (file)
 #include "skeleton.dtsi"
 
 / {
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               enable-method = "ste,dbx500-smp";
+
+               cpu-map {
+                       cluster0 {
+                               core0 {
+                                       cpu = <&CPU0>;
+                               };
+                               core1 {
+                                       cpu = <&CPU1>;
+                               };
+                       };
+               };
+               CPU0: cpu@300 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a9";
+                       reg = <0x300>;
+               };
+               CPU1: cpu@301 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a9";
+                       reg = <0x301>;
+               };
+       };
+
        soc {
                #address-cells = <1>;
                #size-cells = <1>;
                interrupt-parent = <&intc>;
                ranges;
 
-               cpus {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-
-                       cpu-map {
-                               cluster0 {
-                                       core0 {
-                                               cpu = <&CPU0>;
-                                       };
-                                       core1 {
-                                               cpu = <&CPU1>;
-                                       };
-                               };
-                       };
-                       CPU0: cpu@0 {
-                               device_type = "cpu";
-                               compatible = "arm,cortex-a9";
-                               reg = <0>;
-                       };
-                       CPU1: cpu@1 {
-                               device_type = "cpu";
-                               compatible = "arm,cortex-a9";
-                               reg = <1>;
-                       };
-               };
-
                ptm@801ae000 {
                        compatible = "arm,coresight-etm3x", "arm,primecell";
                        reg = <0x801ae000 0x1000>;
index 92828a1dec80c1c33d051d9b76063727598495d5..b48dd4f37f8067e781ee3e135ed7aff27940371f 100644 (file)
@@ -61,6 +61,7 @@ work_pending:
        movlt   scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
        ldmia   sp, {r0 - r6}                   @ have to reload r0 - r6
        b       local_restart                   @ ... and off we go
+ENDPROC(ret_fast_syscall)
 
 /*
  * "slow" syscall return path.  "why" tells us if this was a real syscall.
index bd755d97e459d77ff05cc8a1264f336c58c1b598..29e2991465cb27b579f729deec65e2293a0a04b5 100644 (file)
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
        sub     lr, r4, r5                      @ mmu has been enabled
        add     r3, r7, lr
        ldrd    r4, [r3, #0]                    @ get secondary_data.pgdir
+ARM_BE8(eor    r4, r4, r5)                     @ Swap r5 and r4 in BE:
+ARM_BE8(eor    r5, r4, r5)                     @ it can be done in 3 steps
+ARM_BE8(eor    r4, r4, r5)                     @ without using a temp reg.
        ldr     r8, [r3, #8]                    @ get secondary_data.swapper_pg_dir
        badr    lr, __enable_mmu                @ return address
        mov     r13, r12                        @ __secondary_switched address
index efe17dd9b9218b7ef16299700a0f2a6d74ca61c1..54a5aeab988d3526657b8e3089942ca8cfe4fe5e 100644 (file)
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
  */
 void update_vsyscall(struct timekeeper *tk)
 {
-       struct timespec xtime_coarse;
        struct timespec64 *wtm = &tk->wall_to_monotonic;
 
        if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
 
        vdso_write_begin(vdso_data);
 
-       xtime_coarse = __current_kernel_time();
        vdso_data->tk_is_cntvct                 = tk_is_cntvct(tk);
-       vdso_data->xtime_coarse_sec             = xtime_coarse.tv_sec;
-       vdso_data->xtime_coarse_nsec            = xtime_coarse.tv_nsec;
+       vdso_data->xtime_coarse_sec             = tk->xtime_sec;
+       vdso_data->xtime_coarse_nsec            = (u32)(tk->tkr_mono.xtime_nsec >>
+                                                       tk->tkr_mono.shift);
        vdso_data->wtm_clock_sec                = wtm->tv_sec;
        vdso_data->wtm_clock_nsec               = wtm->tv_nsec;
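
On the coarse-time fields: the timekeeper stores nanoseconds left-shifted for
precision, so the vDSO data is derived by shifting back down, taken directly
from the timekeeper being installed rather than via __current_kernel_time().
A sketch of the conversion (field values illustrative):

    /* xtime_nsec is kept shifted; recover plain nanoseconds */
    u64 xtime_nsec = 123456789ULL << 8;             /* illustrative, shift == 8 */
    u32 shift = 8;
    u32 coarse_nsec = (u32)(xtime_nsec >> shift);   /* -> 123456789 */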
 
index 3e58d710013c3ad9b377fc76e6dad58f377e88a7..4b39af2dfda9963345afe18c89131c1056a90b41 100644 (file)
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
        }
 
        /* the mmap semaphore is taken only if not in an atomic context */
-       atomic = in_atomic();
+       atomic = faulthandler_disabled();
 
        if (!atomic)
                down_read(&current->mm->mmap_sem);
index 6001f1c9d136f45fabd7d61e97638855d0beb46a..4a87e86dec45d1546153ca0ebb7310bbd5f82d93 100644 (file)
@@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void)
                pd->base = of_iomap(np, 0);
                if (!pd->base) {
                        pr_warn("%s: failed to map memory\n", __func__);
-                       kfree(pd->pd.name);
+                       kfree_const(pd->pd.name);
                        kfree(pd);
-                       of_node_put(np);
                        continue;
                }
 
index 8e52621b5a6bf3ab42ddef8a5c3db79c3391fcf1..e1d2e991d17a31fc15f1c44616c9b4b7f9de3a84 100644 (file)
@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
        .irq_mask               = wakeupgen_mask,
        .irq_unmask             = wakeupgen_unmask,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
+       .irq_set_type           = irq_chip_set_type_parent,
        .flags                  = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
index 9d259d94e429c4cc493542ad4cf238a513b13743..1160434eece0509c3797733b49e8fcb1262e42e7 100644 (file)
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 VDSO_LDFLAGS += -nostdlib -shared
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
 
 obj-$(CONFIG_VDSO) += vdso.o
 extra-$(CONFIG_VDSO) += vdso.lds
index 1670f15ef69e34972986081deb9b1f87b0bb2bb3..948f0ad2de231b5e3f5efa62e204162cadf26503 100644 (file)
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
-               if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+               if (from->si_signo == SIGBUS &&
+                   (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
                break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-       memset(to, 0, sizeof *to);
-
        if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE))
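
The added si_signo test matters because si_code values are only meaningful
relative to their signal: the same integer can denote BUS_MCEERR_AR for
SIGBUS and something unrelated for another signal, so a bare si_code test
could copy si_addr_lsb for the wrong signal. Sketch of the safe check (the
numeric collision itself is an assumption, not taken from the headers):

    /* Only trust BUS_MCEERR_* codes once the signal is known to be SIGBUS */
    if (from->si_signo == SIGBUS &&
        (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
            ;   /* only now is si_addr_lsb known to be valid */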
index ec37ab3f524f303419d2cc3a82b79c119e61de1d..97bc68f4c689f28eac7188f5e0b792b5293c37da 100644 (file)
@@ -199,16 +199,15 @@ up_fail:
  */
 void update_vsyscall(struct timekeeper *tk)
 {
-       struct timespec xtime_coarse;
        u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
 
        ++vdso_data->tb_seq_count;
        smp_wmb();
 
-       xtime_coarse = __current_kernel_time();
        vdso_data->use_syscall                  = use_syscall;
-       vdso_data->xtime_coarse_sec             = xtime_coarse.tv_sec;
-       vdso_data->xtime_coarse_nsec            = xtime_coarse.tv_nsec;
+       vdso_data->xtime_coarse_sec             = tk->xtime_sec;
+       vdso_data->xtime_coarse_nsec            = tk->tkr_mono.xtime_nsec >>
+                                                       tk->tkr_mono.shift;
        vdso_data->wtm_clock_sec                = tk->wall_to_monotonic.tv_sec;
        vdso_data->wtm_clock_nsec               = tk->wall_to_monotonic.tv_nsec;
 
index cee5f93e5712f3120d36847dc02d74709bc21929..199a8357838cb24bde2ce5ed5dec7db62feb8d2e 100644 (file)
@@ -151,7 +151,6 @@ config BMIPS_GENERIC
        select BCM7120_L2_IRQ
        select BRCMSTB_L2_IRQ
        select IRQ_MIPS_CPU
-       select RAW_IRQ_ACCESSORS
        select DMA_NONCOHERENT
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_LITTLE_ENDIAN
index 01a644f174dd08e34843ca501035b077d777bea2..1ba21204ebe021ee164a9f8f4828dd3f3c836f84 100644 (file)
@@ -190,6 +190,7 @@ int get_c0_perfcount_int(void)
 {
        return ATH79_MISC_IRQ(5);
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
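
get_c0_perfcount_int() gains an export here, and in the other platform files
below, because profiling code (oprofile/perf backends) may be built as a
module and must resolve the symbol at load time. A minimal consumer sketch
(module and message names are hypothetical):

    #include <linux/module.h>

    extern int get_c0_perfcount_int(void);

    static int __init perfint_demo_init(void)
    {
            pr_info("perf counter irq: %d\n", get_c0_perfcount_int());
            return 0;
    }
    module_init(perfint_demo_init);

    MODULE_LICENSE("GPL");   /* required: the symbol is exported _GPL */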
index 56f5d080ef9d6cb698ba70cf7027783167000284..b7fa9ae28c3659dbf457aecd7cd17255cd34f5da 100644 (file)
@@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
        cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
 
        if (action & SMP_CALL_FUNCTION)
-               smp_call_function_interrupt();
+               generic_smp_call_function_interrupt();
        if (action & SMP_RESCHEDULE_YOURSELF)
                scheduler_ipi();
 
diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644 (file)
index 11d3b57..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
-
-#include <asm/bmips.h>
-
-#define plat_post_dma_flush    bmips_post_dma_flush
-
-#include <asm/mach-generic/dma-coherence.h>
-
-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
index 9d810675814291d14ce004f9d4447678af2227c3..ae85694752644339af22557a5ae424cde8271400 100644 (file)
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
                 * Make sure the buddy is global too (if it's !none,
                 * it better already be global)
                 */
+#ifdef CONFIG_SMP
+               /*
+                * For SMP, multiple CPUs can race, so we need to do
+                * this atomically.
+                */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+               unsigned long page_global = _PAGE_GLOBAL;
+               unsigned long tmp;
+
+               __asm__ __volatile__ (
+                       "       .set    push\n"
+                       "       .set    noreorder\n"
+                       "1:     " LL_INSN "     %[tmp], %[buddy]\n"
+                       "       bnez    %[tmp], 2f\n"
+                       "        or     %[tmp], %[tmp], %[global]\n"
+                       "       " SC_INSN "     %[tmp], %[buddy]\n"
+                       "       beqz    %[tmp], 1b\n"
+                       "        nop\n"
+                       "2:\n"
+                       "       .set pop"
+                       : [buddy] "+m" (buddy->pte),
+                         [tmp] "=&r" (tmp)
+                       : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
                if (pte_none(*buddy))
                        pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
        }
 #endif
 }
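
The inline assembly above is the usual LL/SC test-and-set idiom: load the
buddy PTE, bail out if it is already populated, otherwise try to store it
back with _PAGE_GLOBAL set, retrying whenever the reservation is lost. The
semantics, sketched with cmpxchg (a sketch of the logic, not the patch's
code):

    unsigned long old, new;

    do {
            old = pte_val(*buddy);
            if (old)                  /* !pte_none: already populated,   */
                    break;            /* and hence expected to be global */
            new = old | _PAGE_GLOBAL;
    } while (cmpxchg(&buddy->pte, old, new) != old);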
index 16f1ea9ab191234ee8dc5599803b91dcc2ccf745..03722d4326a1aad05935b58805ec8d881703201e 100644 (file)
@@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
 extern void play_dead(void);
 #endif
 
-extern asmlinkage void smp_call_function_interrupt(void);
-
 static inline void arch_send_call_function_single_ipi(int cpu)
 {
        extern struct plat_smp_ops *mp_ops;     /* private */
index 28d6d9364bd1f2c431df08c72f58262e5297ec5c..a71da576883c8f4b1a3d60279ebfaefb95798031 100644 (file)
                .set    noreorder
                bltz    k0, 8f
                 move   k1, sp
+#ifdef CONFIG_EVA
+               /*
+                * Flush interAptiv's Return Prediction Stack (RPS) by writing
+                * EntryHi. Toggling Config7.RPS is slower and less portable.
+                *
+                * The RPS isn't automatically flushed when exceptions are
+                * taken, which can result in kernel mode speculative accesses
+                * to user addresses if the RPS mispredicts. That's harmless
+                * when user and kernel share the same address space, but with
+                * EVA the same user segments may be unmapped to kernel mode,
+                * even containing sensitive MMIO regions or invalid memory.
+                *
+                * This can happen when the kernel sets the return address to
+                * ret_from_* and jr's to the exception handler, which looks
+                * more like a tail call than a function call. If nested calls
+                * don't evict the last user address in the RPS, it will
+                * mispredict the return and fetch from a user controlled
+                * address into the icache.
+                *
+                * More recent EVA-capable cores with MAAR to restrict
+                * speculative accesses aren't affected.
+                */
+               MFC0    k0, CP0_ENTRYHI
+               MTC0    k0, CP0_ENTRYHI
+#endif
                .set    reorder
                /* Called from user mode, new stack. */
                get_saved_sp
index af42e7003f12d025cd31e2a5d167f2f4b158d37a..baa7b6fc0a60b1879976c2d4158f73d01e0ca53b 100644 (file)
@@ -407,7 +407,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
        .set    noat
        SAVE_ALL
        FEXPORT(handle_\exception\ext)
-       __BUILD_clear_\clear
+       __build_clear_\clear
        .set    at
        __BUILD_\verbose \exception
        move    a0, sp
index 3e4491aa6d6b2425865e1d1a3a909cf05aaa4e28..789d7bf4fef3203b3038a9ddaf20c5f70f1bc948 100644 (file)
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
                                      unsigned long __user *user_mask_ptr)
 {
        unsigned int real_len;
-       cpumask_t mask;
+       cpumask_t allowed, mask;
        int retval;
        struct task_struct *p;
 
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
        if (retval)
                goto out_unlock;
 
-       cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+       cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+       cpumask_and(&mask, &allowed, cpu_active_mask);
 
 out_unlock:
        read_unlock(&tasklist_lock);
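
On the getaffinity fix: p->thread.user_cpus_allowed is only populated once
sched_setaffinity() has been called, so it is OR-ed with the currently
effective mask before intersecting with the active CPUs; the old computation
could return an empty mask. A worked 4-CPU example (mask values made up):

    unsigned user_allowed = 0x0;   /* sched_setaffinity() never called */
    unsigned cpus_allowed = 0x4;   /* task currently bound to CPU 2    */
    unsigned active       = 0xf;   /* all four CPUs active             */

    unsigned old_mask = user_allowed & active;                  /* 0x0: bogus   */
    unsigned new_mask = (user_allowed | cpus_allowed) & active; /* 0x4: correct */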
index b130033838ba0c391ef4f9f0c7aa19e5a50b6eb7..5fcec3032f38f6aebdf668af3318997e4f53921f 100644 (file)
@@ -38,7 +38,7 @@ char *mips_get_machine_name(void)
        return mips_machine_name;
 }
 
-#ifdef CONFIG_OF
+#ifdef CONFIG_USE_OF
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
        return add_memory_region(base, size, BOOT_MEM_RAM);
index 74bab9ddd0e1984c9d4e4e95c038bb1d269b60dd..c6bbf21650515d1e71eead45b41a7729f8794476 100644 (file)
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
 
 process_entry:
        PTR_L           s2, (s0)
-       PTR_ADD         s0, s0, SZREG
+       PTR_ADDIU       s0, s0, SZREG
 
        /*
         * In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
        /* copy page word by word */
        REG_L           s5, (s2)
        REG_S           s5, (s4)
-       PTR_ADD         s4, s4, SZREG
-       PTR_ADD         s2, s2, SZREG
-       LONG_SUB        s6, s6, 1
+       PTR_ADDIU       s4, s4, SZREG
+       PTR_ADDIU       s2, s2, SZREG
+       LONG_ADDIU      s6, s6, -1
        beq             s6, zero, process_entry
        b               copy_word
        b               process_entry
index ad4d44635c7601162ca0dd8f1b626df28eeeafb2..a6f6b762c47a4c5a2d395e13a1d564964595abe1 100644 (file)
@@ -80,7 +80,7 @@ syscall_trace_entry:
        SAVE_STATIC
        move    s0, t2
        move    a0, sp
-       daddiu  a1, v0, __NR_64_Linux
+       move    a1, v0
        jal     syscall_trace_enter
 
        bltz    v0, 2f                  # seccomp failed? Skip syscall
index 446cc654da56c5f5fcaad749242dd98d593776e1..4b2010654c463158b7dee80194de736195c04595 100644 (file)
@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
        SAVE_STATIC
        move    s0, t2
        move    a0, sp
-       daddiu  a1, v0, __NR_N32_Linux
+       move    a1, v0
        jal     syscall_trace_enter
 
        bltz    v0, 2f                  # seccomp failed? Skip syscall
index 19a7705f2a015ef4b38e1cd0a16eb22c2a2d3ca3..5d7f2634996fd4920f0a4c94e00cd42fae5934b1 100644 (file)
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-       memset(to, 0, sizeof *to);
-
        if (copy_from_user(to, from, 3*sizeof(int)) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE32))
index 336708ae5c5b4c74b75416058feabb4bef5e30b1..78cf8c2f1de0e8790923d25ab6e42a85e53a6fe9 100644 (file)
@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
        if (action == 0)
                scheduler_ipi();
        else
-               smp_call_function_interrupt();
+               generic_smp_call_function_interrupt();
 
        return IRQ_HANDLED;
 }
@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
        if (action & SMP_RESCHEDULE_YOURSELF)
                scheduler_ipi();
        if (action & SMP_CALL_FUNCTION)
-               smp_call_function_interrupt();
+               generic_smp_call_function_interrupt();
 
        return IRQ_HANDLED;
 }
index d0744cc77ea7f7a02d94c96faf190787f7e88f64..a31896c33716d424bb30397c17b29af07c6728bb 100644 (file)
@@ -192,16 +192,6 @@ asmlinkage void start_secondary(void)
        cpu_startup_entry(CPUHP_ONLINE);
 }
 
-/*
- * Call into both interrupt handlers, as we share the IPI for them
- */
-void __irq_entry smp_call_function_interrupt(void)
-{
-       irq_enter();
-       generic_smp_call_function_interrupt();
-       irq_exit();
-}
-
 static void stop_this_cpu(void *dummy)
 {
        /*
index e207a43b5f8f0bcbf0544e5289cfc08126cbc7f5..8ea28e6ab37dead56439dc37871b6b18e8ec02d5 100644 (file)
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
 void show_stack(struct task_struct *task, unsigned long *sp)
 {
        struct pt_regs regs;
+       mm_segment_t old_fs = get_fs();
        if (sp) {
                regs.regs[29] = (unsigned long)sp;
                regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
                        prepare_frametrace(&regs);
                }
        }
+       /*
+        * show_stack() deals exclusively with kernel mode, so be sure to access
+        * the stack in the kernel (not user) address space.
+        */
+       set_fs(KERNEL_DS);
        show_stacktrace(task, &regs);
+       set_fs(old_fs);
 }
 
 static void show_code(unsigned int __user *pc)
@@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        int multi_match = regs->cp0_status & ST0_TS;
        enum ctx_state prev_state;
+       mm_segment_t old_fs = get_fs();
 
        prev_state = exception_enter();
        show_regs(regs);
@@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
                dump_tlb_all();
        }
 
+       if (!user_mode(regs))
+               set_fs(KERNEL_DS);
+
        show_code((unsigned int __user *) regs->cp0_epc);
 
+       set_fs(old_fs);
+
        /*
         * Some chips may have other causes of machine check (e.g. SB1
         * graduation timer)
index af84bef0c90de4bc65e14669dca132ebb7147846..eb3efd137fd17cdb6e1defa163744a480ad16185 100644 (file)
@@ -438,7 +438,7 @@ do {                                                        \
                : "memory");                                \
 } while(0)
 
-#define     StoreDW(addr, value, res) \
+#define     _StoreDW(addr, value, res) \
 do {                                                        \
                __asm__ __volatile__ (                      \
                        ".set\tpush\n\t"                    \
index 6ab10573490de8a2d5a449e45c3585547c51f7c4..2c218c3bbca57be3d029cdb3320b712092bccd46 100644 (file)
@@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-       smp_call_function_interrupt();
+       generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
 }
 
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
 {
        return ltq_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
index 509877c6e9d908d7bac6110982c7208ab69204af..1a4738a8f2d3906ccffb58bdf8d9b35ee4b04ef3 100644 (file)
@@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
        if (action & SMP_RESCHEDULE_YOURSELF)
                scheduler_ipi();
 
-       if (action & SMP_CALL_FUNCTION)
-               smp_call_function_interrupt();
+       if (action & SMP_CALL_FUNCTION) {
+               irq_enter();
+               generic_smp_call_function_interrupt();
+               irq_exit();
+       }
 
        if (action & SMP_ASK_C0COUNT) {
                BUG_ON(cpu != 0);
index 77d96db8253c422ac9e48d93e02c6b6f39b41c1b..aab218c36e0d3e2f7669c47343e583e527103169 100644 (file)
@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
                protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
-               protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+               protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-               protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+               protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
 
                protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
                protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
                protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
                protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
-               protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
+               protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
                protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
-               protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE  | _PAGE_NO_READ);
+               protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
                protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
 
        } else {
index 36c0f26fac6b0780318958a59fc2665a444a10ea..852a41c6da4507080d611dce0b1fc206caf30556 100644 (file)
@@ -133,7 +133,8 @@ good_area:
 #endif
                                goto bad_area;
                        }
-                       if (!(vma->vm_flags & VM_READ)) {
+                       if (!(vma->vm_flags & VM_READ) &&
+                           exception_epc(regs) != address) {
 #if 0
                                pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
                                          raw_smp_processor_id(),
index d1392f8f5811f65ec72445026ac12c4fe15fe6b1..fa8f591f371361ba6fe3654617e6f44690a48a25 100644 (file)
@@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-       smp_call_function_interrupt();
+       generic_smp_call_function_interrupt();
 
        return IRQ_HANDLED;
 }
index 5625b190edc061afbf2a8885e976b48014270325..b7bf721eabf5411bfb2f55b6b7d2cdc9ba887f96 100644 (file)
@@ -154,6 +154,7 @@ int get_c0_perfcount_int(void)
 
        return mips_cpu_perf_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
@@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void)
 
 static void __init init_rtc(void)
 {
-       /* stop the clock whilst setting it up */
-       CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
+       unsigned char freq, ctrl;
 
-       /* 32KHz time base */
-       CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
+       /* Set 32KHz time base if not already set */
+       freq = CMOS_READ(RTC_FREQ_SELECT);
+       if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
+               CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
 
-       /* start the clock */
-       CMOS_WRITE(RTC_24H, RTC_CONTROL);
+       /* Ensure SET bit is clear so RTC can run */
+       ctrl = CMOS_READ(RTC_CONTROL);
+       if (ctrl & RTC_SET)
+               CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
 }
 
 void __init plat_time_init(void)
index e1d69895fb1de44f5d8503027f86ebb50f40d5a6..a120b7a5a8fe4e9af03ccb40fc9e7e123f88d633 100644 (file)
@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
                return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
        return -1;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
index dc3e327fbbac105e71c6b89a039e0f79f91e6f3d..f5fff228b347b6da07d68212fb11f4ae140548c8 100644 (file)
@@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
 {
        clear_c0_eimr(irq);
        ack_c0_eirr(irq);
-       smp_call_function_interrupt();
+       generic_smp_call_function_interrupt();
        set_c0_eimr(irq);
 }
 
index 42181c7105df70992892ead68933bcd5375ab74b..f8d3e081b2ebc77e6752dc10a61a69e9a8172b3d 100644 (file)
@@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
 {
-       smp_call_function_interrupt();
+       generic_smp_call_function_interrupt();
        return IRQ_HANDLED;
 }
 
index 7c73fcb92a108799866c8d603019129d33c57ee6..8a377346f0cabbf5ce91199ca039ec013b16dda1 100644 (file)
@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
 {
        return gic_get_c0_perfcount_int();
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 int get_c0_fdc_int(void)
 {
index 10170580a2def4501bb4f149c707d050a900246f..ffa0f7101a9773ec8e24813f37e3c270d912b5e2 100644 (file)
@@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-       smp_call_function_interrupt();
+       generic_smp_call_function_interrupt();
 
        return IRQ_HANDLED;
 }
index 53707aacc0f86cb13134546de07ccaf71d76321d..8c624a8b9ea29f5611abceb531de09c865e46b7c 100644 (file)
@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
 {
        return rt_perfcount_irq;
 }
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 unsigned int get_c0_compare_int(void)
 {
index 3fbaef97a1b8d31791e8999bd222e3e1b01c3701..16ec4e12daa3fb7bed3355a5cd56cdb3c87946fc 100644 (file)
@@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void)
                scheduler_ipi();
        } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
-               smp_call_function_interrupt();
+               irq_enter();
+               generic_smp_call_function_interrupt();
+               irq_exit();
        } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
-               smp_call_function_interrupt();
+               irq_enter();
+               generic_smp_call_function_interrupt();
+               irq_exit();
        } else
 #endif
        {
index af7d44edd9a8f118b79944ecf21310634f308b56..4c71aea2566372c3f3af8627c79e91fb9aede4ea 100644 (file)
@@ -29,8 +29,6 @@
 #include <asm/sibyte/bcm1480_regs.h>
 #include <asm/sibyte/bcm1480_int.h>
 
-extern void smp_call_function_interrupt(void);
-
 /*
  * These are routines for dealing with the bcm1480 smp capabilities
  * independent of board/firmware
@@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void)
        if (action & SMP_RESCHEDULE_YOURSELF)
                scheduler_ipi();
 
-       if (action & SMP_CALL_FUNCTION)
-               smp_call_function_interrupt();
+       if (action & SMP_CALL_FUNCTION) {
+               irq_enter();
+               generic_smp_call_function_interrupt();
+               irq_exit();
+       }
 }
index c0c4b3f88a086f2c331cce0b311d9c547d3b5a6a..1cf66f5ff23d1a5afca26ffd9bc638566d8f68cb 100644 (file)
@@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void)
        if (action & SMP_RESCHEDULE_YOURSELF)
                scheduler_ipi();
 
-       if (action & SMP_CALL_FUNCTION)
-               smp_call_function_interrupt();
+       if (action & SMP_CALL_FUNCTION) {
+               irq_enter();
+               generic_smp_call_function_interrupt();
+               irq_exit();
+       }
 }
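
A recurring change in this merge: the MIPS-private smp_call_function_interrupt()
wrapper is removed, so IPI handlers either call
generic_smp_call_function_interrupt() directly (when they already run in
hardirq context) or, as in the mailbox handlers here, open-code the context
entry themselves:

    /* What the removed wrapper did, now open-coded where needed */
    if (action & SMP_CALL_FUNCTION) {
            irq_enter();                            /* account hardirq context  */
            generic_smp_call_function_interrupt();  /* run queued IPI callbacks */
            irq_exit();                             /* may kick pending softirqs */
    }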
index d3a831ac0f927e17304d55c406c60029ad55a4e0..da50e0c9c57e69af8779f0df979231104704ab6e 100644 (file)
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
 
 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
 {
-       memset(to, 0, sizeof *to);
-
        if (copy_from_user(to, from, 3*sizeof(int)) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE32))
index 2078f92d15ac90adcfec617b46df750e073b76d0..f32f843a3631359e49b88169ab8a1eed2b76b946 100644 (file)
@@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
 
 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->requests)
-               return 0;
 retry:
        kvm_s390_vcpu_request_handled(vcpu);
+       if (!vcpu->requests)
+               return 0;
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
index 1f0aa2024e94be341efc079f58145a2e70909052..6424249d5f785e698c4283677c1b975668393478 100644 (file)
  * Must preserve %o5 between VISEntryHalf and VISExitHalf */
 
 #define VISEntryHalf                                   \
-       rd              %fprs, %o5;                     \
-       andcc           %o5, FPRS_FEF, %g0;             \
-       be,pt           %icc, 297f;                     \
-        sethi          %hi(298f), %g7;                 \
-       sethi           %hi(VISenterhalf), %g1;         \
-       jmpl            %g1 + %lo(VISenterhalf), %g0;   \
-        or             %g7, %lo(298f), %g7;            \
-       clr             %o5;                            \
-297:   wr              %o5, FPRS_FEF, %fprs;           \
-298:
+       VISEntry
+
+#define VISExitHalf                                    \
+       VISExit
 
 #define VISEntryHalfFast(fail_label)                   \
        rd              %fprs, %o5;                     \
@@ -47,7 +41,7 @@
        ba,a,pt         %xcc, fail_label;               \
 297:   wr              %o5, FPRS_FEF, %fprs;
 
-#define VISExitHalf                                    \
+#define VISExitHalfFast                                        \
        wr              %o5, 0, %fprs;
 
 #ifndef __ASSEMBLY__
index 140527a20e7df03cc0a0dd9e6a3438f44b432177..83aeeb1dffdb3b4c29293d5924cd5259e2269ce5 100644 (file)
@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
        add             %o0, 0x40, %o0
        bne,pt          %icc, 1b
         LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
+#ifdef NON_USER_COPY
+       VISExitHalfFast
+#else
        VISExitHalf
-
+#endif
        brz,pn          %o2, .Lexit
         cmp            %o2, 19
        ble,pn          %icc, .Lsmall_unaligned
index b320ae9e2e2e8b27c7184f58ddc640b4adf6fdda..a063d84336d6384a03d7ddd8a5143f0909801ed8 100644 (file)
@@ -44,9 +44,8 @@ vis1: ldub            [%g6 + TI_FPSAVED], %g3
 
         stx            %g3, [%g6 + TI_GSR]
 2:     add             %g6, %g1, %g3
-       cmp             %o5, FPRS_DU
-       be,pn           %icc, 6f
-        sll            %g1, 3, %g1
+       mov             FPRS_DU | FPRS_DL | FPRS_FEF, %o5
+       sll             %g1, 3, %g1
        stb             %o5, [%g3 + TI_FPSAVED]
        rd              %gsr, %g2
        add             %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1:        ldub            [%g6 + TI_FPSAVED], %g3
        .align          32
 80:    jmpl            %g7 + %g0, %g0
         nop
-
-6:     ldub            [%g3 + TI_FPSAVED], %o5
-       or              %o5, FPRS_DU, %o5
-       add             %g6, TI_FPREGS+0x80, %g2
-       stb             %o5, [%g3 + TI_FPSAVED]
-
-       sll             %g1, 5, %g1
-       add             %g6, TI_FPREGS+0xc0, %g3
-       wr              %g0, FPRS_FEF, %fprs
-       membar          #Sync
-       stda            %f32, [%g2 + %g1] ASI_BLK_P
-       stda            %f48, [%g3 + %g1] ASI_BLK_P
-       membar          #Sync
-       ba,pt           %xcc, 80f
-        nop
-
-       .align          32
-80:    jmpl            %g7 + %g0, %g0
-        nop
-
-       .align          32
-VISenterhalf:
-       ldub            [%g6 + TI_FPDEPTH], %g1
-       brnz,a,pn       %g1, 1f
-        cmp            %g1, 1
-       stb             %g0, [%g6 + TI_FPSAVED]
-       stx             %fsr, [%g6 + TI_XFSR]
-       clr             %o5
-       jmpl            %g7 + %g0, %g0
-        wr             %g0, FPRS_FEF, %fprs
-
-1:     bne,pn          %icc, 2f
-        srl            %g1, 1, %g1
-       ba,pt           %xcc, vis1
-        sub            %g7, 8, %g7
-2:     addcc           %g6, %g1, %g3
-       sll             %g1, 3, %g1
-       andn            %o5, FPRS_DU, %g2
-       stb             %g2, [%g3 + TI_FPSAVED]
-
-       rd              %gsr, %g2
-       add             %g6, %g1, %g3
-       stx             %g2, [%g3 + TI_GSR]
-       add             %g6, %g1, %g2
-       stx             %fsr, [%g2 + TI_XFSR]
-       sll             %g1, 5, %g1
-3:     andcc           %o5, FPRS_DL, %g0
-       be,pn           %icc, 4f
-        add            %g6, TI_FPREGS, %g2
-
-       add             %g6, TI_FPREGS+0x40, %g3
-       membar          #Sync
-       stda            %f0, [%g2 + %g1] ASI_BLK_P
-       stda            %f16, [%g3 + %g1] ASI_BLK_P
-       membar          #Sync
-       ba,pt           %xcc, 4f
-        nop
-
-       .align          32
-4:     and             %o5, FPRS_DU, %o5
-       jmpl            %g7 + %g0, %g0
-        wr             %o5, FPRS_FEF, %fprs
index 1d649a95660c8cad57fbe90feadb7c43b9e8263f..8069ce12f20b13d514160cec8db0c0d88b64b27e 100644 (file)
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
 void VISenter(void);
 EXPORT_SYMBOL(VISenter);
 
-/* CRYPTO code needs this */
-void VISenterhalf(void);
-EXPORT_SYMBOL(VISenterhalf);
-
 extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
 extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
                unsigned long *);
index e8c2c04143cda81db9b51018a449b611263ab68d..c667e104a0c251d73f02ce2b812ed09878ca79a0 100644 (file)
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
        if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
                return -EFAULT;
 
-       memset(to, 0, sizeof(*to));
-
        err = __get_user(to->si_signo, &from->si_signo);
        err |= __get_user(to->si_errno, &from->si_errno);
        err |= __get_user(to->si_code, &from->si_code);
index 5a1844765a7aba6dab47b878daf6eb723c044c03..a7e257d9cb90b9f34ecb03180fec8c54f2afd82f 100644 (file)
@@ -140,6 +140,7 @@ sysexit_from_sys_call:
         */
        andl    $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        movl    RIP(%rsp), %ecx         /* User %eip */
+       movq    RAX(%rsp), %rax
        RESTORE_RSI_RDI
        xorl    %edx, %edx              /* Do not leak kernel information */
        xorq    %r8, %r8
@@ -219,7 +220,6 @@ sysexit_from_sys_call:
 1:     setbe   %al                     /* 1 if error, 0 if not */
        movzbl  %al, %edi               /* zero-extend that into %edi */
        call    __audit_syscall_exit
-       movq    RAX(%rsp), %rax         /* reload syscall return value */
        movl    $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
@@ -368,6 +368,7 @@ sysretl_from_sys_call:
        RESTORE_RSI_RDI_RDX
        movl    RIP(%rsp), %ecx
        movl    EFLAGS(%rsp), %r11d
+       movq    RAX(%rsp), %rax
        xorq    %r10, %r10
        xorq    %r9, %r9
        xorq    %r8, %r8
index 6fe6b182c9981dd891a9a5bc9a55b3e6591a6f9f..9dfce4e0417d92adc623d32ff93f67109316b451 100644 (file)
@@ -57,9 +57,9 @@ struct sigcontext {
        unsigned long ip;
        unsigned long flags;
        unsigned short cs;
-       unsigned short __pad2;  /* Was called gs, but was always zero. */
-       unsigned short __pad1;  /* Was called fs, but was always zero. */
-       unsigned short ss;
+       unsigned short gs;
+       unsigned short fs;
+       unsigned short __pad0;
        unsigned long err;
        unsigned long trapno;
        unsigned long oldmask;
index 751bf4b7bf114da12231a56f4217c2583ddeafb2..d7f3b3b78ac313ca8a871d3a53b3d118850a657d 100644 (file)
@@ -79,12 +79,12 @@ do {                                                                        \
 #else /* CONFIG_X86_32 */
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
+#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"
 
 #define __EXTRA_CLOBBER  \
        , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
-         "r12", "r13", "r14", "r15", "flags"
+         "r12", "r13", "r14", "r15"
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #define __switch_canary                                                          \
@@ -100,11 +100,7 @@ do {                                                                       \
 #define __switch_canary_iparam
 #endif /* CC_STACKPROTECTOR */
 
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL.  Kernel IOPL
- * has no effect.
- */
+/* Save and restore flags to handle the NT flag leaking across tasks */
 #define switch_to(prev, next, last) \
        asm volatile(SAVE_CONTEXT                                         \
             "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
index 0e8a973de9ee8aec0c555a5e9e8b23348e2cc10b..40836a9a7250c99a16dc5969057177f66aa57186 100644 (file)
@@ -177,24 +177,9 @@ struct sigcontext {
        __u64 rip;
        __u64 eflags;           /* RFLAGS */
        __u16 cs;
-
-       /*
-        * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
-        * Linux saved and restored fs and gs in these slots.  This
-        * was counterproductive, as fsbase and gsbase were never
-        * saved, so arch_prctl was presumably unreliable.
-        *
-        * If these slots are ever needed for any other purpose, there
-        * is some risk that very old 64-bit binaries could get
-        * confused.  I doubt that many such binaries still work,
-        * though, since the same patch in 2.5.64 also removed the
-        * 64-bit set_thread_area syscall, so it appears that there is
-        * no TLS API that works in both pre- and post-2.5.64 kernels.
-        */
-       __u16 __pad2;           /* Was gs. */
-       __u16 __pad1;           /* Was fs. */
-
-       __u16 ss;
+       __u16 gs;
+       __u16 fs;
+       __u16 __pad0;
        __u64 err;
        __u64 trapno;
        __u64 oldmask;
index f813261d97405710c99cd0982d047a227a4c4fd3..2683f36e4e0a5e67311a7bcdbceea61c17382ec6 100644 (file)
@@ -322,7 +322,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
                irq_data->chip = &lapic_controller;
                irq_data->chip_data = data;
                irq_data->hwirq = virq + i;
-               err = assign_irq_vector_policy(virq, irq_data->node, data,
+               err = assign_irq_vector_policy(virq + i, irq_data->node, data,
                                               info);
                if (err)
                        goto error;
index b9826a981fb20fa45a7c1255e277e9ad1cd5d150..6326ae24e4d5b4f3d228111c10f5c85df0e40d3f 100644 (file)
@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
        if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
                cpuc->shared_regs = allocate_shared_regs(cpu);
                if (!cpuc->shared_regs)
-                       return NOTIFY_BAD;
+                       goto err;
        }
 
        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
 
                cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
                if (!cpuc->constraint_list)
-                       return NOTIFY_BAD;
+                       goto err_shared_regs;
 
                cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
-               if (!cpuc->excl_cntrs) {
-                       kfree(cpuc->constraint_list);
-                       kfree(cpuc->shared_regs);
-                       return NOTIFY_BAD;
-               }
+               if (!cpuc->excl_cntrs)
+                       goto err_constraint_list;
+
                cpuc->excl_thread_id = 0;
        }
 
        return NOTIFY_OK;
+
+err_constraint_list:
+       kfree(cpuc->constraint_list);
+       cpuc->constraint_list = NULL;
+
+err_shared_regs:
+       kfree(cpuc->shared_regs);
+       cpuc->shared_regs = NULL;
+
+err:
+       return NOTIFY_BAD;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
index 63eb68b73589bcbbc21f9c526193adca0de2e52d..377e8f8ed39186ad4ef57b33264592ed8459a037 100644 (file)
@@ -1255,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
        cpumask_set_cpu(cpu, &cqm_cpumask);
 }
 
-static void intel_cqm_cpu_prepare(unsigned int cpu)
+static void intel_cqm_cpu_starting(unsigned int cpu)
 {
        struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1296,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
        unsigned int cpu  = (unsigned long)hcpu;
 
        switch (action & ~CPU_TASKS_FROZEN) {
-       case CPU_UP_PREPARE:
-               intel_cqm_cpu_prepare(cpu);
-               break;
        case CPU_DOWN_PREPARE:
                intel_cqm_cpu_exit(cpu);
                break;
        case CPU_STARTING:
+               intel_cqm_cpu_starting(cpu);
                cqm_pick_event_reader(cpu);
                break;
        }
@@ -1373,7 +1371,7 @@ static int __init intel_cqm_init(void)
                goto out;
 
        for_each_online_cpu(i) {
-               intel_cqm_cpu_prepare(i);
+               intel_cqm_cpu_starting(i);
                cqm_pick_event_reader(i);
        }
 
index 79de954626fd971f1d24553078bed1f199d52267..d25097c3fc1d1af8af35c156f05121f9f4d46a94 100644 (file)
@@ -270,7 +270,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
        dst_fpu->fpregs_active = 0;
        dst_fpu->last_cpu = -1;
 
-       if (src_fpu->fpstate_active)
+       if (src_fpu->fpstate_active && cpu_has_fpu)
                fpu_copy(dst_fpu, src_fpu);
 
        return 0;
index 1e173f6285c73b76b2e6ab41daed7681406c5d15..d14e9ac3235a1ac73174ffb95b2990d5163d2933 100644 (file)
@@ -40,7 +40,12 @@ static void fpu__init_cpu_generic(void)
        write_cr0(cr0);
 
        /* Flush out any pending x87 state: */
-       asm volatile ("fninit");
+#ifdef CONFIG_MATH_EMULATION
+       if (!cpu_has_fpu)
+               fpstate_init_soft(&current->thread.fpu.state.soft);
+       else
+#endif
+               asm volatile ("fninit");
 }
 
 /*
index 397688beed4be5ce7d9445d7847d44613d2d84b5..c27cad7267655c3794972344adf0b7924e38c138 100644 (file)
@@ -408,6 +408,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 static void mwait_idle(void)
 {
        if (!current_set_polling_and_test()) {
+               trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
                        smp_mb(); /* quirk */
                        clflush((void *)&current_thread_info()->flags);
@@ -419,6 +420,7 @@ static void mwait_idle(void)
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
+               trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
        }
index 206996c1669db344aba7ff072f734552723e7938..71820c42b6ce6bc1020bbc44277967d7e23f011e 100644 (file)
@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                COPY(r15);
 #endif /* CONFIG_X86_64 */
 
+#ifdef CONFIG_X86_32
                COPY_SEG_CPL3(cs);
                COPY_SEG_CPL3(ss);
+#else /* !CONFIG_X86_32 */
+               /* Kernel saves and restores only the CS segment register on signals,
+                * which is the bare minimum needed to allow mixed 32/64-bit code.
+                * App's signal handler can save/restore other segments if needed. */
+               COPY_SEG_CPL3(cs);
+#endif /* CONFIG_X86_32 */
 
                get_user_ex(tmpflags, &sc->flags);
                regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 #else /* !CONFIG_X86_32 */
                put_user_ex(regs->flags, &sc->flags);
                put_user_ex(regs->cs, &sc->cs);
-               put_user_ex(0, &sc->__pad2);
-               put_user_ex(0, &sc->__pad1);
-               put_user_ex(regs->ss, &sc->ss);
+               put_user_ex(0, &sc->gs);
+               put_user_ex(0, &sc->fs);
 #endif /* CONFIG_X86_32 */
 
                put_user_ex(fpstate, &sc->fpstate);
@@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 
        regs->sp = (unsigned long)frame;
 
-       /*
-        * Set up the CS and SS registers to run signal handlers in
-        * 64-bit mode, even if the handler happens to be interrupting
-        * 32-bit or 16-bit code.
-        *
-        * SS is subtle.  In 64-bit mode, we don't need any particular
-        * SS descriptor, but we do need SS to be valid.  It's possible
-        * that the old SS is entirely bogus -- this can happen if the
-        * signal we're trying to deliver is #GP or #SS caused by a bad
-        * SS value.
-        */
+       /* Set up the CS register to run signal handlers in 64-bit mode,
+          even if the handler happens to be interrupting 32-bit code. */
        regs->cs = __USER_CS;
-       regs->ss = __USER_DS;
 
        return 0;
 }
index 6273324186ac5ca7adba69be5ded69f23d8882f7..0ccb53a9fcd9361b83c7acd26e1f64601816a3d1 100644 (file)
@@ -28,11 +28,11 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
                struct desc_struct *desc;
                unsigned long base;
 
-               seg &= ~7UL;
+               seg >>= 3;
 
                mutex_lock(&child->mm->context.lock);
                if (unlikely(!child->mm->context.ldt ||
-                            (seg >> 3) >= child->mm->context.ldt->size))
+                            seg >= child->mm->context.ldt->size))
                        addr = -1L; /* bogus selector, access would fault */
                else {
                        desc = &child->mm->context.ldt->entries[seg];
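
An x86 segment selector packs the descriptor-table index in bits 15:3, the table indicator (GDT/LDT) in bit 2, and the requested privilege level in bits 1:0. The hunk makes step.c shift the selector down once and use that index both for the bounds check and for the entries[] access, so the two can no longer disagree. A small stand-alone decoder:

#include <stdio.h>

int main(void)
{
	unsigned int selector = 0x004f;		/* example value */
	unsigned int index = selector >> 3;	/* descriptor-table index: 9 */
	unsigned int ti = (selector >> 2) & 1;	/* 0 = GDT, 1 = LDT: here 1 */
	unsigned int rpl = selector & 3;	/* requested privilege: 3 */

	printf("index=%u ti=%u rpl=%u\n", index, ti, rpl);
	return 0;
}
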
index dc0a84a6f3094ac997701de74c868763e6843229..9e8bf13572e6dc3f95d33d24f079d63e256a3a7a 100644 (file)
@@ -672,16 +672,16 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
        if (iter.mtrr_disabled)
                return mtrr_disabled_type();
 
+       /* not contained in any MTRRs. */
+       if (type == -1)
+               return mtrr_default_type(mtrr_state);
+
        /*
         * We just check one page, partially covered by MTRRs is
         * impossible.
         */
        WARN_ON(iter.partial_map);
 
-       /* not contained in any MTRRs. */
-       if (type == -1)
-               return mtrr_default_type(mtrr_state);
-
        return type;
 }
 EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
index 5ef2560075bfb80e6fdabcdf51f71258091e4339..8f0f6eca69da1dc6db95c16782871580bf57091d 100644 (file)
@@ -2105,7 +2105,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (guest_cpuid_has_tsc_adjust(vcpu)) {
                        if (!msr_info->host_initiated) {
                                s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
-                               kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+                               adjust_tsc_offset_guest(vcpu, adj);
                        }
                        vcpu->arch.ia32_tsc_adjust_msr = data;
                }
@@ -6327,6 +6327,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 static void process_smi(struct kvm_vcpu *vcpu)
 {
        struct kvm_segment cs, ds;
+       struct desc_ptr dt;
        char buf[512];
        u32 cr0;
 
@@ -6359,6 +6360,10 @@ static void process_smi(struct kvm_vcpu *vcpu)
 
        kvm_x86_ops->set_cr4(vcpu, 0);
 
+       /* Undocumented: IDT limit is set to zero on entry to SMM.  */
+       dt.address = dt.size = 0;
+       kvm_x86_ops->set_idt(vcpu, &dt);
+
        __kvm_set_dr(vcpu, 7, DR7_FIXED_1);
 
        cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
index f37e84ab49f38e335bde57880a6cbe8640fb2c4b..3d8f2e421466a8af255eba9602748fee8753a377 100644 (file)
@@ -29,7 +29,6 @@
 
 #include <asm/uaccess.h>
 #include <asm/traps.h>
-#include <asm/desc.h>
 #include <asm/user.h>
 #include <asm/fpu/internal.h>
 
@@ -181,7 +180,7 @@ void math_emulate(struct math_emu_info *info)
                        math_abort(FPU_info, SIGILL);
                }
 
-               code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+               code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
                if (SEG_D_SIZE(code_descriptor)) {
                        /* The above test may be wrong, the book is not clear */
                        /* Segmented 32 bit protected mode */
index 9ccecb61a4fa129a82028b27edc18b91a2f99042..5e044d506b7aae8b17b2142966b11477cfe8e372 100644 (file)
 #include <linux/kernel.h>
 #include <linux/mm.h>
 
-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s)      (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+       static struct desc_struct zero_desc;
+       struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+       seg >>= 3;
+       mutex_lock(&current->mm->context.lock);
+       if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+               ret = current->mm->context.ldt->entries[seg];
+       mutex_unlock(&current->mm->context.lock);
+#endif
+       return ret;
+}
+
 #define SEG_D_SIZE(x)          ((x).b & (3 << 21))
 #define SEG_G_BIT(x)           ((x).b & (1 << 23))
 #define SEG_GRANULARITY(x)     (((x).b & (1 << 23)) ? 4096 : 1)
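
The replacement helper copies the descriptor out by value while holding the mm context lock, rather than handing back a reference into a table that another thread could resize through modify_ldt() at any moment. A userspace sketch of the copy-out-under-lock pattern, with hypothetical types (build with -lpthread):

#include <pthread.h>
#include <string.h>

struct desc { unsigned int a, b; };

static pthread_mutex_t ldt_lock = PTHREAD_MUTEX_INITIALIZER;
static struct desc *ldt;	/* may be reallocated by other threads */
static unsigned int ldt_size;

/* Return a snapshot of entry 'idx', or an all-zero descriptor. */
static struct desc get_ldt_descriptor(unsigned int idx)
{
	struct desc ret;

	memset(&ret, 0, sizeof(ret));
	pthread_mutex_lock(&ldt_lock);
	if (ldt && idx < ldt_size)
		ret = ldt[idx];		/* copy by value, not a pointer */
	pthread_mutex_unlock(&ldt_lock);
	return ret;
}

int main(void)
{
	struct desc d = get_ldt_descriptor(0);	/* table empty: all zeros */
	return (int)d.a;
}
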
index 6ef5e99380f92134ba86a6a693b5ac6d3434e6d4..8300db71c2a62681006e137350961742190ec9dc 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/stddef.h>
 
 #include <asm/uaccess.h>
-#include <asm/desc.h>
 
 #include "fpu_system.h"
 #include "exception.h"
@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
                addr->selector = PM_REG_(segment);
        }
 
-       descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+       descriptor = FPU_get_ldt_descriptor(addr->selector);
        base_address = SEG_BASE_ADDR(descriptor);
        address = base_address + offset;
        limit = base_address
index e88fda867a33b198bc356aded57d59f48fcfb4ee..484145368a241207d8aa80a5f758a7d0f3ef54cb 100644 (file)
@@ -8,7 +8,7 @@ config XEN
        select PARAVIRT_CLOCK
        select XEN_HAVE_PVMMU
        depends on X86_64 || (X86_32 && X86_PAE)
-       depends on X86_TSC
+       depends on X86_LOCAL_APIC && X86_TSC
        help
          This is the Linux Xen port.  Enabling this will allow the
          kernel to boot in a paravirtualized environment under the
@@ -17,7 +17,7 @@ config XEN
 config XEN_DOM0
        def_bool y
        depends on XEN && PCI_XEN && SWIOTLB_XEN
-       depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
+       depends on X86_IO_APIC && ACPI && PCI
 
 config XEN_PVHVM
        def_bool y
index 7322755f337af760db6086450591c584c4dcda77..4b6e29ac0968c1a76451d3ff773652bc4afed138 100644 (file)
@@ -13,13 +13,13 @@ CFLAGS_mmu.o                        := $(nostackp)
 obj-y          := enlighten.o setup.o multicalls.o mmu.o irq.o \
                        time.o xen-asm.o xen-asm_$(BITS).o \
                        grant-table.o suspend.o platform-pci-unplug.o \
-                       p2m.o
+                       p2m.o apic.o
 
 obj-$(CONFIG_EVENT_TRACING) += trace.o
 
 obj-$(CONFIG_SMP)              += smp.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS)     += debugfs.o
-obj-$(CONFIG_XEN_DOM0)         += apic.o vga.o
+obj-$(CONFIG_XEN_DOM0)         += vga.o
 obj-$(CONFIG_SWIOTLB_XEN)      += pci-swiotlb-xen.o
 obj-$(CONFIG_XEN_EFI)          += efi.o
index c20fe29e65f48b4706789e0ad59bb33b1a3acc18..2292721b1d103844ade9f8a7f436a649c811f77d 100644 (file)
@@ -101,17 +101,15 @@ struct dom0_vga_console_info;
 
 #ifdef CONFIG_XEN_DOM0
 void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
-void __init xen_init_apic(void);
 #else
 static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
                                       size_t size)
 {
 }
-static inline void __init xen_init_apic(void)
-{
-}
 #endif
 
+void __init xen_init_apic(void);
+
 #ifdef CONFIG_XEN_EFI
 extern void xen_efi_init(void);
 #else
index 12600bfffca93f4547e2325eeda9669ff443a7a7..e0057d035200c4dd5e42d191f0395a7769489905 100644 (file)
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  * Description:
  *    Enables a low level driver to set a hard upper limit,
  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
- *    the device driver based upon the combined capabilities of I/O
- *    controller and storage device.
+ *    the device driver based upon the capabilities of the I/O
+ *    controller.
  *
  *    max_sectors is a soft limit imposed by the block layer for
  *    filesystem type requests.  This value can be overridden on a
index a3da6770bc9ed2bf66d59e8e74461829eeb4fe4e..b8efe36ce1142d0c6b0b8e45ec23965ec7135c40 100644 (file)
@@ -393,8 +393,6 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
        struct scatterlist *cipher = areq_ctx->cipher;
        struct scatterlist *hsg = areq_ctx->hsg;
        struct scatterlist *tsg = areq_ctx->tsg;
-       struct scatterlist *assoc1;
-       struct scatterlist *assoc2;
        unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
        unsigned int cryptlen = req->cryptlen;
        struct page *dstp;
@@ -412,27 +410,19 @@ static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
                cryptlen += ivsize;
        }
 
-       if (sg_is_last(assoc))
-               return -EINVAL;
-
-       assoc1 = assoc + 1;
-       if (sg_is_last(assoc1))
-               return -EINVAL;
-
-       assoc2 = assoc + 2;
-       if (!sg_is_last(assoc2))
+       if (assoc->length < 12)
                return -EINVAL;
 
        sg_init_table(hsg, 2);
-       sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-       sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+       sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+       sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
        sg_init_table(tsg, 1);
-       sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+       sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
        areq_ctx->cryptlen = cryptlen;
-       areq_ctx->headlen = assoc->length + assoc2->length;
-       areq_ctx->trailen = assoc1->length;
+       areq_ctx->headlen = 8;
+       areq_ctx->trailen = 4;
        areq_ctx->sg = dst;
 
        areq_ctx->complete = authenc_esn_geniv_ahash_done;
@@ -563,8 +553,6 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
        struct scatterlist *cipher = areq_ctx->cipher;
        struct scatterlist *hsg = areq_ctx->hsg;
        struct scatterlist *tsg = areq_ctx->tsg;
-       struct scatterlist *assoc1;
-       struct scatterlist *assoc2;
        unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
        struct page *srcp;
        u8 *vsrc;
@@ -580,27 +568,19 @@ static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
                cryptlen += ivsize;
        }
 
-       if (sg_is_last(assoc))
-               return -EINVAL;
-
-       assoc1 = assoc + 1;
-       if (sg_is_last(assoc1))
-               return -EINVAL;
-
-       assoc2 = assoc + 2;
-       if (!sg_is_last(assoc2))
+       if (assoc->length < 12)
                return -EINVAL;
 
        sg_init_table(hsg, 2);
-       sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
-       sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
+       sg_set_page(hsg, sg_page(assoc), 4, assoc->offset);
+       sg_set_page(hsg + 1, sg_page(assoc), 4, assoc->offset + 8);
 
        sg_init_table(tsg, 1);
-       sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
+       sg_set_page(tsg, sg_page(assoc), 4, assoc->offset + 4);
 
        areq_ctx->cryptlen = cryptlen;
-       areq_ctx->headlen = assoc->length + assoc2->length;
-       areq_ctx->trailen = assoc1->length;
+       areq_ctx->headlen = 8;
+       areq_ctx->trailen = 4;
        areq_ctx->sg = src;
 
        areq_ctx->complete = authenc_esn_verify_ahash_done;
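
Both the generation and verification paths above now treat the associated data as one contiguous 12-byte buffer of three 4-byte fields (presumably SPI and the two sequence-number halves for ESN), building the hashed head from bytes 0-3 and 8-11 and the trailer from bytes 4-7, instead of demanding exactly three separate scatterlist entries. The byte slicing in miniature:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char assoc[12];	/* three 4-byte fields */
	unsigned char head[8], trail[4];

	for (int i = 0; i < 12; i++)
		assoc[i] = (unsigned char)i;

	memcpy(head, assoc, 4);		/* bytes 0..3  */
	memcpy(head + 4, assoc + 8, 4);	/* bytes 8..11 */
	memcpy(trail, assoc + 4, 4);	/* bytes 4..7  */

	printf("headlen=8 trailen=4, head[4]=%u trail[0]=%u\n",
	       head[4], trail[0]);	/* prints 8 and 4 */
	return 0;
}
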
index 815f75ef24119eab28ce3c0c2047295c6e464c58..2922f1f252d58aafd2d6c233404ae7ca21abb524 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 #include <acpi/video.h>
 
 ACPI_MODULE_NAME("video");
@@ -41,6 +42,7 @@ void acpi_video_unregister_backlight(void);
 
 static bool backlight_notifier_registered;
 static struct notifier_block backlight_nb;
+static struct work_struct backlight_notify_work;
 
 static enum acpi_backlight_type acpi_backlight_cmdline = acpi_backlight_undef;
 static enum acpi_backlight_type acpi_backlight_dmi = acpi_backlight_undef;
@@ -262,6 +264,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
        { },
 };
 
+/* This uses a workqueue to avoid various locking ordering issues */
+static void acpi_video_backlight_notify_work(struct work_struct *work)
+{
+       if (acpi_video_get_backlight_type() != acpi_backlight_video)
+               acpi_video_unregister_backlight();
+}
+
 static int acpi_video_backlight_notify(struct notifier_block *nb,
                                       unsigned long val, void *bd)
 {
@@ -269,9 +278,8 @@ static int acpi_video_backlight_notify(struct notifier_block *nb,
 
        /* A raw bl registering may change video -> native */
        if (backlight->props.type == BACKLIGHT_RAW &&
-           val == BACKLIGHT_REGISTERED &&
-           acpi_video_get_backlight_type() != acpi_backlight_video)
-               acpi_video_unregister_backlight();
+           val == BACKLIGHT_REGISTERED)
+               schedule_work(&backlight_notify_work);
 
        return NOTIFY_OK;
 }
@@ -304,6 +312,8 @@ enum acpi_backlight_type acpi_video_get_backlight_type(void)
                acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
                                    ACPI_UINT32_MAX, find_video, NULL,
                                    &video_caps, NULL);
+               INIT_WORK(&backlight_notify_work,
+                         acpi_video_backlight_notify_work);
                backlight_nb.notifier_call = acpi_video_backlight_notify;
                backlight_nb.priority = 0;
                if (backlight_register_notifier(&backlight_nb) == 0)
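
The acpi video change defers the unregister call from the notifier into a workqueue item, per the added comment, so the heavy work runs outside whatever locks the notifier chain is called under. A loose pthread analog of the same deferral idea - the callback only schedules, a worker does the work (names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int work_scheduled;

/* Deferred body: runs in the worker, outside notifier context. */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!work_scheduled)
		pthread_cond_wait(&cond, &lock);
	work_scheduled = 0;
	pthread_mutex_unlock(&lock);
	puts("deferred unregister runs here");
	return NULL;
}

/* Notifier: just schedules, takes no extra locks of its own. */
static void notifier(void)
{
	pthread_mutex_lock(&lock);
	work_scheduled = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	notifier();
	pthread_join(t, NULL);
	return 0;
}
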
index ce1e3a8859815ca5724e376de6d6ab0d549c9831..14b7305d2ba0b3cc24aa101e76c87e242f01f537 100644 (file)
@@ -92,7 +92,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
         * Other architectures (e.g., ARM) either do not support big endian, or
         * else leave I/O in little endian mode.
         */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                return __raw_readl(addr);
        else
                return readl_relaxed(addr);
@@ -101,7 +101,7 @@ static inline u32 brcm_sata_readreg(void __iomem *addr)
 static inline void brcm_sata_writereg(u32 val, void __iomem *addr)
 {
        /* See brcm_sata_readreg() comments */
-       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(__BIG_ENDIAN))
+       if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                __raw_writel(val, addr);
        else
                writel_relaxed(val, addr);
@@ -209,6 +209,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
                           priv->top_ctrl + SATA_TOP_CTRL_BUS_CTRL);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int brcm_ahci_suspend(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
@@ -231,6 +232,7 @@ static int brcm_ahci_resume(struct device *dev)
        brcm_sata_phys_enable(priv);
        return ahci_platform_resume(dev);
 }
+#endif
 
 static struct scsi_host_template ahci_platform_sht = {
        AHCI_SHT(DRV_NAME),
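
IS_ENABLED() only evaluates to true for macros that expand to exactly 1, which is what Kconfig emits for enabled CONFIG_* options. __BIG_ENDIAN, where the byteorder headers define it at all, expands to 4321, never to 1, so the old IS_ENABLED(__BIG_ENDIAN) test was always false and the big-endian path could never be taken; the hunks switch to the real Kconfig symbol. A self-contained demo of the trick, modeled on the kernel's kconfig.h:

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_CPU_BIG_ENDIAN 1	/* what Kconfig emits when enabled */
#define __BIG_ENDIAN 4321	/* what a byteorder header defines  */

int main(void)
{
	printf("IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) = %d\n",
	       IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));	/* 1 */
	printf("IS_ENABLED(__BIG_ENDIAN) = %d\n",
	       IS_ENABLED(__BIG_ENDIAN));		/* 0 */
	return 0;
}

When the option expands to 1, the pasted __ARG_PLACEHOLDER_1 injects an extra argument so the 1 lands in the selected slot; any other expansion (or no definition at all) leaves the 0 there.
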
index db5d9f79a247c5ceb2cb590f206927c22f6f2b7c..19bcb80b20313932021b1ee613eed97f4473e17e 100644 (file)
@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
  *     RETURNS:
  *     Block address read from @tf.
  */
-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
+u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
 {
        u64 block = 0;
 
-       if (!dev || tf->flags & ATA_TFLAG_LBA) {
+       if (tf->flags & ATA_TFLAG_LBA) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        block |= (u64)tf->hob_lbah << 40;
                        block |= (u64)tf->hob_lbam << 32;
@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
        return 0;
 }
 
-static void ata_dev_config_sense_reporting(struct ata_device *dev)
-{
-       unsigned int err_mask;
-
-       if (!ata_id_has_sense_reporting(dev->id))
-               return;
-
-       if (ata_id_sense_reporting_enabled(dev->id))
-               return;
-
-       err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
-       if (err_mask) {
-               ata_dev_dbg(dev,
-                           "failed to enable Sense Data Reporting, Emask 0x%x\n",
-                           err_mask);
-       }
-}
-
 /**
  *     ata_dev_configure - Configure the specified ATA/ATAPI device
  *     @dev: Target device to configure
@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
                                        dev->devslp_timing[i] = sata_setting[j];
                                }
                }
-               ata_dev_config_sense_reporting(dev);
+
                dev->cdb_len = 16;
        }
 
index 7465031a893c60c9e61f2c911abf218b39c81d2e..cb0508af1459ac43f4aa26f1a16d94134bd9d0bc 100644 (file)
@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
        tf->hob_lbah = buf[10];
        tf->nsect = buf[12];
        tf->hob_nsect = buf[13];
-       if (ata_id_has_ncq_autosense(dev->id))
-               tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
 
        return 0;
 }
@@ -1629,70 +1627,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
        return err_mask;
 }
 
-/**
- *     ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
- *     @dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
- *     @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
- *     @dfl_sense_key: default sense key to use
- *
- *     Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
- *     SENSE.  This function is EH helper.
- *
- *     LOCKING:
- *     Kernel thread context (may sleep).
- *
- *     RETURNS:
- *     encoded sense data on success, 0 on failure or if sense data
- *     is not available.
- */
-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
-                               struct scsi_cmnd *cmd)
-{
-       struct ata_device *dev = qc->dev;
-       struct ata_taskfile tf;
-       unsigned int err_mask;
-
-       if (!cmd)
-               return 0;
-
-       DPRINTK("ATA request sense\n");
-       ata_dev_warn(dev, "request sense\n");
-       if (!ata_id_sense_reporting_enabled(dev->id)) {
-               ata_dev_warn(qc->dev, "sense data reporting disabled\n");
-               return 0;
-       }
-       ata_tf_init(dev, &tf);
-
-       tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
-       tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
-       tf.command = ATA_CMD_REQ_SENSE_DATA;
-       tf.protocol = ATA_PROT_NODATA;
-
-       err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
-       /*
-        * ACS-4 states:
-        * The device may set the SENSE DATA AVAILABLE bit to one in the
-        * STATUS field and clear the ERROR bit to zero in the STATUS field
-        * to indicate that the command returned completion without an error
-        * and the sense data described in table 306 is available.
-        *
-        * IOW the 'ATA_SENSE' bit might not be set even though valid
-        * sense data is available.
-        * So check for both.
-        */
-       if ((tf.command & ATA_SENSE) ||
-               tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
-               ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
-               qc->flags |= ATA_QCFLAG_SENSE_VALID;
-               ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
-                            tf.lbah, tf.lbam, tf.lbal);
-       } else {
-               ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
-                            tf.command, err_mask);
-       }
-       return err_mask;
-}
-
 /**
  *     atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
  *     @dev: device to perform REQUEST_SENSE to
@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
        memcpy(&qc->result_tf, &tf, sizeof(tf));
        qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
        qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
-       if (qc->result_tf.auxiliary) {
-               char sense_key, asc, ascq;
-
-               sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
-               asc = (qc->result_tf.auxiliary >> 8) & 0xff;
-               ascq = qc->result_tf.auxiliary & 0xff;
-               ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
-                           sense_key, asc, ascq);
-               ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
-               ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
-               qc->flags |= ATA_QCFLAG_SENSE_VALID;
-       }
-
        ehc->i.err_mask &= ~AC_ERR_DEV;
 }
 
@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
                return ATA_EH_RESET;
        }
 
-       /*
-        * Sense data reporting does not work if the
-        * device fault bit is set.
-        */
-       if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
-           !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
-               if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
-                       tmp = ata_eh_request_sense(qc, qc->scsicmd);
-                       if (tmp)
-                               qc->err_mask |= tmp;
-                       else
-                               ata_scsi_set_sense_information(qc->scsicmd, tf);
-               } else {
-                       ata_dev_warn(qc->dev, "sense data available but port frozen\n");
-               }
-       }
-
-       /* Set by NCQ autosense or request sense above */
-       if (qc->flags & ATA_QCFLAG_SENSE_VALID)
-               return 0;
-
        if (stat & (ATA_ERR | ATA_DF))
                qc->err_mask |= AC_ERR_DEV;
        else
@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
 
 #ifdef CONFIG_ATA_VERBOSE_ERROR
                if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
-                                   ATA_SENSE | ATA_ERR)) {
+                                   ATA_ERR)) {
                        if (res->command & ATA_BUSY)
                                ata_dev_err(qc->dev, "status: { Busy }\n");
                        else
-                               ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
+                               ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
                                  res->command & ATA_DRDY ? "DRDY " : "",
                                  res->command & ATA_DF ? "DF " : "",
                                  res->command & ATA_DRQ ? "DRQ " : "",
-                                 res->command & ATA_SENSE ? "SENSE " : "",
                                  res->command & ATA_ERR ? "ERR " : "");
                }
 
index 641a61a59e89c00036af65d3a31fe2cf67eb22b8..0d7f0da3a26929622080f94a2a3125c63676999e 100644 (file)
@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
            ata_scsi_park_show, ata_scsi_park_store);
 EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
 
-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
 {
-       if (!cmd)
-               return;
-
        cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
        scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
 }
 
-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
-                                   const struct ata_taskfile *tf)
-{
-       u64 information;
-
-       if (!cmd)
-               return;
-
-       information = ata_tf_read_block(tf, NULL);
-       scsi_set_sense_information(cmd->sense_buffer, information);
-}
-
 static ssize_t
 ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
            ((cdb[2] & 0x20) || need_sense)) {
                ata_gen_passthru_sense(qc);
        } else {
-               if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
-                       cmd->result = SAM_STAT_CHECK_CONDITION;
-               } else if (!need_sense) {
+               if (!need_sense) {
                        cmd->result = SAM_STAT_GOOD;
                } else {
                        /* TODO: decide which descriptor format to use
index a998a175f9f144b50e4df782bbf7d1afd5f506cb..f840ca18a7c014f5151d22e4bc55dff9fca459de 100644 (file)
@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                           u64 block, u32 n_block, unsigned int tf_flags,
                           unsigned int tag);
-extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
-                            struct ata_device *dev);
+extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
 extern unsigned ata_exec_internal(struct ata_device *dev,
                                  struct ata_taskfile *tf, const u8 *cdb,
                                  int dma_dir, void *buf, unsigned int buflen,
@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
                              struct scsi_host_template *sht);
 extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
-                                          const struct ata_taskfile *tf);
 extern void ata_scsi_media_change_notify(struct ata_device *dev);
 extern void ata_scsi_hotplug(struct work_struct *work);
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
index 3a18a8a719b4ff1fa562a515b4701da241e4aeb7..fab504fd9cfd7ace54d772927a01650373d02206 100644 (file)
@@ -1238,8 +1238,12 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
        readl(mmio + PDC_SDRAM_CONTROL);
 
        /* Turn on for ECC */
-       pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
-                         PDC_DIMM_SPD_TYPE, &spd0);
+       if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+                              PDC_DIMM_SPD_TYPE, &spd0)) {
+               pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+                      PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+               return 1;
+       }
        if (spd0 == 0x02) {
                data |= (0x01 << 16);
                writel(data, mmio + PDC_SDRAM_CONTROL);
@@ -1380,8 +1384,12 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
 
        /* ECC initiliazation. */
 
-       pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
-                         PDC_DIMM_SPD_TYPE, &spd0);
+       if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
+                              PDC_DIMM_SPD_TYPE, &spd0)) {
+               pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+                      PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+               return 1;
+       }
        if (spd0 == 0x02) {
                void *buf;
                VPRINTK("Start ECC initialization\n");
index 81751a49d8bf2334612350bba52406b9af352258..56486d92c4e72bd583630baea0fba541f36c926f 100644 (file)
@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
        if (!blk)
                return -ENOMEM;
 
-       present = krealloc(rbnode->cache_present,
-                   BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
-       if (!present) {
-               kfree(blk);
-               return -ENOMEM;
+       if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
+               present = krealloc(rbnode->cache_present,
+                                  BITS_TO_LONGS(blklen) * sizeof(*present),
+                                  GFP_KERNEL);
+               if (!present) {
+                       kfree(blk);
+                       return -ENOMEM;
+               }
+
+               memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
+                      (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
+                      * sizeof(*present));
+       } else {
+               present = rbnode->cache_present;
        }
 
        /* insert the register value in the correct place in the rbnode block */
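
The regcache-rbtree fix reallocates the presence bitmap only when the block actually grows, and then zeroes just the newly added longs, since krealloc() - like realloc() - leaves the extended region uninitialized. The same pattern in plain C:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t old_words = 4, new_words = 16;
	unsigned long *bitmap = calloc(old_words, sizeof(*bitmap));

	if (!bitmap)
		return 1;

	if (new_words > old_words) {
		unsigned long *grown =
			realloc(bitmap, new_words * sizeof(*grown));
		if (!grown) {
			free(bitmap);
			return 1;
		}
		/* realloc() does not zero the tail; do it by hand. */
		memset(grown + old_words, 0,
		       (new_words - old_words) * sizeof(*grown));
		bitmap = grown;
	}

	printf("bitmap[%zu]=%lu\n", new_words - 1, bitmap[new_words - 1]);
	free(bitmap);
	return 0;
}
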
index d94529d5c8e951378eaf62d74b708edf271a550f..bc67a93aa4f4749f10d1a219789b21661c01ee21 100644 (file)
@@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
 #  define rbd_assert(expr)     ((void) 0)
 #endif /* !RBD_DEBUG */
 
+static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
        obj_request_done_set(obj_request);
 }
 
+static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
+{
+       dout("%s: obj %p\n", __func__, obj_request);
+
+       if (obj_request_img_data_test(obj_request))
+               rbd_osd_copyup_callback(obj_request);
+       else
+               obj_request_done_set(obj_request);
+}
+
 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                                struct ceph_msg *msg)
 {
@@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                rbd_osd_discard_callback(obj_request);
                break;
        case CEPH_OSD_OP_CALL:
+               rbd_osd_call_callback(obj_request);
+               break;
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                rbd_osd_trivial_callback(obj_request);
@@ -2530,13 +2543,15 @@ out_unwind:
 }
 
 static void
-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
 {
        struct rbd_img_request *img_request;
        struct rbd_device *rbd_dev;
        struct page **pages;
        u32 page_count;
 
+       dout("%s: obj %p\n", __func__, obj_request);
+
        rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
                obj_request->type == OBJ_REQUEST_NODATA);
        rbd_assert(obj_request_img_data_test(obj_request));
@@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
        if (!obj_request->result)
                obj_request->xferred = obj_request->length;
 
-       /* Finish up with the normal image object callback */
-
-       rbd_img_obj_callback(obj_request);
+       obj_request_done_set(obj_request);
 }
 
 static void
@@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
 
        /* All set, send it off. */
 
-       orig_request->callback = rbd_img_obj_copyup_callback;
        osdc = &rbd_dev->rbd_client->client->osdc;
        img_result = rbd_obj_request_submit(osdc, orig_request);
        if (!img_result)
index ced96777b677b9bcddd65bae004a7a51b5cf0dc3..954c0029fb3babc49d1a1f490f9d420934701e30 100644 (file)
@@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
                return;
        }
 
-       if (work_pending(&blkif->persistent_purge_work)) {
-               pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
+       if (work_busy(&blkif->persistent_purge_work)) {
+               pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
                return;
        }
 
index 6d89ed35d80c0caaf8bf57ba82c7e9f3a9194bb9..7a8a73f1fc0462feab5bad706573ff6eb4536ef7 100644 (file)
@@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock);
        ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
 
 static int blkfront_setup_indirect(struct blkfront_info *info);
+static int blkfront_gather_backend_features(struct blkfront_info *info);
 
 static int get_id_from_freelist(struct blkfront_info *info)
 {
@@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                                 * Add the used indirect page back to the list of
                                 * available pages for indirect grefs.
                                 */
-                               indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
-                               list_add(&indirect_page->lru, &info->indirect_pages);
+                               if (!info->feature_persistent) {
+                                       indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+                                       list_add(&indirect_page->lru, &info->indirect_pages);
+                               }
                                s->indirect_grants[i]->gref = GRANT_INVALID_REF;
                                list_add_tail(&s->indirect_grants[i]->node, &info->grants);
                        }
@@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info)
        info->shadow_free = info->ring.req_prod_pvt;
        info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
 
-       rc = blkfront_setup_indirect(info);
+       rc = blkfront_gather_backend_features(info);
        if (rc) {
                kfree(copy);
                return rc;
@@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
 
 static int blkfront_setup_indirect(struct blkfront_info *info)
 {
-       unsigned int indirect_segments, segs;
+       unsigned int segs;
        int err, i;
 
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-max-indirect-segments", "%u", &indirect_segments,
-                           NULL);
-       if (err) {
-               info->max_indirect_segments = 0;
+       if (info->max_indirect_segments == 0)
                segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
-       } else {
-               info->max_indirect_segments = min(indirect_segments,
-                                                 xen_blkif_max_segments);
+       else
                segs = info->max_indirect_segments;
-       }
 
        err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
        if (err)
@@ -1796,6 +1792,68 @@ out_of_memory:
        return -ENOMEM;
 }
 
+/*
+ * Gather all backend feature-*
+ */
+static int blkfront_gather_backend_features(struct blkfront_info *info)
+{
+       int err;
+       int barrier, flush, discard, persistent;
+       unsigned int indirect_segments;
+
+       info->feature_flush = 0;
+
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                       "feature-barrier", "%d", &barrier,
+                       NULL);
+
+       /*
+        * If there's no "feature-barrier" defined, then it means
+        * we're dealing with a very old backend which writes
+        * synchronously; nothing to do.
+        *
+        * If there are barriers, then we use flush.
+        */
+       if (!err && barrier)
+               info->feature_flush = REQ_FLUSH | REQ_FUA;
+       /*
+        * And if there is "feature-flush-cache" use that above
+        * barriers.
+        */
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                       "feature-flush-cache", "%d", &flush,
+                       NULL);
+
+       if (!err && flush)
+               info->feature_flush = REQ_FLUSH;
+
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                       "feature-discard", "%d", &discard,
+                       NULL);
+
+       if (!err && discard)
+               blkfront_setup_discard(info);
+
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                       "feature-persistent", "%u", &persistent,
+                       NULL);
+       if (err)
+               info->feature_persistent = 0;
+       else
+               info->feature_persistent = persistent;
+
+       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+                           "feature-max-indirect-segments", "%u", &indirect_segments,
+                           NULL);
+       if (err)
+               info->max_indirect_segments = 0;
+       else
+               info->max_indirect_segments = min(indirect_segments,
+                                                 xen_blkif_max_segments);
+
+       return blkfront_setup_indirect(info);
+}
+
 /*
  * Invoked when the backend is finally 'ready' (and has told produced
  * the details about the physical device - #sectors, size, etc).
@@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info)
        unsigned int physical_sector_size;
        unsigned int binfo;
        int err;
-       int barrier, flush, discard, persistent;
 
        switch (info->connected) {
        case BLKIF_STATE_CONNECTED:
@@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info)
        if (err != 1)
                physical_sector_size = sector_size;
 
-       info->feature_flush = 0;
-
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-barrier", "%d", &barrier,
-                           NULL);
-
-       /*
-        * If there's no "feature-barrier" defined, then it means
-        * we're dealing with a very old backend which writes
-        * synchronously; nothing to do.
-        *
-        * If there are barriers, then we use flush.
-        */
-       if (!err && barrier)
-               info->feature_flush = REQ_FLUSH | REQ_FUA;
-       /*
-        * And if there is "feature-flush-cache" use that above
-        * barriers.
-        */
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-flush-cache", "%d", &flush,
-                           NULL);
-
-       if (!err && flush)
-               info->feature_flush = REQ_FLUSH;
-
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-discard", "%d", &discard,
-                           NULL);
-
-       if (!err && discard)
-               blkfront_setup_discard(info);
-
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-                           "feature-persistent", "%u", &persistent,
-                           NULL);
-       if (err)
-               info->feature_persistent = 0;
-       else
-               info->feature_persistent = persistent;
-
-       err = blkfront_setup_indirect(info);
+       err = blkfront_gather_backend_features(info);
        if (err) {
                xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
                                 info->xbdev->otherend);
index fb655e8d1e3b17bf4cda9fd09593bc7dc770f78d..763301c7828c72650f2abaa1c723425bdd3c73f4 100644 (file)
@@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
        kfree(meta);
 }
 
-static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
+static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
 {
        size_t num_pages;
-       char pool_name[8];
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
 
        if (!meta)
@@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
                goto out_error;
        }
 
-       snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
        meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
@@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev,
                return -EINVAL;
 
        disksize = PAGE_ALIGN(disksize);
-       meta = zram_meta_alloc(zram->disk->first_minor, disksize);
+       meta = zram_meta_alloc(zram->disk->disk_name, disksize);
        if (!meta)
                return -ENOMEM;
 
index da8faf78536a3ae01827a2ee9480c486a04297a5..5643b65cee204d950d842529e0a12123f57e92c0 100644 (file)
@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
 static void start_khwrngd(void)
 {
        hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
-       if (hwrng_fill == ERR_PTR(-ENOMEM)) {
+       if (IS_ERR(hwrng_fill)) {
                pr_err("hwrng_fill thread creation failed");
                hwrng_fill = NULL;
        }
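
kthread_run() can fail with error pointers other than ERR_PTR(-ENOMEM) - for instance -EINTR when the creating process is killed - so testing with IS_ERR() instead of comparing against one specific value catches every failure. The encoding reserves the top 4095 addresses for negative errno values; a userspace model, simplified from the kernel's err.h:

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *p = ERR_PTR(-12 /* ENOMEM */);
	void *q = ERR_PTR(-4  /* EINTR  */);

	/* Comparing against one value misses other errors: */
	printf("q == ERR_PTR(-ENOMEM)? %d\n", q == p);		/* 0 */
	printf("IS_ERR(p)=%d IS_ERR(q)=%d\n", IS_ERR(p), IS_ERR(q));
	printf("PTR_ERR(q)=%ld\n", PTR_ERR(q));			/* -4 */
	return 0;
}
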
index 4b93a1efb36d11fa7171735d29bac283e4bb6d97..ac03ba49e9d1952dff14e9383ed86874690a7176 100644 (file)
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
 PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
 PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
 
-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
 #define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
                    div_hp, bit, is_lp, flags)                          \
        PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp,         \
index b8ff3c64cc452a16fc4108426fb6e5b1c54e91e8..c96de14036a0adebfc7628dc9f9cd5413b5c5495 100644 (file)
@@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
 {
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
+       if (!ch->cs_enabled)
+               return;
+
        sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
        pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
 }
@@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
 {
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
 
+       if (!ch->cs_enabled)
+               return;
+
        pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
        sh_cmt_start(ch, FLAG_CLOCKSOURCE);
 }
index 2d59038dec43512e40ca46f4a0989c3b1253e0af..86c7eb66bdfb2e18628997319aa047b1aedc1da1 100644 (file)
@@ -462,6 +462,7 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
        BUG_ON(!imxtm->base);
 
        imxtm->type = type;
+       imxtm->irq = irq;
 
        _mxc_timer_init(imxtm);
 }
index ae5b2bd3a9785c63646e3e922fbe17330678b481..fa3dd840a83771735e474a658a5c6516c62f76a0 100644 (file)
@@ -180,7 +180,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
                ret = exynos5250_cpufreq_init(exynos_info);
        } else {
                pr_err("%s: Unknown SoC type\n", __func__);
-               return -ENODEV;
+               ret = -ENODEV;
        }
 
        if (ret)
@@ -188,12 +188,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
 
        if (exynos_info->set_freq == NULL) {
                dev_err(&pdev->dev, "No set_freq function (ERR)\n");
+               ret = -EINVAL;
                goto err_vdd_arm;
        }
 
        arm_regulator = regulator_get(NULL, "vdd_arm");
        if (IS_ERR(arm_regulator)) {
                dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
+               ret = -EINVAL;
                goto err_vdd_arm;
        }
 
@@ -225,7 +227,7 @@ err_cpufreq_reg:
        regulator_put(arm_regulator);
 err_vdd_arm:
        kfree(exynos_info);
-       return -EINVAL;
+       return ret;
 }
 
 static struct platform_driver exynos_cpufreq_platdrv = {
index e362860c2b50c49ad5289169e68b8baa2a90197c..cd593c1f66dc8af8a6208933003783e0f37b7392 100644 (file)
@@ -20,7 +20,7 @@
 #include <asm/clock.h>
 #include <asm/idle.h>
 
-#include <asm/mach-loongson/loongson.h>
+#include <asm/mach-loongson64/loongson.h>
 
 static uint nowait;
 
index dae1e8099969a192b302703ec291da96ebac3429..f9c78751989ec865491570ed13bf19dbc6b1a799 100644 (file)
@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
                          state->buflen_1;
        u32 *sh_desc = ctx->sh_desc_fin, *desc;
        dma_addr_t ptr = ctx->sh_desc_fin_dma;
-       int sec4_sg_bytes;
+       int sec4_sg_bytes, sec4_sg_src_index;
        int digestsize = crypto_ahash_digestsize(ahash);
        struct ahash_edesc *edesc;
        int ret = 0;
        int sh_len;
 
-       sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+       sec4_sg_src_index = 1 + (buflen ? 1 : 0);
+       sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
 
        /* allocate space for base edesc and hw desc commands, link tables */
        edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
                                                buf, state->buf_dma, buflen,
                                                last_buflen);
-       (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+       (edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
 
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
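
The caam fix separates the scatter/gather entry count (sec4_sg_src_index) from the byte size (sec4_sg_bytes): the old code used the byte count in pointer arithmetic on a struct pointer, which advances by that many *entries*, landing well past the intended final element, so SEC4_SG_LEN_FIN was set on the wrong descriptor. Entries versus bytes in miniature:

#include <stdio.h>

struct sg_entry { unsigned int len; };

int main(void)
{
	struct sg_entry table[8] = {{0}};
	int entries = 2;
	int bytes = entries * (int)sizeof(struct sg_entry);

	table[entries - 1].len |= 0x1;	/* correct: marks the last entry */
	/* table[bytes - 1] would touch entry 7, not entry 1. */
	printf("entries=%d bytes=%d last index=%d (not %d)\n",
	       entries, bytes, entries - 1, bytes - 1);
	return 0;
}
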
index 7ba495f7537042f898ef1cd7cdba7a6263f2059b..402631a19a112770af83f0f4228176703e1c0b44 100644 (file)
@@ -905,7 +905,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
                crypt->mode |= NPE_OP_NOT_IN_PLACE;
                /* This was never tested by Intel
                 * for more than one dst buffer, I think. */
-               BUG_ON(req->dst->length < nbytes);
                req_ctx->dst = NULL;
                if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
                                        flags, DMA_FROM_DEVICE))
index 08f8d5cd633491e3ff0e28ca8204d7f51be2b05b..becb738c897b1b5d93b632e3ab80ed2b146ead5a 100644 (file)
@@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        struct sha256_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-       struct nx_sg *in_sg;
        struct nx_sg *out_sg;
        u64 to_process = 0, leftover, total;
        unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-       in_sg = nx_ctx->in_sg;
        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                        nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
@@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
        }
 
        do {
-               /*
-                * to_process: the SHA256_BLOCK_SIZE data chunk to process in
-                * this update. This value is also restricted by the sg list
-                * limits.
-                */
-               to_process = total - to_process;
-               to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+               int used_sgs = 0;
+               struct nx_sg *in_sg = nx_ctx->in_sg;
 
                if (buf_len) {
                        data_len = buf_len;
-                       in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                       in_sg = nx_build_sg_list(in_sg,
                                                 (u8 *) sctx->buf,
                                                 &data_len,
                                                 max_sg_len);
@@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
                                rc = -EINVAL;
                                goto out;
                        }
+                       used_sgs = in_sg - nx_ctx->in_sg;
                }
 
+               /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
+                * processed in this iteration. This value is restricted
+                * by sg list limits and number of sgs we already used
+                * for leftover data. (see above)
+                * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+                * but because data may not be aligned, we need to account
+                * for that too. */
+               to_process = min_t(u64, total,
+                       (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+               to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+
                data_len = to_process - buf_len;
                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         &data_len, max_sg_len);
 
                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 
-               to_process = (data_len + buf_len);
+               to_process = data_len + buf_len;
                leftover = total - to_process;
 
                /*
index aff0fe58eac0b7aba11b465a192c280ef19fdbac..b6e183d58d73d5a4e38fff2925344783e8e581bc 100644 (file)
@@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        struct sha512_state *sctx = shash_desc_ctx(desc);
        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-       struct nx_sg *in_sg;
        struct nx_sg *out_sg;
        u64 to_process, leftover = 0, total;
        unsigned long irq_flags;
@@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
-       in_sg = nx_ctx->in_sg;
        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
                        nx_driver.of.max_sg_len/sizeof(struct nx_sg));
        max_sg_len = min_t(u64, max_sg_len,
@@ -114,18 +112,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
        }
 
        do {
-               /*
-                * to_process: the SHA512_BLOCK_SIZE data chunk to process in
-                * this update. This value is also restricted by the sg list
-                * limits.
-                */
-               to_process = total - leftover;
-               to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
-               leftover = total - to_process;
+               int used_sgs = 0;
+               struct nx_sg *in_sg = nx_ctx->in_sg;
 
                if (buf_len) {
                        data_len = buf_len;
-                       in_sg = nx_build_sg_list(nx_ctx->in_sg,
+                       in_sg = nx_build_sg_list(in_sg,
                                                 (u8 *) sctx->buf,
                                                 &data_len, max_sg_len);
 
@@ -133,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
                                rc = -EINVAL;
                                goto out;
                        }
+                       used_sgs = in_sg - nx_ctx->in_sg;
                }
 
+               /* to_process: SHA512_BLOCK_SIZE aligned chunk to be
+                * processed in this iteration. This value is restricted
+                * by sg list limits and number of sgs we already used
+                * for leftover data. (see above)
+                * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
+                * but because data may not be aligned, we need to account
+                * for that too. */
+               to_process = min_t(u64, total,
+                       (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
+               to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+
                data_len = to_process - buf_len;
                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
                                         &data_len, max_sg_len);
@@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
                        goto out;
                }
 
-               to_process = (data_len + buf_len);
+               to_process = data_len + buf_len;
                leftover = total - to_process;
 
                /*
index 067402c7c2a93fdc02ca3242f918deba460dae91..df427c0e9e7b2c99c8ee6cbe0c91b98c1ff47c43 100644 (file)
@@ -73,7 +73,8 @@
                                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
                                       ICP_QAT_HW_CIPHER_DECRYPT)
 
-static atomic_t active_dev;
+static DEFINE_MUTEX(algs_lock);
+static unsigned int active_devs;
 
 struct qat_alg_buf {
        uint32_t len;
@@ -1280,7 +1281,10 @@ static struct crypto_alg qat_algs[] = { {
 
 int qat_algs_register(void)
 {
-       if (atomic_add_return(1, &active_dev) == 1) {
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (++active_devs == 1) {
                int i;
 
                for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
@@ -1289,21 +1293,25 @@ int qat_algs_register(void)
                                CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
                                CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
 
-               return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+               ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
        }
-       return 0;
+       mutex_unlock(&algs_lock);
+       return ret;
 }
 
 int qat_algs_unregister(void)
 {
-       if (atomic_sub_return(1, &active_dev) == 0)
-               return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
-       return 0;
+       int ret = 0;
+
+       mutex_lock(&algs_lock);
+       if (--active_devs == 0)
+               ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       mutex_unlock(&algs_lock);
+       return ret;
 }
 
 int qat_algs_init(void)
 {
-       atomic_set(&active_dev, 0);
        crypto_get_default_rng();
        return 0;
 }
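The qat conversion above trades a racy atomic counter for a mutex, so that testing the device count and registering or unregistering the algorithms happen as one critical section. A minimal standalone model of the same first-in/last-out pattern, with pthreads standing in for the kernel mutex and register_backend()/unregister_backend() as hypothetical stand-ins:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t algs_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int active_devs;

    static int register_backend(void)   { puts("register");   return 0; }
    static int unregister_backend(void) { puts("unregister"); return 0; }

    /* Count and action share one lock: no second device can observe
     * active_devs == 1 before registration has actually completed. */
    int algs_register(void)
    {
            int ret = 0;

            pthread_mutex_lock(&algs_lock);
            if (++active_devs == 1)
                    ret = register_backend();
            pthread_mutex_unlock(&algs_lock);
            return ret;
    }

    int algs_unregister(void)
    {
            int ret = 0;

            pthread_mutex_lock(&algs_lock);
            if (--active_devs == 0)
                    ret = unregister_backend();
            pthread_mutex_unlock(&algs_lock);
            return ret;
    }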
index 4a4cce15f25dd65c6a720d949ed0d9c922ff1cba..3ff284c8e3d5aef72f229017c883c73cbe13403f 100644 (file)
@@ -689,6 +689,10 @@ struct dma_chan *dma_request_slave_channel(struct device *dev,
        struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
        if (IS_ERR(ch))
                return NULL;
+
+       dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
+       ch->device->privatecnt++;
+
        return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
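Here dma_request_slave_channel() starts marking the returned channel DMA_PRIVATE and counting it in privatecnt, presumably so the release path can drop the flag again once the last private user is gone. The counted-flag idea in isolation (illustrative names, not the dmaengine API):

    struct chan_owner_state {
            unsigned int privatecnt; /* live private users of the device */
            unsigned int is_private; /* set while any private user exists */
    };

    static void grab_private(struct chan_owner_state *s)
    {
            s->is_private = 1;      /* hide device from the public allocator */
            s->privatecnt++;
    }

    static void release_private(struct chan_owner_state *s)
    {
            /* only the last release makes the device public again */
            if (--s->privatecnt == 0)
                    s->is_private = 0;
    }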
index 3515b381c1312612f56953bc267ee7d5d23b0f84..711d8ad74f116ebdcc7fd3833fbc0672c7a6359b 100644 (file)
@@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
         */
 
        for (row = 0; row < mci->nr_csrows; row++) {
-               struct csrow_info *csi = &mci->csrows[row];
+               struct csrow_info *csi = mci->csrows[row];
 
                /*
                 * Get the configuration settings for this
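The small change above tracks the EDAC core's move of csrows from an embedded array to an array of pointers: with struct csrow_info **csrows, mci->csrows[row] already is the row pointer, while &mci->csrows[row] would be the address of the slot. In miniature:

    struct row { int id; };

    struct ctl {
            struct row **rows;   /* one pointer per chip-select row */
    };

    static struct row *get_row(struct ctl *c, int i)
    {
            /* correct: return the stored pointer;
             * &c->rows[i] would be a struct row ** instead */
            return c->rows[i];
    }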
index 080d5cc2705529962d2a62b17fe3f597b5bc41e3..eebdf2a33bfe4b84e1fc1886e7222641f4a56122 100644 (file)
@@ -200,7 +200,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
        status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
        if (status) {
                dev_err(&pdev->dev, "failed to register extcon device\n");
-               kfree(palmas_usb->edev->name);
                return status;
        }
 
@@ -214,7 +213,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
                if (status < 0) {
                        dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
                                        palmas_usb->id_irq, status);
-                       kfree(palmas_usb->edev->name);
                        return status;
                }
        }
@@ -229,7 +227,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
                if (status < 0) {
                        dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
                                        palmas_usb->vbus_irq, status);
-                       kfree(palmas_usb->edev->name);
                        return status;
                }
        }
@@ -239,15 +236,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int palmas_usb_remove(struct platform_device *pdev)
-{
-       struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
-
-       kfree(palmas_usb->edev->name);
-
-       return 0;
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int palmas_usb_suspend(struct device *dev)
 {
@@ -288,7 +276,6 @@ static const struct of_device_id of_palmas_match_tbl[] = {
 
 static struct platform_driver palmas_usb_driver = {
        .probe = palmas_usb_probe,
-       .remove = palmas_usb_remove,
        .driver = {
                .name = "palmas-usb",
                .of_match_table = of_palmas_match_tbl,
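Every kfree(palmas_usb->edev->name) above disappears, along with the whole remove() callback, because the extcon device came from a devm_ helper: the driver core releases it (name included) on detach, and freeing it by hand was a double free. A toy model of why devm-managed probe paths simply return on error:

    #include <stdlib.h>

    typedef void (*release_fn)(void *);

    struct devres { release_fn rel; void *res; struct devres *next; };
    struct toy_device { struct devres *list; };

    /* devm-style registration: ownership moves to the device */
    static void toy_devm_add(struct toy_device *dev, release_fn rel,
                             void *res)
    {
            struct devres *dr = malloc(sizeof(*dr));

            dr->rel = rel;
            dr->res = res;
            dr->next = dev->list;
            dev->list = dr;
    }

    /* run by the core on detach: the driver never frees these itself,
     * so an extra kfree() in the driver would free the resource twice */
    static void toy_device_detach(struct toy_device *dev)
    {
            while (dev->list) {
                    struct devres *dr = dev->list;

                    dev->list = dr->next;
                    dr->rel(dr->res);
                    free(dr);
            }
    }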
index 76157ab9faf3ad84a16e738a4e338ad1bded8e3c..43b57b02d050d197fe7994ea744231b7a580eb23 100644 (file)
@@ -124,25 +124,35 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
        return -EINVAL;
 }
 
-static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
 {
-       unsigned int id = EXTCON_NONE;
+       int id = -EINVAL;
        int i = 0;
 
-       if (edev->max_supported == 0)
-               return -EINVAL;
-
-       /* Find the the number of extcon cable */
+       /* Find the id of the extcon cable */
        while (extcon_name[i]) {
                if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
                        id = i;
                        break;
                }
+               i++;
        }
 
-       if (id == EXTCON_NONE)
+       return id;
+}
+
+static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
+{
+       int id;
+
+       if (edev->max_supported == 0)
                return -EINVAL;
 
+       /* Find the id of the extcon cable */
+       id = find_cable_id_by_name(edev, name);
+       if (id < 0)
+               return id;
+
        return find_cable_index_by_id(edev, id);
 }
 
@@ -228,9 +238,11 @@ static ssize_t cable_state_show(struct device *dev,
        struct extcon_cable *cable = container_of(attr, struct extcon_cable,
                                                  attr_state);
 
+       int i = cable->cable_index;
+
        return sprintf(buf, "%d\n",
                       extcon_get_cable_state_(cable->edev,
-                                              cable->cable_index));
+                                              cable->edev->supported_cable[i]));
 }
 
 /**
@@ -263,20 +275,25 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
        spin_lock_irqsave(&edev->lock, flags);
 
        if (edev->state != ((edev->state & ~mask) | (state & mask))) {
+               u32 old_state;
+
                if (check_mutually_exclusive(edev, (edev->state & ~mask) |
                                                   (state & mask))) {
                        spin_unlock_irqrestore(&edev->lock, flags);
                        return -EPERM;
                }
 
-               for (index = 0; index < edev->max_supported; index++) {
-                       if (is_extcon_changed(edev->state, state, index, &attached))
-                               raw_notifier_call_chain(&edev->nh[index], attached, edev);
-               }
-
+               old_state = edev->state;
                edev->state &= ~mask;
                edev->state |= state & mask;
 
+               for (index = 0; index < edev->max_supported; index++) {
+                       if (is_extcon_changed(old_state, edev->state, index,
+                                             &attached))
+                               raw_notifier_call_chain(&edev->nh[index],
+                                                       attached, edev);
+               }
+
                /* This could be in interrupt handler */
                prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
                if (prop_buf) {
@@ -361,8 +378,13 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
  */
 int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
 {
-       return extcon_get_cable_state_(edev, find_cable_index_by_name
-                                               (edev, cable_name));
+       int id;
+
+       id = find_cable_id_by_name(edev, cable_name);
+       if (id < 0)
+               return id;
+
+       return extcon_get_cable_state_(edev, id);
 }
 EXPORT_SYMBOL_GPL(extcon_get_cable_state);
 
@@ -404,8 +426,13 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
 int extcon_set_cable_state(struct extcon_dev *edev,
                        const char *cable_name, bool cable_state)
 {
-       return extcon_set_cable_state_(edev, find_cable_index_by_name
-                                       (edev, cable_name), cable_state);
+       int id;
+
+       id = find_cable_id_by_name(edev, cable_name);
+       if (id < 0)
+               return id;
+
+       return extcon_set_cable_state_(edev, id, cable_state);
 }
 EXPORT_SYMBOL_GPL(extcon_set_cable_state);
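Worth noting in extcon_update_state() above: edev->state is committed first and the notifier chain is walked afterwards against a saved old_state, so callbacks that read edev->state observe the new value while the changed-bit test still compares old against new. That ordering in a standalone form (is_changed()/notify() are stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t state;

    static int is_changed(uint32_t old, uint32_t new, int bit)
    {
            return ((old >> bit) & 1) != ((new >> bit) & 1);
    }

    static void notify(int bit, int attached)
    {
            /* a real listener may read `state` here; it sees the new value */
            printf("bit %d -> %d (state %#x)\n", bit, attached, state);
    }

    void update_state(uint32_t mask, uint32_t bits, int nbits)
    {
            uint32_t old_state = state;

            state = (state & ~mask) | (bits & mask);  /* commit first */

            for (int bit = 0; bit < nbits; bit++)
                    if (is_changed(old_state, state, bit))
                            notify(bit, (state >> bit) & 1);
    }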
 
index 87add3fdce529b1ad6890cb24adb345275e4655c..e41594510b978291de179182c46b0196b3a377e3 100644 (file)
@@ -245,4 +245,4 @@ char *bcm47xx_nvram_get_contents(size_t *nvram_size)
 }
 EXPORT_SYMBOL(bcm47xx_nvram_get_contents);
 
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
index 31b00f91cfcd5a04848be288837d6d90c0110f44..f7b49d5ce4b81d471fa3c84280560b9d0e774c78 100644 (file)
@@ -1130,6 +1130,9 @@ struct amdgpu_gfx {
        uint32_t                        me_feature_version;
        uint32_t                        ce_feature_version;
        uint32_t                        pfp_feature_version;
+       uint32_t                        rlc_feature_version;
+       uint32_t                        mec_feature_version;
+       uint32_t                        mec2_feature_version;
        struct amdgpu_ring              gfx_ring[AMDGPU_MAX_GFX_RINGS];
        unsigned                        num_gfx_rings;
        struct amdgpu_ring              compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -1639,6 +1642,7 @@ struct amdgpu_sdma {
        /* SDMA firmware */
        const struct firmware   *fw;
        uint32_t                fw_version;
+       uint32_t                feature_version;
 
        struct amdgpu_ring      ring;
 };
index 9736892bcdf932c328a883473a6c3e23d560cd38..3bfe67de834904628e0e4e11677c706c4848fde7 100644 (file)
@@ -317,16 +317,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        break;
                case AMDGPU_INFO_FW_GFX_RLC:
                        fw_info.ver = adev->gfx.rlc_fw_version;
-                       fw_info.feature = 0;
+                       fw_info.feature = adev->gfx.rlc_feature_version;
                        break;
                case AMDGPU_INFO_FW_GFX_MEC:
-                       if (info->query_fw.index == 0)
+                       if (info->query_fw.index == 0) {
                                fw_info.ver = adev->gfx.mec_fw_version;
-                       else if (info->query_fw.index == 1)
+                               fw_info.feature = adev->gfx.mec_feature_version;
+                       } else if (info->query_fw.index == 1) {
                                fw_info.ver = adev->gfx.mec2_fw_version;
-                       else
+                               fw_info.feature = adev->gfx.mec2_feature_version;
+                       } else
                                return -EINVAL;
-                       fw_info.feature = 0;
                        break;
                case AMDGPU_INFO_FW_SMC:
                        fw_info.ver = adev->pm.fw_version;
@@ -336,7 +337,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        if (info->query_fw.index >= 2)
                                return -EINVAL;
                        fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
-                       fw_info.feature = 0;
+                       fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
                        break;
                default:
                        return -EINVAL;
index 2f7a5efa21c23ab0fda25ee0ebbb360efae966ea..f5c22556ec2c17ff145c48440dfe5e3563e67606 100644 (file)
@@ -374,7 +374,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
        unsigned height_in_mb = ALIGN(height / 16, 2);
        unsigned fs_in_mb = width_in_mb * height_in_mb;
 
-       unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
+       unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size;
 
        image_size = width * height;
        image_size += image_size / 2;
@@ -466,6 +466,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
 
                num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
                min_dpb_size = image_size * num_dpb_buffer;
+               min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
+                                          * 16 * num_dpb_buffer + 52 * 1024;
                break;
 
        default:
@@ -486,6 +488,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
 
        buf_sizes[0x1] = dpb_size;
        buf_sizes[0x2] = image_size;
+       buf_sizes[0x4] = min_ctx_size;
        return 0;
 }
 
@@ -628,6 +631,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
                        return -EINVAL;
                }
 
+       } else if (cmd == 0x206) {
+               if ((end - start) < ctx->buf_sizes[4]) {
+                       DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
+                                         (unsigned)(end - start),
+                                         ctx->buf_sizes[4]);
+                       return -EINVAL;
+               }
        } else if ((cmd != 0x100) && (cmd != 0x204)) {
                DRM_ERROR("invalid UVD command %X!\n", cmd);
                return -EINVAL;
@@ -755,9 +765,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
        struct amdgpu_uvd_cs_ctx ctx = {};
        unsigned buf_sizes[] = {
                [0x00000000]    =       2048,
-               [0x00000001]    =       32 * 1024 * 1024,
-               [0x00000002]    =       2048 * 1152 * 3,
+               [0x00000001]    =       0xFFFFFFFF,
+               [0x00000002]    =       0xFFFFFFFF,
                [0x00000003]    =       2048,
+               [0x00000004]    =       0xFFFFFFFF,
        };
        struct amdgpu_ib *ib = &parser->ibs[ib_idx];
        int r;
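To put a number on the new buf_sizes[0x4] check, here is the min_ctx_size formula above evaluated for a hypothetical 1920x1080 stream with num_dpb_buffer = 4 (values chosen for illustration, not taken from the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned w = 1920, h = 1080, num_dpb_buffer = 4;

            /* ((w + 255) / 16) = 135 and ((h + 255) / 16) = 83 */
            unsigned min_ctx_size = ((w + 255) / 16) * ((h + 255) / 16)
                                    * 16 * num_dpb_buffer + 52 * 1024;

            printf("%u\n", min_ctx_size);  /* 770368 bytes, ~752 KiB */
            return 0;
    }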
index ab83cc1ca4cc04865b0bf918c410a4351496fb22..15df46c93f0a3d9e0810b9018ba761bfaa2cc418 100644 (file)
@@ -500,6 +500,7 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
                adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+               adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
                fw_data = (const __le32 *)
                        (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
                WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
index 2db6ab0a543dada20b64d3d5d89eb4d5f36a1873..0d8bf2cb195603b8be90346a58eabfee62670d23 100644 (file)
@@ -3080,6 +3080,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
        mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
        amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
        adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
+       adev->gfx.mec_feature_version = le32_to_cpu(
+                                       mec_hdr->ucode_feature_version);
 
        gfx_v7_0_cp_compute_enable(adev, false);
 
@@ -3102,6 +3104,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
                mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
                amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
                adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
+               adev->gfx.mec2_feature_version = le32_to_cpu(
+                               mec2_hdr->ucode_feature_version);
 
                /* MEC2 */
                fw_data = (const __le32 *)
@@ -4066,6 +4070,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
        hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
        amdgpu_ucode_print_rlc_hdr(&hdr->header);
        adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
+       adev->gfx.rlc_feature_version = le32_to_cpu(
+                                       hdr->ucode_feature_version);
 
        gfx_v7_0_rlc_stop(adev);
 
@@ -5122,7 +5128,7 @@ static void gfx_v7_0_print_status(void *handle)
                dev_info(adev->dev, "  CP_HPD_EOP_CONTROL=0x%08X\n",
                         RREG32(mmCP_HPD_EOP_CONTROL));
 
-               for (queue = 0; queue < 8; i++) {
+               for (queue = 0; queue < 8; queue++) {
                        cik_srbm_select(adev, me, pipe, queue, 0);
                        dev_info(adev->dev, "  queue: %d\n", queue);
                        dev_info(adev->dev, "  CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
index 9e1d4ddbf475027e10c6e0d6d77a63efb4eec3b3..20e2cfd521d5352202070f357de89234175cb800 100644 (file)
@@ -587,6 +587,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        int err;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
+       const struct gfx_firmware_header_v1_0 *cp_hdr;
 
        DRM_DEBUG("\n");
 
@@ -611,6 +612,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
        if (err)
                goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+       adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
        err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -619,6 +623,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        err = amdgpu_ucode_validate(adev->gfx.me_fw);
        if (err)
                goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+       adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
        err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -627,12 +634,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        err = amdgpu_ucode_validate(adev->gfx.ce_fw);
        if (err)
                goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+       adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
        err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
+       adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
        err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -641,6 +654,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        err = amdgpu_ucode_validate(adev->gfx.mec_fw);
        if (err)
                goto out;
+       cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+       adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
+       adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
        err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -648,6 +664,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
                err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
                if (err)
                        goto out;
+               cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+                                               adev->gfx.mec2_fw->data;
+               adev->gfx.mec2_fw_version = le32_to_cpu(
+                                               cp_hdr->header.ucode_version);
+               adev->gfx.mec2_feature_version = le32_to_cpu(
+                                               cp_hdr->ucode_feature_version);
        } else {
                err = 0;
                adev->gfx.mec2_fw = NULL;
@@ -1983,6 +2005,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
                adev->gfx.config.max_shader_engines = 1;
                adev->gfx.config.max_tile_pipes = 2;
                adev->gfx.config.max_sh_per_se = 1;
+               adev->gfx.config.max_backends_per_se = 2;
 
                switch (adev->pdev->revision) {
                case 0xc4:
@@ -1991,7 +2014,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
                case 0xcc:
                        /* B10 */
                        adev->gfx.config.max_cu_per_sh = 8;
-                       adev->gfx.config.max_backends_per_se = 2;
                        break;
                case 0xc5:
                case 0x81:
@@ -2000,14 +2022,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
                case 0xcd:
                        /* B8 */
                        adev->gfx.config.max_cu_per_sh = 6;
-                       adev->gfx.config.max_backends_per_se = 2;
                        break;
                case 0xc6:
                case 0xca:
                case 0xce:
                        /* B6 */
                        adev->gfx.config.max_cu_per_sh = 6;
-                       adev->gfx.config.max_backends_per_se = 2;
                        break;
                case 0xc7:
                case 0x87:
@@ -2015,7 +2035,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
                default:
                        /* B4 */
                        adev->gfx.config.max_cu_per_sh = 4;
-                       adev->gfx.config.max_backends_per_se = 1;
                        break;
                }
 
@@ -2275,7 +2294,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
 
        hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
        amdgpu_ucode_print_rlc_hdr(&hdr->header);
-       adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
        fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
                           le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -2361,12 +2379,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
        amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
        amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
        amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
-       adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
-       adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
-       adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
-       adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
-       adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
-       adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
 
        gfx_v8_0_cp_gfx_enable(adev, false);
 
@@ -2622,7 +2634,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
 
        mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
        amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
-       adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
 
        fw_data = (const __le32 *)
                (adev->gfx.mec_fw->data +
@@ -2641,7 +2652,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
 
                mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
                amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
-               adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
 
                fw_data = (const __le32 *)
                        (adev->gfx.mec2_fw->data +
@@ -3125,7 +3135,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
                                WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
                                       AMDGPU_DOORBELL_KIQ << 2);
                                WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
-                                               0x7FFFF << 2);
+                                      AMDGPU_DOORBELL_MEC_RING7 << 2);
                        }
                        tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
                        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
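The recurring move in this file is to cast the firmware blob to its little-endian header and cache the CPU-endian version fields once in init_microcode(), instead of re-deriving them on every microcode load (the same pattern repeats in the sdma hunks below). Standalone, with the header layout as a simplified stand-in for the real gfx_firmware_header:

    #include <stdint.h>

    struct fw_header {                       /* fields stored little-endian */
            uint32_t ucode_version;
            uint32_t ucode_feature_version;
    };

    struct fw_info { uint32_t fw_version, feature_version; };

    /* endian-safe load of a little-endian u32 from the blob */
    static uint32_t load_le32(const void *p)
    {
            const uint8_t *b = p;

            return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
                   (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
    }

    /* parse once, right after the blob is validated; later consumers
     * read the cached copies instead of re-walking the header */
    static void parse_header(struct fw_info *info, const void *blob)
    {
            const struct fw_header *hdr = blob;

            info->fw_version      = load_le32(&hdr->ucode_version);
            info->feature_version = load_le32(&hdr->ucode_feature_version);
    }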
index d7895885fe0cf3b3e7cd5d1ae52f291053420b17..a988dfb1d3942e9246361bfd7b97bdabc5e5286c 100644 (file)
@@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
        int err, i;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
+       const struct sdma_firmware_header_v1_0 *hdr;
 
        DRM_DEBUG("\n");
 
@@ -142,6 +143,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
                err = amdgpu_ucode_validate(adev->sdma[i].fw);
                if (err)
                        goto out;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+               adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+               adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
 
                if (adev->firmware.smu_load) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -541,8 +545,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
                        hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
                        amdgpu_ucode_print_sdma_hdr(&hdr->header);
                        fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-                       adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-
                        fw_data = (const __le32 *)
                                (adev->sdma[i].fw->data +
                                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
index 7bb37b93993fb5312eb2d46189bf09bf789c3989..2b86569b18d3656c87975175a1ff771599c958d6 100644 (file)
@@ -159,6 +159,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
        int err, i;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
+       const struct sdma_firmware_header_v1_0 *hdr;
 
        DRM_DEBUG("\n");
 
@@ -183,6 +184,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
                err = amdgpu_ucode_validate(adev->sdma[i].fw);
                if (err)
                        goto out;
+               hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+               adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+               adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
 
                if (adev->firmware.smu_load) {
                        info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -630,8 +634,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
                hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
                amdgpu_ucode_print_sdma_hdr(&hdr->header);
                fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-               adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-
                fw_data = (const __le32 *)
                        (adev->sdma[i].fw->data +
                                le32_to_cpu(hdr->header.ucode_array_offset_bytes));
index 6fad1f9648f38870b2162cb74a6320f50c34aabc..ef6182bc8e5eef229dc990354f6d31c18915f3bc 100644 (file)
@@ -559,7 +559,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
index aac212297b49fb1953dab1749f23dcd77977bcb7..9dcc7280e5720255baed2786ab7d8fc11554c845 100644 (file)
@@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
        }
 
        funcs = connector->helper_private;
-       new_encoder = funcs->best_encoder(connector);
+
+       if (funcs->atomic_best_encoder)
+               new_encoder = funcs->atomic_best_encoder(connector,
+                                                        connector_state);
+       else
+               new_encoder = funcs->best_encoder(connector);
 
        if (!new_encoder) {
                DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -229,13 +234,14 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
                }
        }
 
+       if (WARN_ON(!connector_state->crtc))
+               return -EINVAL;
+
        connector_state->best_encoder = new_encoder;
-       if (connector_state->crtc) {
-               idx = drm_crtc_index(connector_state->crtc);
+       idx = drm_crtc_index(connector_state->crtc);
 
-               crtc_state = state->crtc_states[idx];
-               crtc_state->mode_changed = true;
-       }
+       crtc_state = state->crtc_states[idx];
+       crtc_state->mode_changed = true;
 
        DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
                         connector->base.id,
index 778bbb6425b80c9c8affddad58d993a93755c39e..eb603f1defc2250ea158864ea4371e24655138e1 100644 (file)
@@ -873,9 +873,10 @@ static void drm_dp_destroy_port(struct kref *kref)
                   from an EDID retrieval */
                if (port->connector) {
                        mutex_lock(&mgr->destroy_connector_lock);
-                       list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
+                       list_add(&port->next, &mgr->destroy_connector_list);
                        mutex_unlock(&mgr->destroy_connector_lock);
                        schedule_work(&mgr->destroy_connector_work);
+                       return;
                }
                drm_dp_port_teardown_pdt(port, port->pdt);
 
@@ -1294,7 +1295,6 @@ retry:
                                goto retry;
                        }
                        DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
-                       WARN(1, "fail\n");
 
                        return -EIO;
                }
@@ -2660,7 +2660,7 @@ static void drm_dp_tx_work(struct work_struct *work)
 static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
-       struct drm_connector *connector;
+       struct drm_dp_mst_port *port;
 
        /*
         * Not a regular list traverse as we have to drop the destroy
@@ -2669,15 +2669,21 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
         */
        for (;;) {
                mutex_lock(&mgr->destroy_connector_lock);
-               connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
-               if (!connector) {
+               port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
+               if (!port) {
                        mutex_unlock(&mgr->destroy_connector_lock);
                        break;
                }
-               list_del(&connector->destroy_list);
+               list_del(&port->next);
                mutex_unlock(&mgr->destroy_connector_lock);
 
-               mgr->cbs->destroy_connector(mgr, connector);
+               mgr->cbs->destroy_connector(mgr, port->connector);
+
+               drm_dp_port_teardown_pdt(port, port->pdt);
+
+               if (!port->input && port->vcpi.vcpi > 0)
+                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+               kfree(port);
        }
 }
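The MST fix moves the heavyweight teardown into the worker: ports are pushed onto a mutex-protected list, and the worker pops one entry at a time so the lock is never held across the destroy callback. Modeled standalone with pthreads in place of the kernel primitives:

    #include <pthread.h>
    #include <stdlib.h>

    struct port { struct port *next; };

    static pthread_mutex_t destroy_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct port *destroy_list;

    static void defer_destroy(struct port *p)
    {
            pthread_mutex_lock(&destroy_lock);
            p->next = destroy_list;
            destroy_list = p;
            pthread_mutex_unlock(&destroy_lock);
            /* the kernel version would schedule_work() here */
    }

    static void destroy_worker(void)
    {
            for (;;) {
                    struct port *p;

                    pthread_mutex_lock(&destroy_lock);
                    p = destroy_list;
                    if (p)
                            destroy_list = p->next;
                    pthread_mutex_unlock(&destroy_lock);
                    if (!p)
                            break;

                    free(p);  /* teardown runs with the lock dropped */
            }
    }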
 
index f9cc68fbd2a3e18b076ad2ced71b8f1ddf002202..b50fa0afd9071f6c64c36de23253a2ee22ce7480 100644 (file)
@@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600)
 module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
 
 static void store_vblank(struct drm_device *dev, int crtc,
-                        unsigned vblank_count_inc,
+                        u32 vblank_count_inc,
                         struct timeval *t_vblank)
 {
        struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
index 842d6b8dc3c435ee7d836402d4169847aef75d31..2a652359af644b51f257cde7528d70b6016897da 100644 (file)
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
        spin_lock_init(&ctx->lock);
        platform_set_drvdata(pdev, ctx);
 
-       pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
 
        ret = exynos_drm_ippdrv_register(ippdrv);
index 8040ed2a831f9a6f226baf8aee3ce00b213be8e6..f1c6b76c127f4db02388267775431fcd25ac7eb8 100644 (file)
@@ -593,8 +593,7 @@ static int gsc_src_set_transf(struct device *dev,
 
        gsc_write(cfg, GSC_IN_CON);
 
-       ctx->rotation = cfg &
-               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
        *swap = ctx->rotation;
 
        return 0;
@@ -857,8 +856,7 @@ static int gsc_dst_set_transf(struct device *dev,
 
        gsc_write(cfg, GSC_IN_CON);
 
-       ctx->rotation = cfg &
-               (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
+       ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
        *swap = ctx->rotation;
 
        return 0;
index 99e286489031c4a2931565823e0158428548aef2..4a00990e4ae4e8459b94a9a007044af1cc11af62 100644 (file)
@@ -1064,6 +1064,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
 {
        struct hdmi_context *hdata = ctx_from_connector(connector);
        struct edid *edid;
+       int ret;
 
        if (!hdata->ddc_adpt)
                return -ENODEV;
@@ -1079,7 +1080,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
 
        drm_mode_connector_update_edid_property(connector, edid);
 
-       return drm_add_edid_modes(connector, edid);
+       ret = drm_add_edid_modes(connector, edid);
+
+       kfree(edid);
+
+       return ret;
 }
 
 static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
index cae98db3306205e2628b2090b731cf6cfdf79d4f..4706b56902b44f5ba205b30d3aa6e53678bbad52 100644 (file)
@@ -718,6 +718,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
        /* handling VSYNC */
        if (val & MXR_INT_STATUS_VSYNC) {
+               /* the vsync interrupt uses different bits for read and clear */
+               val |= MXR_INT_CLEAR_VSYNC;
+               val &= ~MXR_INT_STATUS_VSYNC;
+
                /* interlace scan need to check shadow register */
                if (ctx->interlace) {
                        base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -743,11 +747,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
 out:
        /* clear interrupts */
-       if (~val & MXR_INT_EN_VSYNC) {
-               /* vsync interrupt use different bit for read and clear */
-               val &= ~MXR_INT_EN_VSYNC;
-               val |= MXR_INT_CLEAR_VSYNC;
-       }
        mixer_reg_write(res, MXR_INT_STATUS, val);
 
        spin_unlock(&res->reg_slock);
@@ -907,8 +906,8 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
        }
 
        /* enable vsync interrupt */
-       mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
-                       MXR_INT_EN_VSYNC);
+       mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
+       mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
 
        return 0;
 }
@@ -918,7 +917,13 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
        struct mixer_context *mixer_ctx = crtc->ctx;
        struct mixer_resources *res = &mixer_ctx->mixer_res;
 
+       if (!mixer_ctx->powered) {
+               mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
+               return;
+       }
+
        /* disable vsync interrupt */
+       mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
        mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
 }
 
@@ -1047,6 +1052,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
 
        mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
 
+       if (ctx->int_en & MXR_INT_EN_VSYNC)
+               mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
        mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
        mixer_win_reset(ctx);
 }
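The mixer changes all orbit one hardware quirk: the VSYNC condition is read at one bit position but acknowledged by writing a different, write-one-to-clear bit. A generic acknowledgment sequence with made-up bit positions:

    #include <stdint.h>

    #define INT_STATUS_VSYNC (1u << 0)  /* read: interrupt pending */
    #define INT_CLEAR_VSYNC  (1u << 8)  /* write 1 here to acknowledge */

    static volatile uint32_t int_status_reg;  /* stand-in for the MMIO register */

    static void ack_vsync(void)
    {
            uint32_t val = int_status_reg;

            if (val & INT_STATUS_VSYNC) {
                    val &= ~INT_STATUS_VSYNC; /* don't write the read-only bit back */
                    val |= INT_CLEAR_VSYNC;   /* raise the clear companion bit */
                    int_status_reg = val;     /* hardware drops the pending bit */
            }
    }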
index fe1599d75f14e39b2a39364b78d088d4715c368f..424228be79ae5b2aa1557ca07331e4e49e665ef8 100644 (file)
@@ -606,8 +606,6 @@ static void
 tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
                 uint8_t *buf, size_t size)
 {
-       buf[PB(0)] = tda998x_cksum(buf, size);
-
        reg_clear(priv, REG_DIP_IF_FLAGS, bit);
        reg_write_range(priv, addr, buf, size);
        reg_set(priv, REG_DIP_IF_FLAGS, bit);
@@ -627,6 +625,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
        buf[PB(4)] = p->audio_frame[4];
        buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
 
+       buf[PB(0)] = tda998x_cksum(buf, sizeof(buf));
+
        tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
                         sizeof(buf));
 }
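Hoisting the checksum out of tda998x_write_if() and into tda998x_write_aif() means it is computed only after every payload byte is in place; the old call site summed a half-filled buffer. HDMI-style infoframe checksums make the whole buffer sum to zero mod 256, roughly like this (a sketch; the driver's tda998x_cksum may differ in detail):

    #include <stddef.h>
    #include <stdint.h>

    /* choose byte 0 so the sum over the whole buffer is 0 mod 256 */
    static uint8_t infoframe_cksum(const uint8_t *buf, size_t size)
    {
            unsigned int sum = 0;

            for (size_t i = 1; i < size; i++)  /* skip the checksum slot */
                    sum += buf[i];
            return (uint8_t)(0x100 - (sum & 0xff));
    }

    /* usage: fill buf[1..size-1] completely, then
     *     buf[0] = infoframe_cksum(buf, size);
     * and only then hand the buffer to the hardware */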
index 7ed8033aae6097af69d90e83bb6c97f7dc6f7225..8e35e0d013df556d8ac04fc27f9ba2bd7354fae3 100644 (file)
@@ -129,8 +129,9 @@ int intel_atomic_commit(struct drm_device *dev,
                        struct drm_atomic_state *state,
                        bool async)
 {
-       int ret;
-       int i;
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc *crtc;
+       int ret, i;
 
        if (async) {
                DRM_DEBUG_KMS("i915 does not yet support async commit\n");
@@ -142,48 +143,18 @@ int intel_atomic_commit(struct drm_device *dev,
                return ret;
 
        /* Point of no return */
-
-       /*
-        * FIXME:  The proper sequence here will eventually be:
-        *
-        * drm_atomic_helper_swap_state(dev, state)
-        * drm_atomic_helper_commit_modeset_disables(dev, state);
-        * drm_atomic_helper_commit_planes(dev, state);
-        * drm_atomic_helper_commit_modeset_enables(dev, state);
-        * drm_atomic_helper_wait_for_vblanks(dev, state);
-        * drm_atomic_helper_cleanup_planes(dev, state);
-        * drm_atomic_state_free(state);
-        *
-        * once we have full atomic modeset.  For now, just manually update
-        * plane states to avoid clobbering good states with dummy states
-        * while nuclear pageflipping.
-        */
-       for (i = 0; i < dev->mode_config.num_total_plane; i++) {
-               struct drm_plane *plane = state->planes[i];
-
-               if (!plane)
-                       continue;
-
-               plane->state->state = state;
-               swap(state->plane_states[i], plane->state);
-               plane->state->state = NULL;
-       }
+       drm_atomic_helper_swap_state(dev, state);
 
        /* swap crtc_scaler_state */
-       for (i = 0; i < dev->mode_config.num_crtc; i++) {
-               struct drm_crtc *crtc = state->crtcs[i];
-               if (!crtc) {
-                       continue;
-               }
-
-               to_intel_crtc(crtc)->config->scaler_state =
-                       to_intel_crtc_state(state->crtc_states[i])->scaler_state;
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
 
                if (INTEL_INFO(dev)->gen >= 9)
                        skl_detach_scalers(to_intel_crtc(crtc));
+
+               drm_atomic_helper_commit_planes_on_crtc(crtc_state);
        }
 
-       drm_atomic_helper_commit_planes(dev, state);
        drm_atomic_helper_wait_for_vblanks(dev, state);
        drm_atomic_helper_cleanup_planes(dev, state);
        drm_atomic_state_free(state);
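With the open-coded plane swapping gone, i915's commit now follows the front half of the helper sequence that the deleted FIXME spelled out. As a schematic only (not compilable in isolation; the commented lines are the full-modeset steps i915 did not yet take here):

    static int atomic_commit_skeleton(struct drm_device *dev,
                                      struct drm_atomic_state *state)
    {
            drm_atomic_helper_swap_state(dev, state);  /* point of no return */
            /* drm_atomic_helper_commit_modeset_disables(dev, state); */
            drm_atomic_helper_commit_planes(dev, state);
            /* drm_atomic_helper_commit_modeset_enables(dev, state); */
            drm_atomic_helper_wait_for_vblanks(dev, state);
            drm_atomic_helper_cleanup_planes(dev, state);
            drm_atomic_state_free(state);
            return 0;
    }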
index 30e0f54ba19d1284107958bb6e5d49f6309b63de..87476ff181ddbef0967d948c37119cfcbd758315 100644 (file)
@@ -11826,7 +11826,9 @@ encoder_retry:
                goto encoder_retry;
        }
 
-       pipe_config->dither = pipe_config->pipe_bpp != base_bpp;
+       /* Dithering does not seem to pass bits through correctly when it
+        * should, so only enable it on 6bpc panels. */
+       pipe_config->dither = pipe_config->pipe_bpp == 6*3;
        DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
                      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
 
@@ -12624,17 +12626,17 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
 
        modeset_update_crtc_power_domains(state);
 
-       drm_atomic_helper_commit_planes(dev, state);
-
        /* Now enable the clocks, plane, pipe, and connectors that we set up. */
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               if (!needs_modeset(crtc->state) || !crtc->state->enable)
+               if (!needs_modeset(crtc->state) || !crtc->state->enable) {
+                       drm_atomic_helper_commit_planes_on_crtc(crtc_state);
                        continue;
+               }
 
                update_scanline_offset(to_intel_crtc(crtc));
 
                dev_priv->display.crtc_enable(crtc);
-               intel_crtc_enable_planes(crtc);
+               drm_atomic_helper_commit_planes_on_crtc(crtc_state);
        }
 
        /* FIXME: add subpixel order */
@@ -12891,20 +12893,11 @@ intel_modeset_stage_output_state(struct drm_device *dev,
        return 0;
 }
 
-static bool primary_plane_visible(struct drm_crtc *crtc)
-{
-       struct intel_plane_state *plane_state =
-               to_intel_plane_state(crtc->primary->state);
-
-       return plane_state->visible;
-}
-
 static int intel_crtc_set_config(struct drm_mode_set *set)
 {
        struct drm_device *dev;
        struct drm_atomic_state *state = NULL;
        struct intel_crtc_state *pipe_config;
-       bool primary_plane_was_visible;
        int ret;
 
        BUG_ON(!set);
@@ -12943,38 +12936,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 
        intel_update_pipe_size(to_intel_crtc(set->crtc));
 
-       primary_plane_was_visible = primary_plane_visible(set->crtc);
-
        ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
 
-       if (ret == 0 &&
-           pipe_config->base.enable &&
-           pipe_config->base.planes_changed &&
-           !needs_modeset(&pipe_config->base)) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
-
-               /*
-                * We need to make sure the primary plane is re-enabled if it
-                * has previously been turned off.
-                */
-               if (ret == 0 && !primary_plane_was_visible &&
-                   primary_plane_visible(set->crtc)) {
-                       WARN_ON(!intel_crtc->active);
-                       intel_post_enable_primary(set->crtc);
-               }
-
-               /*
-                * In the fastboot case this may be our only check of the
-                * state after boot.  It would be better to only do it on
-                * the first update, but we don't have a nice way of doing that
-                * (and really, set_config isn't used much for high freq page
-                * flipping, so increasing its cost here shouldn't be a big
-                * deal).
-                */
-               if (i915.fastboot && ret == 0)
-                       intel_modeset_check_state(set->crtc->dev);
-       }
-
        if (ret) {
                DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
                              set->crtc->base.id, ret);
@@ -13305,6 +13268,9 @@ intel_check_primary_plane(struct drm_plane *plane,
                         */
                        if (IS_BROADWELL(dev))
                                intel_crtc->atomic.wait_vblank = true;
+
+                       if (crtc_state)
+                               intel_crtc->atomic.post_enable_primary = true;
                }
 
                /*
@@ -13317,6 +13283,10 @@ intel_check_primary_plane(struct drm_plane *plane,
                if (!state->visible || !fb)
                        intel_crtc->atomic.disable_ips = true;
 
+               if (!state->visible && old_state->visible &&
+                   crtc_state && !needs_modeset(&crtc_state->base))
+                       intel_crtc->atomic.pre_disable_primary = true;
+
                intel_crtc->atomic.fb_bits |=
                        INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 
@@ -15034,6 +15004,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                struct intel_plane_state *plane_state;
 
                memset(crtc->config, 0, sizeof(*crtc->config));
+               crtc->config->base.crtc = &crtc->base;
 
                crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
 
index 6e8faa25379240cab60f57631adf42612f28df33..1df0e1fe235f112830a0e82a1004b0e949613c78 100644 (file)
@@ -93,9 +93,6 @@ static const struct dp_link_dpll chv_dpll[] = {
 
 static const int skl_rates[] = { 162000, 216000, 270000,
                                  324000, 432000, 540000 };
-static const int chv_rates[] = { 162000, 202500, 210000, 216000,
-                                243000, 270000, 324000, 405000,
-                                420000, 432000, 540000 };
 static const int default_rates[] = { 162000, 270000, 540000 };
 
 /**
@@ -1169,24 +1166,31 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
        return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
 }
 
+static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+{
+       /* WaDisableHBR2:skl */
+       if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+               return false;
+
+       if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+           (INTEL_INFO(dev)->gen >= 9))
+               return true;
+       else
+               return false;
+}
+
 static int
 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
        if (IS_SKYLAKE(dev)) {
                *source_rates = skl_rates;
                return ARRAY_SIZE(skl_rates);
-       } else if (IS_CHERRYVIEW(dev)) {
-               *source_rates = chv_rates;
-               return ARRAY_SIZE(chv_rates);
        }
 
        *source_rates = default_rates;
 
-       if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
-               /* WaDisableHBR2:skl */
-               return (DP_LINK_BW_2_7 >> 3) + 1;
-       else if (INTEL_INFO(dev)->gen >= 8 ||
-           (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+       /* This depends on the fact that 5.4 is the last value in the array */
+       if (intel_dp_source_supports_hbr2(dev))
                return (DP_LINK_BW_5_4 >> 3) + 1;
        else
                return (DP_LINK_BW_2_7 >> 3) + 1;
@@ -3941,10 +3945,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
                }
        }
 
-       /* Training Pattern 3 support, both source and sink */
+       /* Training Pattern 3 support: on Intel platforms, only those that
+        * support HBR2 also support TP3, hence that check is used along with
+        * the DPCD check to ensure TP3 can be enabled.
+        * SKL < B0 is the only exception: it supports TP3, but due to
+        * WaDisableHBR2 it is still not enabled.
+        */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
            intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
-           (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
+           intel_dp_source_supports_hbr2(dev)) {
                intel_dp->use_tps3 = true;
                DRM_DEBUG_KMS("Displayport TPS3 supported\n");
        } else
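The "5.4 is the last value in the array" remark leans on the DP link-rate encoding: DP_LINK_BW_1_62, DP_LINK_BW_2_7 and DP_LINK_BW_5_4 are 0x06, 0x0a and 0x14, so (bw >> 3) + 1 gives 1, 2 or 3 — exactly how many leading entries of default_rates[] are usable. Checked standalone:

    #include <stdio.h>

    #define DP_LINK_BW_1_62 0x06
    #define DP_LINK_BW_2_7  0x0a
    #define DP_LINK_BW_5_4  0x14

    static const int default_rates[] = { 162000, 270000, 540000 };

    int main(void)
    {
            /* (bw >> 3) + 1 == usable entry count in default_rates[] */
            printf("%d %d %d\n",
                   (DP_LINK_BW_1_62 >> 3) + 1,  /* 1 -> {162000} */
                   (DP_LINK_BW_2_7  >> 3) + 1,  /* 2 -> +270000  */
                   (DP_LINK_BW_5_4  >> 3) + 1); /* 3 -> +540000  */
            printf("top rate: %d\n", default_rates[DP_LINK_BW_5_4 >> 3]);
            return 0;
    }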
index 6e4cc5334f47d7105b60c0bec72fccfb4875ddac..600afdbef8c9a434f51d527c5d85e202c36bae2b 100644 (file)
@@ -357,6 +357,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
+static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
+                                                        struct drm_connector_state *state)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct intel_dp *intel_dp = intel_connector->mst_port;
+       struct intel_crtc *crtc = to_intel_crtc(state->crtc);
+
+       return &intel_dp->mst_encoders[crtc->pipe]->base.base;
+}
+
 static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -367,6 +377,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto
 static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
        .get_modes = intel_dp_mst_get_modes,
        .mode_valid = intel_dp_mst_mode_valid,
+       .atomic_best_encoder = intel_mst_atomic_best_encoder,
        .best_encoder = intel_mst_best_encoder,
 };
 
index 9b74ffae5f5a7bab8ef545525d361c29fd4c3bf6..7f2161a1ff5d4d40c7b6dca346c20518b084bf6b 100644 (file)
@@ -1012,6 +1012,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
                ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
                if (ret)
                        goto unpin_ctx_obj;
+
+               ctx_obj->dirty = true;
        }
 
        return ret;
index 52c22b02600598cfa7d18e424d69a99cce4879e7..e10f9644140f5d9fcd6e73446c74634d2b13906a 100644 (file)
@@ -165,31 +165,15 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
        return 0;
 }
 
-static int
-gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
-{
-       struct nvkm_object *obj = (void *)chan;
-       struct gk104_fifo_priv *priv = (void *)obj->engine;
-
-       nv_wr32(priv, 0x002634, chan->base.chid);
-       if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
-               nv_error(priv, "channel %d [%s] kick timeout\n",
-                        chan->base.chid, nvkm_client_name(chan));
-               return -EBUSY;
-       }
-
-       return 0;
-}
-
 static int
 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                          struct nvkm_object *object)
 {
        struct nvkm_bar *bar = nvkm_bar(parent);
+       struct gk104_fifo_priv *priv = (void *)parent->engine;
        struct gk104_fifo_base *base = (void *)parent->parent;
        struct gk104_fifo_chan *chan = (void *)parent;
        u32 addr;
-       int ret;
 
        switch (nv_engidx(object->engine)) {
        case NVDEV_ENGINE_SW    : return 0;
@@ -204,9 +188,13 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
                return -EINVAL;
        }
 
-       ret = gk104_fifo_chan_kick(chan);
-       if (ret && suspend)
-               return ret;
+       nv_wr32(priv, 0x002634, chan->base.chid);
+       if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+               nv_error(priv, "channel %d [%s] kick timeout\n",
+                        chan->base.chid, nvkm_client_name(chan));
+               if (suspend)
+                       return -EBUSY;
+       }
 
        if (addr) {
                nv_wo32(base, addr + 0x00, 0x00000000);
@@ -331,7 +319,6 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
                gk104_fifo_runlist_update(priv, chan->engine);
        }
 
-       gk104_fifo_chan_kick(chan);
        nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
        return nvkm_fifo_channel_fini(&chan->base, suspend);
 }
index 1162bfa464f3036192854f4d0f3f363eb1ee8cff..171d3e43c30cc02257df75645c6c77f0a726de33 100644 (file)
@@ -79,6 +79,11 @@ static void radeon_hotplug_work_func(struct work_struct *work)
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
 
+       /* we can race here at startup; some boards seem to trigger
+        * hotplug irqs when they shouldn't. */
+       if (!rdev->mode_info.mode_config_initialized)
+               return;
+
        mutex_lock(&mode_config->mutex);
        if (mode_config->num_connector) {
                list_for_each_entry(connector, &mode_config->connector_list, head)
index 654c8daeb5ab3d0dd84a2ed1d32af633d6955ac9..97ad3bcb99a75a441a54150f779415dc59236ac7 100644 (file)
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
                                     true, NULL);
        if (unlikely(ret != 0))
-               goto out_err;
+               goto out_err_nores;
 
        ret = vmw_validate_buffers(dev_priv, sw_context);
        if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        vmw_resource_relocations_free(&sw_context->res_relocations);
 
        vmw_fifo_commit(dev_priv, command_size);
+       mutex_unlock(&dev_priv->binding_mutex);
 
        vmw_query_bo_switch_commit(dev_priv, sw_context);
        ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
                DRM_ERROR("Fence submission error. Syncing.\n");
 
        vmw_resource_list_unreserve(&sw_context->resource_list, false);
-       mutex_unlock(&dev_priv->binding_mutex);
 
        ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
                                    (void *) fence);
index 3511bbaba505a4524ad382297ec1e486e21e7e48..e3c63640df737d5527c6d2609622417a4e03c8d3 100644 (file)
@@ -462,12 +462,15 @@ out:
 
 static void hidinput_cleanup_battery(struct hid_device *dev)
 {
+       const struct power_supply_desc *psy_desc;
+
        if (!dev->battery)
                return;
 
+       psy_desc = dev->battery->desc;
        power_supply_unregister(dev->battery);
-       kfree(dev->battery->desc->name);
-       kfree(dev->battery->desc);
+       kfree(psy_desc->name);
+       kfree(psy_desc);
        dev->battery = NULL;
 }
 #else  /* !CONFIG_HID_BATTERY_STRENGTH */
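The hid-input change is a textbook use-after-free repair: power_supply_unregister() releases dev->battery, so its ->desc must be fetched into a local before the unregister and the kfree()s done through that local. The shape of the fix, generically:

    #include <stdlib.h>

    struct desc { char *name; };
    struct battery { struct desc *desc; };

    /* stand-in for power_supply_unregister(): invalidates bat */
    static void unregister_battery(struct battery *bat) { free(bat); }

    static void cleanup_battery(struct battery *bat)
    {
            struct desc *d = bat->desc;   /* cache before bat is freed */

            unregister_battery(bat);
            /* bat must not be touched past this point */
            free(d->name);
            free(d);
    }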
index 94167310e15a4c95001d339d4da3b7319f9005eb..b905d501e752d607b6fc1731ad89ff3d23ce7cd0 100644 (file)
@@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
        for (p = drvdata->rdesc;
             p <= drvdata->rdesc + drvdata->rsize - 4;) {
                if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
-                   p[3] < sizeof(params)) {
+                   p[3] < ARRAY_SIZE(params)) {
                        v = params[p[3]];
                        put_unaligned(cpu_to_le32(v), (s32 *)p);
                        p += 4;
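The uclogic fix is the classic sizeof-versus-element-count bug: params is an array of multi-byte values, so sizeof(params) gives the byte size and lets the index p[3] run past the end. In isolation:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            int params[4] = { 10, 20, 30, 40 };
            unsigned idx = 9;

            printf("sizeof = %zu, elements = %zu\n",
                   sizeof(params), ARRAY_SIZE(params));  /* 16 vs 4 */

            if (idx < ARRAY_SIZE(params))  /* idx < sizeof(params) would pass! */
                    printf("%d\n", params[idx]);
            return 0;
    }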
index 44958d79d598dfc3a7e6938a2babbf3e1fdc2188..01b937e63cf37ec1424a1aad9eee0caef682c010 100644 (file)
@@ -1284,6 +1284,39 @@ fail_register_pen_input:
        return error;
 }
 
+/*
+ * Not all devices report physical dimensions from HID.
+ * Compute the default from hardcoded logical dimension
+ * and resolution before driver overwrites them.
+ */
+static void wacom_set_default_phy(struct wacom_features *features)
+{
+       if (features->x_resolution) {
+               features->x_phy = (features->x_max * 100) /
+                                       features->x_resolution;
+               features->y_phy = (features->y_max * 100) /
+                                       features->y_resolution;
+       }
+}
+
+static void wacom_calculate_res(struct wacom_features *features)
+{
+       /* set unit to "100th of a mm" for devices not reported by HID */
+       if (!features->unit) {
+               features->unit = 0x11;
+               features->unitExpo = -3;
+       }
+
+       features->x_resolution = wacom_calc_hid_res(features->x_max,
+                                                   features->x_phy,
+                                                   features->unit,
+                                                   features->unitExpo);
+       features->y_resolution = wacom_calc_hid_res(features->y_max,
+                                                   features->y_phy,
+                                                   features->unit,
+                                                   features->unitExpo);
+}
+
 static void wacom_wireless_work(struct work_struct *work)
 {
        struct wacom *wacom = container_of(work, struct wacom, work);
@@ -1341,6 +1374,8 @@ static void wacom_wireless_work(struct work_struct *work)
                if (wacom_wac1->features.type != INTUOSHT &&
                    wacom_wac1->features.type != BAMBOO_PT)
                        wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
+               wacom_set_default_phy(&wacom_wac1->features);
+               wacom_calculate_res(&wacom_wac1->features);
                snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
                         wacom_wac1->features.name);
                snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
@@ -1359,7 +1394,9 @@ static void wacom_wireless_work(struct work_struct *work)
                        wacom_wac2->features =
                                *((struct wacom_features *)id->driver_data);
                        wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
+                       wacom_set_default_phy(&wacom_wac2->features);
                        wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
+                       wacom_calculate_res(&wacom_wac2->features);
                        snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
                                 "%s (WL) Finger",wacom_wac2->features.name);
                        snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
@@ -1407,39 +1444,6 @@ void wacom_battery_work(struct work_struct *work)
        }
 }
 
-/*
- * Not all devices report physical dimensions from HID.
- * Compute the default from hardcoded logical dimension
- * and resolution before driver overwrites them.
- */
-static void wacom_set_default_phy(struct wacom_features *features)
-{
-       if (features->x_resolution) {
-               features->x_phy = (features->x_max * 100) /
-                                       features->x_resolution;
-               features->y_phy = (features->y_max * 100) /
-                                       features->y_resolution;
-       }
-}
-
-static void wacom_calculate_res(struct wacom_features *features)
-{
-       /* set unit to "100th of a mm" for devices not reported by HID */
-       if (!features->unit) {
-               features->unit = 0x11;
-               features->unitExpo = -3;
-       }
-
-       features->x_resolution = wacom_calc_hid_res(features->x_max,
-                                                   features->x_phy,
-                                                   features->unit,
-                                                   features->unitExpo);
-       features->y_resolution = wacom_calc_hid_res(features->y_max,
-                                                   features->y_phy,
-                                                   features->unit,
-                                                   features->unitExpo);
-}
-
 static size_t wacom_compute_pktlen(struct hid_device *hdev)
 {
        struct hid_report_enum *report_enum;
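
The two helpers are moved above wacom_wireless_work() so the wireless hotplug path can call them (the new call sites in the hunks above) after it overrides the logical maxima. A worked example of the round trip, with illustrative numbers:

    /* Illustrative values, not taken from a specific tablet:        */
    /* x_max = 21648 logical units, x_resolution = 100 units per mm  */
    /* wacom_set_default_phy(): x_phy = 21648 * 100 / 100 = 21648    */
    /*   (hundredths of a mm, i.e. a 216.48 mm wide sensor)          */
    /* After x_max is forced to 4096 for the touch interface,        */
    /* wacom_calculate_res() derives a new x_resolution from the     */
    /* unchanged x_phy, keeping the reported physical size           */
    /* consistent with the new logical range.                        */
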
index 37c16afe007a0524eaacb5edcae9399bebfae897..c8487894b31236cefd761b24cac48fb4e17e6d52 100644
@@ -929,6 +929,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
 
 MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
 
+static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+       {
+               /*
+                * CPU fan speed going up and down on Dell Studio XPS 8100
+                * for unknown reasons.
+                */
+               .ident = "Dell Studio XPS 8100",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
+               },
+       },
+       { }
+};
+
 /*
  * Probe for the presence of a supported laptop.
  */
@@ -940,7 +955,8 @@ static int __init i8k_probe(void)
        /*
         * Get DMI information
         */
-       if (!dmi_check_system(i8k_dmi_table)) {
+       if (!dmi_check_system(i8k_dmi_table) ||
+           dmi_check_system(i8k_blacklist_dmi_table)) {
                if (!ignore_dmi && !force)
                        return -ENODEV;
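
dmi_check_system() returns the number of entries in the given table that match the running machine's DMI data, so the new condition reads: fail the probe when the whitelist has no match or the blacklist has at least one, unless the ignore_dmi or force module options override the DMI decision.
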
 
index 9b55e673b67caf1365c7452ce51a22a37510af02..85d106fe3ce8628061901b53240e546a884cbea0 100644
@@ -582,6 +582,7 @@ static const struct of_device_id g762_dt_match[] = {
        { .compatible = "gmt,g763" },
        { },
 };
+MODULE_DEVICE_TABLE(of, g762_dt_match);
 
 /*
  * Grab clock (a required property), enable it, get (fixed) clock frequency
index 6153df735e82ca4fd3d605e159510675410546fc..08ff89d222e5ff79a3c5cf37fa1b7729f70fe303 100644
@@ -575,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
        {"nct7904", 0},
        {}
 };
+MODULE_DEVICE_TABLE(i2c, nct7904_id);
 
 static struct i2c_driver nct7904_driver = {
        .class = I2C_CLASS_HWMON,
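
Both hwmon hunks fix the same omission: without MODULE_DEVICE_TABLE() the driver binds when loaded by hand but exports no modalias information, so depmod/udev cannot autoload the module when a matching OF node or I2C device is enumerated. The macro records the ID table in the module image, from which the alias strings are generated.
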
index af162b4c7a6d9b8b30756c53147eb3de458b3d5e..025686d4164058498216862d37af9ad114fe4fa2 100644
@@ -692,7 +692,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, iface);
 
-       dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, "
+       dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, "
                "regs_base@%p\n", iface->regs_base);
 
        return 0;
@@ -735,6 +735,6 @@ subsys_initcall(i2c_bfin_twi_init);
 module_exit(i2c_bfin_twi_exit);
 
 MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
-MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver");
+MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Controller Driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:i2c-bfin-twi");
index d1c22e3fdd146a34d96adcdd35b81a6e15984824..fc9bf7f30e355dfadfcadd4f7d8f187f818e9566 100644
@@ -1247,7 +1247,14 @@ static void omap_i2c_prepare_recovery(struct i2c_adapter *adap)
        u32 reg;
 
        reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+       /* enable test mode */
        reg |= OMAP_I2C_SYSTEST_ST_EN;
+       /* select SDA/SCL IO mode */
+       reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT;
+       /* set SCL to high-impedance state (reset value is 0) */
+       reg |= OMAP_I2C_SYSTEST_SCL_O;
+       /* set SDA to high-impedance state (reset value is 0) */
+       reg |= OMAP_I2C_SYSTEST_SDA_O;
        omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
 }
 
@@ -1257,7 +1264,11 @@ static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap)
        u32 reg;
 
        reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
+       /* restore reset values */
        reg &= ~OMAP_I2C_SYSTEST_ST_EN;
+       reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK;
+       reg &= ~OMAP_I2C_SYSTEST_SCL_O;
+       reg &= ~OMAP_I2C_SYSTEST_SDA_O;
        omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
 }
 
index e6d4935161e4902762f6042847838428ec34faf2..c83e4d13cfc5c402dfdea64df08f399ab486822b 100644
@@ -567,6 +567,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
        if (bri->prepare_recovery)
                bri->prepare_recovery(adap);
 
+       bri->set_scl(adap, val);
+       ndelay(RECOVERY_NDELAY);
+
        /*
         * By this time SCL is high, as we need to give 9 falling-rising edges
         */
@@ -597,7 +600,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
 
 int i2c_generic_scl_recovery(struct i2c_adapter *adap)
 {
-       adap->bus_recovery_info->set_scl(adap, 1);
        return i2c_generic_recovery(adap);
 }
 EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
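
Previously only i2c_generic_scl_recovery() raised SCL, so a recovery path reached the pulse loop with SCL in whatever state the controller left it; hoisting set_scl() into i2c_generic_recovery(), after prepare_recovery(), fixes that for every caller. For orientation, a simplified sketch of the pulse loop that follows (paraphrased, not the exact core implementation):

    bri->set_scl(adap, 1);            /* the hoisted call: start with SCL high */
    ndelay(RECOVERY_NDELAY);
    for (i = 0; i < 9; i++) {         /* at most 9 falling/rising edges */
        bri->set_scl(adap, 0);
        ndelay(RECOVERY_NDELAY);
        bri->set_scl(adap, 1);
        ndelay(RECOVERY_NDELAY);
        if (bri->get_sda && bri->get_sda(adap))
            break;                    /* a stuck slave released SDA */
    }
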
@@ -1338,13 +1340,17 @@ static int of_dev_node_match(struct device *dev, void *data)
 struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
 {
        struct device *dev;
+       struct i2c_client *client;
 
-       dev = bus_find_device(&i2c_bus_type, NULL, node,
-                                        of_dev_node_match);
+       dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
        if (!dev)
                return NULL;
 
-       return i2c_verify_client(dev);
+       client = i2c_verify_client(dev);
+       if (!client)
+               put_device(dev);
+
+       return client;
 }
 EXPORT_SYMBOL(of_find_i2c_device_by_node);
 
@@ -1352,13 +1358,17 @@ EXPORT_SYMBOL(of_find_i2c_device_by_node);
 struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
 {
        struct device *dev;
+       struct i2c_adapter *adapter;
 
-       dev = bus_find_device(&i2c_bus_type, NULL, node,
-                                        of_dev_node_match);
+       dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
        if (!dev)
                return NULL;
 
-       return i2c_verify_adapter(dev);
+       adapter = i2c_verify_adapter(dev);
+       if (!adapter)
+               put_device(dev);
+
+       return adapter;
 }
 EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
 #else
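
Both lookup helpers share the fix: bus_find_device() returns its result with a reference held, and the old code leaked that reference whenever the found device turned out not to be an I2C client (respectively adapter). On success the elevated reference deliberately remains: it now belongs to the caller, who must drop it when done with the client or adapter.
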
index 8223746546093c7a08f4bdfc8425459d88fe1a52..1da44961477953038e78409169f80a3f4884f89a 100644
@@ -80,9 +80,6 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
        struct eeprom_data *eeprom;
        unsigned long flags;
 
-       if (off + count > attr->size)
-               return -EFBIG;
-
        eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
 
        spin_lock_irqsave(&eeprom->buffer_lock, flags);
@@ -98,9 +95,6 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
        struct eeprom_data *eeprom;
        unsigned long flags;
 
-       if (off + count > attr->size)
-               return -EFBIG;
-
        eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
 
        spin_lock_irqsave(&eeprom->buffer_lock, flags);
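
Both removed checks were dead code: the sysfs layer already validates and clamps offset and count against the binary attribute's size before calling into these handlers, so the off + count > attr->size condition could never be true by the time they ran.
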
index c7aab48f07cdfcdebf3efb6374416619c9095e04..92d518382a9fce90c3e1dbae45034675072da274 100644
@@ -814,7 +814,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                        printk(KERN_ERR MOD
                               "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
-                       ret = -EINVAL;
+                       wc->status = IB_WC_FATAL_ERR;
                }
        }
 out:
index 27b6a3ce18caf2e996177e6c313fff2b21a4ad19..891797ad76bccda3ae132e1fc59483b539e522ee 100644
@@ -196,7 +196,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs)
                if (n_buttons[i] < 1)
                        continue;
 
-               if (n_buttons[i] > 6) {
+               if (n_buttons[i] > ARRAY_SIZE(tgfx_buttons)) {
                        printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]);
                        err = -EINVAL;
                        goto err_unreg_devs;
index 097d7216d98ee4e4d394726e9d4c9743067d14f8..c6dc644aa5806b37cb4e0dcbbc6217d915e8a562 100644
@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
                 * convert it to descriptor.
                 */
                if (!button->gpiod && gpio_is_valid(button->gpio)) {
-                       unsigned flags = 0;
+                       unsigned flags = GPIOF_IN;
 
                        if (button->active_low)
                                flags |= GPIOF_ACTIVE_LOW;
index 10e140af5aac1a9ea309d2b237af065cc7abf684..1ac898db303afe84edd003a03129eb0f27518837 100644
@@ -292,3 +292,4 @@ module_platform_driver(axp20x_pek_driver);
 MODULE_DESCRIPTION("axp20x Power Button");
 MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:axp20x-pek");
index fc17b9592f5435238d980cc30d266a4ee399415a..10c4e3d462f112f15ec9843093c5f988d44780b9 100644
@@ -183,7 +183,8 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
        if (pdata && pdata->coexist)
                return true;
 
-       if (of_find_node_by_name(node, "codec")) {
+       node = of_find_node_by_name(node, "codec");
+       if (node) {
                of_node_put(node);
                return true;
        }
index 113d6f1516a54956f74635f7eb51231ab5490052..4d246861d692b810f3074aa7917cda86893ac6c2 100644
@@ -20,6 +20,7 @@
 #include <linux/input/mt.h>
 #include <linux/serio.h>
 #include <linux/libps2.h>
+#include <linux/dmi.h>
 
 #include "psmouse.h"
 #include "alps.h"
@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
 #define ALPS_FOUR_BUTTONS      0x40    /* 4 direction button present */
 #define ALPS_PS2_INTERLEAVED   0x80    /* 3-byte PS/2 packet interleaved with
                                           6-byte ALPS packet */
+#define ALPS_DELL              0x100   /* device is a Dell laptop */
 #define ALPS_BUTTONPAD         0x200   /* device is a clickpad */
 
 static const struct alps_model_info alps_model_data[] = {
@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
                return;
        }
 
-       /* Non interleaved V2 dualpoint has separate stick button bits */
+       /* Dell non interleaved V2 dualpoint has separate stick button bits */
        if (priv->proto_version == ALPS_PROTO_V2 &&
-           priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) {
+           priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
                left |= packet[0] & 1;
                right |= packet[0] & 2;
                middle |= packet[0] & 4;
@@ -2550,6 +2552,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
        priv->byte0 = protocol->byte0;
        priv->mask0 = protocol->mask0;
        priv->flags = protocol->flags;
+       if (dmi_name_in_vendors("Dell"))
+               priv->flags |= ALPS_DELL;
 
        priv->x_max = 2000;
        priv->y_max = 1400;
index 22b9ca901f4e96c22499ce9723c0d2bd897c6fbb..2955f1d0ca6c4c9137f786028ca36bff706beab2 100644
@@ -783,19 +783,26 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
        struct elantech_data *etd = psmouse->private;
        unsigned char *packet = psmouse->packet;
        unsigned char packet_type = packet[3] & 0x03;
+       unsigned int ic_version;
        bool sanity_check;
 
        if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
                return PACKET_TRACKPOINT;
 
+       /* This represents the version of IC body. */
+       ic_version = (etd->fw_version & 0x0f0000) >> 16;
+
        /*
         * Sanity check based on the constant bits of a packet.
         * The constant bits change depending on the value of
-        * the hardware flag 'crc_enabled' but are the same for
-        * every packet, regardless of the type.
+        * the hardware flag 'crc_enabled' and the version of
+        * the IC body, but are the same for every packet,
+        * regardless of the type.
         */
        if (etd->crc_enabled)
                sanity_check = ((packet[3] & 0x08) == 0x00);
+       else if (ic_version == 7 && etd->samples[1] == 0x2A)
+               sanity_check = ((packet[3] & 0x1c) == 0x10);
        else
                sanity_check = ((packet[0] & 0x0c) == 0x04 &&
                                (packet[3] & 0x1c) == 0x10);
@@ -1116,6 +1123,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
+ * Fujitsu T725            0x470f01        05, 12, 09      2 hw buttons
  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
  * Lenovo L430             0x350f02        b9, 15, 0c      2 hw buttons (*)
@@ -1651,6 +1659,16 @@ int elantech_init(struct psmouse *psmouse)
                     etd->capabilities[0], etd->capabilities[1],
                     etd->capabilities[2]);
 
+       if (etd->hw_version != 1) {
+               if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, etd->samples)) {
+                       psmouse_err(psmouse, "failed to query sample data\n");
+                       goto init_fail;
+               }
+               psmouse_info(psmouse,
+                            "Elan sample query result %02x, %02x, %02x\n",
+                            etd->samples[0], etd->samples[1], etd->samples[2]);
+       }
+
        if (elantech_set_absolute_mode(psmouse)) {
                psmouse_err(psmouse,
                            "failed to put touchpad into absolute mode.\n");
index f965d1569cc338059cdd540bad44ed927c6ddc3e..e1cbf409d9c8d0d4e7d21e13d57851ae6565b535 100644
@@ -129,6 +129,7 @@ struct elantech_data {
        unsigned char reg_26;
        unsigned char debug;
        unsigned char capabilities[3];
+       unsigned char samples[3];
        bool paritycheck;
        bool jumpy_cursor;
        bool reports_pressure;
index f1fb1d3ccc56e679dd72334b6f81195981fa17c1..d77a848d50deb3efb566b9c851e31aa2643e0b21 100644
@@ -23,7 +23,8 @@ config IOMMU_IO_PGTABLE
 config IOMMU_IO_PGTABLE_LPAE
        bool "ARMv7/v8 Long Descriptor Format"
        select IOMMU_IO_PGTABLE
-       depends on ARM || ARM64 || COMPILE_TEST
+       # SWIOTLB guarantees a dma_to_phys() implementation
+       depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB)
        help
          Enable support for the ARM long descriptor pagetable format.
          This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
index 658ee39e65696898422bcd9c825d8a49fbc37359..f82060e778a23bb7a8901ef2356d42b5363d93a6 100644
@@ -1835,8 +1835,8 @@ static void free_gcr3_table(struct protection_domain *domain)
                free_gcr3_tbl_level2(domain->gcr3_tbl);
        else if (domain->glx == 1)
                free_gcr3_tbl_level1(domain->gcr3_tbl);
-       else if (domain->glx != 0)
-               BUG();
+       else
+               BUG_ON(domain->glx != 0);
 
        free_page((unsigned long)domain->gcr3_tbl);
 }
@@ -3947,11 +3947,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
        if (ret < 0)
                return ret;
 
-       ret = -ENOMEM;
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
-       if (!data)
-               goto out_free_parent;
-
        if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
                if (get_irq_table(devid, true))
                        index = info->ioapic_pin;
@@ -3962,7 +3957,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
        }
        if (index < 0) {
                pr_warn("Failed to allocate IRTE\n");
-               kfree(data);
                goto out_free_parent;
        }
 
@@ -3974,17 +3968,18 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
                        goto out_free_data;
                }
 
-               if (i > 0) {
-                       data = kzalloc(sizeof(*data), GFP_KERNEL);
-                       if (!data)
-                               goto out_free_data;
-               }
+               ret = -ENOMEM;
+               data = kzalloc(sizeof(*data), GFP_KERNEL);
+               if (!data)
+                       goto out_free_data;
+
                irq_data->hwirq = (devid << 16) + i;
                irq_data->chip_data = data;
                irq_data->chip = &amd_ir_chip;
                irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
                irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
        }
+
        return 0;
 
 out_free_data:
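
The rework removes the special-cased first iteration: chip data is now allocated inside the loop for every IRQ, ret is set to -ENOMEM immediately before each allocation, and a failure at any point falls through to the existing out_free_data unwinding. The shape of the pattern, sketched:

    for (i = 0; i < nr_irqs; i++) {
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
            goto out_free_data;   /* unwinds irqs 0 .. i-1 */
        /* ... hook data up to irq i ... */
    }
    return 0;
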
index a24495eb4e26c5c596efa79c084b14c19fe5932c..5ef347a13cb5d54789c07869b0527d81cb24365e 100644
@@ -154,7 +154,7 @@ bool amd_iommu_iotlb_sup __read_mostly = true;
 u32 amd_iommu_max_pasid __read_mostly = ~0;
 
 bool amd_iommu_v2_present __read_mostly;
-bool amd_iommu_pc_present __read_mostly;
+static bool amd_iommu_pc_present __read_mostly;
 
 bool amd_iommu_force_isolation __read_mostly;
 
index f7b875bb70d42138027f49ebde8150d27ce14cd2..1131664b918b0a574c7cc654a6a3cd04107f8e81 100644
@@ -356,8 +356,8 @@ static void free_pasid_states(struct device_state *dev_state)
                free_pasid_states_level2(dev_state->states);
        else if (dev_state->pasid_levels == 1)
                free_pasid_states_level1(dev_state->states);
-       else if (dev_state->pasid_levels != 0)
-               BUG();
+       else
+               BUG_ON(dev_state->pasid_levels != 0);
 
        free_page((unsigned long)dev_state->states);
 }
index da902baaa7946aac569b7ebe8a316c647dfd8187..dafaf59dc3b82833fb78d55e8f194ff728999d35 100644
 
 #define ARM_SMMU_IRQ_CTRL              0x50
 #define IRQ_CTRL_EVTQ_IRQEN            (1 << 2)
+#define IRQ_CTRL_PRIQ_IRQEN            (1 << 1)
 #define IRQ_CTRL_GERROR_IRQEN          (1 << 0)
 
 #define ARM_SMMU_IRQ_CTRLACK           0x54
 #define ARM_SMMU_PRIQ_IRQ_CFG2         0xdc
 
 /* Common MSI config fields */
-#define MSI_CFG0_SH_SHIFT              60
-#define MSI_CFG0_SH_NSH                        (0UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_SH_OSH                        (2UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_SH_ISH                        (3UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_MEMATTR_SHIFT         56
-#define MSI_CFG0_MEMATTR_DEVICE_nGnRE  (0x1 << MSI_CFG0_MEMATTR_SHIFT)
 #define MSI_CFG0_ADDR_SHIFT            2
 #define MSI_CFG0_ADDR_MASK             0x3fffffffffffUL
+#define MSI_CFG2_SH_SHIFT              4
+#define MSI_CFG2_SH_NSH                        (0UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_SH_OSH                        (2UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_SH_ISH                        (3UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_MEMATTR_SHIFT         0
+#define MSI_CFG2_MEMATTR_DEVICE_nGnRE  (0x1 << MSI_CFG2_MEMATTR_SHIFT)
 
 #define Q_IDX(q, p)                    ((p) & ((1 << (q)->max_n_shift) - 1))
 #define Q_WRP(q, p)                    ((p) & (1 << (q)->max_n_shift))
@@ -1330,33 +1331,10 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
        arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 }
 
-static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
-{
-       struct arm_smmu_domain *smmu_domain = cookie;
-       struct arm_smmu_device *smmu = smmu_domain->smmu;
-       unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-       if (smmu->features & ARM_SMMU_FEAT_COHERENCY) {
-               dsb(ishst);
-       } else {
-               dma_addr_t dma_addr;
-               struct device *dev = smmu->dev;
-
-               dma_addr = dma_map_page(dev, virt_to_page(addr), offset, size,
-                                       DMA_TO_DEVICE);
-
-               if (dma_mapping_error(dev, dma_addr))
-                       dev_err(dev, "failed to flush pgtable at %p\n", addr);
-               else
-                       dma_unmap_page(dev, dma_addr, size, DMA_TO_DEVICE);
-       }
-}
-
 static struct iommu_gather_ops arm_smmu_gather_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync,
-       .flush_pgtable  = arm_smmu_flush_pgtable,
 };
 
 /* IOMMU API */
@@ -1531,6 +1509,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
                .ias            = ias,
                .oas            = oas,
                .tlb            = &arm_smmu_gather_ops,
+               .iommu_dev      = smmu->dev,
        };
 
        pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
@@ -2053,9 +2032,17 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
        int ret;
        struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 
-       /* Calculate the L1 size, capped to the SIDSIZE */
-       size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
-       size = min(size, smmu->sid_bits - STRTAB_SPLIT);
+       /*
+        * If we can resolve everything with a single L2 table, then we
+        * just need a single L1 descriptor. Otherwise, calculate the L1
+        * size, capped to the SIDSIZE.
+        */
+       if (smmu->sid_bits < STRTAB_SPLIT) {
+               size = 0;
+       } else {
+               size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
+               size = min(size, smmu->sid_bits - STRTAB_SPLIT);
+       }
        cfg->num_l1_ents = 1 << size;
 
        size += STRTAB_SPLIT;
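
A worked example, using this driver's STRTAB_SPLIT of 8:

    /* sid_bits = 6:  all 2^6 StreamIDs fit in one 2^8-entry L2 table, */
    /*                so size = 0 and cfg->num_l1_ents = 1 << 0 = 1.   */
    /* sid_bits = 16: size = min(L1 cap, 16 - 8) = 8, giving           */
    /*                cfg->num_l1_ents = 1 << 8 = 256 L1 descriptors.  */
    /* Either way, size += STRTAB_SPLIT afterwards, since each L1      */
    /* descriptor resolves STRTAB_SPLIT low-order StreamID bits.       */
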
@@ -2198,6 +2185,7 @@ static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
 static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
 {
        int ret, irq;
+       u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
 
        /* Disable IRQs first */
        ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
@@ -2252,13 +2240,13 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
                        if (IS_ERR_VALUE(ret))
                                dev_warn(smmu->dev,
                                         "failed to enable priq irq\n");
+                       else
+                               irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
                }
        }
 
        /* Enable interrupt generation on the SMMU */
-       ret = arm_smmu_write_reg_sync(smmu,
-                                     IRQ_CTRL_EVTQ_IRQEN |
-                                     IRQ_CTRL_GERROR_IRQEN,
+       ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
                                      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
        if (ret)
                dev_warn(smmu->dev, "failed to enable irqs\n");
@@ -2540,12 +2528,12 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
        case IDR5_OAS_44_BIT:
                smmu->oas = 44;
                break;
+       default:
+               dev_info(smmu->dev,
+                       "unknown output address size. Truncating to 48-bit\n");
+               /* Fallthrough */
        case IDR5_OAS_48_BIT:
                smmu->oas = 48;
-               break;
-       default:
-               dev_err(smmu->dev, "unknown output address size!\n");
-               return -ENXIO;
        }
 
        /* Set the DMA mask for our table walker */
index 4cd0c29cb585000c0e5899651948ad1dc2ffbf1f..48a39dfa977795deb8271dc5b34a7b3e0be002d0 100644
@@ -37,6 +37,7 @@
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -607,34 +608,10 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
        }
 }
 
-static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
-{
-       struct arm_smmu_domain *smmu_domain = cookie;
-       struct arm_smmu_device *smmu = smmu_domain->smmu;
-       unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-
-       /* Ensure new page tables are visible to the hardware walker */
-       if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
-               dsb(ishst);
-       } else {
-               /*
-                * If the SMMU can't walk tables in the CPU caches, treat them
-                * like non-coherent DMA since we need to flush the new entries
-                * all the way out to memory. There's no possibility of
-                * recursion here as the SMMU table walker will not be wired
-                * through another SMMU.
-                */
-               dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
-                            DMA_TO_DEVICE);
-       }
-}
-
 static struct iommu_gather_ops arm_smmu_gather_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync,
-       .flush_pgtable  = arm_smmu_flush_pgtable,
 };
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -898,6 +875,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                .ias            = ias,
                .oas            = oas,
                .tlb            = &arm_smmu_gather_ops,
+               .iommu_dev      = smmu->dev,
        };
 
        smmu_domain->smmu = smmu;
@@ -1532,6 +1510,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
        unsigned long size;
        void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
        u32 id;
+       bool cttw_dt, cttw_reg;
 
        dev_notice(smmu->dev, "probing hardware configuration...\n");
        dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
@@ -1571,10 +1550,22 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
                dev_notice(smmu->dev, "\taddress translation ops\n");
        }
 
-       if (id & ID0_CTTW) {
+       /*
+        * In order for DMA API calls to work properly, we must defer to what
+        * the DT says about coherency, regardless of what the hardware claims.
+        * Fortunately, this also opens up a workaround for systems where the
+        * ID register value has ended up configured incorrectly.
+        */
+       cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
+       cttw_reg = !!(id & ID0_CTTW);
+       if (cttw_dt)
                smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
-               dev_notice(smmu->dev, "\tcoherent table walk\n");
-       }
+       if (cttw_dt || cttw_reg)
+               dev_notice(smmu->dev, "\t%scoherent table walk\n",
+                          cttw_dt ? "" : "non-");
+       if (cttw_dt != cttw_reg)
+               dev_notice(smmu->dev,
+                          "\t(IDR0.CTTW overridden by dma-coherent property)\n");
 
        if (id & ID0_SMS) {
                u32 smr, sid, mask;
index c9db04d4ef39ae36553279859b6ca0f1c7972db1..8757f8dfc4e57afee580fc68d76b88658467dea5 100644
@@ -1068,7 +1068,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
        if (intel_iommu_enabled)
                iommu->iommu_dev = iommu_device_create(NULL, iommu,
                                                       intel_iommu_groups,
-                                                      iommu->name);
+                                                      "%s", iommu->name);
 
        return 0;
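
iommu->name reaches a printf-style API here, so any '%' sequence it happens to contain would be parsed as a conversion specifier; routing it through an explicit "%s" makes it pure data. A standalone illustration of the hazard:

    #include <stdio.h>

    int main(void)
    {
        const char *name = "dmar%s0";  /* a name that happens to contain '%' */

        /* printf(name);            BAD: "%s" is consumed as a conversion */
        printf("%s\n", name);    /* GOOD: the name is data, not a format  */
        return 0;
    }
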
 
index abeedc9a78c27c4e8ee419b2571d9a88fbb4fa74..2570f2a25dc432606e283d1dc7dd450e6fec3bd3 100644
@@ -41,7 +41,6 @@ struct pamu_isr_data {
 
 static struct paace *ppaact;
 static struct paace *spaact;
-static struct ome *omt __initdata;
 
 /*
  * Table for matching compatible strings, for device tree
@@ -50,7 +49,7 @@ static struct ome *omt __initdata;
  * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
  * string would be used.
  */
-static const struct of_device_id guts_device_ids[] __initconst = {
+static const struct of_device_id guts_device_ids[] = {
        { .compatible = "fsl,qoriq-device-config-1.0", },
        { .compatible = "fsl,qoriq-device-config-2.0", },
        {}
@@ -599,7 +598,7 @@ found_cpu_node:
  * Memory accesses to QMAN and BMAN private memory need not be coherent, so
  * clear the PAACE entry coherency attribute for them.
  */
-static void __init setup_qbman_paace(struct paace *ppaace, int  paace_type)
+static void setup_qbman_paace(struct paace *ppaace, int  paace_type)
 {
        switch (paace_type) {
        case QMAN_PAACE:
@@ -629,7 +628,7 @@ static void __init setup_qbman_paace(struct paace *ppaace, int  paace_type)
  * this table to translate device transaction to appropriate corenet
  * transaction.
  */
-static void __init setup_omt(struct ome *omt)
+static void setup_omt(struct ome *omt)
 {
        struct ome *ome;
 
@@ -666,7 +665,7 @@ static void __init setup_omt(struct ome *omt)
  * Get the maximum number of PAACT table entries
  * and subwindows supported by PAMU
  */
-static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
+static void get_pamu_cap_values(unsigned long pamu_reg_base)
 {
        u32 pc_val;
 
@@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
 }
 
 /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
-static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
-                                phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
-                                phys_addr_t omt_phys)
+static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+                         phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+                         phys_addr_t omt_phys)
 {
        u32 *pc;
        struct pamu_mmap_regs *pamu_regs;
@@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu
 }
 
 /* Enable all device LIODNS */
-static void __init setup_liodns(void)
+static void setup_liodns(void)
 {
        int i, len;
        struct paace *ppaace;
@@ -846,7 +845,7 @@ struct ccsr_law {
 /*
  * Create a coherence subdomain for a given memory block.
  */
-static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
+static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
 {
        struct device_node *np;
        const __be32 *iprop;
@@ -988,7 +987,7 @@ error:
 static const struct {
        u32 svr;
        u32 port_id;
-} port_id_map[] __initconst = {
+} port_id_map[] = {
        {(SVR_P2040 << 8) | 0x10, 0xFF000000},  /* P2040 1.0 */
        {(SVR_P2040 << 8) | 0x11, 0xFF000000},  /* P2040 1.1 */
        {(SVR_P2041 << 8) | 0x10, 0xFF000000},  /* P2041 1.0 */
@@ -1006,7 +1005,7 @@ static const struct {
 
 #define SVR_SECURITY   0x80000 /* The Security (E) bit */
 
-static int __init fsl_pamu_probe(struct platform_device *pdev)
+static int fsl_pamu_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        void __iomem *pamu_regs = NULL;
@@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
        int irq;
        phys_addr_t ppaact_phys;
        phys_addr_t spaact_phys;
+       struct ome *omt;
        phys_addr_t omt_phys;
        size_t mem_size = 0;
        unsigned int order = 0;
@@ -1200,7 +1200,7 @@ error:
        return ret;
 }
 
-static struct platform_driver fsl_of_pamu_driver __initdata = {
+static struct platform_driver fsl_of_pamu_driver = {
        .driver = {
                .name = "fsl-of-pamu",
        },
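
All the dropped __init/__initdata/__initconst annotations follow from one rule: anything reachable from a platform driver's probe path must survive past boot, because init text and data are discarded once the kernel finishes initialising, and a probe that runs (or re-runs) later would otherwise jump into freed code or read freed tables. Only code guaranteed to execute strictly during early init may keep the annotations.
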
index 0649b94f59584ca5b885cd0ecad595a84af0d89d..63daf1ba04b7ed5d491a78344589a6bb45abe0c4 100644
@@ -364,7 +364,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
 static struct dmar_domain *si_domain;
 static int hw_pass_through = 1;
 
-/* domain represents a virtual machine, more than one devices
+/*
+ * Domain represents a virtual machine, more than one devices
  * across iommus may be owned in one domain, e.g. kvm guest.
  */
 #define DOMAIN_FLAG_VIRTUAL_MACHINE    (1 << 0)
@@ -372,11 +373,21 @@ static int hw_pass_through = 1;
 /* si_domain contains multiple devices */
 #define DOMAIN_FLAG_STATIC_IDENTITY    (1 << 1)
 
+#define for_each_domain_iommu(idx, domain)                     \
+       for (idx = 0; idx < g_num_of_iommus; idx++)             \
+               if (domain->iommu_refcnt[idx])
+
 struct dmar_domain {
-       int     id;                     /* domain id */
        int     nid;                    /* node id */
-       DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
-                                       /* bitmap of iommus this domain uses*/
+
+       unsigned        iommu_refcnt[DMAR_UNITS_SUPPORTED];
+                                       /* Refcount of devices per iommu */
+
+
+       u16             iommu_did[DMAR_UNITS_SUPPORTED];
+                                       /* Domain ids per IOMMU. Use u16 since
+                                        * domain ids are 16 bit wide according
+                                        * to VT-d spec, section 9.3 */
 
        struct list_head devices;       /* all devices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */
@@ -395,7 +406,6 @@ struct dmar_domain {
        int             iommu_superpage;/* Level of superpages supported:
                                           0 == 4KiB (no superpages), 1 == 2MiB,
                                           2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
-       spinlock_t      iommu_lock;     /* protect iommu set in domain */
        u64             max_addr;       /* maximum mapped address */
 
        struct iommu_domain domain;     /* generic domain data structure for
@@ -461,10 +471,11 @@ static long list_size;
 
 static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
-                                      struct device *dev);
-static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
-                                          struct device *dev);
+static void dmar_remove_one_dev_info(struct dmar_domain *domain,
+                                    struct device *dev);
+static void __dmar_remove_one_dev_info(struct device_domain_info *info);
+static void domain_context_clear(struct intel_iommu *iommu,
+                                struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu);
 
@@ -564,6 +575,36 @@ __setup("intel_iommu=", intel_iommu_setup);
 static struct kmem_cache *iommu_domain_cache;
 static struct kmem_cache *iommu_devinfo_cache;
 
+static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
+{
+       struct dmar_domain **domains;
+       int idx = did >> 8;
+
+       domains = iommu->domains[idx];
+       if (!domains)
+               return NULL;
+
+       return domains[did & 0xff];
+}
+
+static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
+                            struct dmar_domain *domain)
+{
+       struct dmar_domain **domains;
+       int idx = did >> 8;
+
+       if (!iommu->domains[idx]) {
+               size_t size = 256 * sizeof(struct dmar_domain *);
+               iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
+       }
+
+       domains = iommu->domains[idx];
+       if (WARN_ON(!domains))
+               return;
+       else
+               domains[did & 0xff] = domain;
+}
+
 static inline void *alloc_pgtable_page(int node)
 {
        struct page *page;
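
iommu->domains is now a two-level table indexed by domain ID: the high byte selects a lazily allocated chunk of 256 pointers, the low byte the slot within it. A worked lookup, with an illustrative ID:

    /* did = 0x1234 (illustrative 16-bit domain id)                */
    /* idx  = 0x1234 >> 8   = 0x12  -> iommu->domains[0x12]        */
    /* slot = 0x1234 & 0xff = 0x34  -> iommu->domains[0x12][0x34]  */
    /* set_iommu_domain() allocates domains[0x12] on first use.    */
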
@@ -605,6 +646,11 @@ static inline int domain_type_is_vm(struct dmar_domain *domain)
        return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
 }
 
+static inline int domain_type_is_si(struct dmar_domain *domain)
+{
+       return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
+}
+
 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
 {
        return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
@@ -659,7 +705,9 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
 
        /* si_domain and vm domain should not get here. */
        BUG_ON(domain_type_is_vm_or_si(domain));
-       iommu_id = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
+       for_each_domain_iommu(iommu_id, domain)
+               break;
+
        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
                return NULL;
 
@@ -675,7 +723,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 
        domain->iommu_coherency = 1;
 
-       for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
+       for_each_domain_iommu(i, domain) {
                found = true;
                if (!ecap_coherent(g_iommus[i]->ecap)) {
                        domain->iommu_coherency = 0;
@@ -755,6 +803,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
        struct context_entry *context;
        u64 *entry;
 
+       entry = &root->lo;
        if (ecs_enabled(iommu)) {
                if (devfn >= 0x80) {
                        devfn -= 0x80;
@@ -762,7 +811,6 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
                }
                devfn *= 2;
        }
-       entry = &root->lo;
        if (*entry & 1)
                context = phys_to_virt(*entry & VTD_PAGE_MASK);
        else {
@@ -1162,9 +1210,9 @@ next:
 /* We can't just free the pages because the IOMMU may still be walking
    the page tables, and may have cached the intermediate levels. The
    pages can only be freed after the IOTLB flush has been done. */
-struct page *domain_unmap(struct dmar_domain *domain,
-                         unsigned long start_pfn,
-                         unsigned long last_pfn)
+static struct page *domain_unmap(struct dmar_domain *domain,
+                                unsigned long start_pfn,
+                                unsigned long last_pfn)
 {
        struct page *freelist = NULL;
 
@@ -1188,7 +1236,7 @@ struct page *domain_unmap(struct dmar_domain *domain,
        return freelist;
 }
 
-void dma_free_pagelist(struct page *freelist)
+static void dma_free_pagelist(struct page *freelist)
 {
        struct page *pg;
 
@@ -1356,24 +1404,23 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
                         u8 bus, u8 devfn)
 {
        bool found = false;
-       unsigned long flags;
        struct device_domain_info *info;
        struct pci_dev *pdev;
 
+       assert_spin_locked(&device_domain_lock);
+
        if (!ecap_dev_iotlb_support(iommu->ecap))
                return NULL;
 
        if (!iommu->qi)
                return NULL;
 
-       spin_lock_irqsave(&device_domain_lock, flags);
        list_for_each_entry(info, &domain->devices, link)
                if (info->iommu == iommu && info->bus == bus &&
                    info->devfn == devfn) {
                        found = true;
                        break;
                }
-       spin_unlock_irqrestore(&device_domain_lock, flags);
 
        if (!found || !info->dev || !dev_is_pci(info->dev))
                return NULL;
@@ -1430,11 +1477,14 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
-static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
-                                 unsigned long pfn, unsigned int pages, int ih, int map)
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
+                                 struct dmar_domain *domain,
+                                 unsigned long pfn, unsigned int pages,
+                                 int ih, int map)
 {
        unsigned int mask = ilog2(__roundup_pow_of_two(pages));
        uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
+       u16 did = domain->iommu_did[iommu->seq_id];
 
        BUG_ON(pages == 0);
 
@@ -1458,7 +1508,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
         * flush. However, device IOTLB doesn't need to be flushed in this case.
         */
        if (!cap_caching_mode(iommu->cap) || !map)
-               iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
+               iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
+                                     addr, mask);
 }
 
 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1513,65 +1564,80 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
 
 static int iommu_init_domains(struct intel_iommu *iommu)
 {
-       unsigned long ndomains;
-       unsigned long nlongs;
+       u32 ndomains, nlongs;
+       size_t size;
 
        ndomains = cap_ndoms(iommu->cap);
-       pr_debug("%s: Number of Domains supported <%ld>\n",
+       pr_debug("%s: Number of Domains supported <%d>\n",
                 iommu->name, ndomains);
        nlongs = BITS_TO_LONGS(ndomains);
 
        spin_lock_init(&iommu->lock);
 
-       /* TBD: there might be 64K domains,
-        * consider other allocation for future chip
-        */
        iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
        if (!iommu->domain_ids) {
                pr_err("%s: Allocating domain id array failed\n",
                       iommu->name);
                return -ENOMEM;
        }
-       iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
-                       GFP_KERNEL);
-       if (!iommu->domains) {
+
+       size = ((ndomains >> 8) + 1) * sizeof(struct dmar_domain **);
+       iommu->domains = kzalloc(size, GFP_KERNEL);
+
+       if (iommu->domains) {
+               size = 256 * sizeof(struct dmar_domain *);
+               iommu->domains[0] = kzalloc(size, GFP_KERNEL);
+       }
+
+       if (!iommu->domains || !iommu->domains[0]) {
                pr_err("%s: Allocating domain array failed\n",
                       iommu->name);
                kfree(iommu->domain_ids);
+               kfree(iommu->domains);
                iommu->domain_ids = NULL;
+               iommu->domains    = NULL;
                return -ENOMEM;
        }
 
+
+
        /*
-        * if Caching mode is set, then invalid translations are tagged
-        * with domainid 0. Hence we need to pre-allocate it.
+        * If Caching mode is set, then invalid translations are tagged
+        * with domain-id 0, hence we need to pre-allocate it. We also
+        * use domain-id 0 as a marker for non-allocated domain-id, so
+        * make sure it is not used for a real domain.
         */
-       if (cap_caching_mode(iommu->cap))
-               set_bit(0, iommu->domain_ids);
+       set_bit(0, iommu->domain_ids);
+
        return 0;
 }
 
 static void disable_dmar_iommu(struct intel_iommu *iommu)
 {
-       struct dmar_domain *domain;
-       int i;
+       struct device_domain_info *info, *tmp;
+       unsigned long flags;
 
-       if ((iommu->domains) && (iommu->domain_ids)) {
-               for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) {
-                       /*
-                        * Domain id 0 is reserved for invalid translation
-                        * if hardware supports caching mode.
-                        */
-                       if (cap_caching_mode(iommu->cap) && i == 0)
-                               continue;
+       if (!iommu->domains || !iommu->domain_ids)
+               return;
 
-                       domain = iommu->domains[i];
-                       clear_bit(i, iommu->domain_ids);
-                       if (domain_detach_iommu(domain, iommu) == 0 &&
-                           !domain_type_is_vm(domain))
-                               domain_exit(domain);
-               }
+       spin_lock_irqsave(&device_domain_lock, flags);
+       list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
+               struct dmar_domain *domain;
+
+               if (info->iommu != iommu)
+                       continue;
+
+               if (!info->dev || !info->domain)
+                       continue;
+
+               domain = info->domain;
+
+               dmar_remove_one_dev_info(domain, info->dev);
+
+               if (!domain_type_is_vm_or_si(domain))
+                       domain_exit(domain);
        }
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 
        if (iommu->gcmd & DMA_GCMD_TE)
                iommu_disable_translation(iommu);
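
For the sizing in iommu_init_domains() above, a worked example assuming the 16-bit maximum the VT-d spec allows:

    /* ndomains = 65536 (domain ids are at most 16 bits wide)        */
    /* top level: ((65536 >> 8) + 1) = 257 chunk pointers            */
    /* each chunk: 256 * sizeof(struct dmar_domain *), allocated on  */
    /* first use by set_iommu_domain(); chunk 0 is allocated up      */
    /* front together with the id bitmap, and domain-id 0 is always  */
    /* reserved as the not-allocated marker.                         */
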
@@ -1580,6 +1646,11 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 static void free_dmar_iommu(struct intel_iommu *iommu)
 {
        if ((iommu->domains) && (iommu->domain_ids)) {
+               int elems = (cap_ndoms(iommu->cap) >> 8) + 1;
+               int i;
+
+               for (i = 0; i < elems; i++)
+                       kfree(iommu->domains[i]);
                kfree(iommu->domains);
                kfree(iommu->domain_ids);
                iommu->domains = NULL;
@@ -1594,8 +1665,6 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
 
 static struct dmar_domain *alloc_domain(int flags)
 {
-       /* domain id for virtual machine, it won't be set in context */
-       static atomic_t vm_domid = ATOMIC_INIT(0);
        struct dmar_domain *domain;
 
        domain = alloc_domain_mem();
@@ -1605,111 +1674,64 @@ static struct dmar_domain *alloc_domain(int flags)
        memset(domain, 0, sizeof(*domain));
        domain->nid = -1;
        domain->flags = flags;
-       spin_lock_init(&domain->iommu_lock);
        INIT_LIST_HEAD(&domain->devices);
-       if (flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
-               domain->id = atomic_inc_return(&vm_domid);
 
        return domain;
 }
 
-static int __iommu_attach_domain(struct dmar_domain *domain,
-                                struct intel_iommu *iommu)
-{
-       int num;
-       unsigned long ndomains;
-
-       ndomains = cap_ndoms(iommu->cap);
-       num = find_first_zero_bit(iommu->domain_ids, ndomains);
-       if (num < ndomains) {
-               set_bit(num, iommu->domain_ids);
-               iommu->domains[num] = domain;
-       } else {
-               num = -ENOSPC;
-       }
-
-       return num;
-}
-
-static int iommu_attach_domain(struct dmar_domain *domain,
+/* Must be called with iommu->lock */
+static int domain_attach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
 {
-       int num;
-       unsigned long flags;
-
-       spin_lock_irqsave(&iommu->lock, flags);
-       num = __iommu_attach_domain(domain, iommu);
-       spin_unlock_irqrestore(&iommu->lock, flags);
-       if (num < 0)
-               pr_err("%s: No free domain ids\n", iommu->name);
-
-       return num;
-}
-
-static int iommu_attach_vm_domain(struct dmar_domain *domain,
-                                 struct intel_iommu *iommu)
-{
-       int num;
        unsigned long ndomains;
+       int num;
 
-       ndomains = cap_ndoms(iommu->cap);
-       for_each_set_bit(num, iommu->domain_ids, ndomains)
-               if (iommu->domains[num] == domain)
-                       return num;
-
-       return __iommu_attach_domain(domain, iommu);
-}
-
-static void iommu_detach_domain(struct dmar_domain *domain,
-                               struct intel_iommu *iommu)
-{
-       unsigned long flags;
-       int num, ndomains;
+       assert_spin_locked(&device_domain_lock);
+       assert_spin_locked(&iommu->lock);
 
-       spin_lock_irqsave(&iommu->lock, flags);
-       if (domain_type_is_vm_or_si(domain)) {
+       domain->iommu_refcnt[iommu->seq_id] += 1;
+       domain->iommu_count += 1;
+       if (domain->iommu_refcnt[iommu->seq_id] == 1) {
                ndomains = cap_ndoms(iommu->cap);
-               for_each_set_bit(num, iommu->domain_ids, ndomains) {
-                       if (iommu->domains[num] == domain) {
-                               clear_bit(num, iommu->domain_ids);
-                               iommu->domains[num] = NULL;
-                               break;
-                       }
+               num      = find_first_zero_bit(iommu->domain_ids, ndomains);
+
+               if (num >= ndomains) {
+                       pr_err("%s: No free domain ids\n", iommu->name);
+                       domain->iommu_refcnt[iommu->seq_id] -= 1;
+                       domain->iommu_count -= 1;
+                       return -ENOSPC;
                }
-       } else {
-               clear_bit(domain->id, iommu->domain_ids);
-               iommu->domains[domain->id] = NULL;
-       }
-       spin_unlock_irqrestore(&iommu->lock, flags);
-}
 
-static void domain_attach_iommu(struct dmar_domain *domain,
-                              struct intel_iommu *iommu)
-{
-       unsigned long flags;
+               set_bit(num, iommu->domain_ids);
+               set_iommu_domain(iommu, num, domain);
+
+               domain->iommu_did[iommu->seq_id] = num;
+               domain->nid                      = iommu->node;
 
-       spin_lock_irqsave(&domain->iommu_lock, flags);
-       if (!test_and_set_bit(iommu->seq_id, domain->iommu_bmp)) {
-               domain->iommu_count++;
-               if (domain->iommu_count == 1)
-                       domain->nid = iommu->node;
                domain_update_iommu_cap(domain);
        }
-       spin_unlock_irqrestore(&domain->iommu_lock, flags);
+
+       return 0;
 }
 
 static int domain_detach_iommu(struct dmar_domain *domain,
                               struct intel_iommu *iommu)
 {
-       unsigned long flags;
-       int count = INT_MAX;
+       int num, count = INT_MAX;
+
+       assert_spin_locked(&device_domain_lock);
+       assert_spin_locked(&iommu->lock);
+
+       domain->iommu_refcnt[iommu->seq_id] -= 1;
+       count = --domain->iommu_count;
+       if (domain->iommu_refcnt[iommu->seq_id] == 0) {
+               num = domain->iommu_did[iommu->seq_id];
+               clear_bit(num, iommu->domain_ids);
+               set_iommu_domain(iommu, num, NULL);
 
-       spin_lock_irqsave(&domain->iommu_lock, flags);
-       if (test_and_clear_bit(iommu->seq_id, domain->iommu_bmp)) {
-               count = --domain->iommu_count;
                domain_update_iommu_cap(domain);
+               domain->iommu_did[iommu->seq_id] = 0;
        }
-       spin_unlock_irqrestore(&domain->iommu_lock, flags);
 
        return count;
 }
@@ -1776,9 +1798,9 @@ static inline int guestwidth_to_adjustwidth(int gaw)
        return agaw;
 }
 
-static int domain_init(struct dmar_domain *domain, int guest_width)
+static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
+                      int guest_width)
 {
-       struct intel_iommu *iommu;
        int adjust_width, agaw;
        unsigned long sagaw;
 
@@ -1787,7 +1809,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
        domain_reserve_special_ranges(domain);
 
        /* calculate AGAW */
-       iommu = domain_get_iommu(domain);
        if (guest_width > cap_mgaw(iommu->cap))
                guest_width = cap_mgaw(iommu->cap);
        domain->gaw = guest_width;
@@ -1830,8 +1851,6 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
 static void domain_exit(struct dmar_domain *domain)
 {
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
        struct page *freelist = NULL;
 
        /* Domain 0 is reserved, so dont process it */
@@ -1842,22 +1861,16 @@ static void domain_exit(struct dmar_domain *domain)
        if (!intel_iommu_strict)
                flush_unmaps_timeout(0);
 
-       /* remove associated devices */
+       /* Remove associated devices and clear attached or cached domains */
+       rcu_read_lock();
        domain_remove_dev_info(domain);
+       rcu_read_unlock();
 
        /* destroy iovas */
        put_iova_domain(&domain->iovad);
 
        freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
 
-       /* clear attached or cached domains */
-       rcu_read_lock();
-       for_each_active_iommu(iommu, drhd)
-               if (domain_type_is_vm(domain) ||
-                   test_bit(iommu->seq_id, domain->iommu_bmp))
-                       iommu_detach_domain(domain, iommu);
-       rcu_read_unlock();
-
        dma_free_pagelist(freelist);
 
        free_domain_mem(domain);
@@ -1865,79 +1878,68 @@ static void domain_exit(struct dmar_domain *domain)
 
 static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct intel_iommu *iommu,
-                                     u8 bus, u8 devfn, int translation)
+                                     u8 bus, u8 devfn)
 {
+       u16 did = domain->iommu_did[iommu->seq_id];
+       int translation = CONTEXT_TT_MULTI_LEVEL;
+       struct device_domain_info *info = NULL;
        struct context_entry *context;
        unsigned long flags;
        struct dma_pte *pgd;
-       int id;
-       int agaw;
-       struct device_domain_info *info = NULL;
+       int ret, agaw;
+
+       WARN_ON(did == 0);
+
+       if (hw_pass_through && domain_type_is_si(domain))
+               translation = CONTEXT_TT_PASS_THROUGH;
 
        pr_debug("Set context mapping for %02x:%02x.%d\n",
                bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 
        BUG_ON(!domain->pgd);
-       BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
-              translation != CONTEXT_TT_MULTI_LEVEL);
 
-       spin_lock_irqsave(&iommu->lock, flags);
+       spin_lock_irqsave(&device_domain_lock, flags);
+       spin_lock(&iommu->lock);
+
+       ret = -ENOMEM;
        context = iommu_context_addr(iommu, bus, devfn, 1);
-       spin_unlock_irqrestore(&iommu->lock, flags);
        if (!context)
-               return -ENOMEM;
-       spin_lock_irqsave(&iommu->lock, flags);
-       if (context_present(context)) {
-               spin_unlock_irqrestore(&iommu->lock, flags);
-               return 0;
-       }
+               goto out_unlock;
 
-       context_clear_entry(context);
+       ret = 0;
+       if (context_present(context))
+               goto out_unlock;
 
-       id = domain->id;
        pgd = domain->pgd;
 
-       if (domain_type_is_vm_or_si(domain)) {
-               if (domain_type_is_vm(domain)) {
-                       id = iommu_attach_vm_domain(domain, iommu);
-                       if (id < 0) {
-                               spin_unlock_irqrestore(&iommu->lock, flags);
-                               pr_err("%s: No free domain ids\n", iommu->name);
-                               return -EFAULT;
-                       }
-               }
+       context_clear_entry(context);
+       context_set_domain_id(context, did);
 
-               /* Skip top levels of page tables for
-                * iommu which has less agaw than default.
-                * Unnecessary for PT mode.
-                */
-               if (translation != CONTEXT_TT_PASS_THROUGH) {
-                       for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
-                               pgd = phys_to_virt(dma_pte_addr(pgd));
-                               if (!dma_pte_present(pgd)) {
-                                       spin_unlock_irqrestore(&iommu->lock, flags);
-                                       return -ENOMEM;
-                               }
-                       }
+       /*
+        * Skip top levels of page tables for iommu which has less agaw
+        * than default.  Unnecessary for PT mode.
+        */
+       if (translation != CONTEXT_TT_PASS_THROUGH) {
+               for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+                       ret = -ENOMEM;
+                       pgd = phys_to_virt(dma_pte_addr(pgd));
+                       if (!dma_pte_present(pgd))
+                               goto out_unlock;
                }
-       }
-
-       context_set_domain_id(context, id);
 
-       if (translation != CONTEXT_TT_PASS_THROUGH) {
                info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
                translation = info ? CONTEXT_TT_DEV_IOTLB :
                                     CONTEXT_TT_MULTI_LEVEL;
-       }
-       /*
-        * In pass through mode, AW must be programmed to indicate the largest
-        * AGAW value supported by hardware. And ASR is ignored by hardware.
-        */
-       if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
-               context_set_address_width(context, iommu->msagaw);
-       else {
+
                context_set_address_root(context, virt_to_phys(pgd));
                context_set_address_width(context, iommu->agaw);
+       } else {
+               /*
+                * In pass through mode, AW must be programmed to
+                * indicate the largest AGAW value supported by
+                * hardware. And ASR is ignored by hardware.
+                */
+               context_set_address_width(context, iommu->msagaw);
        }
 
        context_set_translation_type(context, translation);
@@ -1956,14 +1958,17 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                                           (((u16)bus) << 8) | devfn,
                                           DMA_CCMD_MASK_NOBIT,
                                           DMA_CCMD_DEVICE_INVL);
-               iommu->flush.flush_iotlb(iommu, id, 0, 0, DMA_TLB_DSI_FLUSH);
+               iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
        } else {
                iommu_flush_write_buffer(iommu);
        }
        iommu_enable_dev_iotlb(info);
-       spin_unlock_irqrestore(&iommu->lock, flags);
 
-       domain_attach_iommu(domain, iommu);
+       ret = 0;
+
+out_unlock:
+       spin_unlock(&iommu->lock);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 
        return 0;
 }
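
Note the locking change in domain_context_mapping_one(): the old code took
and released iommu->lock around each step, leaving a window between looking
up the context entry and checking context_present(). The reworked function
holds both locks across the whole update, with a fixed nesting order used
throughout this series (sketch):

    spin_lock_irqsave(&device_domain_lock, flags);  /* outer, IRQ-safe */
    spin_lock(&iommu->lock);                        /* inner */
    /* ... look up and program the context entry atomically ... */
    spin_unlock(&iommu->lock);
    spin_unlock_irqrestore(&device_domain_lock, flags);
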
@@ -1971,7 +1976,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 struct domain_context_mapping_data {
        struct dmar_domain *domain;
        struct intel_iommu *iommu;
-       int translation;
 };
 
 static int domain_context_mapping_cb(struct pci_dev *pdev,
@@ -1980,13 +1984,11 @@ static int domain_context_mapping_cb(struct pci_dev *pdev,
        struct domain_context_mapping_data *data = opaque;
 
        return domain_context_mapping_one(data->domain, data->iommu,
-                                         PCI_BUS_NUM(alias), alias & 0xff,
-                                         data->translation);
+                                         PCI_BUS_NUM(alias), alias & 0xff);
 }
 
 static int
-domain_context_mapping(struct dmar_domain *domain, struct device *dev,
-                      int translation)
+domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 {
        struct intel_iommu *iommu;
        u8 bus, devfn;
@@ -1997,12 +1999,10 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev,
                return -ENODEV;
 
        if (!dev_is_pci(dev))
-               return domain_context_mapping_one(domain, iommu, bus, devfn,
-                                                 translation);
+               return domain_context_mapping_one(domain, iommu, bus, devfn);
 
        data.domain = domain;
        data.iommu = iommu;
-       data.translation = translation;
 
        return pci_for_each_dma_alias(to_pci_dev(dev),
                                      &domain_context_mapping_cb, &data);
@@ -2188,7 +2188,7 @@ static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long i
        return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
 }
 
-static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
+static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
        if (!iommu)
                return;
@@ -2214,21 +2214,8 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
        unsigned long flags;
 
        spin_lock_irqsave(&device_domain_lock, flags);
-       list_for_each_entry_safe(info, tmp, &domain->devices, link) {
-               unlink_domain_info(info);
-               spin_unlock_irqrestore(&device_domain_lock, flags);
-
-               iommu_disable_dev_iotlb(info);
-               iommu_detach_dev(info->iommu, info->bus, info->devfn);
-
-               if (domain_type_is_vm(domain)) {
-                       iommu_detach_dependent_devices(info->iommu, info->dev);
-                       domain_detach_iommu(domain, info->iommu);
-               }
-
-               free_devinfo_mem(info);
-               spin_lock_irqsave(&device_domain_lock, flags);
-       }
+       list_for_each_entry_safe(info, tmp, &domain->devices, link)
+               __dmar_remove_one_dev_info(info);
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
 
@@ -2260,14 +2247,15 @@ dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
        return NULL;
 }
 
-static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
-                                               int bus, int devfn,
-                                               struct device *dev,
-                                               struct dmar_domain *domain)
+static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
+                                                   int bus, int devfn,
+                                                   struct device *dev,
+                                                   struct dmar_domain *domain)
 {
        struct dmar_domain *found = NULL;
        struct device_domain_info *info;
        unsigned long flags;
+       int ret;
 
        info = alloc_devinfo_mem();
        if (!info)
@@ -2282,12 +2270,16 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
        spin_lock_irqsave(&device_domain_lock, flags);
        if (dev)
                found = find_domain(dev);
-       else {
+
+       if (!found) {
                struct device_domain_info *info2;
                info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
-               if (info2)
-                       found = info2->domain;
+               if (info2) {
+                       found      = info2->domain;
+                       info2->dev = dev;
+               }
        }
+
        if (found) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                free_devinfo_mem(info);
@@ -2295,12 +2287,27 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
                return found;
        }
 
+       spin_lock(&iommu->lock);
+       ret = domain_attach_iommu(domain, iommu);
+       spin_unlock(&iommu->lock);
+
+       if (ret) {
+               spin_unlock_irqrestore(&device_domain_lock, flags);
+               return NULL;
+       }
+
        list_add(&info->link, &domain->devices);
        list_add(&info->global, &device_domain_list);
        if (dev)
                dev->archdata.iommu = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
+       if (dev && domain_context_mapping(domain, dev)) {
+               pr_err("Domain context map for %s failed\n", dev_name(dev));
+               dmar_remove_one_dev_info(domain, dev);
+               return NULL;
+       }
+
        return domain;
 }
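
dmar_insert_one_dev_info() now also performs the context mapping itself and
unwinds with dmar_remove_one_dev_info() on failure, which is why callers
further down simply drop their explicit domain_context_mapping() step. The
bus/devfn pairs it deals in are 16-bit PCI requester IDs; the packing used
by req_id in get_domain_for_dev() and by the alias callbacks, as a sketch:

    u16 req_id = ((u16)bus << 8) | devfn;     /* bus in bits 15:8 */

    /* unpacking an alias from pci_for_each_dma_alias(): */
    u8 alias_bus   = PCI_BUS_NUM(dma_alias);  /* (dma_alias >> 8) & 0xff */
    u8 alias_devfn = dma_alias & 0xff;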
 
@@ -2313,10 +2320,10 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
 /* domain is initialized */
 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
 {
+       struct device_domain_info *info = NULL;
        struct dmar_domain *domain, *tmp;
        struct intel_iommu *iommu;
-       struct device_domain_info *info;
-       u16 dma_alias;
+       u16 req_id, dma_alias;
        unsigned long flags;
        u8 bus, devfn;
 
@@ -2328,6 +2335,8 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
        if (!iommu)
                return NULL;
 
+       req_id = ((u16)bus << 8) | devfn;
+
        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
 
@@ -2352,21 +2361,15 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
        domain = alloc_domain(0);
        if (!domain)
                return NULL;
-       domain->id = iommu_attach_domain(domain, iommu);
-       if (domain->id < 0) {
-               free_domain_mem(domain);
-               return NULL;
-       }
-       domain_attach_iommu(domain, iommu);
-       if (domain_init(domain, gaw)) {
+       if (domain_init(domain, iommu, gaw)) {
                domain_exit(domain);
                return NULL;
        }
 
        /* register PCI DMA alias device */
-       if (dev_is_pci(dev)) {
-               tmp = dmar_insert_dev_info(iommu, PCI_BUS_NUM(dma_alias),
-                                          dma_alias & 0xff, NULL, domain);
+       if (req_id != dma_alias && dev_is_pci(dev)) {
+               tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
+                                              dma_alias & 0xff, NULL, domain);
 
                if (!tmp || tmp != domain) {
                        domain_exit(domain);
@@ -2378,7 +2381,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
        }
 
 found_domain:
-       tmp = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+       tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
 
        if (!tmp || tmp != domain) {
                domain_exit(domain);
@@ -2406,8 +2409,7 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
                return -ENOMEM;
        }
 
-       pr_debug("Mapping reserved region %llx-%llx for domain %d\n",
-                start, end, domain->id);
+       pr_debug("Mapping reserved region %llx-%llx\n", start, end);
        /*
         * RMRR range might have overlap with physical memory range,
         * clear it first
@@ -2468,11 +2470,6 @@ static int iommu_prepare_identity_map(struct device *dev,
        if (ret)
                goto error;
 
-       /* context entry init */
-       ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
-       if (ret)
-               goto error;
-
        return 0;
 
  error:
@@ -2518,37 +2515,18 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
 static int __init si_domain_init(int hw)
 {
-       struct dmar_drhd_unit *drhd;
-       struct intel_iommu *iommu;
        int nid, ret = 0;
-       bool first = true;
 
        si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
        if (!si_domain)
                return -EFAULT;
 
-       for_each_active_iommu(iommu, drhd) {
-               ret = iommu_attach_domain(si_domain, iommu);
-               if (ret < 0) {
-                       domain_exit(si_domain);
-                       return -EFAULT;
-               } else if (first) {
-                       si_domain->id = ret;
-                       first = false;
-               } else if (si_domain->id != ret) {
-                       domain_exit(si_domain);
-                       return -EFAULT;
-               }
-               domain_attach_iommu(si_domain, iommu);
-       }
-
        if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
                domain_exit(si_domain);
                return -EFAULT;
        }
 
-       pr_debug("Identity mapping domain is domain %d\n",
-                si_domain->id);
+       pr_debug("Identity mapping domain allocated\n");
 
        if (hw)
                return 0;
@@ -2582,28 +2560,20 @@ static int identity_mapping(struct device *dev)
        return 0;
 }
 
-static int domain_add_dev_info(struct dmar_domain *domain,
-                              struct device *dev, int translation)
+static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
        struct dmar_domain *ndomain;
        struct intel_iommu *iommu;
        u8 bus, devfn;
-       int ret;
 
        iommu = device_to_iommu(dev, &bus, &devfn);
        if (!iommu)
                return -ENODEV;
 
-       ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain);
+       ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
        if (ndomain != domain)
                return -EBUSY;
 
-       ret = domain_context_mapping(domain, dev, translation);
-       if (ret) {
-               domain_remove_one_dev_info(domain, dev);
-               return ret;
-       }
-
        return 0;
 }
 
@@ -2743,9 +2713,7 @@ static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw
        if (!iommu_should_identity_map(dev, 1))
                return 0;
 
-       ret = domain_add_dev_info(si_domain, dev,
-                                 hw ? CONTEXT_TT_PASS_THROUGH :
-                                      CONTEXT_TT_MULTI_LEVEL);
+       ret = domain_add_dev_info(si_domain, dev);
        if (!ret)
                pr_info("%s identity mapping for device %s\n",
                        hw ? "Hardware" : "Software", dev_name(dev));
@@ -2831,15 +2799,18 @@ static void intel_iommu_init_qi(struct intel_iommu *iommu)
 }
 
 static int copy_context_table(struct intel_iommu *iommu,
-                             struct root_entry *old_re,
+                             struct root_entry __iomem *old_re,
                              struct context_entry **tbl,
                              int bus, bool ext)
 {
-       struct context_entry *old_ce = NULL, *new_ce = NULL, ce;
        int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
+       struct context_entry __iomem *old_ce = NULL;
+       struct context_entry *new_ce = NULL, ce;
+       struct root_entry re;
        phys_addr_t old_ce_phys;
 
        tbl_idx = ext ? bus * 2 : bus;
+       memcpy_fromio(&re, old_re, sizeof(re));
 
        for (devfn = 0; devfn < 256; devfn++) {
                /* First calculate the correct index */
@@ -2859,9 +2830,9 @@ static int copy_context_table(struct intel_iommu *iommu,
 
                        ret = 0;
                        if (devfn < 0x80)
-                               old_ce_phys = root_entry_lctp(old_re);
+                               old_ce_phys = root_entry_lctp(&re);
                        else
-                               old_ce_phys = root_entry_uctp(old_re);
+                               old_ce_phys = root_entry_uctp(&re);
 
                        if (!old_ce_phys) {
                                if (ext && devfn == 0) {
@@ -2886,7 +2857,7 @@ static int copy_context_table(struct intel_iommu *iommu,
                }
 
                /* Now copy the context entry */
-               ce = old_ce[idx];
+               memcpy_fromio(&ce, old_ce + idx, sizeof(ce));
 
                if (!__context_present(&ce))
                        continue;
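
The copy_context_table() hunks are __iomem correctness fixes: during a kdump
handover the old kernel's root and context tables are only reachable through
an ioremap()ed window, so they must not be dereferenced directly; every read
becomes a memcpy_fromio() into a local copy. The pattern, as a sketch (the
mapping call and size stand in for whatever the driver actually uses):

    struct context_entry ce;
    struct context_entry __iomem *old_ce;

    old_ce = ioremap(old_ce_phys, VTD_PAGE_SIZE);
    if (!old_ce)
            return -ENOMEM;

    memcpy_fromio(&ce, old_ce + idx, sizeof(ce));  /* no direct deref */
    /* ... inspect the local copy 'ce' ... */
    iounmap(old_ce);
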
@@ -2930,8 +2901,8 @@ out:
 
 static int copy_translation_tables(struct intel_iommu *iommu)
 {
+       struct root_entry __iomem *old_rt;
        struct context_entry **ctxt_tbls;
-       struct root_entry *old_rt;
        phys_addr_t old_rt_phys;
        int ctxt_table_entries;
        unsigned long flags;
@@ -3261,7 +3232,6 @@ static struct iova *intel_alloc_iova(struct device *dev,
 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
 {
        struct dmar_domain *domain;
-       int ret;
 
        domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
        if (!domain) {
@@ -3270,16 +3240,6 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
                return NULL;
        }
 
-       /* make sure context mapping is ok */
-       if (unlikely(!domain_context_mapped(dev))) {
-               ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
-               if (ret) {
-                       pr_err("Domain context map for %s failed\n",
-                              dev_name(dev));
-                       return NULL;
-               }
-       }
-
        return domain;
 }
 
@@ -3315,7 +3275,7 @@ static int iommu_no_mapping(struct device *dev)
                         * 32 bit DMA is removed from si_domain and fall back
                         * to non-identity mapping.
                         */
-                       domain_remove_one_dev_info(si_domain, dev);
+                       dmar_remove_one_dev_info(si_domain, dev);
                        pr_info("32bit %s uses non-identity mapping\n",
                                dev_name(dev));
                        return 0;
@@ -3327,10 +3287,7 @@ static int iommu_no_mapping(struct device *dev)
                 */
                if (iommu_should_identity_map(dev, 0)) {
                        int ret;
-                       ret = domain_add_dev_info(si_domain, dev,
-                                                 hw_pass_through ?
-                                                 CONTEXT_TT_PASS_THROUGH :
-                                                 CONTEXT_TT_MULTI_LEVEL);
+                       ret = domain_add_dev_info(si_domain, dev);
                        if (!ret) {
                                pr_info("64bit %s uses identity mapping\n",
                                        dev_name(dev));
@@ -3391,7 +3348,9 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 
        /* it's a non-present to present mapping. Only flush if caching mode */
        if (cap_caching_mode(iommu->cap))
-               iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1);
+               iommu_flush_iotlb_psi(iommu, domain,
+                                     mm_to_dma_pfn(iova->pfn_lo),
+                                     size, 0, 1);
        else
                iommu_flush_write_buffer(iommu);
 
@@ -3442,7 +3401,7 @@ static void flush_unmaps(void)
 
                        /* On real hardware multiple invalidations are expensive */
                        if (cap_caching_mode(iommu->cap))
-                               iommu_flush_iotlb_psi(iommu, domain->id,
+                               iommu_flush_iotlb_psi(iommu, domain,
                                        iova->pfn_lo, iova_size(iova),
                                        !deferred_flush[i].freelist[j], 0);
                        else {
@@ -3526,7 +3485,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
        freelist = domain_unmap(domain, start_pfn, last_pfn);
 
        if (intel_iommu_strict) {
-               iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
+               iommu_flush_iotlb_psi(iommu, domain, start_pfn,
                                      last_pfn - start_pfn + 1, !freelist, 0);
                /* free iova */
                __free_iova(&domain->iovad, iova);
@@ -3684,7 +3643,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 
        /* it's a non-present to present mapping. Only flush if caching mode */
        if (cap_caching_mode(iommu->cap))
-               iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1);
+               iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
        else
                iommu_flush_write_buffer(iommu);
 
@@ -4161,13 +4120,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
        iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
        iommu_enable_translation(iommu);
 
-       if (si_domain) {
-               ret = iommu_attach_domain(si_domain, iommu);
-               if (ret < 0 || si_domain->id != ret)
-                       goto disable_iommu;
-               domain_attach_iommu(si_domain, iommu);
-       }
-
        iommu_disable_protect_mem_regions(iommu);
        return 0;
 
@@ -4329,11 +4281,9 @@ static int device_notifier(struct notifier_block *nb,
        if (!domain)
                return 0;
 
-       down_read(&dmar_global_lock);
-       domain_remove_one_dev_info(domain, dev);
+       dmar_remove_one_dev_info(domain, dev);
        if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
                domain_exit(domain);
-       up_read(&dmar_global_lock);
 
        return 0;
 }
@@ -4390,7 +4340,7 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
 
                        rcu_read_lock();
                        for_each_active_iommu(iommu, drhd)
-                               iommu_flush_iotlb_psi(iommu, si_domain->id,
+                               iommu_flush_iotlb_psi(iommu, si_domain,
                                        iova->pfn_lo, iova_size(iova),
                                        !freelist, 0);
                        rcu_read_unlock();
@@ -4449,11 +4399,32 @@ static ssize_t intel_iommu_show_ecap(struct device *dev,
 }
 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
 
+static ssize_t intel_iommu_show_ndoms(struct device *dev,
+                                     struct device_attribute *attr,
+                                     char *buf)
+{
+       struct intel_iommu *iommu = dev_get_drvdata(dev);
+       return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
+}
+static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
+
+static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
+                                          struct device_attribute *attr,
+                                          char *buf)
+{
+       struct intel_iommu *iommu = dev_get_drvdata(dev);
+       return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
+                                                 cap_ndoms(iommu->cap)));
+}
+static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
+
 static struct attribute *intel_iommu_attrs[] = {
        &dev_attr_version.attr,
        &dev_attr_address.attr,
        &dev_attr_cap.attr,
        &dev_attr_ecap.attr,
+       &dev_attr_domains_supported.attr,
+       &dev_attr_domains_used.attr,
        NULL,
 };
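
The two new attributes expose, per IOMMU, how many domain IDs the hardware
supports (cap_ndoms()) and how many are allocated right now. Since allocated
IDs are simply set bits in iommu->domain_ids, the count is a bitmap_weight();
for example:

    DECLARE_BITMAP(ids, 64);

    bitmap_zero(ids, 64);
    set_bit(3, ids);
    set_bit(17, ids);
    /* bitmap_weight() counts the set bits: returns 2 here */
    pr_info("%d domain IDs in use\n", bitmap_weight(ids, 64));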
 
@@ -4533,7 +4504,7 @@ int __init intel_iommu_init(void)
        for_each_active_iommu(iommu, drhd)
                iommu->iommu_dev = iommu_device_create(NULL, iommu,
                                                       intel_iommu_groups,
-                                                      iommu->name);
+                                                      "%s", iommu->name);
 
        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
        bus_register_notifier(&pci_bus_type, &device_nb);
@@ -4553,11 +4524,11 @@ out_free_dmar:
        return ret;
 }
 
-static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
 {
        struct intel_iommu *iommu = opaque;
 
-       iommu_detach_dev(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+       domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
        return 0;
 }
 
@@ -4567,63 +4538,50 @@ static int iommu_detach_dev_cb(struct pci_dev *pdev, u16 alias, void *opaque)
  * devices, unbinding the driver from any one of them will possibly leave
  * the others unable to operate.
  */
-static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
-                                          struct device *dev)
+static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
 {
        if (!iommu || !dev || !dev_is_pci(dev))
                return;
 
-       pci_for_each_dma_alias(to_pci_dev(dev), &iommu_detach_dev_cb, iommu);
+       pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
 }
 
-static void domain_remove_one_dev_info(struct dmar_domain *domain,
-                                      struct device *dev)
+static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 {
-       struct device_domain_info *info, *tmp;
        struct intel_iommu *iommu;
        unsigned long flags;
-       bool found = false;
-       u8 bus, devfn;
 
-       iommu = device_to_iommu(dev, &bus, &devfn);
-       if (!iommu)
+       assert_spin_locked(&device_domain_lock);
+
+       if (WARN_ON(!info))
                return;
 
-       spin_lock_irqsave(&device_domain_lock, flags);
-       list_for_each_entry_safe(info, tmp, &domain->devices, link) {
-               if (info->iommu == iommu && info->bus == bus &&
-                   info->devfn == devfn) {
-                       unlink_domain_info(info);
-                       spin_unlock_irqrestore(&device_domain_lock, flags);
+       iommu = info->iommu;
 
-                       iommu_disable_dev_iotlb(info);
-                       iommu_detach_dev(iommu, info->bus, info->devfn);
-                       iommu_detach_dependent_devices(iommu, dev);
-                       free_devinfo_mem(info);
+       if (info->dev) {
+               iommu_disable_dev_iotlb(info);
+               domain_context_clear(iommu, info->dev);
+       }
 
-                       spin_lock_irqsave(&device_domain_lock, flags);
+       unlink_domain_info(info);
 
-                       if (found)
-                               break;
-                       else
-                               continue;
-               }
+       spin_lock_irqsave(&iommu->lock, flags);
+       domain_detach_iommu(info->domain, iommu);
+       spin_unlock_irqrestore(&iommu->lock, flags);
 
-               /* if there is no other devices under the same iommu
-                * owned by this domain, clear this iommu in iommu_bmp
-                * update iommu count and coherency
-                */
-               if (info->iommu == iommu)
-                       found = true;
-       }
+       free_devinfo_mem(info);
+}
 
-       spin_unlock_irqrestore(&device_domain_lock, flags);
+static void dmar_remove_one_dev_info(struct dmar_domain *domain,
+                                    struct device *dev)
+{
+       struct device_domain_info *info;
+       unsigned long flags;
 
-       if (found == 0) {
-               domain_detach_iommu(domain, iommu);
-               if (!domain_type_is_vm_or_si(domain))
-                       iommu_detach_domain(domain, iommu);
-       }
+       spin_lock_irqsave(&device_domain_lock, flags);
+       info = dev->archdata.iommu;
+       __dmar_remove_one_dev_info(info);
+       spin_unlock_irqrestore(&device_domain_lock, flags);
 }
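
dmar_remove_one_dev_info() follows the usual locked/unlocked split: the
double-underscore helper asserts that device_domain_lock is held, so that
domain_remove_dev_info() can call it while iterating under the same lock.
The convention, as a generic sketch (names hypothetical):

    static void __remove_one_locked(struct device_domain_info *info)
    {
            assert_spin_locked(&device_domain_lock);
            /* ... unlink and free; safe against concurrent lookups ... */
    }

    static void remove_one(struct device_domain_info *info)
    {
            unsigned long flags;

            spin_lock_irqsave(&device_domain_lock, flags);
            __remove_one_locked(info);
            spin_unlock_irqrestore(&device_domain_lock, flags);
    }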
 
 static int md_domain_init(struct dmar_domain *domain, int guest_width)
@@ -4704,10 +4662,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 
                old_domain = find_domain(dev);
                if (old_domain) {
-                       if (domain_type_is_vm_or_si(dmar_domain))
-                               domain_remove_one_dev_info(old_domain, dev);
-                       else
-                               domain_remove_dev_info(old_domain);
+                       rcu_read_lock();
+                       dmar_remove_one_dev_info(old_domain, dev);
+                       rcu_read_unlock();
 
                        if (!domain_type_is_vm_or_si(old_domain) &&
                             list_empty(&old_domain->devices))
@@ -4747,13 +4704,13 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
                dmar_domain->agaw--;
        }
 
-       return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
+       return domain_add_dev_info(dmar_domain, dev);
 }
 
 static void intel_iommu_detach_device(struct iommu_domain *domain,
                                      struct device *dev)
 {
-       domain_remove_one_dev_info(to_dmar_domain(domain), dev);
+       dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
 }
 
 static int intel_iommu_map(struct iommu_domain *domain,
@@ -4802,12 +4759,11 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
        struct intel_iommu *iommu;
        unsigned long start_pfn, last_pfn;
        unsigned int npages;
-       int iommu_id, num, ndomains, level = 0;
+       int iommu_id, level = 0;
 
        /* Cope with horrid API which requires us to unmap more than the
           size argument if it happens to be a large-page mapping. */
-       if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level))
-               BUG();
+       BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
 
        if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
                size = VTD_PAGE_SIZE << level_to_offset_bits(level);
@@ -4819,19 +4775,11 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 
        npages = last_pfn - start_pfn + 1;
 
-       for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) {
-               iommu = g_iommus[iommu_id];
-
-               /*
-                * find bit position of dmar_domain
-                */
-               ndomains = cap_ndoms(iommu->cap);
-               for_each_set_bit(num, iommu->domain_ids, ndomains) {
-                       if (iommu->domains[num] == dmar_domain)
-                               iommu_flush_iotlb_psi(iommu, num, start_pfn,
-                                                    npages, !freelist, 0);
-              }
+       for_each_domain_iommu(iommu_id, dmar_domain) {
+               iommu = g_iommus[iommu_id];
 
+               iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
+                                     start_pfn, npages, !freelist, 0);
        }
 
        dma_free_pagelist(freelist);
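
Passing the dmar_domain itself (rather than a global domain->id) into
iommu_flush_iotlb_psi() is what makes per-IOMMU domain IDs workable: each
call site flushes with the ID that this particular IOMMU knows the domain
by. A sketch of the lookup the flush path presumably performs now (helper
name hypothetical):

    static void flush_domain_psi(struct intel_iommu *iommu,
                                 struct dmar_domain *domain,
                                 unsigned long pfn, unsigned long pages)
    {
            u16 did = domain->iommu_did[iommu->seq_id];
            unsigned int mask = ilog2(__roundup_pow_of_two(pages));

            iommu->flush.flush_iotlb(iommu, did, (u64)pfn << VTD_PAGE_SHIFT,
                                     mask, DMA_TLB_PSI_FLUSH);
    }
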
index f15692a410c7e7064e844be2b7d83feee20ef5c5..9ec4e0d94ffd5bdd0be3be7fe6c83ae9ac37e85f 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -384,7 +384,7 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 
 static int iommu_load_old_irte(struct intel_iommu *iommu)
 {
-       struct irte *old_ir_table;
+       struct irte __iomem *old_ir_table;
        phys_addr_t irt_phys;
        unsigned int i;
        size_t size;
@@ -413,7 +413,7 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
                return -ENOMEM;
 
        /* Copy data over */
-       memcpy(iommu->ir_table->base, old_ir_table, size);
+       memcpy_fromio(iommu->ir_table->base, old_ir_table, size);
 
        __iommu_flush_cache(iommu, iommu->ir_table->base, size);
 
@@ -426,6 +426,8 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
                        bitmap_set(iommu->ir_table->bitmap, i, 1);
        }
 
+       iounmap(old_ir_table);
+
        return 0;
 }
 
index 4e460216bd1644e5bb8a26ba9a4c3891d7452393..73c07482f48763c5af3f0d43d73a2f04774bb74d 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -26,6 +26,8 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 
+#include <asm/barrier.h>
+
 #include "io-pgtable.h"
 
 #define ARM_LPAE_MAX_ADDR_BITS         48
@@ -200,20 +202,97 @@ typedef u64 arm_lpae_iopte;
 
 static bool selftest_running = false;
 
+static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
+{
+       return phys_to_dma(dev, virt_to_phys(pages));
+}
+
+static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
+                                   struct io_pgtable_cfg *cfg)
+{
+       struct device *dev = cfg->iommu_dev;
+       dma_addr_t dma;
+       void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
+
+       if (!pages)
+               return NULL;
+
+       if (!selftest_running) {
+               dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, dma))
+                       goto out_free;
+               /*
+                * We depend on the IOMMU being able to work with any physical
+                * address directly, so if the DMA layer suggests it can't by
+                * giving us back some translation, that bodes very badly...
+                */
+               if (dma != __arm_lpae_dma_addr(dev, pages))
+                       goto out_unmap;
+       }
+
+       return pages;
+
+out_unmap:
+       dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
+       dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
+out_free:
+       free_pages_exact(pages, size);
+       return NULL;
+}
+
+static void __arm_lpae_free_pages(void *pages, size_t size,
+                                 struct io_pgtable_cfg *cfg)
+{
+       struct device *dev = cfg->iommu_dev;
+
+       if (!selftest_running)
+               dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
+                                size, DMA_TO_DEVICE);
+       free_pages_exact(pages, size);
+}
+
+static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
+                              struct io_pgtable_cfg *cfg)
+{
+       struct device *dev = cfg->iommu_dev;
+
+       *ptep = pte;
+
+       if (!selftest_running)
+               dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
+                                          sizeof(pte), DMA_TO_DEVICE);
+}
+
+static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+                           unsigned long iova, size_t size, int lvl,
+                           arm_lpae_iopte *ptep);
+
 static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
                             unsigned long iova, phys_addr_t paddr,
                             arm_lpae_iopte prot, int lvl,
                             arm_lpae_iopte *ptep)
 {
        arm_lpae_iopte pte = prot;
+       struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
-       /* We require an unmap first */
        if (iopte_leaf(*ptep, lvl)) {
+               /* We require an unmap first */
                WARN_ON(!selftest_running);
                return -EEXIST;
+       } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
+               /*
+                * We need to unmap and free the old table before
+                * overwriting it with a block entry.
+                */
+               arm_lpae_iopte *tblp;
+               size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+               tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
+               if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
+                       return -EINVAL;
        }
 
-       if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+       if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
                pte |= ARM_LPAE_PTE_NS;
 
        if (lvl == ARM_LPAE_MAX_LEVELS - 1)
@@ -224,8 +303,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
        pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
        pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
 
-       *ptep = pte;
-       data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
+       __arm_lpae_set_pte(ptep, pte, cfg);
        return 0;
 }
 
@@ -234,14 +312,14 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
                          int lvl, arm_lpae_iopte *ptep)
 {
        arm_lpae_iopte *cptep, pte;
-       void *cookie = data->iop.cookie;
        size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+       struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
        /* Find our entry at the current level */
        ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 
        /* If we can install a leaf entry at this level, then do so */
-       if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
+       if (size == block_size && (size & cfg->pgsize_bitmap))
                return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
 
        /* We can't allocate tables at the final level */
@@ -251,18 +329,15 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
        /* Grab a pointer to the next level */
        pte = *ptep;
        if (!pte) {
-               cptep = alloc_pages_exact(1UL << data->pg_shift,
-                                        GFP_ATOMIC | __GFP_ZERO);
+               cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
+                                              GFP_ATOMIC, cfg);
                if (!cptep)
                        return -ENOMEM;
 
-               data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
-                                                cookie);
                pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
-               if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+               if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
                        pte |= ARM_LPAE_PTE_NSTABLE;
-               *ptep = pte;
-               data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+               __arm_lpae_set_pte(ptep, pte, cfg);
        } else {
                cptep = iopte_deref(pte, data);
        }
@@ -309,7 +384,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 {
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        arm_lpae_iopte *ptep = data->pgd;
-       int lvl = ARM_LPAE_START_LVL(data);
+       int ret, lvl = ARM_LPAE_START_LVL(data);
        arm_lpae_iopte prot;
 
        /* If no access, then nothing to do */
@@ -317,7 +392,14 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
                return 0;
 
        prot = arm_lpae_prot_to_pte(data, iommu_prot);
-       return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+       ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+       /*
+        * Synchronise all PTE updates for the new mapping before there's
+        * a chance for anything to kick off a table walk for the new iova.
+        */
+       wmb();
+
+       return ret;
 }
 
 static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
@@ -347,7 +429,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
                __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
        }
 
-       free_pages_exact(start, table_size);
+       __arm_lpae_free_pages(start, table_size, &data->iop.cfg);
 }
 
 static void arm_lpae_free_pgtable(struct io_pgtable *iop)
@@ -366,8 +448,7 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
        unsigned long blk_start, blk_end;
        phys_addr_t blk_paddr;
        arm_lpae_iopte table = 0;
-       void *cookie = data->iop.cookie;
-       const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+       struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
        blk_start = iova & ~(blk_size - 1);
        blk_end = blk_start + blk_size;
@@ -393,10 +474,9 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
                }
        }
 
-       *ptep = table;
-       tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+       __arm_lpae_set_pte(ptep, table, cfg);
        iova &= ~(blk_size - 1);
-       tlb->tlb_add_flush(iova, blk_size, true, cookie);
+       cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
        return size;
 }
 
@@ -418,13 +498,12 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 
        /* If the size matches this level, we're in the right place */
        if (size == blk_size) {
-               *ptep = 0;
-               tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+               __arm_lpae_set_pte(ptep, 0, &data->iop.cfg);
 
                if (!iopte_leaf(pte, lvl)) {
                        /* Also flush any partial walks */
                        tlb->tlb_add_flush(iova, size, false, cookie);
-                       tlb->tlb_sync(data->iop.cookie);
+                       tlb->tlb_sync(cookie);
                        ptep = iopte_deref(pte, data);
                        __arm_lpae_free_pgtable(data, lvl + 1, ptep);
                } else {
@@ -640,11 +719,12 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
        cfg->arm_lpae_s1_cfg.mair[1] = 0;
 
        /* Looking good; allocate a pgd */
-       data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+       data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
        if (!data->pgd)
                goto out_free_data;
 
-       cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+       /* Ensure the empty pgd is visible before any actual TTBR write */
+       wmb();
 
        /* TTBRs */
        cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
@@ -728,11 +808,12 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
        cfg->arm_lpae_s2_cfg.vtcr = reg;
 
        /* Allocate pgd pages */
-       data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+       data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
        if (!data->pgd)
                goto out_free_data;
 
-       cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+       /* Ensure the empty pgd is visible before any actual TTBR write */
+       wmb();
 
        /* VTTBR */
        cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
@@ -818,16 +899,10 @@ static void dummy_tlb_sync(void *cookie)
        WARN_ON(cookie != cfg_cookie);
 }
 
-static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
-{
-       WARN_ON(cookie != cfg_cookie);
-}
-
 static struct iommu_gather_ops dummy_tlb_ops __initdata = {
        .tlb_flush_all  = dummy_tlb_flush_all,
        .tlb_add_flush  = dummy_tlb_add_flush,
        .tlb_sync       = dummy_tlb_sync,
-       .flush_pgtable  = dummy_flush_pgtable,
 };
 
 static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
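
Taken together, the io-pgtable-arm changes replace the driver-supplied
flush_pgtable() callback with the generic DMA API: tables are mapped with
dma_map_single() at allocation time, every PTE store is pushed out with
dma_sync_single_for_device(), and the new wmb() in arm_lpae_map() orders
the PTE writes against anything that could start a table walk for the new
IOVA. The per-PTE pattern, as a sketch (mirroring the code's check that the
table's DMA address equals phys_to_dma() of its virtual address):

    static void set_pte_and_sync(struct device *dev, u64 *ptep, u64 pte)
    {
            *ptep = pte;

            /* Clean the CPU cache line so a non-coherent walker sees it */
            dma_sync_single_for_device(dev,
                                       phys_to_dma(dev, virt_to_phys(ptep)),
                                       sizeof(*ptep), DMA_TO_DEVICE);
    }
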
index 6436fe24bc2f6fdc0273d6017056a24f964b640e..6f2e319d4f04a58d1174984338c0af9d856e9329 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
 
 #include "io-pgtable.h"
 
-extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
-
 static const struct io_pgtable_init_fns *
 io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
 {
index 10e32f69c6681368cb04023173a4bcf0b859839a..ac9e2341a633ed82420d3d06bd24cec23240b86b 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -17,8 +17,9 @@ enum io_pgtable_fmt {
  *
  * @tlb_flush_all: Synchronously invalidate the entire TLB context.
  * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
- * @tlb_sync:      Ensure any queue TLB invalidation has taken effect.
- * @flush_pgtable: Ensure page table updates are visible to the IOMMU.
+ * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
+ *                 any corresponding page table updates are visible to the
+ *                 IOMMU.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
@@ -28,7 +29,6 @@ struct iommu_gather_ops {
        void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
                              void *cookie);
        void (*tlb_sync)(void *cookie);
-       void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
 };
 
 /**
@@ -41,6 +41,8 @@ struct iommu_gather_ops {
  * @ias:           Input address (iova) size, in bits.
  * @oas:           Output address (paddr) size, in bits.
  * @tlb:           TLB management callbacks for this set of tables.
+ * @iommu_dev:     The device representing the DMA configuration for the
+ *                 page table walker.
  */
 struct io_pgtable_cfg {
        #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0)        /* Set NS bit in PTEs */
@@ -49,6 +51,7 @@ struct io_pgtable_cfg {
        unsigned int                    ias;
        unsigned int                    oas;
        const struct iommu_gather_ops   *tlb;
+       struct device                   *iommu_dev;
 
        /* Low-level data specific to the table format */
        union {
@@ -140,4 +143,9 @@ struct io_pgtable_init_fns {
        void (*free)(struct io_pgtable *iop);
 };
 
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+
 #endif /* __IO_PGTABLE_H */
index 1a67c531a07eb908519e5c130dd22e0ebf377566..8cf605fa9946013642b2a88f500beb285cc55cfc 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -283,24 +283,10 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
        /* The hardware doesn't support selective TLB flush. */
 }
 
-static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie)
-{
-       unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
-       struct ipmmu_vmsa_domain *domain = cookie;
-
-       /*
-        * TODO: Add support for coherent walk through CCI with DVM and remove
-        * cache handling.
-        */
-       dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
-                    DMA_TO_DEVICE);
-}
-
 static struct iommu_gather_ops ipmmu_gather_ops = {
        .tlb_flush_all = ipmmu_tlb_flush_all,
        .tlb_add_flush = ipmmu_tlb_add_flush,
        .tlb_sync = ipmmu_tlb_flush_all,
-       .flush_pgtable = ipmmu_flush_pgtable,
 };
 
 /* -----------------------------------------------------------------------------
@@ -327,6 +313,11 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
        domain->cfg.ias = 32;
        domain->cfg.oas = 40;
        domain->cfg.tlb = &ipmmu_gather_ops;
+       /*
+        * TODO: Add support for coherent walk through CCI with DVM and remove
+        * cache handling. For now, delegate it to the io-pgtable code.
+        */
+       domain->cfg.iommu_dev = domain->mmu->dev;
 
        domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
                                           domain);
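
With flush_pgtable() gone from iommu_gather_ops, a driver's only new duty is
naming the device that DMA maintenance for the tables should be done against,
via the new cfg->iommu_dev field. A hedged caller-side sketch (the values and
the tlb ops pointer are illustrative, not from this driver):

    struct io_pgtable_cfg cfg = {
            .pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G,
            .ias           = 32,           /* input (IOVA) bits */
            .oas           = 40,           /* output (PA) bits */
            .tlb           = &my_tlb_ops,  /* hypothetical gather ops */
            .iommu_dev     = dev,          /* used to dma_map/sync tables */
    };
    struct io_pgtable_ops *ops;

    ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &cfg, cookie);
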
index 2d9993062ded6b2d543c89a9c09c3e6a85dfb17f..913455a5fd40e044e21ec20095296027dad3c1e6 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -84,7 +84,7 @@ void set_irq_remapping_broken(void)
 bool irq_remapping_cap(enum irq_remap_cap cap)
 {
        if (!remap_ops || disable_irq_post)
-               return 0;
+               return false;
 
        return (remap_ops->capability & (1 << cap));
 }
index 15a2063812fa8ddf5aa2aa2eb912c526b8bc4da2..e321fa517a4526d191a6a997a361e0228305932b 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -106,8 +106,8 @@ static int __flush_iotlb(struct iommu_domain *domain)
 #endif
 
        list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
-               if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
-                       BUG();
+
+               BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
 
                iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
                BUG_ON(!iommu_drvdata);
index f3d20a2039d20417093fd70c6192b905e7c3ccf8..0717aa96ce39bd22d1b4fe9e3a82242fa1e5403e 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -14,6 +14,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
 #include <linux/debugfs.h>
 #include <linux/platform_data/iommu-omap.h>
 
@@ -29,6 +30,59 @@ static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
        return !obj->domain;
 }
 
+#define pr_reg(name)                                                   \
+       do {                                                            \
+               ssize_t bytes;                                          \
+               const char *str = "%20s: %08x\n";                       \
+               const int maxcol = 32;                                  \
+               bytes = snprintf(p, maxcol, str, __stringify(name),     \
+                                iommu_read_reg(obj, MMU_##name));      \
+               p += bytes;                                             \
+               len -= bytes;                                           \
+               if (len < maxcol)                                       \
+                       goto out;                                       \
+       } while (0)
+
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
+{
+       char *p = buf;
+
+       pr_reg(REVISION);
+       pr_reg(IRQSTATUS);
+       pr_reg(IRQENABLE);
+       pr_reg(WALKING_ST);
+       pr_reg(CNTL);
+       pr_reg(FAULT_AD);
+       pr_reg(TTB);
+       pr_reg(LOCK);
+       pr_reg(LD_TLB);
+       pr_reg(CAM);
+       pr_reg(RAM);
+       pr_reg(GFLUSH);
+       pr_reg(FLUSH_ENTRY);
+       pr_reg(READ_CAM);
+       pr_reg(READ_RAM);
+       pr_reg(EMU_FAULT_AD);
+out:
+       return p - buf;
+}
+
+static ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf,
+                                  ssize_t bytes)
+{
+       if (!obj || !buf)
+               return -EINVAL;
+
+       pm_runtime_get_sync(obj->dev);
+
+       bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
+
+       pm_runtime_put_sync(obj->dev);
+
+       return bytes;
+}
+
 static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
                               size_t count, loff_t *ppos)
 {
@@ -55,34 +109,71 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
        return bytes;
 }
 
-static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
-                             size_t count, loff_t *ppos)
+static int
+__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
 {
-       struct omap_iommu *obj = file->private_data;
-       char *p, *buf;
-       ssize_t bytes, rest;
+       int i;
+       struct iotlb_lock saved;
+       struct cr_regs tmp;
+       struct cr_regs *p = crs;
+
+       pm_runtime_get_sync(obj->dev);
+       iotlb_lock_get(obj, &saved);
+
+       for_each_iotlb_cr(obj, num, i, tmp) {
+               if (!iotlb_cr_valid(&tmp))
+                       continue;
+               *p++ = tmp;
+       }
+
+       iotlb_lock_set(obj, &saved);
+       pm_runtime_put_sync(obj->dev);
+
+       return  p - crs;
+}
+
+static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
+                            struct seq_file *s)
+{
+       return seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram,
+                         (cr->cam & MMU_CAM_P) ? 1 : 0);
+}
+
+static size_t omap_dump_tlb_entries(struct omap_iommu *obj, struct seq_file *s)
+{
+       int i, num;
+       struct cr_regs *cr;
+
+       num = obj->nr_tlb_entries;
+
+       cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
+       if (!cr)
+               return 0;
+
+       num = __dump_tlb_entries(obj, cr, num);
+       for (i = 0; i < num; i++)
+               iotlb_dump_cr(obj, cr + i, s);
+       kfree(cr);
+
+       return 0;
+}
+
+static int debug_read_tlb(struct seq_file *s, void *data)
+{
+       struct omap_iommu *obj = s->private;
 
        if (is_omap_iommu_detached(obj))
                return -EPERM;
 
-       buf = kmalloc(count, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-       p = buf;
-
        mutex_lock(&iommu_debug_lock);
 
-       p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
-       p += sprintf(p, "-----------------------------------------\n");
-       rest = count - (p - buf);
-       p += omap_dump_tlb_entries(obj, p, rest);
-
-       bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+       seq_printf(s, "%8s %8s\n", "cam:", "ram:");
+       seq_puts(s, "-----------------------------------------\n");
+       omap_dump_tlb_entries(obj, s);
 
        mutex_unlock(&iommu_debug_lock);
-       kfree(buf);
 
-       return bytes;
+       return 0;
 }
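
The tlb debugfs file moves from a hand-rolled kmalloc() plus
simple_read_from_buffer() read to seq_file, which handles buffering and
partial reads itself. The DEBUG_SEQ_FOPS_RO(tlb) macro presumably expands
to the usual single_open() boilerplate, roughly:

    static int debug_open_tlb(struct inode *inode, struct file *file)
    {
            return single_open(file, debug_read_tlb, inode->i_private);
    }

    static const struct file_operations debug_tlb_fops = {
            .open    = debug_open_tlb,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };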
 
 static void dump_ioptable(struct seq_file *s)
@@ -154,10 +245,10 @@ static int debug_read_pagetable(struct seq_file *s, void *data)
                .open = simple_open,                                    \
                .read = debug_read_##name,                              \
                .llseek = generic_file_llseek,                          \
-       };
+       }
 
 DEBUG_FOPS_RO(regs);
-DEBUG_FOPS_RO(tlb);
+DEBUG_SEQ_FOPS_RO(tlb);
 DEBUG_SEQ_FOPS_RO(pagetable);
 
 #define __DEBUG_ADD_FILE(attr, mode)                                   \
index a22c33d6a486c9dea9bfa820feea18a034623eec..36d0033c2ccbfc554b02b8b4183fdbde4ededffe 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -12,7 +12,6 @@
  */
 
 #include <linux/err.h>
-#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #define to_iommu(dev)                                                  \
        ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
 
-#define for_each_iotlb_cr(obj, n, __i, cr)                             \
-       for (__i = 0;                                                   \
-            (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);   \
-            __i++)
-
 /* bitmap of the page sizes currently supported */
 #define OMAP_IOMMU_PGSIZES     (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
 
@@ -72,11 +66,6 @@ struct omap_iommu_domain {
 #define MMU_LOCK_VICT(x)       \
        ((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
 
-struct iotlb_lock {
-       short base;
-       short vict;
-};
-
 static struct platform_driver omap_iommu_driver;
 static struct kmem_cache *iopte_cachep;
 
@@ -213,14 +202,6 @@ static void iommu_disable(struct omap_iommu *obj)
 /*
  *     TLB operations
  */
-static inline int iotlb_cr_valid(struct cr_regs *cr)
-{
-       if (!cr)
-               return -EINVAL;
-
-       return cr->cam & MMU_CAM_V;
-}
-
 static u32 iotlb_cr_to_virt(struct cr_regs *cr)
 {
        u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
@@ -260,7 +241,7 @@ static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
        return status;
 }
 
-static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
+void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
 {
        u32 val;
 
@@ -268,10 +249,9 @@ static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
 
        l->base = MMU_LOCK_BASE(val);
        l->vict = MMU_LOCK_VICT(val);
-
 }
 
-static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
+void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
 {
        u32 val;
 
@@ -297,7 +277,7 @@ static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
 }
 
 /* only used in iotlb iteration for-loop */
-static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
+struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
 {
        struct cr_regs cr;
        struct iotlb_lock l;
@@ -468,129 +448,6 @@ static void flush_iotlb_all(struct omap_iommu *obj)
        pm_runtime_put_sync(obj->dev);
 }
 
-#ifdef CONFIG_OMAP_IOMMU_DEBUG
-
-#define pr_reg(name)                                                   \
-       do {                                                            \
-               ssize_t bytes;                                          \
-               const char *str = "%20s: %08x\n";                       \
-               const int maxcol = 32;                                  \
-               bytes = snprintf(p, maxcol, str, __stringify(name),     \
-                                iommu_read_reg(obj, MMU_##name));      \
-               p += bytes;                                             \
-               len -= bytes;                                           \
-               if (len < maxcol)                                       \
-                       goto out;                                       \
-       } while (0)
-
-static ssize_t
-omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
-{
-       char *p = buf;
-
-       pr_reg(REVISION);
-       pr_reg(IRQSTATUS);
-       pr_reg(IRQENABLE);
-       pr_reg(WALKING_ST);
-       pr_reg(CNTL);
-       pr_reg(FAULT_AD);
-       pr_reg(TTB);
-       pr_reg(LOCK);
-       pr_reg(LD_TLB);
-       pr_reg(CAM);
-       pr_reg(RAM);
-       pr_reg(GFLUSH);
-       pr_reg(FLUSH_ENTRY);
-       pr_reg(READ_CAM);
-       pr_reg(READ_RAM);
-       pr_reg(EMU_FAULT_AD);
-out:
-       return p - buf;
-}
-
-ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
-{
-       if (!obj || !buf)
-               return -EINVAL;
-
-       pm_runtime_get_sync(obj->dev);
-
-       bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
-
-       pm_runtime_put_sync(obj->dev);
-
-       return bytes;
-}
-
-static int
-__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
-{
-       int i;
-       struct iotlb_lock saved;
-       struct cr_regs tmp;
-       struct cr_regs *p = crs;
-
-       pm_runtime_get_sync(obj->dev);
-       iotlb_lock_get(obj, &saved);
-
-       for_each_iotlb_cr(obj, num, i, tmp) {
-               if (!iotlb_cr_valid(&tmp))
-                       continue;
-               *p++ = tmp;
-       }
-
-       iotlb_lock_set(obj, &saved);
-       pm_runtime_put_sync(obj->dev);
-
-       return  p - crs;
-}
-
-/**
- * iotlb_dump_cr - Dump an iommu tlb entry into buf
- * @obj:       target iommu
- * @cr:                contents of cam and ram register
- * @buf:       output buffer
- **/
-static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
-                            char *buf)
-{
-       char *p = buf;
-
-       /* FIXME: Need more detail analysis of cam/ram */
-       p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
-                                       (cr->cam & MMU_CAM_P) ? 1 : 0);
-
-       return p - buf;
-}
-
-/**
- * omap_dump_tlb_entries - dump cr arrays to given buffer
- * @obj:       target iommu
- * @buf:       output buffer
- **/
-size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
-{
-       int i, num;
-       struct cr_regs *cr;
-       char *p = buf;
-
-       num = bytes / sizeof(*cr);
-       num = min(obj->nr_tlb_entries, num);
-
-       cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
-       if (!cr)
-               return 0;
-
-       num = __dump_tlb_entries(obj, cr, num);
-       for (i = 0; i < num; i++)
-               p += iotlb_dump_cr(obj, cr + i, p);
-       kfree(cr);
-
-       return p - buf;
-}
-
-#endif /* CONFIG_OMAP_IOMMU_DEBUG */
-
 /*
  *     H/W pagetable operations
  */
@@ -930,14 +787,14 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
 
        if (!iopgd_is_table(*iopgd)) {
                dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
-                               obj->name, errs, da, iopgd, *iopgd);
+                       obj->name, errs, da, iopgd, *iopgd);
                return IRQ_NONE;
        }
 
        iopte = iopte_offset(iopgd, da);
 
        dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
-                       obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
+               obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
 
        return IRQ_NONE;
 }
@@ -963,9 +820,8 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
        struct device *dev;
        struct omap_iommu *obj;
 
-       dev = driver_find_device(&omap_iommu_driver.driver, NULL,
-                               (void *)name,
-                               device_match_by_alias);
+       dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
+                                device_match_by_alias);
        if (!dev)
                return ERR_PTR(-ENODEV);
 
@@ -1089,7 +945,6 @@ static const struct of_device_id omap_iommu_of_match[] = {
        { .compatible = "ti,dra7-iommu" },
        {},
 };
-MODULE_DEVICE_TABLE(of, omap_iommu_of_match);
 
 static struct platform_driver omap_iommu_driver = {
        .probe  = omap_iommu_probe,
@@ -1121,7 +976,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
 }
 
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-                        phys_addr_t pa, size_t bytes, int prot)
+                         phys_addr_t pa, size_t bytes, int prot)
 {
        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
        struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1148,7 +1003,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 }
 
 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-                           size_t size)
+                              size_t size)
 {
        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
        struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1199,7 +1054,7 @@ out:
 }
 
 static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
-                       struct device *dev)
+                                  struct device *dev)
 {
        struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
        struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
@@ -1220,7 +1075,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
 }
 
 static void omap_iommu_detach_dev(struct iommu_domain *domain,
-                                struct device *dev)
+                                 struct device *dev)
 {
        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 
@@ -1237,16 +1092,12 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
                return NULL;
 
        omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
-       if (!omap_domain) {
-               pr_err("kzalloc failed\n");
+       if (!omap_domain)
                goto out;
-       }
 
        omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
-       if (!omap_domain->pgtable) {
-               pr_err("kzalloc failed\n");
+       if (!omap_domain->pgtable)
                goto fail_nomem;
-       }
 
        /*
         * should never fail, but please keep this around to ensure
@@ -1285,7 +1136,7 @@ static void omap_iommu_domain_free(struct iommu_domain *domain)
 }
 
 static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
-                                         dma_addr_t da)
+                                          dma_addr_t da)
 {
        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
        struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1302,7 +1153,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
                        ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
                else
                        dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
-                                                       (unsigned long long)da);
+                               (unsigned long long)da);
        } else {
                if (iopgd_is_section(*pgd))
                        ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
@@ -1310,7 +1161,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
                        ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
                else
                        dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
-                                                       (unsigned long long)da);
+                               (unsigned long long)da);
        }
 
        return ret;
@@ -1405,20 +1256,5 @@ static int __init omap_iommu_init(void)
 
        return platform_driver_register(&omap_iommu_driver);
 }
-/* must be ready before omap3isp is probed */
 subsys_initcall(omap_iommu_init);
-
-static void __exit omap_iommu_exit(void)
-{
-       kmem_cache_destroy(iopte_cachep);
-
-       platform_driver_unregister(&omap_iommu_driver);
-
-       omap_iommu_debugfs_exit();
-}
-module_exit(omap_iommu_exit);
-
-MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
-MODULE_ALIAS("platform:omap-iommu");
-MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
-MODULE_LICENSE("GPL v2");
+/* must be ready before omap3isp is probed */
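Net effect of this file's changes: the driver becomes built-in only. The module exit path, the MODULE_* tags and the of-match-table export all go, subsys_initcall() remains the single entry point (kept early so the IOMMU is up before omap3isp probes), and the TLB iteration helpers move out to the header for the debug code now built around them.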
index d736630df3c8a16a1a853915be83dec437225df0..a656df2f9e03d27b2ad06cbd6f202aaecf2ee5dc 100644 (file)
 #ifndef _OMAP_IOMMU_H
 #define _OMAP_IOMMU_H
 
+#include <linux/bitops.h>
+
+#define for_each_iotlb_cr(obj, n, __i, cr)                             \
+       for (__i = 0;                                                   \
+            (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);   \
+            __i++)
+
 struct iotlb_entry {
        u32 da;
        u32 pa;
        u32 pgsz, prsvd, valid;
-       union {
-               u16 ap;
-               struct {
-                       u32 endian, elsz, mixed;
-               };
-       };
+       u32 endian, elsz, mixed;
 };
 
 struct omap_iommu {
@@ -49,20 +51,13 @@ struct omap_iommu {
 };
 
 struct cr_regs {
-       union {
-               struct {
-                       u16 cam_l;
-                       u16 cam_h;
-               };
-               u32 cam;
-       };
-       union {
-               struct {
-                       u16 ram_l;
-                       u16 ram_h;
-               };
-               u32 ram;
-       };
+       u32 cam;
+       u32 ram;
+};
+
+struct iotlb_lock {
+       short base;
+       short vict;
 };
 
 /**
@@ -103,11 +98,11 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
  * MMU Register bit definitions
  */
 /* IRQSTATUS & IRQENABLE */
-#define MMU_IRQ_MULTIHITFAULT  (1 << 4)
-#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
-#define MMU_IRQ_EMUMISS                (1 << 2)
-#define MMU_IRQ_TRANSLATIONFAULT       (1 << 1)
-#define MMU_IRQ_TLBMISS                (1 << 0)
+#define MMU_IRQ_MULTIHITFAULT  BIT(4)
+#define MMU_IRQ_TABLEWALKFAULT BIT(3)
+#define MMU_IRQ_EMUMISS                BIT(2)
+#define MMU_IRQ_TRANSLATIONFAULT       BIT(1)
+#define MMU_IRQ_TLBMISS                BIT(0)
 
 #define __MMU_IRQ_FAULT                \
        (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
@@ -119,16 +114,16 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
 /* MMU_CNTL */
 #define MMU_CNTL_SHIFT         1
 #define MMU_CNTL_MASK          (7 << MMU_CNTL_SHIFT)
-#define MMU_CNTL_EML_TLB       (1 << 3)
-#define MMU_CNTL_TWL_EN                (1 << 2)
-#define MMU_CNTL_MMU_EN                (1 << 1)
+#define MMU_CNTL_EML_TLB       BIT(3)
+#define MMU_CNTL_TWL_EN                BIT(2)
+#define MMU_CNTL_MMU_EN                BIT(1)
 
 /* CAM */
 #define MMU_CAM_VATAG_SHIFT    12
 #define MMU_CAM_VATAG_MASK \
        ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
-#define MMU_CAM_P              (1 << 3)
-#define MMU_CAM_V              (1 << 2)
+#define MMU_CAM_P              BIT(3)
+#define MMU_CAM_V              BIT(2)
 #define MMU_CAM_PGSZ_MASK      3
 #define MMU_CAM_PGSZ_1M                (0 << 0)
 #define MMU_CAM_PGSZ_64K       (1 << 0)
@@ -141,9 +136,9 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
        ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
 
 #define MMU_RAM_ENDIAN_SHIFT   9
-#define MMU_RAM_ENDIAN_MASK    (1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_MASK    BIT(MMU_RAM_ENDIAN_SHIFT)
 #define MMU_RAM_ENDIAN_LITTLE  (0 << MMU_RAM_ENDIAN_SHIFT)
-#define MMU_RAM_ENDIAN_BIG     (1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_BIG     BIT(MMU_RAM_ENDIAN_SHIFT)
 
 #define MMU_RAM_ELSZ_SHIFT     7
 #define MMU_RAM_ELSZ_MASK      (3 << MMU_RAM_ELSZ_SHIFT)
@@ -152,7 +147,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
 #define MMU_RAM_ELSZ_32                (2 << MMU_RAM_ELSZ_SHIFT)
 #define MMU_RAM_ELSZ_NONE      (3 << MMU_RAM_ELSZ_SHIFT)
 #define MMU_RAM_MIXED_SHIFT    6
-#define MMU_RAM_MIXED_MASK     (1 << MMU_RAM_MIXED_SHIFT)
+#define MMU_RAM_MIXED_MASK     BIT(MMU_RAM_MIXED_SHIFT)
 #define MMU_RAM_MIXED          MMU_RAM_MIXED_MASK
 
 #define MMU_GP_REG_BUS_ERR_BACK_EN     0x1
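The single-bit register definitions switch to BIT() from <linux/bitops.h> (hence the added include); BIT(n) is (1UL << (n)), so every value is unchanged, e.g.:

	#define MMU_CAM_V	BIT(2)	/* == (1UL << 2) == 0x4, as before */

Multi-bit fields such as MMU_CAM_PGSZ_* and MMU_RAM_ELSZ_* stay as explicit shifts, since BIT() only covers single bits.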
@@ -190,12 +185,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
 /*
  * global functions
  */
-#ifdef CONFIG_OMAP_IOMMU_DEBUG
-extern ssize_t
-omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
-extern size_t
-omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
 
+struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n);
+void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l);
+void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l);
+
+#ifdef CONFIG_OMAP_IOMMU_DEBUG
 void omap_iommu_debugfs_init(void);
 void omap_iommu_debugfs_exit(void);
 
@@ -222,4 +217,12 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
        __raw_writel(val, obj->regbase + offs);
 }
 
+static inline int iotlb_cr_valid(struct cr_regs *cr)
+{
+       if (!cr)
+               return -EINVAL;
+
+       return cr->cam & MMU_CAM_V;
+}
+
 #endif /* _OMAP_IOMMU_H */
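With __iotlb_read_cr(), the lock accessors and iotlb_cr_valid() all visible here, a walk over the live TLB looks just like the removed __dump_tlb_entries() did; a minimal sketch, assuming the caller already holds a pm_runtime reference on obj->dev:

	struct iotlb_lock saved;
	struct cr_regs cr;
	int i;

	iotlb_lock_get(obj, &saved);	/* preserve the victim pointer */
	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		if (!iotlb_cr_valid(&cr))
			continue;
		/* cr.cam / cr.ram describe one valid entry */
	}
	iotlb_lock_set(obj, &saved);

Note the iterator reads cr by value on each pass, so the validity check must follow the read, exactly as the macro's comma expression arranges.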
index f891683e3f05af915738151a1a7574268fe84b3b..01a315227bf052d03a0c1f72e6e4e48c6121b201 100644 (file)
  * published by the Free Software Foundation.
  */
 
+#ifndef _OMAP_IOPGTABLE_H
+#define _OMAP_IOPGTABLE_H
+
+#include <linux/bitops.h>
+
 /*
  * "L2 table" address mask and size definitions.
  */
 #define IOPGD_SHIFT            20
-#define IOPGD_SIZE             (1UL << IOPGD_SHIFT)
+#define IOPGD_SIZE             BIT(IOPGD_SHIFT)
 #define IOPGD_MASK             (~(IOPGD_SIZE - 1))
 
 /*
  * "section" address mask and size definitions.
  */
 #define IOSECTION_SHIFT                20
-#define IOSECTION_SIZE         (1UL << IOSECTION_SHIFT)
+#define IOSECTION_SIZE         BIT(IOSECTION_SHIFT)
 #define IOSECTION_MASK         (~(IOSECTION_SIZE - 1))
 
 /*
  * "supersection" address mask and size definitions.
  */
 #define IOSUPER_SHIFT          24
-#define IOSUPER_SIZE           (1UL << IOSUPER_SHIFT)
+#define IOSUPER_SIZE           BIT(IOSUPER_SHIFT)
 #define IOSUPER_MASK           (~(IOSUPER_SIZE - 1))
 
 #define PTRS_PER_IOPGD         (1UL << (32 - IOPGD_SHIFT))
  * "small page" address mask and size definitions.
  */
 #define IOPTE_SHIFT            12
-#define IOPTE_SIZE             (1UL << IOPTE_SHIFT)
+#define IOPTE_SIZE             BIT(IOPTE_SHIFT)
 #define IOPTE_MASK             (~(IOPTE_SIZE - 1))
 
 /*
  * "large page" address mask and size definitions.
  */
 #define IOLARGE_SHIFT          16
-#define IOLARGE_SIZE           (1UL << IOLARGE_SHIFT)
+#define IOLARGE_SIZE           BIT(IOLARGE_SHIFT)
 #define IOLARGE_MASK           (~(IOLARGE_SIZE - 1))
 
 #define PTRS_PER_IOPTE         (1UL << (IOPGD_SHIFT - IOPTE_SHIFT))
@@ -69,16 +74,16 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
 /*
  * some descriptor attributes.
  */
-#define IOPGD_TABLE            (1 << 0)
-#define IOPGD_SECTION          (2 << 0)
-#define IOPGD_SUPER            (1 << 18 | 2 << 0)
+#define IOPGD_TABLE            (1)
+#define IOPGD_SECTION          (2)
+#define IOPGD_SUPER            (BIT(18) | IOPGD_SECTION)
 
 #define iopgd_is_table(x)      (((x) & 3) == IOPGD_TABLE)
 #define iopgd_is_section(x)    (((x) & (1 << 18 | 3)) == IOPGD_SECTION)
 #define iopgd_is_super(x)      (((x) & (1 << 18 | 3)) == IOPGD_SUPER)
 
-#define IOPTE_SMALL            (2 << 0)
-#define IOPTE_LARGE            (1 << 0)
+#define IOPTE_SMALL            (2)
+#define IOPTE_LARGE            (1)
 
 #define iopte_is_small(x)      (((x) & 2) == IOPTE_SMALL)
 #define iopte_is_large(x)      (((x) & 3) == IOPTE_LARGE)
@@ -93,3 +98,5 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
 /* to find an entry in the second-level page table. */
 #define iopte_index(da)                (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
 #define iopte_offset(iopgd, da)        (iopgd_page_vaddr(iopgd) + iopte_index(da))
+
+#endif /* _OMAP_IOPGTABLE_H */
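One worked value, since the BIT() rewrite makes the descriptor encoding easy to misread:

	/* IOPGD_SUPER = BIT(18) | IOPGD_SECTION = 0x40000 | 0x2 = 0x40002;
	 * iopgd_is_super() masks with (1 << 18 | 3) = 0x40003 first, so both
	 * the type bits [1:0] and the supersection bit 18 must match. */

The file also gains the include guard it was previously missing.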
index c1f2e521dc52cdb383b528c27d07f0786e4ffc43..9305964250acaf94cef7da5ce7f5a2fb6e78b5ce 100644 (file)
@@ -27,6 +27,7 @@ struct tegra_smmu {
        const struct tegra_smmu_soc *soc;
 
        unsigned long pfn_mask;
+       unsigned long tlb_mask;
 
        unsigned long *asids;
        struct mutex lock;
@@ -40,8 +41,10 @@ struct tegra_smmu_as {
        struct iommu_domain domain;
        struct tegra_smmu *smmu;
        unsigned int use_count;
-       struct page *count;
+       u32 *count;
+       struct page **pts;
        struct page *pd;
+       dma_addr_t pd_dma;
        unsigned id;
        u32 attr;
 };
@@ -68,7 +71,8 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define SMMU_TLB_CONFIG 0x14
 #define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
 #define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
-#define  SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
+#define  SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
+       ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
 
 #define SMMU_PTC_CONFIG 0x18
 #define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
@@ -79,9 +83,9 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define  SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)
 
 #define SMMU_PTB_DATA 0x020
-#define  SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))
+#define  SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))
 
-#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))
+#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))
 
 #define SMMU_TLB_FLUSH 0x030
 #define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
@@ -134,29 +138,49 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
 #define SMMU_PTE_ATTR          (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
                                 SMMU_PTE_NONSECURE)
 
-static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
+static unsigned int iova_pd_index(unsigned long iova)
+{
+       return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
+}
+
+static unsigned int iova_pt_index(unsigned long iova)
+{
+       return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
+}
+
+static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
+{
+       addr >>= 12;
+       return (addr & smmu->pfn_mask) == addr;
+}
+
+static dma_addr_t smmu_pde_to_dma(u32 pde)
+{
+       return pde << 12;
+}
+
+static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
+{
+       smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
+}
+
+static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
                                  unsigned long offset)
 {
-       phys_addr_t phys = page ? page_to_phys(page) : 0;
        u32 value;
 
-       if (page) {
-               offset &= ~(smmu->mc->soc->atom_size - 1);
+       offset &= ~(smmu->mc->soc->atom_size - 1);
 
-               if (smmu->mc->soc->num_address_bits > 32) {
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-                       value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
+       if (smmu->mc->soc->num_address_bits > 32) {
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+               value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
 #else
-                       value = 0;
+               value = 0;
 #endif
-                       smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
-               }
-
-               value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
-       } else {
-               value = SMMU_PTC_FLUSH_TYPE_ALL;
+               smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
        }
 
+       value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
 }
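A worked example for the new helpers, assuming the usual Tegra layout of 1024 PDEs covering 4 MiB each over 4 KiB pages (SMMU_PDE_SHIFT 22, SMMU_PTE_SHIFT 12 — values taken from the SoC headers, not this hunk):

	/* iova = 0x12345678:
	 *   iova_pd_index(iova) = (0x12345678 >> 22) & 1023 = 72
	 *   iova_pt_index(iova) = (0x12345678 >> 12) & 1023 = 837
	 */

A PDE stores a page-frame number, so smmu_pde_to_dma() simply undoes the >> 12 applied when the PDE was written, and smmu_dma_addr_valid() rejects any bus address whose PFN overflows smmu->pfn_mask — the hardware fields cannot express more.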
 
@@ -236,8 +260,6 @@ static bool tegra_smmu_capable(enum iommu_cap cap)
 static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 {
        struct tegra_smmu_as *as;
-       unsigned int i;
-       uint32_t *pd;
 
        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;
@@ -248,32 +270,26 @@ static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 
        as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
 
-       as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
+       as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
        if (!as->pd) {
                kfree(as);
                return NULL;
        }
 
-       as->count = alloc_page(GFP_KERNEL);
+       as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
        if (!as->count) {
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }
 
-       /* clear PDEs */
-       pd = page_address(as->pd);
-       SetPageReserved(as->pd);
-
-       for (i = 0; i < SMMU_NUM_PDE; i++)
-               pd[i] = 0;
-
-       /* clear PDE usage counters */
-       pd = page_address(as->count);
-       SetPageReserved(as->count);
-
-       for (i = 0; i < SMMU_NUM_PDE; i++)
-               pd[i] = 0;
+       as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
+       if (!as->pts) {
+               kfree(as->count);
+               __free_page(as->pd);
+               kfree(as);
+               return NULL;
+       }
 
        /* setup aperture */
        as->domain.geometry.aperture_start = 0;
@@ -288,7 +304,6 @@ static void tegra_smmu_domain_free(struct iommu_domain *domain)
        struct tegra_smmu_as *as = to_smmu_as(domain);
 
        /* TODO: free page directory and page tables */
-       ClearPageReserved(as->pd);
 
        kfree(as);
 }
@@ -376,16 +391,26 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                return 0;
        }
 
+       as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
+                                 DMA_TO_DEVICE);
+       if (dma_mapping_error(smmu->dev, as->pd_dma))
+               return -ENOMEM;
+
+       /* We can't handle 64-bit DMA addresses */
+       if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
+               err = -ENOMEM;
+               goto err_unmap;
+       }
+
        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
-               return err;
+               goto err_unmap;
 
-       smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
-       smmu_flush_ptc(smmu, as->pd, 0);
+       smmu_flush_ptc(smmu, as->pd_dma, 0);
        smmu_flush_tlb_asid(smmu, as->id);
 
        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
-       value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
+       value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);
        smmu_flush(smmu);
 
@@ -393,6 +418,10 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
        as->use_count++;
 
        return 0;
+
+err_unmap:
+       dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+       return err;
 }
 
 static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
@@ -402,6 +431,9 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                return;
 
        tegra_smmu_free_asid(smmu, as->id);
+
+       dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
+
        as->smmu = NULL;
 }
 
@@ -465,96 +497,155 @@ static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *de
        }
 }
 
+static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
+                              u32 value)
+{
+       unsigned int pd_index = iova_pd_index(iova);
+       struct tegra_smmu *smmu = as->smmu;
+       u32 *pd = page_address(as->pd);
+       unsigned long offset = pd_index * sizeof(*pd);
+
+       /* Set the page directory entry first */
+       pd[pd_index] = value;
+
+       /* The flush the page directory entry from caches */
+       dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
+                                        sizeof(*pd), DMA_TO_DEVICE);
+
+       /* And flush the iommu */
+       smmu_flush_ptc(smmu, as->pd_dma, offset);
+       smmu_flush_tlb_section(smmu, as->id, iova);
+       smmu_flush(smmu);
+}
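
	/*
	 * Editorial aside, not part of the patch: the ordering above is the
	 * point of the DMA API conversion.  The CPU store to pd[] is pushed
	 * out with dma_sync_single_range_for_device() *before* the page-table
	 * cache and TLB entries are invalidated, and the final smmu_flush()
	 * (a register read-back) keeps the MMIO writes from staying posted.
	 * Reorder any of these steps and the SMMU may refetch the stale PDE
	 * it was just told to drop.
	 */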
+
+static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
+{
+       u32 *pt = page_address(pt_page);
+
+       return pt + iova_pt_index(iova);
+}
+
+static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
+                                 dma_addr_t *dmap)
+{
+       unsigned int pd_index = iova_pd_index(iova);
+       struct page *pt_page;
+       u32 *pd;
+
+       pt_page = as->pts[pd_index];
+       if (!pt_page)
+               return NULL;
+
+       pd = page_address(as->pd);
+       *dmap = smmu_pde_to_dma(pd[pd_index]);
+
+       return tegra_smmu_pte_offset(pt_page, iova);
+}
+
 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
-                      struct page **pagep)
+                      dma_addr_t *dmap)
 {
-       u32 *pd = page_address(as->pd), *pt, *count;
-       u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
-       u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
+       unsigned int pde = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
-       struct page *page;
-       unsigned int i;
 
-       if (pd[pde] == 0) {
-               page = alloc_page(GFP_KERNEL | __GFP_DMA);
+       if (!as->pts[pde]) {
+               struct page *page;
+               dma_addr_t dma;
+
+               page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
                if (!page)
                        return NULL;
 
-               pt = page_address(page);
-               SetPageReserved(page);
+               dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
+                                  DMA_TO_DEVICE);
+               if (dma_mapping_error(smmu->dev, dma)) {
+                       __free_page(page);
+                       return NULL;
+               }
 
-               for (i = 0; i < SMMU_NUM_PTE; i++)
-                       pt[i] = 0;
+               if (!smmu_dma_addr_valid(smmu, dma)) {
+                       dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
+                                      DMA_TO_DEVICE);
+                       __free_page(page);
+                       return NULL;
+               }
 
-               smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);
+               as->pts[pde] = page;
 
-               pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);
+               tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
+                                                             SMMU_PDE_NEXT));
 
-               smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
-               smmu_flush_ptc(smmu, as->pd, pde << 2);
-               smmu_flush_tlb_section(smmu, as->id, iova);
-               smmu_flush(smmu);
+               *dmap = dma;
        } else {
-               page = pfn_to_page(pd[pde] & smmu->pfn_mask);
-               pt = page_address(page);
+               u32 *pd = page_address(as->pd);
+
+               *dmap = smmu_pde_to_dma(pd[pde]);
        }
 
-       *pagep = page;
+       return tegra_smmu_pte_offset(as->pts[pde], iova);
+}
 
-       /* Keep track of entries in this page table. */
-       count = page_address(as->count);
-       if (pt[pte] == 0)
-               count[pde]++;
+static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
+{
+       unsigned int pd_index = iova_pd_index(iova);
 
-       return &pt[pte];
+       as->count[pd_index]++;
 }
 
-static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
+static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
 {
-       u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
-       u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
-       u32 *count = page_address(as->count);
-       u32 *pd = page_address(as->pd), *pt;
-       struct page *page;
-
-       page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
-       pt = page_address(page);
+       unsigned int pde = iova_pd_index(iova);
+       struct page *page = as->pts[pde];
 
        /*
         * When no entries in this page table are used anymore, return the
         * memory page to the system.
         */
-       if (pt[pte] != 0) {
-               if (--count[pde] == 0) {
-                       ClearPageReserved(page);
-                       __free_page(page);
-                       pd[pde] = 0;
-               }
+       if (--as->count[pde] == 0) {
+               struct tegra_smmu *smmu = as->smmu;
+               u32 *pd = page_address(as->pd);
+               dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
+
+               tegra_smmu_set_pde(as, iova, 0);
 
-               pt[pte] = 0;
+               dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
+               __free_page(page);
+               as->pts[pde] = NULL;
        }
 }
 
+static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
+                              u32 *pte, dma_addr_t pte_dma, u32 val)
+{
+       struct tegra_smmu *smmu = as->smmu;
+       unsigned long offset = offset_in_page(pte);
+
+       *pte = val;
+
+       dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
+                                        4, DMA_TO_DEVICE);
+       smmu_flush_ptc(smmu, pte_dma, offset);
+       smmu_flush_tlb_group(smmu, as->id, iova);
+       smmu_flush(smmu);
+}
+
 static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot)
 {
        struct tegra_smmu_as *as = to_smmu_as(domain);
-       struct tegra_smmu *smmu = as->smmu;
-       unsigned long offset;
-       struct page *page;
+       dma_addr_t pte_dma;
        u32 *pte;
 
-       pte = as_get_pte(as, iova, &page);
+       pte = as_get_pte(as, iova, &pte_dma);
        if (!pte)
                return -ENOMEM;
 
-       *pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
-       offset = offset_in_page(pte);
+       /* If we aren't overwriting a pre-existing entry, increment use */
+       if (*pte == 0)
+               tegra_smmu_pte_get_use(as, iova);
 
-       smmu->soc->ops->flush_dcache(page, offset, 4);
-       smmu_flush_ptc(smmu, page, offset);
-       smmu_flush_tlb_group(smmu, as->id, iova);
-       smmu_flush(smmu);
+       tegra_smmu_set_pte(as, iova, pte, pte_dma,
+                          __phys_to_pfn(paddr) | SMMU_PTE_ATTR);
 
        return 0;
 }
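Two things worth noting in the rewritten map path: the use count is only bumped when *pte was previously clear, so remapping an existing entry does not leak a reference, and struct page pointers now live exclusively in as->pts[] — a PDE holds a bus address, which cannot be turned back into a page with pfn_to_page() once the DMA API (and possibly bounce buffering) is in the loop. That is why tegra_smmu_pte_lookup() consults as->pts[] and only reads pd[] to recover the bus address for cache maintenance.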
@@ -563,22 +654,15 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
 {
        struct tegra_smmu_as *as = to_smmu_as(domain);
-       struct tegra_smmu *smmu = as->smmu;
-       unsigned long offset;
-       struct page *page;
+       dma_addr_t pte_dma;
        u32 *pte;
 
-       pte = as_get_pte(as, iova, &page);
-       if (!pte)
+       pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
+       if (!pte || !*pte)
                return 0;
 
-       offset = offset_in_page(pte);
-       as_put_pte(as, iova);
-
-       smmu->soc->ops->flush_dcache(page, offset, 4);
-       smmu_flush_ptc(smmu, page, offset);
-       smmu_flush_tlb_group(smmu, as->id, iova);
-       smmu_flush(smmu);
+       tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
+       tegra_smmu_pte_put_use(as, iova);
 
        return size;
 }
@@ -587,11 +671,14 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
 {
        struct tegra_smmu_as *as = to_smmu_as(domain);
-       struct page *page;
        unsigned long pfn;
+       dma_addr_t pte_dma;
        u32 *pte;
 
-       pte = as_get_pte(as, iova, &page);
+       pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
+       if (!pte || !*pte)
+               return 0;
+
        pfn = *pte & as->smmu->pfn_mask;
 
        return PFN_PHYS(pfn);
@@ -816,6 +903,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
        smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
        dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
                mc->soc->num_address_bits, smmu->pfn_mask);
+       smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
+       dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
+               smmu->tlb_mask);
 
        value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
 
@@ -825,14 +915,14 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
        smmu_writel(smmu, value, SMMU_PTC_CONFIG);
 
        value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
-               SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
+               SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
 
        if (soc->supports_round_robin_arbitration)
                value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
 
        smmu_writel(smmu, value, SMMU_TLB_CONFIG);
 
-       smmu_flush_ptc(smmu, NULL, 0);
+       smmu_flush_ptc_all(smmu);
        smmu_flush_tlb(smmu);
        smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
        smmu_flush(smmu);
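The mask is sized so that the programmed line count itself survives the AND; e.g. with num_tlb_lines = 32 (an illustrative value):

	/* tlb_mask = (32 << 1) - 1 = 0x3f;
	 * SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) = 32 & 0x3f = 32,
	 * where the old hard-coded ACTIVE_LINES(0x20) mis-programmed
	 * SoCs whose TLB is not exactly 32 lines deep. */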
index 692fe2bc81979b6b48f984ec6d88c117fbfdebdd..c12bb93334ff99e7a5ddc72d274b00c4eeb9c24c 100644 (file)
@@ -68,7 +68,9 @@ static struct irq_chip crossbar_chip = {
        .irq_mask               = irq_chip_mask_parent,
        .irq_unmask             = irq_chip_unmask_parent,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
-       .irq_set_wake           = irq_chip_set_wake_parent,
+       .irq_set_type           = irq_chip_set_type_parent,
+       .flags                  = IRQCHIP_MASK_ON_SUSPEND |
+                                 IRQCHIP_SKIP_SET_WAKE,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
 #endif
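With IRQCHIP_SKIP_SET_WAKE set, irq_set_irq_wake() on a crossbar interrupt now succeeds without any chip callback (so the removed .irq_set_wake forwarding is no longer needed), and IRQCHIP_MASK_ON_SUSPEND has the core mask every interrupt not claimed as a wakeup source across suspend; the added .irq_set_type forwarding lets trigger configuration still reach the parent chip.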
index b7d54d428b5e55d1520d52e68b95202593cf4b53..ff4be0515a0dc7dbb206ae0a84968f922817101e 100644 (file)
@@ -538,7 +538,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 
 static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
 {
-       smp_call_function_interrupt();
+       generic_smp_call_function_interrupt();
 
        return IRQ_HANDLED;
 }
index 32814371b8d304539a2eb1cfece077510b4ca394..aa1b41ca40f778dcb4e6c0e393ab4ee33d25d388 100644 (file)
@@ -1471,5 +1471,3 @@ module_exit(mq_exit);
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("mq cache policy");
-
-MODULE_ALIAS("dm-cache-default");
index 48a4a826ae07649419d033b99c564b2adb9da6ea..200366c62231dd5f8f38f74284a42810c8603d19 100644 (file)
@@ -1789,3 +1789,5 @@ module_exit(smq_exit);
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("smq cache policy");
+
+MODULE_ALIAS("dm-cache-default");
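Taken together with the preceding hunk, the "dm-cache-default" alias moves from the mq policy to smq, so a request for the default cache policy now loads this module instead.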
index 48dfe3c4d6aa7968bbc1986eafcb9e965fe55950..6ba47cfb1443748ccf092819a6a5ef160bb856fd 100644 (file)
@@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
                return r;
 
        disk_super = dm_block_data(copy);
-       dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
-       dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+       dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
+       dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
        dm_sm_dec_block(pmd->metadata_sm, held_root);
 
        return dm_tm_unlock(pmd->tm, copy);
index ab37ae114e943c20c161f88b8c2a739206bfafab..0d7ab20c58dffc40d5c56c9427b7dd7f090c8bd3 100644 (file)
@@ -1729,7 +1729,8 @@ static int dm_merge_bvec(struct request_queue *q,
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_live_table_fast(md);
        struct dm_target *ti;
-       sector_t max_sectors, max_size = 0;
+       sector_t max_sectors;
+       int max_size = 0;
 
        if (unlikely(!map))
                goto out;
@@ -1742,18 +1743,10 @@ static int dm_merge_bvec(struct request_queue *q,
         * Find maximum amount of I/O that won't need splitting
         */
        max_sectors = min(max_io_len(bvm->bi_sector, ti),
-                         (sector_t) queue_max_sectors(q));
+                         (sector_t) BIO_MAX_SECTORS);
        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-
-       /*
-        * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
-        * to the targets' merge function since it holds sectors not bytes).
-        * Just doing this as an interim fix for stable@ because the more
-        * comprehensive cleanup of switching to sector_t will impact every
-        * DM target that implements a ->merge hook.
-        */
-       if (max_size > INT_MAX)
-               max_size = INT_MAX;
+       if (max_size < 0)
+               max_size = 0;
 
        /*
         * merge_bvec_fn() returns number of bytes
@@ -1761,13 +1754,13 @@ static int dm_merge_bvec(struct request_queue *q,
         * max is precomputed maximal io size
         */
        if (max_size && ti->type->merge)
-               max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
+               max_size = ti->type->merge(ti, bvm, biovec, max_size);
        /*
         * If the target doesn't support merge method and some of the devices
-        * provided their merge_bvec method (we know this by looking for the
-        * max_hw_sectors that dm_set_device_limits may set), then we can't
-        * allow bios with multiple vector entries.  So always set max_size
-        * to 0, and the code below allows just one page.
+        * provided their merge_bvec method (we know this by looking at
+        * queue_max_hw_sectors), then we can't allow bios with multiple vector
+        * entries.  So always set max_size to 0, and the code below allows
+        * just one page.
         */
        else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
                max_size = 0;
index 0c2a4e8b873c659dbc260b2aa5484c7a5e87b176..e25f00f0138a7b4d82a5ae4f6fc7e1b6f0bb1b30 100644 (file)
@@ -5759,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
        char *ptr;
        int err;
 
-       file = kmalloc(sizeof(*file), GFP_NOIO);
+       file = kzalloc(sizeof(*file), GFP_NOIO);
        if (!file)
                return -ENOMEM;
 
index bf2b80d5c4707a64210b5e57deb785069dc7d921..8731b6ea026bd9b8cfbe2a21bbc06366e509181c 100644 (file)
@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
 
 extern struct dm_block_validator btree_node_validator;
 
+/*
+ * Value type for upper levels of multi-level btrees.
+ */
+extern void init_le64_type(struct dm_transaction_manager *tm,
+                          struct dm_btree_value_type *vt);
+
 #endif /* DM_BTREE_INTERNAL_H */
index 9836c0ae897c33c4e227bca77cc95026c193f73c..4222f774cf369b1eb1b031bd652854c573b224af 100644 (file)
@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
        return r;
 }
 
-static struct dm_btree_value_type le64_type = {
-       .context = NULL,
-       .size = sizeof(__le64),
-       .inc = NULL,
-       .dec = NULL,
-       .equal = NULL
-};
-
 int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
                    uint64_t *keys, dm_block_t *new_root)
 {
@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
        int index = 0, r = 0;
        struct shadow_spine spine;
        struct btree_node *n;
+       struct dm_btree_value_type le64_vt;
 
+       init_le64_type(info->tm, &le64_vt);
        init_shadow_spine(&spine, info);
        for (level = 0; level < info->levels; level++) {
                r = remove_raw(&spine, info,
                               (level == last_level ?
-                               &info->value_type : &le64_type),
+                               &info->value_type : &le64_vt),
                               root, keys[level], (unsigned *)&index);
                if (r < 0)
                        break;
@@ -654,11 +648,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
        int index = 0, r = 0;
        struct shadow_spine spine;
        struct btree_node *n;
+       struct dm_btree_value_type le64_vt;
        uint64_t k;
 
+       init_le64_type(info->tm, &le64_vt);
        init_shadow_spine(&spine, info);
        for (level = 0; level < last_level; level++) {
-               r = remove_raw(&spine, info, &le64_type,
+               r = remove_raw(&spine, info, &le64_vt,
                               root, keys[level], (unsigned *) &index);
                if (r < 0)
                        goto out;
@@ -689,6 +685,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
                                             value_ptr(n, index));
 
                delete_at(n, index);
+               keys[last_level] = k + 1ull;
 
        } else
                r = -ENODATA;
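The added assignment is what makes repeated calls iterate: after deleting the entry for key k, remove_one() advances keys[last_level] to k + 1, so a range-removal caller can invoke it in a loop and sweep leaves in key order until it returns -ENODATA.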
index 1b5e13ec7f96a670ed7a9b5b472a5d2ee95a7dff..0dee514ba4c5f9e8d34d16e9d239ef333395c7d4 100644 (file)
@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
 {
        return s->root;
 }
+
+static void le64_inc(void *context, const void *value_le)
+{
+       struct dm_transaction_manager *tm = context;
+       __le64 v_le;
+
+       memcpy(&v_le, value_le, sizeof(v_le));
+       dm_tm_inc(tm, le64_to_cpu(v_le));
+}
+
+static void le64_dec(void *context, const void *value_le)
+{
+       struct dm_transaction_manager *tm = context;
+       __le64 v_le;
+
+       memcpy(&v_le, value_le, sizeof(v_le));
+       dm_tm_dec(tm, le64_to_cpu(v_le));
+}
+
+static int le64_equal(void *context, const void *value1_le, const void *value2_le)
+{
+       __le64 v1_le, v2_le;
+
+       memcpy(&v1_le, value1_le, sizeof(v1_le));
+       memcpy(&v2_le, value2_le, sizeof(v2_le));
+       return v1_le == v2_le;
+}
+
+void init_le64_type(struct dm_transaction_manager *tm,
+                   struct dm_btree_value_type *vt)
+{
+       vt->context = tm;
+       vt->size = sizeof(__le64);
+       vt->inc = le64_inc;
+       vt->dec = le64_dec;
+       vt->equal = le64_equal;
+}
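Two details here: the memcpy() into a local is deliberate, since value slots inside a packed btree node need not be naturally aligned, and wiring inc/dec to dm_tm_inc()/dm_tm_dec() means shadowing an internal node now adjusts reference counts on the blocks it points to — previously the value type carried NULL callbacks and those references were simply never taken. The safe-read idiom in isolation:

	__le64 v_le;				/* aligned local */
	u64 v;

	memcpy(&v_le, value_le, sizeof(v_le));	/* value_le may be unaligned */
	v = le64_to_cpu(v_le);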
index fdd3793e22f957ef08db71f897607c68ce6eb6a3..c7726cebc4950c24cb6f6f2b7cacfa6465bddeb6 100644 (file)
@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
        struct btree_node *n;
        struct dm_btree_value_type le64_type;
 
-       le64_type.context = NULL;
-       le64_type.size = sizeof(__le64);
-       le64_type.inc = NULL;
-       le64_type.dec = NULL;
-       le64_type.equal = NULL;
-
+       init_le64_type(info->tm, &le64_type);
        init_shadow_spine(&spine, info);
 
        for (level = 0; level < (info->levels - 1); level++) {
index 94f5b55069e09610f21ea640f5dff3efd7e580ca..967a4ed73929ff44a38d9475c5e362fc2914c758 100644 (file)
@@ -1476,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
 {
        char b[BDEVNAME_SIZE];
        struct r1conf *conf = mddev->private;
+       unsigned long flags;
 
        /*
         * If it is not operational, then we have already marked it as dead
@@ -1495,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
                return;
        }
        set_bit(Blocked, &rdev->flags);
+       spin_lock_irqsave(&conf->device_lock, flags);
        if (test_and_clear_bit(In_sync, &rdev->flags)) {
-               unsigned long flags;
-               spin_lock_irqsave(&conf->device_lock, flags);
                mddev->degraded++;
                set_bit(Faulty, &rdev->flags);
-               spin_unlock_irqrestore(&conf->device_lock, flags);
        } else
                set_bit(Faulty, &rdev->flags);
+       spin_unlock_irqrestore(&conf->device_lock, flags);
        /*
         * if recovery is running, make sure it aborts.
         */
@@ -1568,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev)
         * Find all failed disks within the RAID1 configuration
         * and mark them readable.
         * Called under mddev lock, so rcu protection not needed.
+        * device_lock used to avoid races with raid1_end_read_request
+        * which expects 'In_sync' flags and ->degraded to be consistent.
         */
+       spin_lock_irqsave(&conf->device_lock, flags);
        for (i = 0; i < conf->raid_disks; i++) {
                struct md_rdev *rdev = conf->mirrors[i].rdev;
                struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1599,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
                        sysfs_notify_dirent_safe(rdev->sysfs_state);
                }
        }
-       spin_lock_irqsave(&conf->device_lock, flags);
        mddev->degraded -= count;
        spin_unlock_irqrestore(&conf->device_lock, flags);
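Both hunks widen the same critical section: the In_sync test-and-clear and the ->degraded update now happen under one device_lock acquisition, and spare activation takes the lock before scanning rather than after, so a concurrent reader can never observe the flag and the counter disagreeing mid-transition.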
 
index 643d217bfa13ac8caa3dee9f9dd65d57f165bbe8..f757023fc4580680bfdd6e178f93acb62cb1f31e 100644 (file)
@@ -2256,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 static int drop_one_stripe(struct r5conf *conf)
 {
        struct stripe_head *sh;
-       int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
+       int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
 
        spin_lock_irq(conf->hash_locks + hash);
        sh = get_free_stripe(conf, hash);
@@ -6388,7 +6388,8 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
 
        if (mutex_trylock(&conf->cache_size_mutex)) {
                ret= 0;
-               while (ret < sc->nr_to_scan) {
+               while (ret < sc->nr_to_scan &&
+                      conf->max_nr_stripes > conf->min_nr_stripes) {
                        if (drop_one_stripe(conf) == 0) {
                                ret = SHRINK_STOP;
                                break;
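The % → & switch is not cosmetic: hash is an int, and C's remainder operator takes the dividend's sign, so if the shrinker ever raced max_nr_stripes down to 0 the old expression indexed before the array; the mask wraps instead, and the new loop condition stops the scan at min_nr_stripes so the count cannot collapse in the first place. Assuming the usual NR_STRIPE_HASH_LOCKS of 8:

	int h1 = (0 - 1) % 8;	/* == -1: out-of-bounds bucket */
	int h2 = (0 - 1) & 7;	/* ==  7: wraps to the top one */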
index 0d35f5850ff1ea8e9910168f55fe04f05c42600e..5ab90f36a6a687c3c64edd56f09d3f8708dba635 100644 (file)
@@ -240,7 +240,7 @@ config DVB_SI21XX
 
 config DVB_TS2020
        tristate "Montage Technology TS2020 based tuners"
-       depends on DVB_CORE
+       depends on DVB_CORE && I2C
        select REGMAP_I2C
        default m if !MEDIA_SUBDRV_AUTOSELECT
        help
index 3be1b2c3c3860ec48c2e9a1e5e5cd41daf56046f..6a1c0089bb6279c6660b6154681c1c7cdeb129d4 100644 (file)
@@ -2,6 +2,7 @@ config VIDEO_COBALT
        tristate "Cisco Cobalt support"
        depends on VIDEO_V4L2 && I2C && MEDIA_CONTROLLER
        depends on PCI_MSI && MTD_COMPLEX_MAPPINGS && GPIOLIB
+       depends on SND
        select I2C_ALGOBIT
        select VIDEO_ADV7604
        select VIDEO_ADV7511
index dd4bff9cf3390a4f167c6e5d1a9ae4886dae9750..d1f5898d11ba1c046737b2bb2a7ba48fa7547b9d 100644 (file)
@@ -139,7 +139,7 @@ done:
           also know about dropped frames. */
        cb->vb.v4l2_buf.sequence = s->sequence++;
        vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
-                       VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE);
+                       VB2_BUF_STATE_REQUEUEING : VB2_BUF_STATE_DONE);
 }
 
 irqreturn_t cobalt_irq_handler(int irq, void *dev_id)
index 1d59c7e039f7d41e989dc986cf6ac09815165747..87990ece5848957baf4af40dd5100f356466b901 100644 (file)
@@ -130,10 +130,11 @@ err:
 
 int mantis_dma_init(struct mantis_pci *mantis)
 {
-       int err = 0;
+       int err;
 
        dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
-       if (mantis_alloc_buffers(mantis) < 0) {
+       err = mantis_alloc_buffers(mantis);
+       if (err < 0) {
                dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
 
                /* Stop RISC Engine */
index 8939ebd7439198161935f573652f25babb438840..84fa6e9b59a1acd9360364fbd30ca0a4e00d3a56 100644 (file)
@@ -184,125 +184,9 @@ out:
        return -EINVAL;
 }
 
-static struct ir_raw_timings_manchester ir_rc5_timings = {
-       .leader                 = RC5_UNIT,
-       .pulse_space_start      = 0,
-       .clock                  = RC5_UNIT,
-       .trailer_space          = RC5_UNIT * 10,
-};
-
-static struct ir_raw_timings_manchester ir_rc5x_timings[2] = {
-       {
-               .leader                 = RC5_UNIT,
-               .pulse_space_start      = 0,
-               .clock                  = RC5_UNIT,
-               .trailer_space          = RC5X_SPACE,
-       },
-       {
-               .clock                  = RC5_UNIT,
-               .trailer_space          = RC5_UNIT * 10,
-       },
-};
-
-static struct ir_raw_timings_manchester ir_rc5_sz_timings = {
-       .leader                         = RC5_UNIT,
-       .pulse_space_start              = 0,
-       .clock                          = RC5_UNIT,
-       .trailer_space                  = RC5_UNIT * 10,
-};
-
-static int ir_rc5_validate_filter(const struct rc_scancode_filter *scancode,
-                                 unsigned int important_bits)
-{
-       /* all important bits of scancode should be set in mask */
-       if (~scancode->mask & important_bits)
-               return -EINVAL;
-       /* extra bits in mask should be zero in data */
-       if (scancode->mask & scancode->data & ~important_bits)
-               return -EINVAL;
-       return 0;
-}
-
-/**
- * ir_rc5_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode:  scancode filter describing scancode (helps distinguish between
- *             protocol subtypes when scancode is ambiguous)
- * @events:    array of raw ir events to write into
- * @max:       maximum size of @events
- *
- * Returns:    The number of events written.
- *             -ENOBUFS if there isn't enough space in the array to fit the
- *             encoding. In this case all @max events will have been written.
- *             -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc5_encode(u64 protocols,
-                        const struct rc_scancode_filter *scancode,
-                        struct ir_raw_event *events, unsigned int max)
-{
-       int ret;
-       struct ir_raw_event *e = events;
-       unsigned int data, xdata, command, commandx, system;
-
-       /* Detect protocol and convert scancode to raw data */
-       if (protocols & RC_BIT_RC5 &&
-           !ir_rc5_validate_filter(scancode, 0x1f7f)) {
-               /* decode scancode */
-               command  = (scancode->data & 0x003f) >> 0;
-               commandx = (scancode->data & 0x0040) >> 6;
-               system   = (scancode->data & 0x1f00) >> 8;
-               /* encode data */
-               data = !commandx << 12 | system << 6 | command;
-
-               /* Modulate the data */
-               ret = ir_raw_gen_manchester(&e, max, &ir_rc5_timings, RC5_NBITS,
-                                           data);
-               if (ret < 0)
-                       return ret;
-       } else if (protocols & RC_BIT_RC5X &&
-                  !ir_rc5_validate_filter(scancode, 0x1f7f3f)) {
-               /* decode scancode */
-               xdata    = (scancode->data & 0x00003f) >> 0;
-               command  = (scancode->data & 0x003f00) >> 8;
-               commandx = (scancode->data & 0x004000) >> 14;
-               system   = (scancode->data & 0x1f0000) >> 16;
-               /* commandx and system overlap, bits must match when encoded */
-               if (commandx == (system & 0x1))
-                       return -EINVAL;
-               /* encode data */
-               data = 1 << 18 | system << 12 | command << 6 | xdata;
-
-               /* Modulate the data */
-               ret = ir_raw_gen_manchester(&e, max, &ir_rc5x_timings[0],
-                                       CHECK_RC5X_NBITS,
-                                       data >> (RC5X_NBITS-CHECK_RC5X_NBITS));
-               if (ret < 0)
-                       return ret;
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                       &ir_rc5x_timings[1],
-                                       RC5X_NBITS - CHECK_RC5X_NBITS,
-                                       data);
-               if (ret < 0)
-                       return ret;
-       } else if (protocols & RC_BIT_RC5_SZ &&
-                  !ir_rc5_validate_filter(scancode, 0x2fff)) {
-               /* RC5-SZ scancode is raw enough for Manchester as it is */
-               ret = ir_raw_gen_manchester(&e, max, &ir_rc5_sz_timings,
-                                       RC5_SZ_NBITS, scancode->data & 0x2fff);
-               if (ret < 0)
-                       return ret;
-       } else {
-               return -EINVAL;
-       }
-
-       return e - events;
-}
-
 static struct ir_raw_handler rc5_handler = {
        .protocols      = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ,
        .decode         = ir_rc5_decode,
-       .encode         = ir_rc5_encode,
 };
 
 static int __init ir_rc5_decode_init(void)
index f9c70baf6e0cb1f071f0a6f7e2ac9e81cf131817..d16bc67af732251998fb280b86d887835cf39e1b 100644 (file)
@@ -291,133 +291,11 @@ out:
        return -EINVAL;
 }
 
-static struct ir_raw_timings_manchester ir_rc6_timings[4] = {
-       {
-               .leader                 = RC6_PREFIX_PULSE,
-               .pulse_space_start      = 0,
-               .clock                  = RC6_UNIT,
-               .invert                 = 1,
-               .trailer_space          = RC6_PREFIX_SPACE,
-       },
-       {
-               .clock                  = RC6_UNIT,
-               .invert                 = 1,
-       },
-       {
-               .clock                  = RC6_UNIT * 2,
-               .invert                 = 1,
-       },
-       {
-               .clock                  = RC6_UNIT,
-               .invert                 = 1,
-               .trailer_space          = RC6_SUFFIX_SPACE,
-       },
-};
-
-static int ir_rc6_validate_filter(const struct rc_scancode_filter *scancode,
-                                 unsigned int important_bits)
-{
-       /* all important bits of scancode should be set in mask */
-       if (~scancode->mask & important_bits)
-               return -EINVAL;
-       /* extra bits in mask should be zero in data */
-       if (scancode->mask & scancode->data & ~important_bits)
-               return -EINVAL;
-       return 0;
-}
-
-/**
- * ir_rc6_encode() - Encode a scancode as a stream of raw events
- *
- * @protocols: allowed protocols
- * @scancode:  scancode filter describing scancode (helps distinguish between
- *             protocol subtypes when scancode is ambiguous)
- * @events:    array of raw ir events to write into
- * @max:       maximum size of @events
- *
- * Returns:    The number of events written.
- *             -ENOBUFS if there isn't enough space in the array to fit the
- *             encoding. In this case all @max events will have been written.
- *             -EINVAL if the scancode is ambiguous or invalid.
- */
-static int ir_rc6_encode(u64 protocols,
-                        const struct rc_scancode_filter *scancode,
-                        struct ir_raw_event *events, unsigned int max)
-{
-       int ret;
-       struct ir_raw_event *e = events;
-
-       if (protocols & RC_BIT_RC6_0 &&
-           !ir_rc6_validate_filter(scancode, 0xffff)) {
-
-               /* Modulate the preamble */
-               ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
-               if (ret < 0)
-                       return ret;
-
-               /* Modulate the header (Start Bit & Mode-0) */
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                           &ir_rc6_timings[1],
-                                           RC6_HEADER_NBITS, (1 << 3));
-               if (ret < 0)
-                       return ret;
-
-               /* Modulate Trailer Bit */
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                           &ir_rc6_timings[2], 1, 0);
-               if (ret < 0)
-                       return ret;
-
-               /* Modulate rest of the data */
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                           &ir_rc6_timings[3], RC6_0_NBITS,
-                                           scancode->data);
-               if (ret < 0)
-                       return ret;
-
-       } else if (protocols & (RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 |
-                               RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE) &&
-                  !ir_rc6_validate_filter(scancode, 0x8fffffff)) {
-
-               /* Modulate the preamble */
-               ret = ir_raw_gen_manchester(&e, max, &ir_rc6_timings[0], 0, 0);
-               if (ret < 0)
-                       return ret;
-
-               /* Modulate the header (Start Bit & Header-version 6 */
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                           &ir_rc6_timings[1],
-                                           RC6_HEADER_NBITS, (1 << 3 | 6));
-               if (ret < 0)
-                       return ret;
-
-               /* Modulate Trailer Bit */
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                           &ir_rc6_timings[2], 1, 0);
-               if (ret < 0)
-                       return ret;
-
-               /* Modulate rest of the data */
-               ret = ir_raw_gen_manchester(&e, max - (e - events),
-                                           &ir_rc6_timings[3],
-                                           fls(scancode->mask),
-                                           scancode->data);
-               if (ret < 0)
-                       return ret;
-
-       } else {
-               return -EINVAL;
-       }
-
-       return e - events;
-}
-
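
In the header words passed to ir_raw_gen_manchester above, bit 3 is the start
bit and the low three bits select the RC6 mode, which is why the RC6-0 branch
passes (1 << 3) and the 6A branch passes (1 << 3 | 6). A tiny sketch of that
field packing (inferred from the two call sites, not quoted from a spec):

#include <stdio.h>

/* Pack the 4-bit RC6 header: start bit (bit 3) plus a 3-bit mode. */
static unsigned rc6_header(unsigned mode)
{
        return (1u << 3) | (mode & 0x7);
}

int main(void)
{
        /* prints "mode 0 -> 0x8, mode 6 -> 0xe" */
        printf("mode 0 -> 0x%x, mode 6 -> 0x%x\n",
               rc6_header(0), rc6_header(6));
        return 0;
}
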
 static struct ir_raw_handler rc6_handler = {
        .protocols      = RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 |
                          RC_BIT_RC6_6A_24 | RC_BIT_RC6_6A_32 |
                          RC_BIT_RC6_MCE,
        .decode         = ir_rc6_decode,
-       .encode         = ir_rc6_encode,
 };
 
 static int __init ir_rc6_decode_init(void)
index baeb5971fd52cecca1a01f436713f9f3a77c65ca..85af7a8691677a9b3965f469f37bc49b741de6b7 100644 (file)
@@ -526,130 +526,6 @@ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
        return 0;
 }
 
-static int nvt_write_wakeup_codes(struct rc_dev *dev,
-                                 const u8 *wakeup_sample_buf, int count)
-{
-       int i = 0;
-       u8 reg, reg_learn_mode;
-       unsigned long flags;
-       struct nvt_dev *nvt = dev->priv;
-
-       nvt_dbg_wake("writing wakeup samples");
-
-       reg = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
-       reg_learn_mode = reg & ~CIR_WAKE_IRCON_MODE0;
-       reg_learn_mode |= CIR_WAKE_IRCON_MODE1;
-
-       /* Lock the learn area to prevent racing with wake-isr */
-       spin_lock_irqsave(&nvt->nvt_lock, flags);
-
-       /* Enable fifo writes */
-       nvt_cir_wake_reg_write(nvt, reg_learn_mode, CIR_WAKE_IRCON);
-
-       /* Clear cir wake rx fifo */
-       nvt_clear_cir_wake_fifo(nvt);
-
-       if (count > WAKE_FIFO_LEN) {
-               nvt_dbg_wake("HW FIFO too small for all wake samples");
-               count = WAKE_FIFO_LEN;
-       }
-
-       if (count)
-               pr_info("Wake samples (%d) =", count);
-       else
-               pr_info("Wake sample fifo cleared");
-
-       /* Write wake samples to fifo */
-       for (i = 0; i < count; i++) {
-               pr_cont(" %02x", wakeup_sample_buf[i]);
-               nvt_cir_wake_reg_write(nvt, wakeup_sample_buf[i],
-                                      CIR_WAKE_WR_FIFO_DATA);
-       }
-       pr_cont("\n");
-
-       /* Switch cir to wakeup mode and disable fifo writing */
-       nvt_cir_wake_reg_write(nvt, reg, CIR_WAKE_IRCON);
-
-       /* Set number of bytes needed for wake */
-       nvt_cir_wake_reg_write(nvt, count ? count :
-                              CIR_WAKE_FIFO_CMP_BYTES,
-                              CIR_WAKE_FIFO_CMP_DEEP);
-
-       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
-       return 0;
-}
-
-static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
-                                       struct rc_scancode_filter *sc_filter)
-{
-       u8 *reg_buf;
-       u8 buf_val;
-       int i, ret, count;
-       unsigned int val;
-       struct ir_raw_event *raw;
-       bool complete;
-
-       /* Require both mask and data to be set before actually committing */
-       if (!sc_filter->mask || !sc_filter->data)
-               return 0;
-
-       raw = kmalloc_array(WAKE_FIFO_LEN, sizeof(*raw), GFP_KERNEL);
-       if (!raw)
-               return -ENOMEM;
-
-       ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
-                                    raw, WAKE_FIFO_LEN);
-       complete = (ret != -ENOBUFS);
-       if (!complete)
-               ret = WAKE_FIFO_LEN;
-       else if (ret < 0)
-               goto out_raw;
-
-       reg_buf = kmalloc_array(WAKE_FIFO_LEN, sizeof(*reg_buf), GFP_KERNEL);
-       if (!reg_buf) {
-               ret = -ENOMEM;
-               goto out_raw;
-       }
-
-       /* Inspect the ir samples */
-       for (i = 0, count = 0; i < ret && count < WAKE_FIFO_LEN; ++i) {
-               val = NS_TO_US((raw[i]).duration) / SAMPLE_PERIOD;
-
-               /* Split too large values into several smaller ones */
-               while (val > 0 && count < WAKE_FIFO_LEN) {
-
-                       /* Skip last value for better comparison tolerance */
-                       if (complete && i == ret - 1 && val < BUF_LEN_MASK)
-                               break;
-
-                       /* Clamp values to BUF_LEN_MASK at most */
-                       buf_val = (val > BUF_LEN_MASK) ? BUF_LEN_MASK : val;
-
-                       reg_buf[count] = buf_val;
-                       val -= buf_val;
-                       if ((raw[i]).pulse)
-                               reg_buf[count] |= BUF_PULSE_BIT;
-                       count++;
-               }
-       }
-
-       ret = nvt_write_wakeup_codes(dev, reg_buf, count);
-
-       kfree(reg_buf);
-out_raw:
-       kfree(raw);
-
-       return ret;
-}
-
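
The loop above turns each raw event into one or more wake-FIFO bytes: the
duration is expressed in sample periods, clamped to the 7-bit length field,
and tagged with a pulse flag in the top bit. A stand-alone sketch of that
clamp-and-split step (the 0x7f/0x80 layout and the 50 us period are
illustrative assumptions patterned on BUF_LEN_MASK, BUF_PULSE_BIT and
SAMPLE_PERIOD, not values quoted from the header):

#include <stdio.h>

#define LEN_MASK        0x7f    /* assumed 7-bit duration field per byte */
#define PULSE_BIT       0x80    /* assumed pulse flag in the top bit */

/* Split one duration (in sample periods) into clamped FIFO bytes. */
static int split(unsigned val, int pulse, unsigned char *buf, int max)
{
        int count = 0;

        while (val > 0 && count < max) {
                unsigned char b = val > LEN_MASK ? LEN_MASK : val;

                buf[count++] = b | (pulse ? PULSE_BIT : 0);
                val -= b;
        }
        return count;
}

int main(void)
{
        unsigned char buf[8];
        /* e.g. a 2.5 ms pulse at a 50 us sample period -> 50 counts */
        int i, n = split(2500 / 50, 1, buf, 8);

        for (i = 0; i < n; i++)
                printf("0x%02x ", buf[i]);      /* one byte: 0xb2 */
        printf("\n");
        return 0;
}
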
-/* Dummy implementation. Nuvoton is agnostic to the protocol used */
-static int nvt_ir_raw_change_wakeup_protocol(struct rc_dev *dev,
-                                            u64 *rc_type)
-{
-       return 0;
-}
-
 /*
  * nvt_tx_ir
  *
@@ -1167,14 +1043,11 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
        /* Set up the rc device */
        rdev->priv = nvt;
        rdev->driver_type = RC_DRIVER_IR_RAW;
-       rdev->encode_wakeup = true;
        rdev->allowed_protocols = RC_BIT_ALL;
        rdev->open = nvt_open;
        rdev->close = nvt_close;
        rdev->tx_ir = nvt_tx_ir;
        rdev->s_tx_carrier = nvt_set_tx_carrier;
-       rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
-       rdev->change_wakeup_protocol = nvt_ir_raw_change_wakeup_protocol;
        rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
        rdev->input_phys = "nuvoton/cir0";
        rdev->input_id.bustype = BUS_HOST;
index 9d0e161c2a886c511d3fde3928b154b0cfb7f0db..e1cf23c3875b16ead52464d4e6ad3c479fd1e951 100644 (file)
@@ -63,7 +63,6 @@ static int debug;
  */
 #define TX_BUF_LEN 256
 #define RX_BUF_LEN 32
-#define WAKE_FIFO_LEN 67
 
 struct nvt_dev {
        struct pnp_dev *pdev;
index 4b994aa2f2a7d1f763fd5bb7eccfb95f8ba1b6d6..b68d4f76273448fcbc98fc1e215a6656c9b6c6ec 100644 (file)
@@ -25,8 +25,6 @@ struct ir_raw_handler {
 
        u64 protocols; /* which are handled by this handler */
        int (*decode)(struct rc_dev *dev, struct ir_raw_event event);
-       int (*encode)(u64 protocols, const struct rc_scancode_filter *scancode,
-                     struct ir_raw_event *events, unsigned int max);
 
        /* These two should only be used by the lirc decoder */
        int (*raw_register)(struct rc_dev *dev);
@@ -152,44 +150,10 @@ static inline bool is_timing_event(struct ir_raw_event ev)
 #define TO_US(duration)                        DIV_ROUND_CLOSEST((duration), 1000)
 #define TO_STR(is_pulse)               ((is_pulse) ? "pulse" : "space")
 
-/* functions for IR encoders */
-
-static inline void init_ir_raw_event_duration(struct ir_raw_event *ev,
-                                             unsigned int pulse,
-                                             u32 duration)
-{
-       init_ir_raw_event(ev);
-       ev->duration = duration;
-       ev->pulse = pulse;
-}
-
-/**
- * struct ir_raw_timings_manchester - Manchester coding timings
- * @leader:            duration of leader pulse (if any); 0 if continuing
- *                     existing signal (see @pulse_space_start)
- * @pulse_space_start: 1 for starting with pulse (0 for starting with space)
- * @clock:             duration of each pulse/space in ns
- * @invert:            if set clock logic is inverted
- *                     (0 = space + pulse, 1 = pulse + space)
- * @trailer_space:     duration of trailer space in ns
- */
-struct ir_raw_timings_manchester {
-       unsigned int leader;
-       unsigned int pulse_space_start:1;
-       unsigned int clock;
-       unsigned int invert:1;
-       unsigned int trailer_space;
-};
-
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
-                         const struct ir_raw_timings_manchester *timings,
-                         unsigned int n, unsigned int data);
-
 /*
  * Routines from rc-raw.c to be used internally and by decoders
  */
 u64 ir_raw_get_allowed_protocols(void);
-u64 ir_raw_get_encode_protocols(void);
 int ir_raw_event_register(struct rc_dev *dev);
 void ir_raw_event_unregister(struct rc_dev *dev);
 int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler);
index b9e4645c731c087f3dea99fa652fe09ef19dc8de..b732ac6a26d8065cc26ecb3a648ced999db71367 100644 (file)
@@ -30,7 +30,6 @@ static LIST_HEAD(ir_raw_client_list);
 static DEFINE_MUTEX(ir_raw_handler_lock);
 static LIST_HEAD(ir_raw_handler_list);
 static u64 available_protocols;
-static u64 encode_protocols;
 
 static int ir_raw_event_thread(void *data)
 {
@@ -241,146 +240,12 @@ ir_raw_get_allowed_protocols(void)
        return protocols;
 }
 
-/* used internally by the sysfs interface */
-u64
-ir_raw_get_encode_protocols(void)
-{
-       u64 protocols;
-
-       mutex_lock(&ir_raw_handler_lock);
-       protocols = encode_protocols;
-       mutex_unlock(&ir_raw_handler_lock);
-       return protocols;
-}
-
 static int change_protocol(struct rc_dev *dev, u64 *rc_type)
 {
        /* the caller will update dev->enabled_protocols */
        return 0;
 }
 
-/**
- * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
- * @ev:                Pointer to pointer to next free event. *@ev is incremented for
- *             each raw event filled.
- * @max:       Maximum number of raw events to fill.
- * @timings:   Manchester modulation timings.
- * @n:         Number of bits of data.
- * @data:      Data bits to encode.
- *
- * Encodes the @n least significant bits of @data using Manchester (bi-phase)
- * modulation with the timing characteristics described by @timings, writing up
- * to @max raw IR events using the *@ev pointer.
- *
- * Returns:    0 on success.
- *             -ENOBUFS if there isn't enough space in the array to fit the
- *             full encoded data. In this case all @max events will have been
- *             written.
- */
-int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
-                         const struct ir_raw_timings_manchester *timings,
-                         unsigned int n, unsigned int data)
-{
-       bool need_pulse;
-       unsigned int i;
-       int ret = -ENOBUFS;
-
-       i = 1 << (n - 1);
-
-       if (timings->leader) {
-               if (!max--)
-                       return ret;
-               if (timings->pulse_space_start) {
-                       init_ir_raw_event_duration((*ev)++, 1, timings->leader);
-
-                       if (!max--)
-                               return ret;
-                       init_ir_raw_event_duration((*ev), 0, timings->leader);
-               } else {
-                       init_ir_raw_event_duration((*ev), 1, timings->leader);
-               }
-               i >>= 1;
-       } else {
-               /* continue existing signal */
-               --(*ev);
-       }
-       /* from here on *ev will point to the last event rather than the next */
-
-       while (n && i > 0) {
-               need_pulse = !(data & i);
-               if (timings->invert)
-                       need_pulse = !need_pulse;
-               if (need_pulse == !!(*ev)->pulse) {
-                       (*ev)->duration += timings->clock;
-               } else {
-                       if (!max--)
-                               goto nobufs;
-                       init_ir_raw_event_duration(++(*ev), need_pulse,
-                                                  timings->clock);
-               }
-
-               if (!max--)
-                       goto nobufs;
-               init_ir_raw_event_duration(++(*ev), !need_pulse,
-                                          timings->clock);
-               i >>= 1;
-       }
-
-       if (timings->trailer_space) {
-               if (!(*ev)->pulse)
-                       (*ev)->duration += timings->trailer_space;
-               else if (!max--)
-                       goto nobufs;
-               else
-                       init_ir_raw_event_duration(++(*ev), 0,
-                                                  timings->trailer_space);
-       }
-
-       ret = 0;
-nobufs:
-       /* point to the next event rather than last event before returning */
-       ++(*ev);
-       return ret;
-}
-EXPORT_SYMBOL(ir_raw_gen_manchester);
-
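
For reference, the bi-phase rule the helper implements: each data bit becomes
two half-bit periods at opposite levels, and adjacent half-bits at the same
level are merged into one longer event. A minimal user-space sketch of that
merging rule (the event struct and demo values are invented, not the kernel
types):

#include <stdio.h>
#include <stdbool.h>

struct ev { bool pulse; unsigned duration; };

/* Append one half-bit, merging with the previous event on equal level. */
static int emit(struct ev *out, int n, int max, bool pulse, unsigned clock)
{
        if (n > 0 && out[n - 1].pulse == pulse) {
                out[n - 1].duration += clock;
                return n;
        }
        if (n >= max)
                return -1;      /* the kernel helper reports -ENOBUFS */
        out[n].pulse = pulse;
        out[n].duration = clock;
        return n + 1;
}

int main(void)
{
        struct ev out[16];
        unsigned data = 0x5, clock = 444;       /* demo half-bit period */
        int i, n = 0;

        for (i = 2; i >= 0 && n >= 0; i--) {
                bool bit = (data >> i) & 1;

                /* inverted encoding as in RC6: a set bit is pulse+space */
                n = emit(out, n, 16, bit, clock);
                if (n >= 0)
                        n = emit(out, n, 16, !bit, clock);
        }
        /* 0b101 -> pulse 444, space 888, pulse 888, space 444 */
        for (i = 0; i < n; i++)
                printf("%s %u\n", out[i].pulse ? "pulse" : "space",
                       out[i].duration);
        return 0;
}
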
-/**
- * ir_raw_encode_scancode() - Encode a scancode as raw events
- *
- * @protocols:         permitted protocols
- * @scancode:          scancode filter describing a single scancode
- * @events:            array of raw events to write into
- * @max:               max number of raw events
- *
- * Attempts to encode the scancode as raw events.
- *
- * Returns:    The number of events written.
- *             -ENOBUFS if there isn't enough space in the array to fit the
- *             encoding. In this case all @max events will have been written.
- *             -EINVAL if the scancode is ambiguous or invalid, or if no
- *             compatible encoder was found.
- */
-int ir_raw_encode_scancode(u64 protocols,
-                          const struct rc_scancode_filter *scancode,
-                          struct ir_raw_event *events, unsigned int max)
-{
-       struct ir_raw_handler *handler;
-       int ret = -EINVAL;
-
-       mutex_lock(&ir_raw_handler_lock);
-       list_for_each_entry(handler, &ir_raw_handler_list, list) {
-               if (handler->protocols & protocols && handler->encode) {
-                       ret = handler->encode(protocols, scancode, events, max);
-                       if (ret >= 0 || ret == -ENOBUFS)
-                               break;
-               }
-       }
-       mutex_unlock(&ir_raw_handler_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL(ir_raw_encode_scancode);
-
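
The walk above stops at the first registered handler that both intersects the
requested protocol mask and implements encode; -ENOBUFS also ends the walk,
since the chosen encoder did run and merely lacked room, while other errors
let the next handler try. A stand-alone restatement of that first-match rule
(the handler table and error values are invented for the demo):

#include <stdio.h>

struct handler {
        unsigned long long protocols;
        int (*encode)(unsigned long long protocols);
};

static int enc_ok(unsigned long long p)   { (void)p; return 4; }
static int enc_full(unsigned long long p) { (void)p; return -2; }

/* -1 stands in for -EINVAL, -2 for -ENOBUFS. */
static int dispatch(const struct handler *h, int n, unsigned long long want)
{
        int i, ret = -1;        /* nothing matched */

        for (i = 0; i < n; i++) {
                if (!(h[i].protocols & want) || !h[i].encode)
                        continue;
                ret = h[i].encode(want);
                if (ret >= 0 || ret == -2)
                        break;  /* success, or encoder ran out of space */
        }
        return ret;
}

int main(void)
{
        struct handler tab[] = {
                { 0x1, NULL },          /* decoder only: skipped */
                { 0x2, enc_full },
                { 0x4, enc_ok },
        };

        printf("%d\n", dispatch(tab, 3, 0x4));  /* 4 events written */
        printf("%d\n", dispatch(tab, 3, 0x2));  /* -2: out of space */
        return 0;
}
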
 /*
  * Used to (un)register raw event clients
  */
@@ -463,8 +328,6 @@ int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
                list_for_each_entry(raw, &ir_raw_client_list, list)
                        ir_raw_handler->raw_register(raw->dev);
        available_protocols |= ir_raw_handler->protocols;
-       if (ir_raw_handler->encode)
-               encode_protocols |= ir_raw_handler->protocols;
        mutex_unlock(&ir_raw_handler_lock);
 
        return 0;
@@ -481,8 +344,6 @@ void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
                list_for_each_entry(raw, &ir_raw_client_list, list)
                        ir_raw_handler->raw_unregister(raw->dev);
        available_protocols &= ~ir_raw_handler->protocols;
-       if (ir_raw_handler->encode)
-               encode_protocols &= ~ir_raw_handler->protocols;
        mutex_unlock(&ir_raw_handler_lock);
 }
 EXPORT_SYMBOL(ir_raw_handler_unregister);
index d8bdf63ce9858ccfc78536fc698698e16c751670..63dace8198b0b3dbcb116108e238351fe9684a9e 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/sched.h>
-#include <linux/slab.h>
 #include <media/rc-core.h>
 
 #define DRIVER_NAME    "rc-loopback"
@@ -177,39 +176,6 @@ static int loop_set_carrier_report(struct rc_dev *dev, int enable)
        return 0;
 }
 
-static int loop_set_wakeup_filter(struct rc_dev *dev,
-                                 struct rc_scancode_filter *sc_filter)
-{
-       static const unsigned int max = 512;
-       struct ir_raw_event *raw;
-       int ret;
-       int i;
-
-       /* fine to disable filter */
-       if (!sc_filter->mask)
-               return 0;
-
-       /* encode the specified filter and loop it back */
-       raw = kmalloc_array(max, sizeof(*raw), GFP_KERNEL);
-       ret = ir_raw_encode_scancode(dev->enabled_wakeup_protocols, sc_filter,
-                                    raw, max);
-       /* still loop back the partial raw IR even if it's incomplete */
-       if (ret == -ENOBUFS)
-               ret = max;
-       if (ret >= 0) {
-               /* do the loopback */
-               for (i = 0; i < ret; ++i)
-                       ir_raw_event_store(dev, &raw[i]);
-               ir_raw_event_handle(dev);
-
-               ret = 0;
-       }
-
-       kfree(raw);
-
-       return ret;
-}
-
 static int __init loop_init(void)
 {
        struct rc_dev *rc;
@@ -229,7 +195,6 @@ static int __init loop_init(void)
        rc->map_name            = RC_MAP_EMPTY;
        rc->priv                = &loopdev;
        rc->driver_type         = RC_DRIVER_IR_RAW;
-       rc->encode_wakeup       = true;
        rc->allowed_protocols   = RC_BIT_ALL;
        rc->timeout             = 100 * 1000 * 1000; /* 100 ms */
        rc->min_timeout         = 1;
@@ -244,7 +209,6 @@ static int __init loop_init(void)
        rc->s_idle              = loop_set_idle;
        rc->s_learning_mode     = loop_set_learning_mode;
        rc->s_carrier_report    = loop_set_carrier_report;
-       rc->s_wakeup_filter     = loop_set_wakeup_filter;
 
        loopdev.txmask          = RXMASK_REGULAR;
        loopdev.txcarrier       = 36000;
index 9d015db652808fcb54fc0abfff19e9018dbad488..0ff388a1616876d50ade807ff5bff20204d1b8cd 100644 (file)
@@ -865,8 +865,6 @@ static ssize_t show_protocols(struct device *device,
        } else {
                enabled = dev->enabled_wakeup_protocols;
                allowed = dev->allowed_wakeup_protocols;
-               if (dev->encode_wakeup && !allowed)
-                       allowed = ir_raw_get_encode_protocols();
        }
 
        mutex_unlock(&dev->lock);
@@ -1408,16 +1406,13 @@ int rc_register_device(struct rc_dev *dev)
                path ? path : "N/A");
        kfree(path);
 
-       if (dev->driver_type == RC_DRIVER_IR_RAW || dev->encode_wakeup) {
+       if (dev->driver_type == RC_DRIVER_IR_RAW) {
                /* Load raw decoders, if they aren't already */
                if (!raw_init) {
                        IR_dprintk(1, "Loading raw decoders\n");
                        ir_raw_init();
                        raw_init = true;
                }
-       }
-
-       if (dev->driver_type == RC_DRIVER_IR_RAW) {
                /* calls ir_register_device so unlock mutex here */
                mutex_unlock(&dev->lock);
                rc = ir_raw_event_register(dev);
index 93b315459098932381d61d282cdb3f04194b18ae..a14c428f70e992460ed869b492723a5a08f155b9 100644 (file)
@@ -715,6 +715,7 @@ static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
                break;
        case VB2_BUF_STATE_PREPARING:
        case VB2_BUF_STATE_DEQUEUED:
+       case VB2_BUF_STATE_REQUEUEING:
                /* nothing */
                break;
        }
@@ -1182,7 +1183,8 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
 
        if (WARN_ON(state != VB2_BUF_STATE_DONE &&
                    state != VB2_BUF_STATE_ERROR &&
-                   state != VB2_BUF_STATE_QUEUED))
+                   state != VB2_BUF_STATE_QUEUED &&
+                   state != VB2_BUF_STATE_REQUEUEING))
                state = VB2_BUF_STATE_ERROR;
 
 #ifdef CONFIG_VIDEO_ADV_DEBUG
@@ -1199,22 +1201,30 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
        for (plane = 0; plane < vb->num_planes; ++plane)
                call_void_memop(vb, finish, vb->planes[plane].mem_priv);
 
-       /* Add the buffer to the done buffers list */
        spin_lock_irqsave(&q->done_lock, flags);
-       vb->state = state;
-       if (state != VB2_BUF_STATE_QUEUED)
+       if (state == VB2_BUF_STATE_QUEUED ||
+           state == VB2_BUF_STATE_REQUEUEING) {
+               vb->state = VB2_BUF_STATE_QUEUED;
+       } else {
+               /* Add the buffer to the done buffers list */
                list_add_tail(&vb->done_entry, &q->done_list);
+               vb->state = state;
+       }
        atomic_dec(&q->owned_by_drv_count);
        spin_unlock_irqrestore(&q->done_lock, flags);
 
-       if (state == VB2_BUF_STATE_QUEUED) {
+       switch (state) {
+       case VB2_BUF_STATE_QUEUED:
+               return;
+       case VB2_BUF_STATE_REQUEUEING:
                if (q->start_streaming_called)
                        __enqueue_in_driver(vb);
                return;
+       default:
+               /* Inform any processes that may be waiting for buffers */
+               wake_up(&q->done_wq);
+               break;
        }
-
-       /* Inform any processes that may be waiting for buffers */
-       wake_up(&q->done_wq);
 }
 EXPORT_SYMBOL_GPL(vb2_buffer_done);
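
The net effect of the changes above: DONE and ERROR completions go onto the
done list and wake any waiters, while QUEUED and the new REQUEUEING state keep
the buffer in the queued state, with REQUEUEING additionally handing it
straight back to the driver once streaming has started. A compact stand-alone
restatement of that dispatch (names abbreviated; not the vb2 API):

#include <stdio.h>

enum state { DONE, ERROR, QUEUED, REQUEUEING };

/* What the completion path ends up doing for each reported state. */
static const char *outcome(enum state s, int streaming)
{
        switch (s) {
        case QUEUED:
                return "stays queued, no wakeup";
        case REQUEUEING:
                return streaming ? "re-enqueued in the driver"
                                 : "stays queued";
        default:
                return "added to done list, waiters woken";
        }
}

int main(void)
{
        printf("%s\n", outcome(REQUEUEING, 1));
        printf("%s\n", outcome(DONE, 1));
        return 0;
}
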
 
@@ -1244,19 +1254,19 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
 
 static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
 {
-       static bool __check_once __read_mostly;
+       static bool check_once;
 
-       if (__check_once)
+       if (check_once)
                return;
 
-       __check_once = true;
-       __WARN();
+       check_once = true;
+       WARN_ON(1);
 
-       pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+       pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
        if (vb->vb2_queue->allow_zero_bytesused)
-               pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+               pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
        else
-               pr_warn_once("use the actual size instead.\n");
+               pr_warn("use the actual size instead.\n");
 }
 
 /**
index 3a27a84ad3ec376a2543c1ac9568c30e5d7c131b..9426276dbe1402b1445dd7b84da6d7fca38893a6 100644 (file)
@@ -2245,6 +2245,9 @@ void omap3_gpmc_save_context(void)
 {
        int i;
 
+       if (!gpmc_base)
+               return;
+
        gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
        gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
        gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
@@ -2277,6 +2280,9 @@ void omap3_gpmc_restore_context(void)
 {
        int i;
 
+       if (!gpmc_base)
+               return;
+
        gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
        gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
        gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
index 9f579589e8000aaac06333c8d7162bdd5b6041ae..8053f70dbfd19266f9cbbbc220be85d8967d7d11 100644 (file)
@@ -9,8 +9,6 @@
 #include <linux/of.h>
 #include <linux/mm.h>
 
-#include <asm/cacheflush.h>
-
 #include <dt-bindings/memory/tegra114-mc.h>
 
 #include "mc.h"
@@ -914,20 +912,6 @@ static const struct tegra_smmu_swgroup tegra114_swgroups[] = {
        { .name = "tsec",      .swgroup = TEGRA_SWGROUP_TSEC,      .reg = 0x294 },
 };
 
-static void tegra114_flush_dcache(struct page *page, unsigned long offset,
-                                 size_t size)
-{
-       phys_addr_t phys = page_to_phys(page) + offset;
-       void *virt = page_address(page) + offset;
-
-       __cpuc_flush_dcache_area(virt, size);
-       outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra114_smmu_ops = {
-       .flush_dcache = tegra114_flush_dcache,
-};
-
 static const struct tegra_smmu_soc tegra114_smmu_soc = {
        .clients = tegra114_mc_clients,
        .num_clients = ARRAY_SIZE(tegra114_mc_clients),
@@ -935,8 +919,8 @@ static const struct tegra_smmu_soc tegra114_smmu_soc = {
        .num_swgroups = ARRAY_SIZE(tegra114_swgroups),
        .supports_round_robin_arbitration = false,
        .supports_request_limit = false,
+       .num_tlb_lines = 32,
        .num_asids = 4,
-       .ops = &tegra114_smmu_ops,
 };
 
 const struct tegra_mc_soc tegra114_mc_soc = {
index 966e1557e6f414598868a8392b5487cb05e09f61..7d734befe0ed3122cce8032098345c3ff5f40d39 100644 (file)
@@ -9,8 +9,6 @@
 #include <linux/of.h>
 #include <linux/mm.h>
 
-#include <asm/cacheflush.h>
-
 #include <dt-bindings/memory/tegra124-mc.h>
 
 #include "mc.h"
@@ -1002,20 +1000,6 @@ static const struct tegra_smmu_swgroup tegra124_swgroups[] = {
 };
 
 #ifdef CONFIG_ARCH_TEGRA_124_SOC
-static void tegra124_flush_dcache(struct page *page, unsigned long offset,
-                                 size_t size)
-{
-       phys_addr_t phys = page_to_phys(page) + offset;
-       void *virt = page_address(page) + offset;
-
-       __cpuc_flush_dcache_area(virt, size);
-       outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra124_smmu_ops = {
-       .flush_dcache = tegra124_flush_dcache,
-};
-
 static const struct tegra_smmu_soc tegra124_smmu_soc = {
        .clients = tegra124_mc_clients,
        .num_clients = ARRAY_SIZE(tegra124_mc_clients),
@@ -1024,7 +1008,6 @@ static const struct tegra_smmu_soc tegra124_smmu_soc = {
        .supports_round_robin_arbitration = true,
        .supports_request_limit = true,
        .num_asids = 128,
-       .ops = &tegra124_smmu_ops,
 };
 
 const struct tegra_mc_soc tegra124_mc_soc = {
@@ -1039,18 +1022,6 @@ const struct tegra_mc_soc tegra124_mc_soc = {
 #endif /* CONFIG_ARCH_TEGRA_124_SOC */
 
 #ifdef CONFIG_ARCH_TEGRA_132_SOC
-static void tegra132_flush_dcache(struct page *page, unsigned long offset,
-                                 size_t size)
-{
-       void *virt = page_address(page) + offset;
-
-       __flush_dcache_area(virt, size);
-}
-
-static const struct tegra_smmu_ops tegra132_smmu_ops = {
-       .flush_dcache = tegra132_flush_dcache,
-};
-
 static const struct tegra_smmu_soc tegra132_smmu_soc = {
        .clients = tegra124_mc_clients,
        .num_clients = ARRAY_SIZE(tegra124_mc_clients),
@@ -1058,8 +1029,8 @@ static const struct tegra_smmu_soc tegra132_smmu_soc = {
        .num_swgroups = ARRAY_SIZE(tegra124_swgroups),
        .supports_round_robin_arbitration = true,
        .supports_request_limit = true,
+       .num_tlb_lines = 32,
        .num_asids = 128,
-       .ops = &tegra132_smmu_ops,
 };
 
 const struct tegra_mc_soc tegra132_mc_soc = {
index 1abcd8f6f3ba60ed6cdabcc28478123061af0b63..7e0694d80edb3a3a5d63a543f15ab516b87ac0b1 100644 (file)
@@ -9,8 +9,6 @@
 #include <linux/of.h>
 #include <linux/mm.h>
 
-#include <asm/cacheflush.h>
-
 #include <dt-bindings/memory/tegra30-mc.h>
 
 #include "mc.h"
@@ -936,20 +934,6 @@ static const struct tegra_smmu_swgroup tegra30_swgroups[] = {
        { .name = "isp",  .swgroup = TEGRA_SWGROUP_ISP,  .reg = 0x258 },
 };
 
-static void tegra30_flush_dcache(struct page *page, unsigned long offset,
-                                size_t size)
-{
-       phys_addr_t phys = page_to_phys(page) + offset;
-       void *virt = page_address(page) + offset;
-
-       __cpuc_flush_dcache_area(virt, size);
-       outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra30_smmu_ops = {
-       .flush_dcache = tegra30_flush_dcache,
-};
-
 static const struct tegra_smmu_soc tegra30_smmu_soc = {
        .clients = tegra30_mc_clients,
        .num_clients = ARRAY_SIZE(tegra30_mc_clients),
@@ -957,8 +941,8 @@ static const struct tegra_smmu_soc tegra30_smmu_soc = {
        .num_swgroups = ARRAY_SIZE(tegra30_swgroups),
        .supports_round_robin_arbitration = false,
        .supports_request_limit = false,
+       .num_tlb_lines = 16,
        .num_asids = 4,
-       .ops = &tegra30_smmu_ops,
 };
 
 const struct tegra_mc_soc tegra30_mc_soc = {
index 653815950aa2416b277718df69213545573aa557..3f68dd251ce89304bf044960568c58c11aca8fdd 100644 (file)
@@ -115,7 +115,7 @@ config MFD_CROS_EC_I2C
 
 config MFD_CROS_EC_SPI
        tristate "ChromeOS Embedded Controller (SPI)"
-       depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF
+       depends on MFD_CROS_EC && CROS_EC_PROTO && SPI
 
        ---help---
          If you say Y here, you get support for talking to the ChromeOS EC
index bebf58a06a6b2932d57798c0b5b81a31a12e9b7a..a72ddb2950784cf044fbfb5156ebd68866bbea48 100644 (file)
@@ -651,7 +651,7 @@ static int arizona_runtime_suspend(struct device *dev)
 
                arizona->has_fully_powered_off = true;
 
-               disable_irq(arizona->irq);
+               disable_irq_nosync(arizona->irq);
                arizona_enable_reset(arizona);
                regulator_bulk_disable(arizona->num_core_supplies,
                                       arizona->core_supplies);
@@ -1141,10 +1141,6 @@ int arizona_dev_init(struct arizona *arizona)
                             arizona->pdata.gpio_defaults[i]);
        }
 
-       pm_runtime_set_autosuspend_delay(arizona->dev, 100);
-       pm_runtime_use_autosuspend(arizona->dev);
-       pm_runtime_enable(arizona->dev);
-
        /* Chip default */
        if (!arizona->pdata.clk32k_src)
                arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
@@ -1245,11 +1241,17 @@ int arizona_dev_init(struct arizona *arizona)
                                           arizona->pdata.spk_fmt[i]);
        }
 
+       pm_runtime_set_active(arizona->dev);
+       pm_runtime_enable(arizona->dev);
+
        /* Set up for interrupts */
        ret = arizona_irq_init(arizona);
        if (ret != 0)
                goto err_reset;
 
+       pm_runtime_set_autosuspend_delay(arizona->dev, 100);
+       pm_runtime_use_autosuspend(arizona->dev);
+
        arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
                            arizona_clkgen_err, arizona);
        arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
@@ -1278,10 +1280,6 @@ int arizona_dev_init(struct arizona *arizona)
                goto err_irq;
        }
 
-#ifdef CONFIG_PM
-       regulator_disable(arizona->dcvdd);
-#endif
-
        return 0;
 
 err_irq:
index 2d3db81be0990a1b88109aa7614f74f201930204..6ded3dc36644a31a0bd4775f36df72f1c00a7e0d 100644 (file)
@@ -438,9 +438,6 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
 {
        struct at24_data *at24;
 
-       if (unlikely(off >= attr->size))
-               return -EFBIG;
-
        at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
        return at24_write(at24, buf, off, count);
 }
index e1ccefce9a9de629505344f9fc22042ab09b9684..a98dd4f1b0e33126ca04d1a7cecb8ad1e41d8d72 100644 (file)
@@ -786,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
                   slave ? slave->dev->name : "NULL");
 
        if (!slave || !bond->send_peer_notif ||
+           !netif_carrier_ok(bond->dev) ||
            test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
                return false;
 
index 2d1ce3c5d0dd34c9fabb1399a32c49be744afdd1..753887d02b46abc66663a24a2ea3e4e396d6b881 100644 (file)
@@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev)
                        vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
                }
                if (i != RX_RING_SIZE) {
-                       int j;
                        pr_emerg("%s: no memory for rx ring\n", dev->name);
-                       for (j = 0; j < i; j++) {
-                               if (vp->rx_skbuff[j]) {
-                                       dev_kfree_skb(vp->rx_skbuff[j]);
-                                       vp->rx_skbuff[j] = NULL;
-                               }
-                       }
                        retval = -ENOMEM;
-                       goto err_free_irq;
+                       goto err_free_skb;
                }
                /* Wrap the ring. */
                vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev)
        if (!retval)
                goto out;
 
-err_free_irq:
+err_free_skb:
+       for (i = 0; i < RX_RING_SIZE; i++) {
+               if (vp->rx_skbuff[i]) {
+                       dev_kfree_skb(vp->rx_skbuff[i]);
+                       vp->rx_skbuff[i] = NULL;
+               }
+       }
        free_irq(dev->irq, dev);
 err:
        if (vortex_debug > 1)
index a90d7364334f9dfa3687dc813e068508a342861c..f7fbdc9d132511b72df0db2ce0b92e4df0774c44 100644 (file)
@@ -262,9 +262,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
        if (likely(skb)) {
                (*pkts_compl)++;
                (*bytes_compl) += skb->len;
+               dev_kfree_skb_any(skb);
        }
 
-       dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;
 
index 76b9052a961c517978494199d74398264583508c..5907c821d131eed6fa5b6b0aa7a93dc41cca0d66 100644 (file)
@@ -1718,6 +1718,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
                offset += sizeof(u32);
                data_buf += sizeof(u32);
                written_so_far += sizeof(u32);
+
+               /* At the end of each 4KB page, release the NVRAM lock to give
+                * the MFW a chance to take it for its own use.
+                */
+               if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
+                   (written_so_far < buf_size)) {
+                       DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+                          "Releasing NVM lock after offset 0x%x\n",
+                          (u32)(offset - sizeof(u32)));
+                       bnx2x_release_nvram_lock(bp);
+                       usleep_range(1000, 2000);
+                       rc = bnx2x_acquire_nvram_lock(bp);
+                       if (rc)
+                               return rc;
+               }
+
                cmd_flags = 0;
        }
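
The hunk above adds a cooperative locking pattern: a long NVRAM write releases
the hardware lock at every 4KB page boundary so the management firmware can
grab it mid-operation, then re-acquires it before continuing. A minimal sketch
of the pattern (the lock functions are placeholders, not the bnx2x API):

#include <stdio.h>

#define PAGE_SZ 4096

/* Placeholder lock ops standing in for the driver's acquire/release. */
static int  acquire(void) { return 0; }
static void release(void) { }

static int write_all(unsigned len)
{
        unsigned done = 0;

        if (acquire())
                return -1;
        while (done < len) {
                done += 4;      /* one dword written per step */
                /* At a page boundary, briefly yield the lock to firmware. */
                if (done % PAGE_SZ == 0 && done < len) {
                        release();
                        if (acquire())
                                return -1;
                }
        }
        release();
        return 0;
}

int main(void)
{
        printf("%d\n", write_all(3 * PAGE_SZ)); /* prints 0 */
        return 0;
}
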
 
index 0612b19f6313bd3e6ffa205be2b543585262e31e..506047c386071db9472d60218faa004fe94849a8 100644 (file)
@@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
                        if (!next_cmpl->valid)
                                break;
                }
+               packets++;
 
                /* TODO: BNA_CQ_EF_LOCAL ? */
                if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
                else
                        bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
 
-               packets++;
                rcb->rxq->rx_packets++;
                rcb->rxq->rx_bytes += totlen;
                ccb->bytes_per_intr += totlen;
index c4d6bbe9458dbfe9c726fdff1e9b6c4c64b8a33f..02e23e6f142487ddc8cc2a8d4e2c512cb7e6db62 100644 (file)
@@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM
 config THUNDER_NIC_PF
        tristate "Thunder Physical function driver"
        depends on 64BIT
-       default ARCH_THUNDER
        select THUNDER_NIC_BGX
        ---help---
          This driver supports Thunder's NIC physical function.
@@ -29,14 +28,12 @@ config THUNDER_NIC_PF
 config THUNDER_NIC_VF
        tristate "Thunder Virtual function driver"
        depends on 64BIT
-       default ARCH_THUNDER
        ---help---
          This driver supports Thunder's NIC virtual function.
 
 config THUNDER_NIC_BGX
        tristate "Thunder MAC interface driver (BGX)"
        depends on 64BIT
-       default ARCH_THUNDER
        ---help---
          This driver supports programming and controlling the MAC
          interface from the NIC physical function driver.
index a11485fbb33f2b7bcd6c973324ea41601dbaf575..c3c7db41819dfad26b521008c6c70148711ee019 100644 (file)
@@ -2332,10 +2332,11 @@ int t4_setup_debugfs(struct adapter *adap)
                                        EXT_MEM1_SIZE_G(size));
                }
        } else {
-               if (i & EXT_MEM_ENABLE_F)
+               if (i & EXT_MEM_ENABLE_F) {
                        size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
                        add_debugfs_mem(adap, "mc", MEM_MC,
                                        EXT_MEM_SIZE_G(size));
+               }
        }
 
        de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
index 2716e6f30d9a0949633b40dc9864196c7465fa3a..00e3a6b6b82254269aa7e0bf5d165f13d358594f 100644 (file)
@@ -620,6 +620,11 @@ enum be_if_flags {
                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |\
                                         BE_IF_FLAGS_MCAST_PROMISCUOUS)
 
+#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
+                       BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
+
+#define BE_IF_ALL_FILT_FLAGS   (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
+
 /* An RX interface is an object with one or more MAC addresses and
  * filtering capabilities. */
 struct be_cmd_req_if_create {
index 6f642426308c67399eac3abdb20ae6160ce41d2a..6ca693b03f33abbbdb6097544f2f4ee7f3928720 100644 (file)
@@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;
 
+       /* if device is not running, copy MAC to netdev->dev_addr */
+       if (!netif_running(netdev))
+               goto done;
+
        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
@@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
                status = -EPERM;
                goto err;
        }
-
-       memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-       dev_info(dev, "MAC address changed to %pM\n", mac);
+done:
+       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+       dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
        return 0;
 err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
@@ -2447,10 +2451,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
        be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
 }
 
-static void be_rx_cq_clean(struct be_rx_obj *rxo)
+/* Free posted rx buffers that were not used */
+static void be_rxq_clean(struct be_rx_obj *rxo)
 {
-       struct be_rx_page_info *page_info;
        struct be_queue_info *rxq = &rxo->q;
+       struct be_rx_page_info *page_info;
+
+       while (atomic_read(&rxq->used) > 0) {
+               page_info = get_rx_page_info(rxo);
+               put_page(page_info->page);
+               memset(page_info, 0, sizeof(*page_info));
+       }
+       BUG_ON(atomic_read(&rxq->used));
+       rxq->tail = 0;
+       rxq->head = 0;
+}
+
+static void be_rx_cq_clean(struct be_rx_obj *rxo)
+{
        struct be_queue_info *rx_cq = &rxo->cq;
        struct be_rx_compl_info *rxcp;
        struct be_adapter *adapter = rxo->adapter;
@@ -2487,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
 
        /* After cleanup, leave the CQ in unarmed state */
        be_cq_notify(adapter, rx_cq->id, false, 0);
-
-       /* Then free posted rx buffers that were not used */
-       while (atomic_read(&rxq->used) > 0) {
-               page_info = get_rx_page_info(rxo);
-               put_page(page_info->page);
-               memset(page_info, 0, sizeof(*page_info));
-       }
-       BUG_ON(atomic_read(&rxq->used));
-       rxq->tail = 0;
-       rxq->head = 0;
 }
 
 static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2576,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
                        be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
                        napi_hash_del(&eqo->napi);
                        netif_napi_del(&eqo->napi);
+                       free_cpumask_var(eqo->affinity_mask);
                }
-               free_cpumask_var(eqo->affinity_mask);
                be_queue_free(adapter, &eqo->q);
        }
 }
@@ -2594,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 
        for_all_evt_queues(adapter, eqo, i) {
                int numa_node = dev_to_node(&adapter->pdev->dev);
-               if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
-                       return -ENOMEM;
-               cpumask_set_cpu(cpumask_local_spread(i, numa_node),
-                               eqo->affinity_mask);
-               netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
-                              BE_NAPI_WEIGHT);
-               napi_hash_add(&eqo->napi);
+
                aic = &adapter->aic_obj[i];
                eqo->adapter = adapter;
                eqo->idx = i;
@@ -2616,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
                rc = be_cmd_eq_create(adapter, eqo);
                if (rc)
                        return rc;
+
+               if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
+                       return -ENOMEM;
+               cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+                               eqo->affinity_mask);
+               netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+                              BE_NAPI_WEIGHT);
+               napi_hash_add(&eqo->napi);
        }
        return 0;
 }
@@ -3354,13 +3364,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
+                       /* If RXQs are destroyed while in an "out of buffer"
+                        * state, there is a possibility of an HW stall on
+                        * Lancer. So, post 64 buffers to each queue to relieve
+                        * the "out of buffer" condition.
+                        * Make sure there's space in the RXQ before posting.
+                        */
+                       if (lancer_chip(adapter)) {
+                               be_rx_cq_clean(rxo);
+                               if (atomic_read(&q->used) == 0)
+                                       be_post_rx_frags(rxo, GFP_KERNEL,
+                                                        MAX_RX_POST);
+                       }
+
                        be_cmd_rxq_destroy(adapter, q);
                        be_rx_cq_clean(rxo);
+                       be_rxq_clean(rxo);
                }
                be_queue_free(adapter, q);
        }
 }
 
+static void be_disable_if_filters(struct be_adapter *adapter)
+{
+       be_cmd_pmac_del(adapter, adapter->if_handle,
+                       adapter->pmac_id[0], 0);
+
+       be_clear_uc_list(adapter);
+
+       /* The IFACE flags are enabled in the open path and cleared
+        * in the close path. When a VF gets detached from the host and
+        * assigned to a VM the following happens:
+        *      - VF's IFACE flags get cleared in the detach path
+        *      - IFACE create is issued by the VF in the attach path
+        * Due to a bug in the BE3/Skyhawk-R FW
+        * (Lancer FW doesn't have the bug), the IFACE capability flags
+        * specified along with the IFACE create cmd issued by a VF are not
+        * honoured by FW.  As a consequence, if a *new* driver
+        * (that enables/disables IFACE flags in open/close)
+        * is loaded in the host and an *old* driver is used by a VM/VF,
+        * the IFACE gets created *without* the needed flags.
+        * To avoid this, disable RX-filter flags only for Lancer.
+        */
+       if (lancer_chip(adapter)) {
+               be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
+               adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
+       }
+}
+
 static int be_close(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
@@ -3373,6 +3424,8 @@ static int be_close(struct net_device *netdev)
        if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
                return 0;
 
+       be_disable_if_filters(adapter);
+
        be_roce_dev_close(adapter);
 
        if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3392,7 +3445,6 @@ static int be_close(struct net_device *netdev)
        be_tx_compl_clean(adapter);
 
        be_rx_qs_destroy(adapter);
-       be_clear_uc_list(adapter);
 
        for_all_evt_queues(adapter, eqo, i) {
                if (msix_enabled(adapter))
@@ -3477,6 +3529,31 @@ static int be_rx_qs_create(struct be_adapter *adapter)
        return 0;
 }
 
+static int be_enable_if_filters(struct be_adapter *adapter)
+{
+       int status;
+
+       status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
+       if (status)
+               return status;
+
+       /* For BE3 VFs, the PF programs the initial MAC address */
+       if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+               status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
+                                        adapter->if_handle,
+                                        &adapter->pmac_id[0], 0);
+               if (status)
+                       return status;
+       }
+
+       if (adapter->vlans_added)
+               be_vid_config(adapter);
+
+       be_set_rx_mode(adapter->netdev);
+
+       return 0;
+}
+
 static int be_open(struct net_device *netdev)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
@@ -3490,6 +3567,10 @@ static int be_open(struct net_device *netdev)
        if (status)
                goto err;
 
+       status = be_enable_if_filters(adapter);
+       if (status)
+               goto err;
+
        status = be_irq_register(adapter);
        if (status)
                goto err;
@@ -3686,16 +3767,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
        }
 }
 
-static void be_mac_clear(struct be_adapter *adapter)
-{
-       if (adapter->pmac_id) {
-               be_cmd_pmac_del(adapter, adapter->if_handle,
-                               adapter->pmac_id[0], 0);
-               kfree(adapter->pmac_id);
-               adapter->pmac_id = NULL;
-       }
-}
-
 #ifdef CONFIG_BE2NET_VXLAN
 static void be_disable_vxlan_offloads(struct be_adapter *adapter)
 {
@@ -3770,8 +3841,8 @@ static int be_clear(struct be_adapter *adapter)
 #ifdef CONFIG_BE2NET_VXLAN
        be_disable_vxlan_offloads(adapter);
 #endif
-       /* delete the primary mac along with the uc-mac list */
-       be_mac_clear(adapter);
+       kfree(adapter->pmac_id);
+       adapter->pmac_id = NULL;
 
        be_cmd_if_destroy(adapter, adapter->if_handle,  0);
 
@@ -3782,25 +3853,11 @@ static int be_clear(struct be_adapter *adapter)
        return 0;
 }
 
-static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
-                       u32 cap_flags, u32 vf)
-{
-       u32 en_flags;
-
-       en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
-                  BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
-                  BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
-
-       en_flags &= cap_flags;
-
-       return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
-}
-
 static int be_vfs_if_create(struct be_adapter *adapter)
 {
        struct be_resources res = {0};
+       u32 cap_flags, en_flags, vf;
        struct be_vf_cfg *vf_cfg;
-       u32 cap_flags, vf;
        int status;
 
        /* If a FW profile exists, then cap_flags are updated */
@@ -3821,8 +3878,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
                        }
                }
 
-               status = be_if_create(adapter, &vf_cfg->if_handle,
-                                     cap_flags, vf + 1);
+               en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
+                                       BE_IF_FLAGS_BROADCAST |
+                                       BE_IF_FLAGS_MULTICAST |
+                                       BE_IF_FLAGS_PASS_L3L4_ERRORS);
+               status = be_cmd_if_create(adapter, cap_flags, en_flags,
+                                         &vf_cfg->if_handle, vf + 1);
                if (status)
                        return status;
        }
@@ -4194,15 +4255,8 @@ static int be_mac_setup(struct be_adapter *adapter)
 
                memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
                memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
-       } else {
-               /* Maybe the HW was reset; dev_addr must be re-programmed */
-               memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
        }
 
-       /* For BE3-R VFs, the PF programs the initial MAC address */
-       if (!(BEx_chip(adapter) && be_virtfn(adapter)))
-               be_cmd_pmac_add(adapter, mac, adapter->if_handle,
-                               &adapter->pmac_id[0], 0);
        return 0;
 }
 
@@ -4342,6 +4396,7 @@ static int be_func_init(struct be_adapter *adapter)
 static int be_setup(struct be_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
+       u32 en_flags;
        int status;
 
        status = be_func_init(adapter);
@@ -4364,8 +4419,11 @@ static int be_setup(struct be_adapter *adapter)
        if (status)
                goto err;
 
-       status = be_if_create(adapter, &adapter->if_handle,
-                             be_if_cap_flags(adapter), 0);
+       /* will enable all the needed filter flags in be_open() */
+       en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
+       en_flags = en_flags & be_if_cap_flags(adapter);
+       status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+                                 &adapter->if_handle, 0);
        if (status)
                goto err;
 
@@ -4391,11 +4449,6 @@ static int be_setup(struct be_adapter *adapter)
                dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
        }
 
-       if (adapter->vlans_added)
-               be_vid_config(adapter);
-
-       be_set_rx_mode(adapter->netdev);
-
        status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
                                         adapter->rx_fc);
        if (status)
@@ -5121,7 +5174,7 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
        struct device *dev = &adapter->pdev->dev;
        int status;
 
-       if (lancer_chip(adapter) || BEx_chip(adapter))
+       if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
                return;
 
        if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
@@ -5168,7 +5221,7 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
 {
        struct be_adapter *adapter = netdev_priv(netdev);
 
-       if (lancer_chip(adapter) || BEx_chip(adapter))
+       if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
                return;
 
        if (adapter->vxlan_port != port)
index 32e3807c650ea7256b09f0c9e406a8219cb2bb78..271bb5862346ede352473f0837fb76e0a5ac7163 100644 (file)
@@ -3433,6 +3433,7 @@ fec_probe(struct platform_device *pdev)
 
        pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
 
index 56316db6c5a674fd1d17ef20e8db3747950cf71e..cf8e54652df95266e24a5d6d64ed67c00336f67b 100644 (file)
@@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        frag = skb_shinfo(skb)->frags;
        while (nr_frags) {
                CBDC_SC(bdp,
-                       BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+                       BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
+                       BD_ENET_TX_TC);
                CBDS_SC(bdp, BD_ENET_TX_READY);
 
                if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
index b34214e2df5f6e55bdbb29e6a8016c139e74345c..016743e355de31984d57904e802d69e6703ea5e4 100644 (file)
@@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
 }
 
 #define FEC_NAPI_RX_EVENT_MSK  (FEC_ENET_RXF | FEC_ENET_RXB)
-#define FEC_NAPI_TX_EVENT_MSK  (FEC_ENET_TXF | FEC_ENET_TXB)
+#define FEC_NAPI_TX_EVENT_MSK  (FEC_ENET_TXF)
 #define FEC_RX_EVENT           (FEC_ENET_RXF)
 #define FEC_TX_EVENT           (FEC_ENET_TXF)
 #define FEC_ERR_EVENT_MSK      (FEC_ENET_HBERR | FEC_ENET_BABR | \
index 2b7610f341b09f4ff293f9916235030487fb2a80..10b3bbbbac8e10e89c47b2d50bb661cc80ad616b 100644 (file)
@@ -2102,6 +2102,11 @@ int startup_gfar(struct net_device *ndev)
        /* Start Rx/Tx DMA and enable the interrupts */
        gfar_start(priv);
 
+       /* force link state update after mac reset */
+       priv->oldlink = 0;
+       priv->oldspeed = 0;
+       priv->oldduplex = -1;
+
        phy_start(priv->phydev);
 
        enable_napi(priv);
index 3c0a8f825b630148c29b4cda6ca46b9797a668be..5b90fcf96265aec4a6daa1f28135ef2fa06a0752 100644 (file)
@@ -900,27 +900,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
        return 0;
 }
 
-static int gfar_comp_asc(const void *a, const void *b)
-{
-       return memcmp(a, b, 4);
-}
-
-static int gfar_comp_desc(const void *a, const void *b)
-{
-       return -memcmp(a, b, 4);
-}
-
-static void gfar_swap(void *a, void *b, int size)
-{
-       u32 *_a = a;
-       u32 *_b = b;
-
-       swap(_a[0], _b[0]);
-       swap(_a[1], _b[1]);
-       swap(_a[2], _b[2]);
-       swap(_a[3], _b[3]);
-}
-
 /* Write a mask to filer cache */
 static void gfar_set_mask(u32 mask, struct filer_table *tab)
 {
@@ -1270,310 +1249,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
        return 0;
 }
 
-/* Copy 'size' filer entries */
-static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
-                                   struct gfar_filer_entry src[0], s32 size)
-{
-       while (size > 0) {
-               size--;
-               dst[size].ctrl = src[size].ctrl;
-               dst[size].prop = src[size].prop;
-       }
-}
-
-/* Delete the contents of the filer-table between start and end
- * and collapse them
- */
-static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
-{
-       int length;
-
-       if (end > MAX_FILER_CACHE_IDX || end < begin)
-               return -EINVAL;
-
-       end++;
-       length = end - begin;
-
-       /* Copy */
-       while (end < tab->index) {
-               tab->fe[begin].ctrl = tab->fe[end].ctrl;
-               tab->fe[begin++].prop = tab->fe[end++].prop;
-
-       }
-       /* Fill up with don't cares */
-       while (begin < tab->index) {
-               tab->fe[begin].ctrl = 0x60;
-               tab->fe[begin].prop = 0xFFFFFFFF;
-               begin++;
-       }
-
-       tab->index -= length;
-       return 0;
-}
-
-/* Make space at the wanted location */
-static int gfar_expand_filer_entries(u32 begin, u32 length,
-                                    struct filer_table *tab)
-{
-       if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
-           begin > MAX_FILER_CACHE_IDX)
-               return -EINVAL;
-
-       gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
-                               tab->index - length + 1);
-
-       tab->index += length;
-       return 0;
-}
-
-static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
-{
-       for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
-            start++) {
-               if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
-                   (RQFCR_AND | RQFCR_CLE))
-                       return start;
-       }
-       return -1;
-}
-
-static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
-{
-       for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
-            start++) {
-               if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
-                   (RQFCR_CLE))
-                       return start;
-       }
-       return -1;
-}
-
-/* Uses the hardware's clustering option to reduce
- * the number of filer table entries
- */
-static void gfar_cluster_filer(struct filer_table *tab)
-{
-       s32 i = -1, j, iend, jend;
-
-       while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
-               j = i;
-               while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
-                       /* The cluster entry itself and the previous one
-                        * (a mask) must be identical!
-                        */
-                       if (tab->fe[i].ctrl != tab->fe[j].ctrl)
-                               break;
-                       if (tab->fe[i].prop != tab->fe[j].prop)
-                               break;
-                       if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
-                               break;
-                       if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
-                               break;
-                       iend = gfar_get_next_cluster_end(i, tab);
-                       jend = gfar_get_next_cluster_end(j, tab);
-                       if (jend == -1 || iend == -1)
-                               break;
-
-                       /* First we make some free space, where our cluster
-                        * element should be. Then we copy it there and finally
-                        * delete in from its old location.
-                        */
-                       if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
-                           -EINVAL)
-                               break;
-
-                       gfar_copy_filer_entries(&(tab->fe[iend + 1]),
-                                               &(tab->fe[jend + 1]), jend - j);
-
-                       if (gfar_trim_filer_entries(jend - 1,
-                                                   jend + (jend - j),
-                                                   tab) == -EINVAL)
-                               return;
-
-                       /* Mask out cluster bit */
-                       tab->fe[iend].ctrl &= ~(RQFCR_CLE);
-               }
-       }
-}
-
-/* Swaps the masked bits of a1<>a2 and b1<>b2 */
-static void gfar_swap_bits(struct gfar_filer_entry *a1,
-                          struct gfar_filer_entry *a2,
-                          struct gfar_filer_entry *b1,
-                          struct gfar_filer_entry *b2, u32 mask)
-{
-       u32 temp[4];
-       temp[0] = a1->ctrl & mask;
-       temp[1] = a2->ctrl & mask;
-       temp[2] = b1->ctrl & mask;
-       temp[3] = b2->ctrl & mask;
-
-       a1->ctrl &= ~mask;
-       a2->ctrl &= ~mask;
-       b1->ctrl &= ~mask;
-       b2->ctrl &= ~mask;
-
-       a1->ctrl |= temp[1];
-       a2->ctrl |= temp[0];
-       b1->ctrl |= temp[3];
-       b2->ctrl |= temp[2];
-}
-
-/* Generate a list consisting of masks values with their start and
- * end of validity and block as indicator for parts belonging
- * together (glued by ANDs) in mask_table
- */
-static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
-                                   struct filer_table *tab)
-{
-       u32 i, and_index = 0, block_index = 1;
-
-       for (i = 0; i < tab->index; i++) {
-
-               /* LSByte of control = 0 sets a mask */
-               if (!(tab->fe[i].ctrl & 0xF)) {
-                       mask_table[and_index].mask = tab->fe[i].prop;
-                       mask_table[and_index].start = i;
-                       mask_table[and_index].block = block_index;
-                       if (and_index >= 1)
-                               mask_table[and_index - 1].end = i - 1;
-                       and_index++;
-               }
-               /* cluster starts and ends will be separated because they should
-                * hold their position
-                */
-               if (tab->fe[i].ctrl & RQFCR_CLE)
-                       block_index++;
-               /* A not set AND indicates the end of a depended block */
-               if (!(tab->fe[i].ctrl & RQFCR_AND))
-                       block_index++;
-       }
-
-       mask_table[and_index - 1].end = i - 1;
-
-       return and_index;
-}
-
-/* Sorts the entries of mask_table by the values of the masks.
- * Important: The 0xFF80 flags of the first and last entry of a
- * block must hold their position (which queue, CLusterEnable, ReJEct,
- * AND)
- */
-static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
-                                struct filer_table *temp_table, u32 and_index)
-{
-       /* Pointer to compare function (_asc or _desc) */
-       int (*gfar_comp)(const void *, const void *);
-
-       u32 i, size = 0, start = 0, prev = 1;
-       u32 old_first, old_last, new_first, new_last;
-
-       gfar_comp = &gfar_comp_desc;
-
-       for (i = 0; i < and_index; i++) {
-               if (prev != mask_table[i].block) {
-                       old_first = mask_table[start].start + 1;
-                       old_last = mask_table[i - 1].end;
-                       sort(mask_table + start, size,
-                            sizeof(struct gfar_mask_entry),
-                            gfar_comp, &gfar_swap);
-
-                       /* Toggle order for every block. This makes the
-                        * thing more efficient!
-                        */
-                       if (gfar_comp == gfar_comp_desc)
-                               gfar_comp = &gfar_comp_asc;
-                       else
-                               gfar_comp = &gfar_comp_desc;
-
-                       new_first = mask_table[start].start + 1;
-                       new_last = mask_table[i - 1].end;
-
-                       gfar_swap_bits(&temp_table->fe[new_first],
-                                      &temp_table->fe[old_first],
-                                      &temp_table->fe[new_last],
-                                      &temp_table->fe[old_last],
-                                      RQFCR_QUEUE | RQFCR_CLE |
-                                      RQFCR_RJE | RQFCR_AND);
-
-                       start = i;
-                       size = 0;
-               }
-               size++;
-               prev = mask_table[i].block;
-       }
-}
-
-/* Reduces the number of masks needed in the filer table to save entries
- * This is done by sorting the masks of a depended block. A depended block is
- * identified by gluing ANDs or CLE. The sorting order toggles after every
- * block. Of course entries in scope of a mask must change their location with
- * it.
- */
-static int gfar_optimize_filer_masks(struct filer_table *tab)
-{
-       struct filer_table *temp_table;
-       struct gfar_mask_entry *mask_table;
-
-       u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
-       s32 ret = 0;
-
-       /* We need a copy of the filer table because
-        * we want to change its order
-        */
-       temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
-       if (temp_table == NULL)
-               return -ENOMEM;
-
-       mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
-                            sizeof(struct gfar_mask_entry), GFP_KERNEL);
-
-       if (mask_table == NULL) {
-               ret = -ENOMEM;
-               goto end;
-       }
-
-       and_index = gfar_generate_mask_table(mask_table, tab);
-
-       gfar_sort_mask_table(mask_table, temp_table, and_index);
-
-       /* Now we can copy the data from our duplicated filer table to
-        * the real one in the order the mask table says
-        */
-       for (i = 0; i < and_index; i++) {
-               size = mask_table[i].end - mask_table[i].start + 1;
-               gfar_copy_filer_entries(&(tab->fe[j]),
-                               &(temp_table->fe[mask_table[i].start]), size);
-               j += size;
-       }
-
-       /* And finally we just have to check for duplicated masks and drop the
-        * second ones
-        */
-       for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
-               if (tab->fe[i].ctrl == 0x80) {
-                       previous_mask = i++;
-                       break;
-               }
-       }
-       for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
-               if (tab->fe[i].ctrl == 0x80) {
-                       if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
-                               /* Two identical ones found!
-                                * So drop the second one!
-                                */
-                               gfar_trim_filer_entries(i, i, tab);
-                       } else
-                               /* Not identical! */
-                               previous_mask = i;
-               }
-       }
-
-       kfree(mask_table);
-end:   kfree(temp_table);
-       return ret;
-}
-
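
The whole filer optimizer (clustering plus mask sorting) is deleted in one sweep; gfar_swap at the top of this section existed only as the custom swap callback handed to lib/sort by gfar_sort_mask_table, so it goes with it. For reference, a minimal sketch of the lib/sort API involved, with illustrative names rather than driver code:

#include <linux/sort.h>
#include <linux/types.h>

struct entry {
	u32 key;
	u32 val;
};

static int entry_cmp(const void *a, const void *b)
{
	const struct entry *ea = a, *eb = b;

	/* ascending; avoids u32 subtraction wrap-around */
	return (ea->key > eb->key) - (ea->key < eb->key);
}

static void sort_entries(struct entry *e, size_t n)
{
	/* a NULL swap callback selects lib/sort's generic swap */
	sort(e, n, sizeof(*e), entry_cmp, NULL);
}
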
 /* Write the bit-pattern from software's buffer to hardware registers */
 static int gfar_write_filer_table(struct gfar_private *priv,
                                  struct filer_table *tab)
@@ -1583,11 +1258,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
                return -EBUSY;
 
        /* Fill regular entries */
-       for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
-            i++)
+       for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
                gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
        /* Fill the rest with fall-throughs */
-       for (; i < MAX_FILER_IDX - 1; i++)
+       for (; i < MAX_FILER_IDX; i++)
                gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
        /* Last entry must be default accept
         * because that's what people expect
@@ -1621,7 +1295,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
 {
        struct ethtool_flow_spec_container *j;
        struct filer_table *tab;
-       s32 i = 0;
        s32 ret = 0;
 
        /* So index is set to zero, too! */
@@ -1646,17 +1319,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
                }
        }
 
-       i = tab->index;
-
-       /* Optimizations to save entries */
-       gfar_cluster_filer(tab);
-       gfar_optimize_filer_masks(tab);
-
-       pr_debug("\tSummary:\n"
-                "\tData on hardware: %d\n"
-                "\tCompression rate: %d%%\n",
-                tab->index, 100 - (100 * tab->index) / i);
-
        /* Write everything to hardware */
        ret = gfar_write_filer_table(priv, tab);
        if (ret == -EBUSY) {
@@ -1722,13 +1384,14 @@ static int gfar_add_cls(struct gfar_private *priv,
        }
 
 process:
+       priv->rx_list.count++;
        ret = gfar_process_filer_changes(priv);
        if (ret)
                goto clean_list;
-       priv->rx_list.count++;
        return ret;
 
 clean_list:
+       priv->rx_list.count--;
        list_del(&temp->list);
 clean_mem:
        kfree(temp);
index 982fdcdc795b4752a2fbe65e71ed76ee197ad5de..b5b2925103ec10c1e835429c2ab7d2efee093838 100644 (file)
@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
 
 static inline bool fm10k_page_is_reserved(struct page *page)
 {
-       return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+       return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
 static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
index 2f70a9b152bd1789349d9c4d995852e95be1e70d..830466c49987cfbf1ad25f6d9bc01e4f7f0d0454 100644 (file)
@@ -6566,7 +6566,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
 
 static inline bool igb_page_is_reserved(struct page *page)
 {
-       return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+       return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
index 9aa6104e34ea8e2bd93994e4d8291763014162b8..ae21e0b06c3ad40a54093c8966fcfa711f188cfb 100644 (file)
@@ -1832,7 +1832,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 
 static inline bool ixgbe_page_is_reserved(struct page *page)
 {
-       return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+       return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
 /**
index e71cdde9cb017aecab834d2f2d9c5d4821c3d42e..1d7b00b038a2ea8c3bdd9394ad3d4988ccee04c8 100644 (file)
@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
 
 static inline bool ixgbevf_page_is_reserved(struct page *page)
 {
-       return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+       return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
 /**
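
All four Intel Ethernet drivers in this stretch (fm10k, igb, ixgbe, ixgbevf) make the same substitution: page->pfmemalloc is no longer read directly and page_is_pfmemalloc() is used instead, since the flag's storage in struct page was reworked. A sketch of the recurring predicate, with an illustrative function name:

#include <linux/mm.h>
#include <linux/topology.h>

/* An RX page may only be recycled if it is local to this NUMA node and
 * was not handed out from the pfmemalloc emergency reserves.
 */
static bool rx_page_is_reserved(struct page *page)
{
	return page_to_nid(page) != numa_mem_id() || page_is_pfmemalloc(page);
}
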
index 3e8b1bfb1f2e316212bd9b60fa06522ca4dc68db..d9884fd15b453e2486177d58b7fc40bcd5aaf7cc 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/of_address.h>
 #include <linux/phy.h>
 #include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
 #include <uapi/linux/ppp_defs.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 
 /* Coalescing */
 #define MVPP2_TXDONE_COAL_PKTS_THRESH  15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
 #define MVPP2_RX_COAL_PKTS             32
 #define MVPP2_RX_COAL_USEC             100
 
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
        u64     tx_bytes;
 };
 
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+       struct hrtimer tx_done_timer;
+       bool timer_scheduled;
+       /* Tasklet for egress finalization */
+       struct tasklet_struct tx_done_tasklet;
+};
+
 struct mvpp2_port {
        u8 id;
 
@@ -679,6 +690,9 @@ struct mvpp2_port {
        u32 pending_cause_rx;
        struct napi_struct napi;
 
+       /* Per-CPU port control */
+       struct mvpp2_port_pcpu __percpu *pcpu;
+
        /* Flags */
        unsigned long flags;
 
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
        /* Array of transmitted skb */
        struct sk_buff **tx_skb;
 
+       /* Array of transmitted buffers' physical addresses */
+       dma_addr_t *tx_buffs;
+
        /* Index of last TX DMA descriptor that was inserted */
        int txq_put_index;
 
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
        /* Occupied buffers indicator */
        atomic_t in_use;
        int in_use_thresh;
-
-       spinlock_t lock;
 };
 
 struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
 }
 
 static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
-                             struct sk_buff *skb)
+                             struct sk_buff *skb,
+                             struct mvpp2_tx_desc *tx_desc)
 {
        txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+       if (skb)
+               txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
+                                                        tx_desc->buf_phys_addr;
        txq_pcpu->txq_put_index++;
        if (txq_pcpu->txq_put_index == txq_pcpu->size)
                txq_pcpu->txq_put_index = 0;
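
mvpp2 now records each buffer's DMA address in a per-CPU tx_buffs[] array at enqueue time, mirroring tx_skb[], so completion handling no longer has to read buf_phys_addr back out of a descriptor the hardware may already have reused. A minimal sketch of the idea with illustrative names:

#include <linux/skbuff.h>
#include <linux/types.h>

struct tx_slot {
	struct sk_buff *skb;	/* NULL for non-last fragments */
	dma_addr_t buf;		/* address to dma_unmap on completion */
};

struct tx_ring {
	struct tx_slot *slots;
	int size;
	int put;		/* producer index, wraps at size */
};

static void tx_ring_put(struct tx_ring *r, struct sk_buff *skb, dma_addr_t buf)
{
	r->slots[r->put].skb = skb;
	r->slots[r->put].buf = buf;
	if (++r->put == r->size)
		r->put = 0;
}
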
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
        bm_pool->pkt_size = 0;
        bm_pool->buf_num = 0;
        atomic_set(&bm_pool->in_use, 0);
-       spin_lock_init(&bm_pool->lock);
 
        return 0;
 }
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
                  int pkt_size)
 {
-       unsigned long flags = 0;
        struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
        int num;
 
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
                return NULL;
        }
 
-       spin_lock_irqsave(&new_pool->lock, flags);
-
        if (new_pool->type == MVPP2_BM_FREE)
                new_pool->type = type;
 
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
                if (num != pkts_num) {
                        WARN(1, "pool %d: %d of %d allocated\n",
                             new_pool->id, num, pkts_num);
-                       /* We need to undo the bufs_add() allocations */
-                       spin_unlock_irqrestore(&new_pool->lock, flags);
                        return NULL;
                }
        }
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
        mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
                                  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
 
-       spin_unlock_irqrestore(&new_pool->lock, flags);
-
        return new_pool;
 }
 
 /* Initialize pools for swf */
 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
 {
-       unsigned long flags = 0;
        int rxq;
 
        if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
                if (!port->pool_long)
                        return -ENOMEM;
 
-               spin_lock_irqsave(&port->pool_long->lock, flags);
                port->pool_long->port_map |= (1 << port->id);
-               spin_unlock_irqrestore(&port->pool_long->lock, flags);
 
                for (rxq = 0; rxq < rxq_number; rxq++)
                        mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
                if (!port->pool_short)
                        return -ENOMEM;
 
-               spin_lock_irqsave(&port->pool_short->lock, flags);
                port->pool_short->port_map |= (1 << port->id);
-               spin_unlock_irqrestore(&port->pool_short->lock, flags);
 
                for (rxq = 0; rxq < rxq_number; rxq++)
                        mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
 
        mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
                    (MVPP2_CAUSE_MISC_SUM_MASK |
-                    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
                     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
 }
 
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
        rxq->time_coal = usec;
 }
 
-/* Set threshold for TX_DONE pkts coalescing */
-static void mvpp2_tx_done_pkts_coal_set(void *arg)
-{
-       struct mvpp2_port *port = arg;
-       int queue;
-       u32 val;
-
-       for (queue = 0; queue < txq_number; queue++) {
-               struct mvpp2_tx_queue *txq = port->txqs[queue];
-
-               val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
-                      MVPP2_TRANSMITTED_THRESH_MASK;
-               mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-               mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
-       }
-}
-
 /* Free Tx queue skbuffs */
 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
                                struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
        int i;
 
        for (i = 0; i < num; i++) {
-               struct mvpp2_tx_desc *tx_desc = txq->descs +
-                                                       txq_pcpu->txq_get_index;
+               dma_addr_t buf_phys_addr =
+                                   txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
                struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
 
                mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
                if (!skb)
                        continue;
 
-               dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
-                                tx_desc->data_size, DMA_TO_DEVICE);
+               dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
+                                skb_headlen(skb), DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        }
 }
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
                                                        u32 cause)
 {
-       int queue = fls(cause >> 16) - 1;
+       int queue = fls(cause) - 1;
 
        return port->txqs[queue];
 }
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
                        netif_tx_wake_queue(nq);
 }
 
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
+{
+       struct mvpp2_tx_queue *txq;
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       unsigned int tx_todo = 0;
+
+       while (cause) {
+               txq = mvpp2_get_tx_queue(port, cause);
+               if (!txq)
+                       break;
+
+               txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+               if (txq_pcpu->count) {
+                       mvpp2_txq_done(port, txq, txq_pcpu);
+                       tx_todo += txq_pcpu->count;
+               }
+
+               cause &= ~(1 << txq->log_id);
+       }
+       return tx_todo;
+}
+
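
The new mvpp2_tx_done() walks the cause bitmap from the highest set bit down: fls(cause) - 1 yields the highest pending queue, which is serviced and then cleared. The companion change in mvpp2_get_tx_queue() drops the ">> 16" because callers now pass the TX bits already isolated. The loop pattern on its own, as a sketch:

#include <linux/bitops.h>
#include <linux/types.h>

static void service_pending(u32 cause)
{
	while (cause) {
		int queue = fls(cause) - 1;	/* highest set bit */

		/* ...reap TX completions for 'queue' here... */

		cause &= ~(1U << queue);	/* done, clear and continue */
	}
}
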
 /* Rx/Tx queue initialization/cleanup methods */
 
 /* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
                txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
                                           sizeof(*txq_pcpu->tx_skb),
                                           GFP_KERNEL);
-               if (!txq_pcpu->tx_skb) {
-                       dma_free_coherent(port->dev->dev.parent,
-                                         txq->size * MVPP2_DESC_ALIGNED_SIZE,
-                                         txq->descs, txq->descs_phys);
-                       return -ENOMEM;
-               }
+               if (!txq_pcpu->tx_skb)
+                       goto error;
+
+               txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
+                                            sizeof(dma_addr_t), GFP_KERNEL);
+               if (!txq_pcpu->tx_buffs)
+                       goto error;
 
                txq_pcpu->count = 0;
                txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
        }
 
        return 0;
+
+error:
+       for_each_present_cpu(cpu) {
+               txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+               kfree(txq_pcpu->tx_skb);
+               kfree(txq_pcpu->tx_buffs);
+       }
+
+       dma_free_coherent(port->dev->dev.parent,
+                         txq->size * MVPP2_DESC_ALIGNED_SIZE,
+                         txq->descs, txq->descs_phys);
+
+       return -ENOMEM;
 }
 
 /* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
        for_each_present_cpu(cpu) {
                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
                kfree(txq_pcpu->tx_skb);
+               kfree(txq_pcpu->tx_buffs);
        }
 
        if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
                        goto err_cleanup;
        }
 
-       on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
        on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
        return 0;
 
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
        }
 }
 
+static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+{
+       ktime_t interval;
+
+       if (!port_pcpu->timer_scheduled) {
+               port_pcpu->timer_scheduled = true;
+               interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+               hrtimer_start(&port_pcpu->tx_done_timer, interval,
+                             HRTIMER_MODE_REL_PINNED);
+       }
+}
+
+static void mvpp2_tx_proc_cb(unsigned long data)
+{
+       struct net_device *dev = (struct net_device *)data;
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+       unsigned int tx_todo, cause;
+
+       if (!netif_running(dev))
+               return;
+       port_pcpu->timer_scheduled = false;
+
+       /* Process all the Tx queues */
+       cause = (1 << txq_number) - 1;
+       tx_todo = mvpp2_tx_done(port, cause);
+
+       /* Set the timer in case not all the packets were processed */
+       if (tx_todo)
+               mvpp2_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+       struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+                                                        struct mvpp2_port_pcpu,
+                                                        tx_done_timer);
+
+       tasklet_schedule(&port_pcpu->tx_done_tasklet);
+
+       return HRTIMER_NORESTART;
+}
+
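
These three additions replace TX-done interrupt coalescing with deferred processing: a pinned hrtimer fires in hard-IRQ context, does nothing but schedule a tasklet, and the tasklet reaps completions, re-arming the timer if work remains. A distilled sketch of that pattern with placeholder names:

#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

struct deferred {
	struct hrtimer timer;
	struct tasklet_struct tasklet;
	bool scheduled;
};

static enum hrtimer_restart deferred_timer_cb(struct hrtimer *t)
{
	struct deferred *d = container_of(t, struct deferred, timer);

	/* hard-IRQ context: defer the real work to the tasklet */
	tasklet_schedule(&d->tasklet);
	return HRTIMER_NORESTART;
}

static void deferred_arm(struct deferred *d, unsigned long ns)
{
	if (!d->scheduled) {
		d->scheduled = true;
		hrtimer_start(&d->timer, ktime_set(0, ns),
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void deferred_init(struct deferred *d,
			  void (*fn)(unsigned long), unsigned long data)
{
	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	d->timer.function = deferred_timer_cb;
	d->scheduled = false;
	tasklet_init(&d->tasklet, fn, data);
}
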
 /* Main RX/TX processing routines */
 
 /* Display more error info */
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
                if (i == (skb_shinfo(skb)->nr_frags - 1)) {
                        /* Last descriptor */
                        tx_desc->command = MVPP2_TXD_L_DESC;
-                       mvpp2_txq_inc_put(txq_pcpu, skb);
+                       mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
                } else {
                        /* Descriptor in the middle: Not First, Not Last */
                        tx_desc->command = 0;
-                       mvpp2_txq_inc_put(txq_pcpu, NULL);
+                       mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
                }
        }
 
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
                /* First and Last descriptor */
                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
                tx_desc->command = tx_cmd;
-               mvpp2_txq_inc_put(txq_pcpu, skb);
+               mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
        } else {
                /* First but not Last */
                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
                tx_desc->command = tx_cmd;
-               mvpp2_txq_inc_put(txq_pcpu, NULL);
+               mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
 
                /* Continue with other skb fragments */
                if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ out:
                dev_kfree_skb_any(skb);
        }
 
+       /* Finalize TX processing */
+       if (txq_pcpu->count >= txq->done_pkts_coal)
+               mvpp2_txq_done(port, txq, txq_pcpu);
+
+       /* Set the timer in case not all frags were processed */
+       if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
+               struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+               mvpp2_timer_set(port_pcpu);
+       }
+
        return NETDEV_TX_OK;
 }
 
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
                netdev_err(dev, "tx fifo underrun error\n");
 }
 
-static void mvpp2_txq_done_percpu(void *arg)
+static int mvpp2_poll(struct napi_struct *napi, int budget)
 {
-       struct mvpp2_port *port = arg;
-       u32 cause_rx_tx, cause_tx, cause_misc;
+       u32 cause_rx_tx, cause_rx, cause_misc;
+       int rx_done = 0;
+       struct mvpp2_port *port = netdev_priv(napi->dev);
 
        /* Rx/Tx cause register
         *
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
         */
        cause_rx_tx = mvpp2_read(port->priv,
                                 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
-       cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+       cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
        cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
 
        if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
                            cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
        }
 
-       /* Release TX descriptors */
-       if (cause_tx) {
-               struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
-               struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
-
-               if (txq_pcpu->count)
-                       mvpp2_txq_done(port, txq, txq_pcpu);
-       }
-}
-
-static int mvpp2_poll(struct napi_struct *napi, int budget)
-{
-       u32 cause_rx_tx, cause_rx;
-       int rx_done = 0;
-       struct mvpp2_port *port = netdev_priv(napi->dev);
-
-       on_each_cpu(mvpp2_txq_done_percpu, port, 1);
-
-       cause_rx_tx = mvpp2_read(port->priv,
-                                MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
        cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
 
        /* Process RX packets */
@@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
 static int mvpp2_stop(struct net_device *dev)
 {
        struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_port_pcpu *port_pcpu;
+       int cpu;
 
        mvpp2_stop_dev(port);
        mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
        on_each_cpu(mvpp2_interrupts_mask, port, 1);
 
        free_irq(port->irq, port);
+       for_each_present_cpu(cpu) {
+               port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+               hrtimer_cancel(&port_pcpu->tx_done_timer);
+               port_pcpu->timer_scheduled = false;
+               tasklet_kill(&port_pcpu->tx_done_tasklet);
+       }
        mvpp2_cleanup_rxqs(port);
        mvpp2_cleanup_txqs(port);
 
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
                txq->done_pkts_coal = c->tx_max_coalesced_frames;
        }
 
-       on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
        return 0;
 }
 
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 {
        struct device_node *phy_node;
        struct mvpp2_port *port;
+       struct mvpp2_port_pcpu *port_pcpu;
        struct net_device *dev;
        struct resource *res;
        const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        int features;
        int phy_mode;
        int priv_common_regs_num = 2;
-       int err, i;
+       int err, i, cpu;
 
        dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
                                 rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        }
        mvpp2_port_power_up(port);
 
+       port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+       if (!port->pcpu) {
+               err = -ENOMEM;
+               goto err_free_txq_pcpu;
+       }
+
+       for_each_present_cpu(cpu) {
+               port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+               hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+                            HRTIMER_MODE_REL_PINNED);
+               port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+               port_pcpu->timer_scheduled = false;
+
+               tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
+                            (unsigned long)dev);
+       }
+
        netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
        features = NETIF_F_SG | NETIF_F_IP_CSUM;
        dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        err = register_netdev(dev);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register netdev\n");
-               goto err_free_txq_pcpu;
+               goto err_free_port_pcpu;
        }
        netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
 
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        priv->port_list[id] = port;
        return 0;
 
+err_free_port_pcpu:
+       free_percpu(port->pcpu);
 err_free_txq_pcpu:
        for (i = 0; i < txq_number; i++)
                free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
        int i;
 
        unregister_netdev(port->dev);
+       free_percpu(port->pcpu);
        free_percpu(port->stats);
        for (i = 0; i < txq_number; i++)
                free_percpu(port->txqs[i]->pcpu);
index afad529838de748efc9f9253c6fde42abbe954a3..06e3e1e54c35d6078321d792d1e0e537ec95a207 100644 (file)
@@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        /* disable cmdif checksum */
        MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
 
+       MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
+
        err = set_caps(dev, set_ctx, set_sz);
 
 query_ex:
index f78909a00f150edfd76065b2b993d9660edaebdd..09d2e16fd6b00bfdd0c20fc10c64abd03c29a935 100644 (file)
@@ -952,9 +952,8 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
 
        sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev,
                tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE);
-       err = dma_mapping_error(adapter->dev,
-               sg_dma_address(&tx_ctl->sg));
-       if (err) {
+       if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) {
+               err = -ENOMEM;
                sg_dma_address(&tx_ctl->sg) = 0;
                goto err;
        }
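
The ks8842 fix stops storing dma_mapping_error()'s return value as an errno (it is a predicate whose non-zero value is not guaranteed to be a meaningful error code) and reports -ENOMEM itself; the netcp hunk further down fixes the related anti-pattern of testing the handle against zero. The canonical pattern, as a small sketch:

#include <linux/dma-mapping.h>

static int map_tx_buf(struct device *dev, void *buf, size_t len,
		      dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* never test 'addr' against 0: zero can be a valid DMA address */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}
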
index 3df51faf18ae3ba8ce6bb7f49e6f51e4da1be738..f790f61ea78a2b4f1008da82eca29132ff5bdcc0 100644 (file)
@@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_46:
        case RTL_GIGA_MAC_VER_47:
        case RTL_GIGA_MAC_VER_48:
+               RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+               break;
        case RTL_GIGA_MAC_VER_49:
        case RTL_GIGA_MAC_VER_50:
        case RTL_GIGA_MAC_VER_51:
-               RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
+               RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
                break;
        default:
                RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
index 2d8578cade03790782af7e97a1f59f9848301ae6..2e7f9a2834be320eef4a7bed44d8c787f72f3e65 100644 (file)
@@ -4821,6 +4821,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
                rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
                                   ROCKER_OP_FLAG_REMOVE);
                unregister_netdev(rocker_port->dev);
+               free_netdev(rocker_port->dev);
        }
        kfree(rocker->ports);
 }
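
unregister_netdev() detaches a device from the stack but does not release it; each rocker port's net_device must still be handed to free_netdev(), otherwise every removal leaks the structure. The lifecycle in miniature, as a sketch:

#include <linux/netdevice.h>

static void teardown_port(struct net_device *dev)
{
	unregister_netdev(dev);	/* stop and detach from the stack */
	free_netdev(dev);	/* then actually release the memory */
}
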
index 7e3129e7f143a9990c89780a8e6638f8182e4892..f0e4bb4e3ec59f9695957dee9dec55a02d450da8 100644 (file)
@@ -42,7 +42,7 @@
 #define NSS_COMMON_CLK_DIV_MASK                        0x7f
 
 #define NSS_COMMON_CLK_SRC_CTRL                        0x14
-#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)      (1 << x)
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x)      (x)
 /* Mode is coded on 1 bit but is different depending on the MAC ID:
  * MAC0: QSGMII=0 RGMII=1
  * MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -291,7 +291,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
 
        /* Configure the clock src according to the mode */
        regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
-       val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+       val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
        switch (gmac->phy_mode) {
        case PHY_INTERFACE_MODE_RGMII:
                val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
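
This hunk changes NSS_COMMON_CLK_SRC_CTRL_OFFSET from a ready-made mask (1 << x) into a plain bit offset, with the call site now building the mask explicitly. The truncated context suggests the macro is also used as a shift amount just below, where the old definition would shift by a mask rather than an index. An illustrative reconstruction with simplified names, not driver code:

#define OFFSET_OLD(x)	(1 << (x))	/* a mask posing as an offset */
#define OFFSET_NEW(x)	(x)		/* an actual bit offset */

static unsigned int select_mode(unsigned int val, int id, unsigned int mode)
{
	val &= ~(1U << OFFSET_NEW(id));	/* clear the per-MAC mode bit */
	val |= mode << OFFSET_NEW(id);	/* shift by id, not by (1 << id) */
	return val;
}
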
index a8a730641bbb14e723f841b2b3c13454b53a5491..bb1bb72121c0474b8c1898540a28d21264479d6a 100644 (file)
@@ -85,7 +85,6 @@ struct netcp_intf {
        struct list_head        rxhook_list_head;
        unsigned int            rx_queue_id;
        void                    *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
-       u32                     rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
        struct napi_struct      rx_napi;
        struct napi_struct      tx_napi;
 
index 9749dfd78c434992f4041effc3c9a7314bdace3d..4755838c6137b462d6de262b38beadacdf099e98 100644 (file)
@@ -34,6 +34,7 @@
 #define NETCP_SOP_OFFSET       (NET_IP_ALIGN + NET_SKB_PAD)
 #define NETCP_NAPI_WEIGHT      64
 #define NETCP_TX_TIMEOUT       (5 * HZ)
+#define NETCP_PACKET_SIZE      (ETH_FRAME_LEN + ETH_FCS_LEN)
 #define NETCP_MIN_PACKET_SIZE  ETH_ZLEN
 #define NETCP_MAX_MCAST_ADDR   16
 
@@ -804,30 +805,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
        if (likely(fdq == 0)) {
                unsigned int primary_buf_len;
                /* Allocate a primary receive queue entry */
-               buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
+               buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
                primary_buf_len = SKB_DATA_ALIGN(buf_len) +
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
-               if (primary_buf_len <= PAGE_SIZE) {
-                       bufptr = netdev_alloc_frag(primary_buf_len);
-                       pad[1] = primary_buf_len;
-               } else {
-                       bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
-                                        GFP_DMA32 | __GFP_COLD);
-                       pad[1] = 0;
-               }
+               bufptr = netdev_alloc_frag(primary_buf_len);
+               pad[1] = primary_buf_len;
 
                if (unlikely(!bufptr)) {
-                       dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
+                       dev_warn_ratelimited(netcp->ndev_dev,
+                                            "Primary RX buffer alloc failed\n");
                        goto fail;
                }
                dma = dma_map_single(netcp->dev, bufptr, buf_len,
                                     DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(netcp->dev, dma)))
+                       goto fail;
+
                pad[0] = (u32)bufptr;
 
        } else {
                /* Allocate a secondary receive queue entry */
-               page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
+               page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
                if (unlikely(!page)) {
                        dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
                        goto fail;
@@ -1010,7 +1009,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
 
        /* Map the linear buffer */
        dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
-       if (unlikely(!dma_addr)) {
+       if (unlikely(dma_mapping_error(dev, dma_addr))) {
                dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
                return NULL;
        }
@@ -1546,8 +1545,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
        knav_queue_disable_notify(netcp->rx_queue);
 
        /* open Rx FDQs */
-       for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
-            netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
+       for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
+            ++i) {
                snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
                netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
                if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1941,14 +1940,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
                netcp->rx_queue_depths[0] = 128;
        }
 
-       ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
-                                        netcp->rx_buffer_sizes,
-                                        KNAV_DMA_FDQ_PER_CHAN);
-       if (ret) {
-               dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
-               netcp->rx_buffer_sizes[0] = 1536;
-       }
-
        ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
        if (ret < 0) {
                dev_err(dev, "missing \"rx-pool\" parameter\n");
index 2ffbf13471d09ad4c27d8c70fbb4dd3145befa75..216bfd350169a9da723035876d152e89b17c8432 100644 (file)
@@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty)
        dev->type = ARPHRD_AX25;
 
        /* Perform the low-level AX25 initialization. */
-       if ((err = ax_open(ax->dev))) {
+       err = ax_open(ax->dev);
+       if (err)
                goto out_free_netdev;
-       }
 
-       if (register_netdev(dev))
+       err = register_netdev(dev);
+       if (err)
                goto out_free_buffers;
 
        /* after register_netdev() - because else printk smashes the kernel */
index 3cc316cb7e6be792b06dfc2c520eae9809f1008b..d8757bf9ad755ed6a3114d9d0a9664e59618744a 100644 (file)
@@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
 
        netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
 
+       if (len < 0) {
+               ndev->stats.rx_errors++;
+               ndev->stats.rx_length_errors++;
+               goto enqueue_again;
+       }
+
        skb_put(skb, len);
        skb->protocol = eth_type_trans(skb, ndev);
        skb->ip_summed = CHECKSUM_NONE;
@@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
                return;
        }
 
+enqueue_again:
        rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
        if (rc) {
                dev_kfree_skb(skb);
@@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev)
 
                rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
                                              ndev->mtu + ETH_HLEN);
-               if (rc == -EINVAL) {
+               if (rc) {
                        dev_kfree_skb(skb);
                        goto err;
                }
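
ntb_transport can report a negative payload length on error; skb_put() takes an unsigned count, so passing the raw value through would wrap to a huge size and corrupt the skb. The new branch counts the error and re-posts the buffer instead. A minimal sketch of the guard (the error code is illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int rx_consume(struct net_device *ndev, struct sk_buff *skb, int len)
{
	if (len < 0) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_length_errors++;
		return -EIO;	/* caller re-enqueues the buffer */
	}
	skb_put(skb, len);
	return 0;
}
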
index b2197b506acbe86f3540d5ae1d8334129c2bbe57..1e1fbb049ec63b79c1d8255c517364f739a8d38b 100644 (file)
@@ -811,6 +811,7 @@ void phy_state_machine(struct work_struct *work)
        bool needs_aneg = false, do_suspend = false;
        enum phy_state old_state;
        int err = 0;
+       int old_link;
 
        mutex_lock(&phydev->lock);
 
@@ -896,11 +897,18 @@ void phy_state_machine(struct work_struct *work)
                phydev->adjust_link(phydev->attached_dev);
                break;
        case PHY_RUNNING:
-               /* Only register a CHANGE if we are
-                * polling or ignoring interrupts
+               /* Only register a CHANGE if we are polling or ignoring
+                * interrupts and link changed since latest checking.
                 */
-               if (!phy_interrupt_is_valid(phydev))
-                       phydev->state = PHY_CHANGELINK;
+               if (!phy_interrupt_is_valid(phydev)) {
+                       old_link = phydev->link;
+                       err = phy_read_status(phydev);
+                       if (err)
+                               break;
+
+                       if (old_link != phydev->link)
+                               phydev->state = PHY_CHANGELINK;
+               }
                break;
        case PHY_CHANGELINK:
                err = phy_read_status(phydev);
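
In PHY_RUNNING with polling, the state machine used to bounce through PHY_CHANGELINK on every poll; it now reads the status first and only transitions when the link bit actually changed, sparing needless adjust_link() calls. The edge-detect idiom on its own, as a sketch:

#include <linux/phy.h>

static int poll_link_changed(struct phy_device *phydev, bool *changed)
{
	int old_link = phydev->link;
	int err = phy_read_status(phydev);	/* refreshes phydev->link */

	if (err)
		return err;

	*changed = old_link != phydev->link;
	return 0;
}
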
index c0f6479e19d48e51fd76c3bbce161ffdd7846842..70b08958763a129fff47ad00a1db130c1334f254 100644 (file)
@@ -91,19 +91,18 @@ static int lan911x_config_init(struct phy_device *phydev)
 }
 
 /*
- * The LAN8710/LAN8720 requires a minimum of 2 link pulses within 64ms of each
- * other in order to set the ENERGYON bit and exit EDPD mode.  If a link partner
- * does send the pulses within this interval, the PHY will remained powered
- * down.
- *
- * This workaround will manually toggle the PHY on/off upon calls to read_status
- * in order to generate link test pulses if the link is down.  If a link partner
- * is present, it will respond to the pulses, which will cause the ENERGYON bit
- * to be set and will cause the EDPD mode to be exited.
+ * The LAN87xx can rarely fail to set the ENERGYON bit when an Ethernet cable
+ * is plugged in while the PHY is in Energy Detect Power-Down (EDPD) mode,
+ * which makes cable-plug detection unreliable.
+ * This workaround disables EDPD mode and waits for a response to link test
+ * pulses to detect the presence of a plugged-in Ethernet cable.
+ * EDPD mode is re-enabled at the end of the procedure to save approximately
+ * 220 mW of power while the cable is unplugged.
  */
 static int lan87xx_read_status(struct phy_device *phydev)
 {
        int err = genphy_read_status(phydev);
+       int i;
 
        if (!phydev->link) {
                /* Disable EDPD to wake up PHY */
@@ -116,8 +115,16 @@ static int lan87xx_read_status(struct phy_device *phydev)
                if (rc < 0)
                        return rc;
 
-               /* Sleep 64 ms to allow ~5 link test pulses to be sent */
-               msleep(64);
+               /* Wait max 640 ms to detect energy */
+               for (i = 0; i < 64; i++) {
+                       /* Sleep to allow link test pulses to be sent */
+                       msleep(10);
+                       rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+                       if (rc < 0)
+                               return rc;
+                       if (rc & MII_LAN83C185_ENERGYON)
+                               break;
+               }
 
                /* Re-enable EDPD */
                rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
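
The fixed 64 ms sleep becomes a bounded poll: up to 64 rounds of 10 ms, exiting as soon as ENERGYON appears, so detection reacts quickly while the worst case stays capped at 640 ms. The generic shape of such a poll loop (the timeout return is illustrative; the driver simply falls through):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/phy.h>

static int wait_for_bit(struct phy_device *phydev, int reg, u16 bit)
{
	int i, rc;

	for (i = 0; i < 64; i++) {
		msleep(10);		/* let link test pulses go out */
		rc = phy_read(phydev, reg);
		if (rc < 0)
			return rc;	/* MDIO read failed */
		if (rc & bit)
			return 0;	/* energy detected */
	}
	return -ETIMEDOUT;
}
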
@@ -191,7 +198,7 @@ static struct phy_driver smsc_phy_driver[] = {
 
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
+       .read_status    = lan87xx_read_status,
        .config_init    = smsc_phy_config_init,
        .soft_reset     = smsc_phy_reset,
 
index 9d15566521a719b525a28a009f1d999c91a00da2..fa8f5046afe90627242f6d2d523178da653c9427 100644 (file)
@@ -269,9 +269,9 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
 static void ppp_ccp_closed(struct ppp *ppp);
 static struct compressor *find_compressor(int type);
 static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit, int *retp);
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+                                       struct file *file, int *retp);
 static void init_ppp_file(struct ppp_file *pf, int kind);
-static void ppp_shutdown_interface(struct ppp *ppp);
 static void ppp_destroy_interface(struct ppp *ppp);
 static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
 static struct channel *ppp_find_channel(struct ppp_net *pn, int unit);
@@ -392,8 +392,10 @@ static int ppp_release(struct inode *unused, struct file *file)
                file->private_data = NULL;
                if (pf->kind == INTERFACE) {
                        ppp = PF_TO_PPP(pf);
+                       rtnl_lock();
                        if (file == ppp->owner)
-                               ppp_shutdown_interface(ppp);
+                               unregister_netdevice(ppp->dev);
+                       rtnl_unlock();
                }
                if (atomic_dec_and_test(&pf->refcnt)) {
                        switch (pf->kind) {
@@ -593,8 +595,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                mutex_lock(&ppp_mutex);
                if (pf->kind == INTERFACE) {
                        ppp = PF_TO_PPP(pf);
+                       rtnl_lock();
                        if (file == ppp->owner)
-                               ppp_shutdown_interface(ppp);
+                               unregister_netdevice(ppp->dev);
+                       rtnl_unlock();
                }
                if (atomic_long_read(&file->f_count) < 2) {
                        ppp_release(NULL, file);
@@ -838,11 +842,10 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
                /* Create a new ppp unit */
                if (get_user(unit, p))
                        break;
-               ppp = ppp_create_interface(net, unit, &err);
+               ppp = ppp_create_interface(net, unit, file, &err);
                if (!ppp)
                        break;
                file->private_data = &ppp->file;
-               ppp->owner = file;
                err = -EFAULT;
                if (put_user(ppp->file.index, p))
                        break;
@@ -916,6 +919,16 @@ static __net_init int ppp_init_net(struct net *net)
 static __net_exit void ppp_exit_net(struct net *net)
 {
        struct ppp_net *pn = net_generic(net, ppp_net_id);
+       struct ppp *ppp;
+       LIST_HEAD(list);
+       int id;
+
+       rtnl_lock();
+       idr_for_each_entry(&pn->units_idr, ppp, id)
+               unregister_netdevice_queue(ppp->dev, &list);
+
+       unregister_netdevice_many(&list);
+       rtnl_unlock();
 
        idr_destroy(&pn->units_idr);
 }
@@ -1088,8 +1101,28 @@ static int ppp_dev_init(struct net_device *dev)
        return 0;
 }
 
+static void ppp_dev_uninit(struct net_device *dev)
+{
+       struct ppp *ppp = netdev_priv(dev);
+       struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+
+       ppp_lock(ppp);
+       ppp->closing = 1;
+       ppp_unlock(ppp);
+
+       mutex_lock(&pn->all_ppp_mutex);
+       unit_put(&pn->units_idr, ppp->file.index);
+       mutex_unlock(&pn->all_ppp_mutex);
+
+       ppp->owner = NULL;
+
+       ppp->file.dead = 1;
+       wake_up_interruptible(&ppp->file.rwait);
+}
+
 static const struct net_device_ops ppp_netdev_ops = {
        .ndo_init        = ppp_dev_init,
+       .ndo_uninit      = ppp_dev_uninit,
        .ndo_start_xmit  = ppp_start_xmit,
        .ndo_do_ioctl    = ppp_net_ioctl,
        .ndo_get_stats64 = ppp_get_stats64,
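
Shutdown logic moves out of the soon-to-be-removed ppp_shutdown_interface() into .ndo_uninit, which the networking core calls under RTNL on every unregister path. That lets ppp_release() and ppp_ioctl() simply call unregister_netdevice(), and lets ppp_exit_net() above batch devices through unregister_netdevice_queue()/unregister_netdevice_many(). The split in miniature, as a hedged sketch:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void my_uninit(struct net_device *dev)
{
	/* per-device cleanup: release the unit number, mark the
	 * private struct dead, wake up blocked readers, etc.
	 */
}

static const struct net_device_ops my_ops = {
	.ndo_uninit = my_uninit,
};

static void owner_close(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);	/* core invokes ->ndo_uninit */
	rtnl_unlock();
}
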
@@ -2667,8 +2700,8 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
  * or if there is already a unit with the requested number.
  * unit == -1 means allocate a new number.
  */
-static struct ppp *
-ppp_create_interface(struct net *net, int unit, int *retp)
+static struct ppp *ppp_create_interface(struct net *net, int unit,
+                                       struct file *file, int *retp)
 {
        struct ppp *ppp;
        struct ppp_net *pn;
@@ -2688,6 +2721,7 @@ ppp_create_interface(struct net *net, int unit, int *retp)
        ppp->mru = PPP_MRU;
        init_ppp_file(&ppp->file, INTERFACE);
        ppp->file.hdrlen = PPP_HDRLEN - 2;      /* don't count proto bytes */
+       ppp->owner = file;
        for (i = 0; i < NUM_NP; ++i)
                ppp->npmode[i] = NPMODE_PASS;
        INIT_LIST_HEAD(&ppp->channels);
@@ -2775,34 +2809,6 @@ init_ppp_file(struct ppp_file *pf, int kind)
        init_waitqueue_head(&pf->rwait);
 }
 
-/*
- * Take down a ppp interface unit - called when the owning file
- * (the one that created the unit) is closed or detached.
- */
-static void ppp_shutdown_interface(struct ppp *ppp)
-{
-       struct ppp_net *pn;
-
-       pn = ppp_pernet(ppp->ppp_net);
-       mutex_lock(&pn->all_ppp_mutex);
-
-       /* This will call dev_close() for us. */
-       ppp_lock(ppp);
-       if (!ppp->closing) {
-               ppp->closing = 1;
-               ppp_unlock(ppp);
-               unregister_netdev(ppp->dev);
-               unit_put(&pn->units_idr, ppp->file.index);
-       } else
-               ppp_unlock(ppp);
-
-       ppp->file.dead = 1;
-       ppp->owner = NULL;
-       wake_up_interruptible(&ppp->file.rwait);
-
-       mutex_unlock(&pn->all_ppp_mutex);
-}
-
 /*
  * Free the memory used by a ppp unit.  This is only called once
  * there are no channels connected to the unit and no file structs
index 9d43460ce3c71f0b54c69b84fa5a0ec8b7341d5f..64a60afbe50cc4ca0ff12b65ad6331a5fcc5e3a7 100644 (file)
@@ -785,6 +785,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a8, 8)},    /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x03f0, 0x581d, 4)},    /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 
        /* 4. Gobi 1000 devices */
index 7fbca37a1adffe5d46d3603e9fd44d4dbd16d331..237f8e5e493ddaae958684e8ed411c6f7f2363d6 100644 (file)
@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
        /* Do we support "hardware" checksums? */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
-               dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+               dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
                if (csum)
-                       dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+                       dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
 
                if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
                        dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
index 7193b7304fdd3ed4b69c0125732d4a024d4a4b36..848ea6a399f236b14cc5d9a79dd38038e0331aec 100644 (file)
@@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma)
                chan->netdev->base_addr = chan->cosa->datareg;
                chan->netdev->irq = chan->cosa->irq;
                chan->netdev->dma = chan->cosa->dma;
-               if (register_hdlc_device(chan->netdev)) {
+               err = register_hdlc_device(chan->netdev);
+               if (err) {
                        netdev_warn(chan->netdev,
                                    "register_hdlc_device() failed\n");
                        free_netdev(chan->netdev);
index 25d1cbd34306e03ea4827e09509c345d11b5a1df..b2f0d245bcf3a0e71fb96797c47f71ad0ca736db 100644 (file)
@@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
                switch (phy->rev) {
                case 6:
                case 5:
-                       if (sprom->fem.ghz5.extpa_gain == 3)
+                       if (sprom->fem.ghz2.extpa_gain == 3)
                                return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
                        /* fall through */
                case 4:
index 5000bfcded617f32ca177ae7a0711f13a3ce65d3..5514ad6d4e54373d2a48564ec790489dfe1808dc 100644 (file)
@@ -1023,7 +1023,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
        cmd->scan_priority =
                iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
 
-       if (iwl_mvm_scan_total_iterations(params) == 0)
+       if (iwl_mvm_scan_total_iterations(params) == 1)
                cmd->ooc_priority =
                        iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
        else
index 6203c4ad9bba5d8ce3bb2f46f832c3bb4ebe85fc..9e144e71da0b5980264702a6210684cfa34edab5 100644 (file)
@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
                if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
                        iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                                          APMG_PCIDEV_STT_VAL_WAKE_ME);
-               else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+               else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+                       iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                                   CSR_RESET_LINK_PWR_MGMT_DISABLED);
                        iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                                    CSR_HW_IF_CONFIG_REG_PREPARE |
                                    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+                       mdelay(1);
+                       iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                                     CSR_RESET_LINK_PWR_MGMT_DISABLED);
+               }
                mdelay(5);
        }
 
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
        if (ret >= 0)
                return 0;
 
+       iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                   CSR_RESET_LINK_PWR_MGMT_DISABLED);
+       msleep(1);
+
        for (iter = 0; iter < 10; iter++) {
                /* If HW is not ready, prepare the conditions to check again */
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
                do {
                        ret = iwl_pcie_set_hw_ready(trans);
-                       if (ret >= 0)
-                               return 0;
+                       if (ret >= 0) {
+                               ret = 0;
+                               goto out;
+                       }
 
                        usleep_range(200, 1000);
                        t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
 
        IWL_ERR(trans, "Couldn't prepare the card\n");
 
+out:
+       iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+                     CSR_RESET_LINK_PWR_MGMT_DISABLED);
+
        return ret;
 }
 
index 2b86c2135de36f627b397add88628bc47aa37271..607acb53c847558793d47a528d0830f9c79c8cbf 100644 (file)
@@ -1875,8 +1875,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
        /* start timer if queue currently empty */
        if (q->read_ptr == q->write_ptr) {
-               if (txq->wd_timeout)
-                       mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+               if (txq->wd_timeout) {
+                       /*
+                        * If the TXQ is active, arm the timer now; if not,
+                        * store the timeout in frozen_expiry_remainder so
+                        * that the timer will be armed with the right value
+                        * when the station wakes up.
+                        */
+                       if (!txq->frozen)
+                               mod_timer(&txq->stuck_timer,
+                                         jiffies + txq->wd_timeout);
+                       else
+                               txq->frozen_expiry_remainder = txq->wd_timeout;
+               }
                IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
                iwl_trans_pcie_ref(trans);
        }
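
The comment added here splits timer arming into two cases: an active queue arms the stuck-timer immediately, while a frozen (power-save) queue parks the timeout in frozen_expiry_remainder, presumably to be re-armed when the queue thaws. A sketch of both halves of that pattern, with a hypothetical struct my_txq carrying the fields used above:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_txq {			/* hypothetical, fields as in the hunk */
	struct timer_list stuck_timer;
	unsigned long wd_timeout;
	unsigned long frozen_expiry_remainder;
	bool frozen;
};

/* Arming side: mirror of the hunk above (sketch). */
static void txq_arm_stuck_timer(struct my_txq *q)
{
	if (!q->wd_timeout)
		return;
	if (!q->frozen)
		mod_timer(&q->stuck_timer, jiffies + q->wd_timeout);
	else
		q->frozen_expiry_remainder = q->wd_timeout;
}

/* Thaw side (assumption: roughly what the driver does when the station
 * wakes and the queue is unfrozen). */
static void txq_thaw(struct my_txq *q)
{
	q->frozen = false;
	if (q->frozen_expiry_remainder) {
		mod_timer(&q->stuck_timer,
			  jiffies + q->frozen_expiry_remainder);
		q->frozen_expiry_remainder = 0;
	}
}
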
index b6cc9ff47fc2e59b3cc92fe4002f05e59d981e56..1c6788aecc62658fe2cc7c4961415ef4715c8dde 100644 (file)
@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                (struct rsi_91x_sdiodev *)adapter->rsi_dev;
        u32 len;
        u32 num_blocks;
+       const u8 *fw;
        const struct firmware *fw_entry = NULL;
        u32 block_size = dev->tx_blk_size;
        int status = 0;
@@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                return status;
        }
 
+       /* Copy firmware into DMA-accessible memory */
+       fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+       if (!fw)
+               return -ENOMEM;
        len = fw_entry->size;
 
        if (len % 4)
@@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
        rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
        rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 
-       status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
+       status = rsi_copy_to_card(common, fw, len, num_blocks);
+       kfree(fw);
        release_firmware(fw_entry);
        return status;
 }
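
The new kmemdup() exists because fw_entry->data from request_firmware() is const and not guaranteed to live in DMA-safe memory; duplicating it yields kmalloc'ed (physically contiguous) memory that the transfer path can safely use, and the copy is freed as soon as the transfer completes. A condensed sketch of the copy-then-free shape, with send_to_card() as an illustrative stand-in for rsi_copy_to_card():

#include <linux/firmware.h>
#include <linux/slab.h>

static int load_fw_dma_safe(struct device *dev, const char *name)
{
	const struct firmware *fw_entry;
	u8 *fw;
	int status;

	status = request_firmware(&fw_entry, name, dev);
	if (status)
		return status;

	/* fw_entry->data is const and may not be DMA-safe; duplicate it
	 * into kmalloc'ed memory before the hardware touches it. */
	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
	if (!fw) {
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	status = send_to_card(dev, fw, fw_entry->size);

	kfree(fw);
	release_firmware(fw_entry);
	return status;
}
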
index 1106ce76707e1095fd523c5c541fa3740e9c9496..30c2cf7fa93b0c6015d22236c3a9189b462cb064 100644 (file)
@@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
                return status;
        }
 
+       /* Copy firmware into DMA-accessible memory */
        fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+       if (!fw)
+               return -ENOMEM;
        len = fw_entry->size;
 
        if (len % 4)
@@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
        rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
 
        status = rsi_copy_to_card(common, fw, len, num_blocks);
+       kfree(fw);
        release_firmware(fw_entry);
        return status;
 }
index 3b3a88b53b119909112a806ee71ab4d4bfa67a79..585d0883c7e58760eed503de64ced513c8331c82 100644 (file)
@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+       struct rtl_tcb_desc tcb_desc;
 
-       if (skb)
-               rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
+       if (skb) {
+               memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
+               rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
+       }
 }
 
 static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
index 1017f02d7bf7520868b25330493822e08f7826a0..7bf88d9dcdc3fc4732170c121ef78d5a85ca3fa9 100644 (file)
@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
 module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
 module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
                   bool, 0444);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
index 1a83e190fc15e4158b5e441cc267ad149d465abe..28577a31549d1569032d63457464fe11fdf44d32 100644 (file)
@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
 {
        atomic_dec(&queue->inflight_packets);
+
+       /* Wake the dealloc thread _after_ decrementing inflight_packets so
+        * that if kthread_stop() has already been called, the dealloc thread
+        * does not wait forever with nothing to wake it.
+        */
+       wake_up(&queue->dealloc_wq);
 }
 
 int xenvif_schedulable(struct xenvif *vif)
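
The ordering the new comment describes only matters because of the waiter on the other side: the dealloc kthread sleeps until either kthread_stop() has been called or inflight_packets drains to zero, so the producer must decrement first and wake second, or the final wake-up can be lost. A sketch of the assumed waiter/completer pair:

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/atomic.h>

/* Waiter side (assumed shape of the dealloc kthread). */
static int dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(queue->dealloc_wq,
				kthread_should_stop() ||
				atomic_read(&queue->inflight_packets) == 0);
		/* ... release completed slots ... */
	}
	return 0;
}

/* Completion side: decrement FIRST, then wake, so the waiter can never
 * re-check the condition after the last wake-up and still see a stale
 * non-zero count. */
static void packet_done(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);
	wake_up(&queue->dealloc_wq);
}
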
index 7d50711476fe1e88debca95beb790d770261f036..3f44b522b8311a2c64eba48e6a9b7217ea0cb3a7 100644 (file)
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
                                                        struct sk_buff *skb,
                                                        struct xen_netif_tx_request *txp,
-                                                       struct gnttab_map_grant_ref *gop)
+                                                       struct gnttab_map_grant_ref *gop,
+                                                       unsigned int frag_overflow,
+                                                       struct sk_buff *nskb)
 {
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        skb_frag_t *frags = shinfo->frags;
        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
        int start;
        pending_ring_idx_t index;
-       unsigned int nr_slots, frag_overflow = 0;
+       unsigned int nr_slots;
 
-       /* At this point shinfo->nr_frags is in fact the number of
-        * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
-        */
-       if (shinfo->nr_frags > MAX_SKB_FRAGS) {
-               frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
-               BUG_ON(frag_overflow > MAX_SKB_FRAGS);
-               shinfo->nr_frags = MAX_SKB_FRAGS;
-       }
        nr_slots = shinfo->nr_frags;
 
        /* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
        }
 
        if (frag_overflow) {
-               struct sk_buff *nskb = xenvif_alloc_skb(0);
-               if (unlikely(nskb == NULL)) {
-                       if (net_ratelimit())
-                               netdev_err(queue->vif->dev,
-                                          "Can't allocate the frag_list skb.\n");
-                       return NULL;
-               }
 
                shinfo = skb_shinfo(nskb);
                frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                                     unsigned *copy_ops,
                                     unsigned *map_ops)
 {
-       struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
-       struct sk_buff *skb;
+       struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
+       struct sk_buff *skb, *nskb;
        int ret;
+       unsigned int frag_overflow;
 
        while (skb_queue_len(&queue->tx_queue) < budget) {
                struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        break;
                }
 
+               skb_shinfo(skb)->nr_frags = ret;
+               if (data_len < txreq.size)
+                       skb_shinfo(skb)->nr_frags++;
+               /* At this point shinfo->nr_frags is in fact the number of
+                * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
+                */
+               frag_overflow = 0;
+               nskb = NULL;
+               if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
+                       frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
+                       BUG_ON(frag_overflow > MAX_SKB_FRAGS);
+                       skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
+                       nskb = xenvif_alloc_skb(0);
+                       if (unlikely(nskb == NULL)) {
+                               kfree_skb(skb);
+                               xenvif_tx_err(queue, &txreq, idx);
+                               if (net_ratelimit())
+                                       netdev_err(queue->vif->dev,
+                                                  "Can't allocate the frag_list skb.\n");
+                               break;
+                       }
+               }
+
                if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
                        struct xen_netif_extra_info *gso;
                        gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
                                /* Failure in xenvif_set_skb_gso is fatal. */
                                kfree_skb(skb);
+                               kfree_skb(nskb);
                                break;
                        }
                }
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                (*copy_ops)++;
 
-               skb_shinfo(skb)->nr_frags = ret;
                if (data_len < txreq.size) {
-                       skb_shinfo(skb)->nr_frags++;
                        frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                             pending_idx);
                        xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
 
                queue->pending_cons++;
 
-               request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
-               if (request_gop == NULL) {
-                       kfree_skb(skb);
-                       xenvif_tx_err(queue, &txreq, idx);
-                       break;
-               }
-               gop = request_gop;
+               gop = xenvif_get_requests(queue, skb, txfrags, gop,
+                                         frag_overflow, nskb);
 
                __skb_queue_tail(&queue->tx_queue, skb);
 
@@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
                smp_wmb();
                queue->dealloc_prod++;
        } while (ubuf);
-       wake_up(&queue->dealloc_wq);
        spin_unlock_irqrestore(&queue->callback_lock, flags);
 
        if (likely(zerocopy_success))
index 23435f2a5486f806ee8f686fdb898d0815a68013..2e2530743831a19f2ca4db3b313d1f8b2db44ebb 100644 (file)
@@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb)
        ntb->dev.bus = &ntb_bus;
        ntb->dev.parent = &ntb->pdev->dev;
        ntb->dev.release = ntb_dev_release;
-       dev_set_name(&ntb->dev, pci_name(ntb->pdev));
+       dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
 
        ntb->ctx = NULL;
        ntb->ctx_ops = NULL;
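
This one-liner is a classic format-string fix: dev_set_name() takes a printf-style format, so passing pci_name() directly would let any '%' in the device name be parsed as a conversion specifier. The rule generalizes to every varargs formatter:

/* Unsafe: an untrusted string used as the format itself. */
dev_set_name(&ntb->dev, pci_name(ntb->pdev));

/* Safe: the string is only ever consumed as a %s argument. */
dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
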
index efe3ad4122f2ee1094da78c1bb31d86642b5b3a6..1c6386d5f79c742737e4ee1a8a2b99df686ffaa0 100644 (file)
@@ -142,10 +142,11 @@ struct ntb_transport_qp {
 
        void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len);
+       struct list_head rx_post_q;
        struct list_head rx_pend_q;
        struct list_head rx_free_q;
-       spinlock_t ntb_rx_pend_q_lock;
-       spinlock_t ntb_rx_free_q_lock;
+       /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
+       spinlock_t ntb_rx_q_lock;
        void *rx_buff;
        unsigned int rx_index;
        unsigned int rx_max_entry;
@@ -211,6 +212,8 @@ struct ntb_transport_ctx {
        bool link_is_up;
        struct delayed_work link_work;
        struct work_struct link_cleanup;
+
+       struct dentry *debugfs_node_dir;
 };
 
 enum {
@@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
        char *buf;
        ssize_t ret, out_offset, out_count;
 
+       qp = filp->private_data;
+
+       if (!qp || !qp->link_is_up)
+               return 0;
+
        out_count = 1000;
 
        buf = kmalloc(out_count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
-       qp = filp->private_data;
        out_offset = 0;
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "NTB QP stats\n");
@@ -534,6 +541,27 @@ out:
        return entry;
 }
 
+static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
+                                          struct list_head *list,
+                                          struct list_head *to_list)
+{
+       struct ntb_queue_entry *entry;
+       unsigned long flags;
+
+       spin_lock_irqsave(lock, flags);
+
+       if (list_empty(list)) {
+               entry = NULL;
+       } else {
+               entry = list_first_entry(list, struct ntb_queue_entry, entry);
+               list_move_tail(&entry->entry, to_list);
+       }
+
+       spin_unlock_irqrestore(lock, flags);
+
+       return entry;
+}
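
ntb_list_mv() complements the existing ntb_list_rm(): it detaches the head of one list and appends it to another in a single locked step, so an rx entry can migrate from rx_pend_q to rx_post_q without ever being off both lists. Typical use, as in ntb_process_rxc() further down:

/* Claim the oldest pending entry and park it on the post queue
 * atomically; NULL means nothing was pending. */
entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
if (!entry)
	return -EAGAIN;
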
+
 static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
                                     unsigned int qp_num)
 {
@@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
 }
 
 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
-                     unsigned int size)
+                     resource_size_t size)
 {
        struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
        struct pci_dev *pdev = nt->ndev->pdev;
-       unsigned int xlat_size, buff_size;
+       size_t xlat_size, buff_size;
        int rc;
 
+       if (!size)
+               return -EINVAL;
+
        xlat_size = round_up(size, mw->xlat_align_size);
        buff_size = round_up(size, mw->xlat_align);
 
@@ -627,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
        if (!mw->virt_addr) {
                mw->xlat_size = 0;
                mw->buff_size = 0;
-               dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
+               dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
                        buff_size);
                return -ENOMEM;
        }
@@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work)
 
                if (qp->event_handler)
                        qp->event_handler(qp->cb_data, qp->link_is_up);
+
+               tasklet_schedule(&qp->rxc_db_work);
        } else if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
        qp->tx_max_frame = min(transport_mtu, tx_size / 2);
        qp->tx_max_entry = tx_size / qp->tx_max_frame;
 
-       if (nt_debugfs_dir) {
+       if (nt->debugfs_node_dir) {
                char debugfs_name[4];
 
                snprintf(debugfs_name, 4, "qp%d", qp_num);
                qp->debugfs_dir = debugfs_create_dir(debugfs_name,
-                                                    nt_debugfs_dir);
+                                                    nt->debugfs_node_dir);
 
                qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
                                                        qp->debugfs_dir, qp,
@@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
        INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
        INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
 
-       spin_lock_init(&qp->ntb_rx_pend_q_lock);
-       spin_lock_init(&qp->ntb_rx_free_q_lock);
+       spin_lock_init(&qp->ntb_rx_q_lock);
        spin_lock_init(&qp->ntb_tx_free_q_lock);
 
+       INIT_LIST_HEAD(&qp->rx_post_q);
        INIT_LIST_HEAD(&qp->rx_pend_q);
        INIT_LIST_HEAD(&qp->rx_free_q);
        INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
                goto err2;
        }
 
+       if (nt_debugfs_dir) {
+               nt->debugfs_node_dir =
+                       debugfs_create_dir(pci_name(ndev->pdev),
+                                          nt_debugfs_dir);
+       }
+
        for (i = 0; i < qp_count; i++) {
                rc = ntb_transport_init_queue(nt, i);
                if (rc)
@@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
        kfree(nt);
 }
 
-static void ntb_rx_copy_callback(void *data)
+static void ntb_complete_rxc(struct ntb_transport_qp *qp)
 {
-       struct ntb_queue_entry *entry = data;
-       struct ntb_transport_qp *qp = entry->qp;
-       void *cb_data = entry->cb_data;
-       unsigned int len = entry->len;
-       struct ntb_payload_header *hdr = entry->rx_hdr;
+       struct ntb_queue_entry *entry;
+       void *cb_data;
+       unsigned int len;
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+
+       while (!list_empty(&qp->rx_post_q)) {
+               entry = list_first_entry(&qp->rx_post_q,
+                                        struct ntb_queue_entry, entry);
+               if (!(entry->flags & DESC_DONE_FLAG))
+                       break;
+
+               entry->rx_hdr->flags = 0;
+               iowrite32(entry->index, &qp->rx_info->entry);
 
-       hdr->flags = 0;
+               cb_data = entry->cb_data;
+               len = entry->len;
 
-       iowrite32(entry->index, &qp->rx_info->entry);
+               list_move_tail(&entry->entry, &qp->rx_free_q);
 
-       ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+               spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
 
-       if (qp->rx_handler && qp->client_ready)
-               qp->rx_handler(qp, qp->cb_data, cb_data, len);
+               if (qp->rx_handler && qp->client_ready)
+                       qp->rx_handler(qp, qp->cb_data, cb_data, len);
+
+               spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
+       }
+
+       spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
+}
+
+static void ntb_rx_copy_callback(void *data)
+{
+       struct ntb_queue_entry *entry = data;
+
+       entry->flags |= DESC_DONE_FLAG;
+
+       ntb_complete_rxc(entry->qp);
 }
 
 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
@@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
        ntb_rx_copy_callback(entry);
 }
 
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
-                        size_t len)
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 {
        struct dma_async_tx_descriptor *txd;
        struct ntb_transport_qp *qp = entry->qp;
        struct dma_chan *chan = qp->dma_chan;
        struct dma_device *device;
-       size_t pay_off, buff_off;
+       size_t pay_off, buff_off, len;
        struct dmaengine_unmap_data *unmap;
        dma_cookie_t cookie;
        void *buf = entry->buf;
 
-       entry->len = len;
+       len = entry->len;
 
        if (!chan)
                goto err;
@@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
        struct ntb_payload_header *hdr;
        struct ntb_queue_entry *entry;
        void *offset;
-       int rc;
 
        offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
        hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,65 +1317,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
                return -EIO;
        }
 
-       entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+       entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
        if (!entry) {
                dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
                qp->rx_err_no_buf++;
-
-               rc = -ENOMEM;
-               goto err;
+               return -EAGAIN;
        }
 
+       entry->rx_hdr = hdr;
+       entry->index = qp->rx_index;
+
        if (hdr->len > entry->len) {
                dev_dbg(&qp->ndev->pdev->dev,
                        "receive buffer overflow! Wanted %d got %d\n",
                        hdr->len, entry->len);
                qp->rx_err_oflow++;
 
-               rc = -EIO;
-               goto err;
-       }
+               entry->len = -EIO;
+               entry->flags |= DESC_DONE_FLAG;
 
-       dev_dbg(&qp->ndev->pdev->dev,
-               "RX OK index %u ver %u size %d into buf size %d\n",
-               qp->rx_index, hdr->ver, hdr->len, entry->len);
+               ntb_complete_rxc(qp);
+       } else {
+               dev_dbg(&qp->ndev->pdev->dev,
+                       "RX OK index %u ver %u size %d into buf size %d\n",
+                       qp->rx_index, hdr->ver, hdr->len, entry->len);
 
-       qp->rx_bytes += hdr->len;
-       qp->rx_pkts++;
+               qp->rx_bytes += hdr->len;
+               qp->rx_pkts++;
 
-       entry->index = qp->rx_index;
-       entry->rx_hdr = hdr;
+               entry->len = hdr->len;
 
-       ntb_async_rx(entry, offset, hdr->len);
+               ntb_async_rx(entry, offset);
+       }
 
        qp->rx_index++;
        qp->rx_index %= qp->rx_max_entry;
 
        return 0;
-
-err:
-       /* FIXME: if this syncrhonous update of the rx_index gets ahead of
-        * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
-        * scenarios:
-        *
-        * 1) The peer might miss this update, but observe the update
-        * from the memcpy completion callback.  In this case, the buffer will
-        * not be freed on the peer to be reused for a different packet.  The
-        * successful rx of a later packet would clear the condition, but the
-        * condition could persist if several rx fail in a row.
-        *
-        * 2) The peer may observe this update before the asyncrhonous copy of
-        * prior packets is completed.  The peer may overwrite the buffers of
-        * the prior packets before they are copied.
-        *
-        * 3) Both: the peer may observe the update, and then observe the index
-        * decrement by the asynchronous completion callback.  Who knows what
-        * badness that will cause.
-        */
-       hdr->flags = 0;
-       iowrite32(qp->rx_index, &qp->rx_info->entry);
-
-       return rc;
 }
 
 static void ntb_transport_rxc_db(unsigned long data)
@@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data)
                        break;
        }
 
-       if (qp->dma_chan)
+       if (i && qp->dma_chan)
                dma_async_issue_pending(qp->dma_chan);
 
        if (i == qp->rx_max_entry) {
@@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
                        goto err1;
 
                entry->qp = qp;
-               ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+               ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
                             &qp->rx_free_q);
        }
 
@@ -1634,7 +1674,7 @@ err2:
        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);
 err1:
-       while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
                kfree(entry);
        if (qp->dma_chan)
                dma_release_channel(qp->dma_chan);
@@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
  */
 void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 {
-       struct ntb_transport_ctx *nt = qp->transport;
        struct pci_dev *pdev;
        struct ntb_queue_entry *entry;
        u64 qp_bit;
@@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
        qp->tx_handler = NULL;
        qp->event_handler = NULL;
 
-       while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
                kfree(entry);
 
-       while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
-               dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+               dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
+               kfree(entry);
+       }
+
+       while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+               dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
                kfree(entry);
        }
 
        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                kfree(entry);
 
-       nt->qp_bitmap_free |= qp_bit;
+       qp->transport->qp_bitmap_free |= qp_bit;
 
        dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
 }
@@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
        if (!qp || qp->client_ready)
                return NULL;
 
-       entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+       entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
        if (!entry)
                return NULL;
 
        buf = entry->cb_data;
        *len = entry->len;
 
-       ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+       ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
 
        return buf;
 }
@@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
        if (!qp)
                return -EINVAL;
 
-       entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+       entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
        if (!entry)
                return -ENOMEM;
 
        entry->cb_data = cb;
        entry->buf = data;
        entry->len = len;
+       entry->flags = 0;
+
+       ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
 
-       ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+       tasklet_schedule(&qp->rxc_db_work);
 
        return 0;
 }
index 73de4efcbe6edc85c8f3fb54e0c925a7ab30492b..944f50015ed07b41b2de0da464812910c411cc1a 100644 (file)
@@ -2,7 +2,7 @@
 # PCI configuration
 #
 config PCI_BUS_ADDR_T_64BIT
-       def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
+       def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
        depends on PCI
 
 config PCI_MSI
index cefd636681b6418ce75376879dc26fe3891bc47d..b978bbfe044c163be9cf1a0a4f35bb1edbbab783 100644 (file)
@@ -997,7 +997,12 @@ void set_pcie_port_type(struct pci_dev *pdev)
        else if (type == PCI_EXP_TYPE_UPSTREAM ||
                 type == PCI_EXP_TYPE_DOWNSTREAM) {
                parent = pci_upstream_bridge(pdev);
-               if (!parent->has_secondary_link)
+
+               /*
+                * Usually there's an upstream device (Root Port or Switch
+                * Downstream Port), but we can't assume one exists.
+                */
+               if (parent && !parent->has_secondary_link)
                        pdev->has_secondary_link = 1;
        }
 }
index e17c539e4f6fbb138c04957532f3a329a8556b4a..2dad7e820ff0b16b7447b708f0c147b2e0289f02 100644 (file)
@@ -212,6 +212,7 @@ void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled)
 
        sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2);
 }
+EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect);
 
 static struct phy_ops sun4i_usb_phy_ops = {
        .init           = sun4i_usb_phy_init,
index 3510b81db3faabcda59a7148e31a32ce403805a5..08020dc2c7c8c3496987589841fc0ddb0b1c9ff7 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/delay.h>
 #include <linux/phy/omap_control_phy.h>
 #include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #define        PLL_STATUS              0x00000004
 #define        PLL_GO                  0x00000008
@@ -52,6 +54,8 @@
 #define        PLL_LOCK                0x2
 #define        PLL_IDLE                0x1
 
+#define SATA_PLL_SOFT_RESET    BIT(18)
+
 /*
  * This is an Empirical value that works, need to confirm the actual
  * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status
@@ -82,6 +86,9 @@ struct ti_pipe3 {
        struct clk              *refclk;
        struct clk              *div_clk;
        struct pipe3_dpll_map   *dpll_map;
+       struct regmap           *dpll_reset_syscon; /* ctrl. reg. access */
+       unsigned int            dpll_reset_reg; /* reg. index within syscon */
+       bool                    sata_refclk_enabled;
 };
 
 static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -249,8 +256,11 @@ static int ti_pipe3_exit(struct phy *x)
        u32 val;
        unsigned long timeout;
 
-       /* SATA DPLL can't be powered down due to Errata i783 */
-       if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata"))
+       /* If dpll_reset_syscon is not present, we won't power down the
+        * SATA DPLL due to Errata i783.
+        */
+       if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") &&
+           !phy->dpll_reset_syscon)
                return 0;
 
        /* PCIe doesn't have internal DPLL */
@@ -276,6 +286,14 @@ static int ti_pipe3_exit(struct phy *x)
                }
        }
 
+       /* i783: SATA needs control bit toggle after PLL unlock */
+       if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) {
+               regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
+                                  SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET);
+               regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
+                                  SATA_PLL_SOFT_RESET, 0);
+       }
+
        ti_pipe3_disable_clocks(phy);
 
        return 0;
@@ -350,6 +368,21 @@ static int ti_pipe3_probe(struct platform_device *pdev)
                }
        } else {
                phy->wkupclk = ERR_PTR(-ENODEV);
+               phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node,
+                                                       "syscon-pllreset");
+               if (IS_ERR(phy->dpll_reset_syscon)) {
+                       dev_info(&pdev->dev,
+                                "can't get syscon-pllreset, sata dpll won't idle\n");
+                       phy->dpll_reset_syscon = NULL;
+               } else {
+                       if (of_property_read_u32_index(node,
+                                                      "syscon-pllreset", 1,
+                                                      &phy->dpll_reset_reg)) {
+                               dev_err(&pdev->dev,
+                                       "couldn't get pllreset reg. offset\n");
+                               return -EINVAL;
+                       }
+               }
        }
 
        if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
@@ -402,10 +435,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, phy);
        pm_runtime_enable(phy->dev);
-       /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */
-       if (of_device_is_compatible(node, "ti,phy-pipe3-sata"))
-               if (!IS_ERR(phy->refclk))
+
+       /*
+        * Prevent auto-disable of refclk for SATA PHY due to Errata i783
+        */
+       if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
+               if (!IS_ERR(phy->refclk)) {
                        clk_prepare_enable(phy->refclk);
+                       phy->sata_refclk_enabled = true;
+               }
+       }
 
        generic_phy = devm_phy_create(phy->dev, NULL, &ops);
        if (IS_ERR(generic_phy))
@@ -472,8 +511,18 @@ static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
 {
        if (!IS_ERR(phy->wkupclk))
                clk_disable_unprepare(phy->wkupclk);
-       if (!IS_ERR(phy->refclk))
+       if (!IS_ERR(phy->refclk)) {
                clk_disable_unprepare(phy->refclk);
+               /*
+                * SATA refclk needs an additional disable as we left it
+                * on in probe to avoid Errata i783
+                */
+               if (phy->sata_refclk_enabled) {
+                       clk_disable_unprepare(phy->refclk);
+                       phy->sata_refclk_enabled = false;
+               }
+       }
+
        if (!IS_ERR(phy->div_clk))
                clk_disable_unprepare(phy->div_clk);
 }
index cb13299195271ffed4854ed6b0b593c5e8487019..3271cd1abe7c0e6c5d2e813171aa5df494f9d977 100644 (file)
@@ -4,7 +4,6 @@
 
 menuconfig CHROME_PLATFORMS
        bool "Platform support for Chrome hardware"
-       depends on X86 || ARM
        ---help---
          Say Y here to get to see options for platform support for
          various Chromebooks and Chromeboxes. This option alone does
index 26270c351624f229cf85b6069ffe53ee26d50bef..ce129e595b55b663a11600e46571a1dba0d2ccf7 100644 (file)
@@ -39,7 +39,7 @@
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.6.0.17"
+#define DRV_VERSION            "1.6.0.17a"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
index 155b286f1a9d3cc8b366a6a5b7610ae562337f62..25436cd2860cc18a63ac599ed58e3c871c264d8e 100644 (file)
@@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        unsigned long ptr;
        struct fc_rport_priv *rdata;
        spinlock_t *io_lock = NULL;
+       int io_lock_acquired = 0;
 
        if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
                return SCSI_MLQUEUE_HOST_BUSY;
@@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
        spin_lock_irqsave(io_lock, flags);
 
        /* initialize rest of io_req */
+       io_lock_acquired = 1;
        io_req->port_id = rport->port_id;
        io_req->start_time = jiffies;
        CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
@@ -571,7 +573,7 @@ out:
                  (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
 
        /* if only we issued IO, will we have the io lock */
-       if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
+       if (io_lock_acquired)
                spin_unlock_irqrestore(io_lock, flags);
 
        atomic_dec(&fnic->in_flight);
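
The fix replaces an indirect test (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED, a flag other paths can also set) with a local io_lock_acquired that is set at the single point the lock is taken, so the unlock on the shared exit path exactly mirrors the lock. A generic sketch of the pattern, with hypothetical setup()/issue() steps and context struct:

#include <linux/spinlock.h>

static int do_io(struct my_ctx *c)
{
	unsigned long flags = 0;
	bool locked = false;
	int ret;

	ret = setup(c);
	if (ret)
		goto out;		/* lock was never taken */

	spin_lock_irqsave(&c->io_lock, flags);
	locked = true;			/* the only place the lock is taken */

	ret = issue(c);
out:
	if (locked)
		spin_unlock_irqrestore(&c->io_lock, flags);
	return ret;
}
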
index 1b3a094734522803c7f3fecd39da5c297a1feb43..30f9ef0c0d4f8cea52b182f370a04ed1ba4a9908 100644 (file)
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
        if (resp) {
                resp(sp, fp, arg);
                res = true;
-       } else if (!IS_ERR(fp)) {
-               fc_frame_free(fp);
        }
 
        spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
         * If new exch resp handler is valid then call that
         * first.
         */
-       fc_invoke_resp(ep, sp, fp);
+       if (!fc_invoke_resp(ep, sp, fp))
+               fc_frame_free(fp);
 
        fc_exch_release(ep);
        return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
        fc_exch_hold(ep);
        if (!rc)
                fc_exch_delete(ep);
-       fc_invoke_resp(ep, sp, fp);
+       if (!fc_invoke_resp(ep, sp, fp))
+               fc_frame_free(fp);
        if (has_rec)
                fc_exch_timer_set(ep, ep->r_a_tov);
        fc_exch_release(ep);
index c6795941b45d98579cd6117cc5b72f8c5c4d0bf9..2d5909c4685ca63375f5041d960effada171f5fc 100644 (file)
@@ -1039,11 +1039,26 @@ restart:
                fc_fcp_pkt_hold(fsp);
                spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
-               if (!fc_fcp_lock_pkt(fsp)) {
+               spin_lock_bh(&fsp->scsi_pkt_lock);
+               if (!(fsp->state & FC_SRB_COMPL)) {
+                       fsp->state |= FC_SRB_COMPL;
+                       /*
+                        * TODO: dropping scsi_pkt_lock and then reacquiring
+                        * it around fc_fcp_cleanup_cmd() is required, since
+                        * fc_fcp_cleanup_cmd() calls into fc_seq_set_resp(),
+                        * which can sleep (it calls schedule()). Maybe the
+                        * scheduling code should be removed instead of
+                        * unlocking here, to avoid a scheduling-while-atomic
+                        * bug.
+                        */
+                       spin_unlock_bh(&fsp->scsi_pkt_lock);
+
                        fc_fcp_cleanup_cmd(fsp, error);
+
+                       spin_lock_bh(&fsp->scsi_pkt_lock);
                        fc_io_compl(fsp);
-                       fc_fcp_unlock_pkt(fsp);
                }
+               spin_unlock_bh(&fsp->scsi_pkt_lock);
 
                fc_fcp_pkt_release(fsp);
                spin_lock_irqsave(&si->scsi_queue_lock, flags);
index 8053f24f03499335112721cd50c2816da40393a6..98d9bb6ff725ff46621a408bdf1f208175e21daf 100644 (file)
@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
 {
        struct iscsi_conn *conn = cls_conn->dd_data;
        struct iscsi_session *session = conn->session;
-       unsigned long flags;
 
        del_timer_sync(&conn->transport_timer);
 
+       mutex_lock(&session->eh_mutex);
        spin_lock_bh(&session->frwd_lock);
        conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
        if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        }
        spin_unlock_bh(&session->frwd_lock);
 
-       /*
-        * Block until all in-progress commands for this connection
-        * time out or fail.
-        */
-       for (;;) {
-               spin_lock_irqsave(session->host->host_lock, flags);
-               if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
-                       spin_unlock_irqrestore(session->host->host_lock, flags);
-                       break;
-               }
-               spin_unlock_irqrestore(session->host->host_lock, flags);
-               msleep_interruptible(500);
-               iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
-                                 "host_busy %d host_failed %d\n",
-                                 atomic_read(&session->host->host_busy),
-                                 session->host->host_failed);
-               /*
-                * force eh_abort() to unblock
-                */
-               wake_up(&conn->ehwait);
-       }
-
        /* flush queued up work because we free the connection below */
        iscsi_suspend_tx(conn);
 
@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
        if (session->leadconn == conn)
                session->leadconn = NULL;
        spin_unlock_bh(&session->frwd_lock);
+       mutex_unlock(&session->eh_mutex);
 
        iscsi_destroy_conn(cls_conn);
 }
index cfadccef045c5f91d9efb5f575c98d1d9c68b090..6457a8a0db9c37ed8892c28effe768a533f2e7f3 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/jiffies.h>
-#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -2523,33 +2522,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
        }
 }
 EXPORT_SYMBOL(scsi_build_sense_buffer);
-
-/**
- * scsi_set_sense_information - set the information field in a
- *             formatted sense data buffer
- * @buf:       Where to build sense data
- * @info:      64-bit information value to be set
- *
- **/
-void scsi_set_sense_information(u8 *buf, u64 info)
-{
-       if ((buf[0] & 0x7f) == 0x72) {
-               u8 *ucp, len;
-
-               len = buf[7];
-               ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
-               if (!ucp) {
-                       buf[7] = len + 0xa;
-                       ucp = buf + 8 + len;
-               }
-               ucp[0] = 0;
-               ucp[1] = 0xa;
-               ucp[2] = 0x80; /* Valid bit */
-               ucp[3] = 0;
-               put_unaligned_be64(info, &ucp[4]);
-       } else if ((buf[0] & 0x7f) == 0x70) {
-               buf[0] |= 0x80;
-               put_unaligned_be64(info, &buf[3]);
-       }
-}
-EXPORT_SYMBOL(scsi_set_sense_information);
index 9e43ae1d2163dacaae769a816dc6973cfabbdca0..e4b7998379485454d26e39f6a5a91917a155ffbb 100644 (file)
@@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev)
 {
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        struct scsi_device *sdev = to_scsi_device(dev);
-       int err;
+       int err = 0;
 
-       err = blk_pre_runtime_suspend(sdev->request_queue);
-       if (err)
-               return err;
-       if (pm && pm->runtime_suspend)
+       if (pm && pm->runtime_suspend) {
+               err = blk_pre_runtime_suspend(sdev->request_queue);
+               if (err)
+                       return err;
                err = pm->runtime_suspend(dev);
-       blk_post_runtime_suspend(sdev->request_queue, err);
-
+               blk_post_runtime_suspend(sdev->request_queue, err);
+       }
        return err;
 }
 
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err = 0;
 
-       blk_pre_runtime_resume(sdev->request_queue);
-       if (pm && pm->runtime_resume)
+       if (pm && pm->runtime_resume) {
+               blk_pre_runtime_resume(sdev->request_queue);
                err = pm->runtime_resume(dev);
-       blk_post_runtime_resume(sdev->request_queue, err);
-
+               blk_post_runtime_resume(sdev->request_queue, err);
+       }
        return err;
 }
 
index 3b2fcb4fada0491c4500b42555fdef542b4399d2..a20da8c25b4f960224fb4d772aafea38c57e1656 100644 (file)
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
        max_xfer = sdkp->max_xfer_blocks;
        max_xfer <<= ilog2(sdp->sector_size) - 9;
 
-       max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
-                               max_xfer);
-       blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
+       sdkp->disk->queue->limits.max_sectors =
+               min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+
        set_capacity(disk, sdkp->capacity);
        sd_config_write_same(sdkp);
        kfree(buffer);
index bfa42620a3f607857978895a49451b9177964374..940781183fac4e450503c7b3cd5df6c30bde6d0c 100644 (file)
@@ -1266,6 +1266,7 @@ static const struct das1800_board *das1800_probe(struct comedi_device *dev)
                if (index == das1801hc || index == das1802hc)
                        return board;
                index = das1801hc;
+               break;
        default:
                dev_err(dev->class_dev,
                        "Board model: probe returned 0x%x (unknown, please report)\n",
index 9c934e6d2ea1114094921bd890eb0186a50f0e06..c61add46b4268b722eeaad74a0540a4a2c48f1e8 100644 (file)
@@ -40,7 +40,7 @@
 
 #define DEBUG_SUBSYSTEM D_OTHER
 
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
 
 #include "../include/obd_support.h"
 #include "../include/lustre_debug.h"
index b0c8e235b982164bb170b53655ccfc8d01378d6b..69bdc8f29b59f4c1e1cbacabf7563a7ca1d66817 100644 (file)
@@ -1483,8 +1483,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
                }
        }
 
-       if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
-               if (conf->assoc) {
+       if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
+           priv->op_mode != NL80211_IFTYPE_AP) {
+               if (conf->assoc && conf->beacon_rate) {
                        CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
                                       conf->sync_tsf);
 
index cd77a064c772f1bbe897482d2372fe5bc6434328..fd092909a4577a7c4a708516bf3344fee331ad84 100644 (file)
@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
 
        conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
-       if (hdr->flags & ISCSI_FLAG_CMD_READ) {
+       if (hdr->flags & ISCSI_FLAG_CMD_READ)
                cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
-       } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+       else
                cmd->targ_xfer_tag = 0xFFFFFFFF;
        cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
        cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
index c2e9fea90b4a4bc16a0384d79fa9684c9f4176e0..860e840461778271191ba4ee8f1a4011dba5e78c 100644 (file)
@@ -457,8 +457,15 @@ void target_unregister_template(const struct target_core_fabric_ops *fo)
                if (!strcmp(t->tf_ops->name, fo->name)) {
                        BUG_ON(atomic_read(&t->tf_access_cnt));
                        list_del(&t->tf_list);
+                       mutex_unlock(&g_tf_lock);
+                       /*
+                        * Wait for any outstanding fabric se_deve_entry->rcu_head
+                        * callbacks to complete post kfree_rcu(), before allowing
+                        * fabric driver unload of TFO->module to proceed.
+                        */
+                       rcu_barrier();
                        kfree(t);
-                       break;
+                       return;
                }
        }
        mutex_unlock(&g_tf_lock);
index 62ea4e8e70a8935398f2a0e86fc44627dfa3368e..be9cefc07407e80ef5dd7dfcbd8a0d025faf97f6 100644 (file)
@@ -84,8 +84,16 @@ void target_backend_unregister(const struct target_backend_ops *ops)
        list_for_each_entry(tb, &backend_list, list) {
                if (tb->ops == ops) {
                        list_del(&tb->list);
+                       mutex_unlock(&backend_mutex);
+                       /*
+                        * Wait for any outstanding backend driver ->rcu_head
+                        * callbacks to complete post TBO->free_device() ->
+                        * call_rcu(), before allowing backend driver module
+                        * unload of target_backend_ops->owner to proceed.
+                        */
+                       rcu_barrier();
                        kfree(tb);
-                       break;
+                       return;
                }
        }
        mutex_unlock(&backend_mutex);
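
Both this hunk and the target_core_configfs.c one above apply the same unload rule: objects freed through kfree_rcu()/call_rcu() can still have callbacks queued, and those callbacks may run code or touch data belonging to the module being unloaded, so rcu_barrier() must complete before the kfree() and before the module can go away. Note that the mutex is dropped first, since rcu_barrier() sleeps. A condensed sketch:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static DEFINE_MUTEX(registry_lock);

struct registry_entry {			/* hypothetical registry record */
	struct list_head list;
};

void unregister_entry(struct registry_entry *e)
{
	mutex_lock(&registry_lock);
	list_del(&e->list);
	mutex_unlock(&registry_lock);	/* rcu_barrier() may sleep */

	/* Wait for all pending RCU callbacks to finish (a stronger
	 * guarantee than synchronize_rcu(), which only waits for a
	 * grace period), so none can fire after the kfree(). */
	rcu_barrier();
	kfree(e);
}
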
index b5ba1ec3c35476361103d7dca47a1934cdd3289f..f87d4cef6d398c072e953e7eaa6b5d9d5b469d70 100644 (file)
@@ -1203,17 +1203,13 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
        struct se_dev_entry *deve;
        struct se_session *sess = cmd->se_sess;
        struct se_node_acl *nacl;
+       struct scsi_lun slun;
        unsigned char *buf;
        u32 lun_count = 0, offset = 8;
-
-       if (cmd->data_length < 16) {
-               pr_warn("REPORT LUNS allocation length %u too small\n",
-                       cmd->data_length);
-               return TCM_INVALID_CDB_FIELD;
-       }
+       __be32 len;
 
        buf = transport_kmap_data_sg(cmd);
-       if (!buf)
+       if (cmd->data_length && !buf)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        /*
@@ -1221,11 +1217,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
         * coming via a target_core_mod PASSTHROUGH op, and not through
         * a $FABRIC_MOD.  In that case, report LUN=0 only.
         */
-       if (!sess) {
-               int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
-               lun_count = 1;
+       if (!sess)
                goto done;
-       }
+
        nacl = sess->se_node_acl;
 
        rcu_read_lock();
@@ -1236,10 +1230,12 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
                 * See SPC2-R20 7.19.
                 */
                lun_count++;
-               if ((offset + 8) > cmd->data_length)
+               if (offset >= cmd->data_length)
                        continue;
 
-               int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
+               int_to_scsilun(deve->mapped_lun, &slun);
+               memcpy(buf + offset, &slun,
+                      min(8u, cmd->data_length - offset));
                offset += 8;
        }
        rcu_read_unlock();
@@ -1248,12 +1244,22 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
         * See SPC3 r07, page 159.
         */
 done:
-       lun_count *= 8;
-       buf[0] = ((lun_count >> 24) & 0xff);
-       buf[1] = ((lun_count >> 16) & 0xff);
-       buf[2] = ((lun_count >> 8) & 0xff);
-       buf[3] = (lun_count & 0xff);
-       transport_kunmap_data_sg(cmd);
+       /*
+        * If no LUNs are accessible, report virtual LUN 0.
+        */
+       if (lun_count == 0) {
+               int_to_scsilun(0, &slun);
+               if (cmd->data_length > 8)
+                       memcpy(buf + offset, &slun,
+                              min(8u, cmd->data_length - offset));
+               lun_count = 1;
+       }
+
+       if (buf) {
+               len = cpu_to_be32(lun_count * 8);
+               memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
+               transport_kunmap_data_sg(cmd);
+       }
 
        target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
        return 0;
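
The rewritten handler never trusts the allocation length: every write into the buffer is clamped against cmd->data_length with min(), LUNs beyond the buffer are still counted but not copied, and target_complete_cmd_with_length() is told the full 8 + lun_count * 8 size so the residual is computed correctly. The per-entry clamp in isolation, assuming a caller-supplied alloc_len:

#include <linux/string.h>
#include <linux/kernel.h>

/* Sketch: copy one 8-byte LUN entry at 'offset' without ever writing
 * past the initiator's allocation length. */
static void put_lun_entry(u8 *buf, u32 alloc_len, u32 offset,
			  const void *lun8)
{
	if (offset >= alloc_len)
		return;		/* entry is counted, just not transferred */
	memcpy(buf + offset, lun8, min(8u, alloc_len - offset));
}
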
index 6509c61b96484993333a4198138945f43154fdfd..620dcd405ff6eec9ae65b0af8e93c25daae15d1f 100644 (file)
@@ -68,7 +68,7 @@ struct power_table {
  *     registered cooling device.
  * @cpufreq_state: integer value representing the current state of cpufreq
  *     cooling devices.
- * @cpufreq_val: integer value representing the absolute value of the clipped
+ * @clipped_freq: integer value representing the absolute value of the clipped
  *     frequency.
  * @max_level: maximum cooling level. One less than total number of valid
  *     cpufreq frequencies.
@@ -91,7 +91,7 @@ struct cpufreq_cooling_device {
        int id;
        struct thermal_cooling_device *cool_dev;
        unsigned int cpufreq_state;
-       unsigned int cpufreq_val;
+       unsigned int clipped_freq;
        unsigned int max_level;
        unsigned int *freq_table;       /* In descending order */
        struct cpumask allowed_cpus;
@@ -107,6 +107,9 @@ struct cpufreq_cooling_device {
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
+static unsigned int cpufreq_dev_count;
+
+static DEFINE_MUTEX(cooling_list_lock);
 static LIST_HEAD(cpufreq_dev_list);
 
 /**
@@ -185,14 +188,14 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
 {
        struct cpufreq_cooling_device *cpufreq_dev;
 
-       mutex_lock(&cooling_cpufreq_lock);
+       mutex_lock(&cooling_list_lock);
        list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
                if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
-                       mutex_unlock(&cooling_cpufreq_lock);
+                       mutex_unlock(&cooling_list_lock);
                        return get_level(cpufreq_dev, freq);
                }
        }
-       mutex_unlock(&cooling_cpufreq_lock);
+       mutex_unlock(&cooling_list_lock);
 
        pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
        return THERMAL_CSTATE_INVALID;
@@ -215,29 +218,35 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
                                    unsigned long event, void *data)
 {
        struct cpufreq_policy *policy = data;
-       unsigned long max_freq = 0;
+       unsigned long clipped_freq;
        struct cpufreq_cooling_device *cpufreq_dev;
 
-       switch (event) {
+       if (event != CPUFREQ_ADJUST)
+               return NOTIFY_DONE;
 
-       case CPUFREQ_ADJUST:
-               mutex_lock(&cooling_cpufreq_lock);
-               list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
-                       if (!cpumask_test_cpu(policy->cpu,
-                                             &cpufreq_dev->allowed_cpus))
-                               continue;
+       mutex_lock(&cooling_list_lock);
+       list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+               if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus))
+                       continue;
 
-                       max_freq = cpufreq_dev->cpufreq_val;
+               /*
+                * policy->max is the maximum allowed frequency defined by
+                * the user and clipped_freq is the maximum that thermal
+                * constraints allow.
+                *
+                * If clipped_freq is lower than policy->max, then we need to
+                * readjust policy->max.
+                *
+                * But, if clipped_freq is greater than policy->max, we don't
+                * need to do anything.
+                */
+               clipped_freq = cpufreq_dev->clipped_freq;
 
-                       if (policy->max != max_freq)
-                               cpufreq_verify_within_limits(policy, 0,
-                                                            max_freq);
-               }
-               mutex_unlock(&cooling_cpufreq_lock);
+               if (policy->max > clipped_freq)
+                       cpufreq_verify_within_limits(policy, 0, clipped_freq);
                break;
-       default:
-               return NOTIFY_DONE;
        }
+       mutex_unlock(&cooling_list_lock);
 
        return NOTIFY_OK;
 }
@@ -519,7 +528,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 
        clip_freq = cpufreq_device->freq_table[state];
        cpufreq_device->cpufreq_state = state;
-       cpufreq_device->cpufreq_val = clip_freq;
+       cpufreq_device->clipped_freq = clip_freq;
 
        cpufreq_update_policy(cpu);
 
@@ -861,17 +870,19 @@ __cpufreq_cooling_register(struct device_node *np,
                        pr_debug("%s: freq:%u KHz\n", __func__, freq);
        }
 
-       cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
+       cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
        cpufreq_dev->cool_dev = cool_dev;
 
        mutex_lock(&cooling_cpufreq_lock);
 
+       mutex_lock(&cooling_list_lock);
+       list_add(&cpufreq_dev->node, &cpufreq_dev_list);
+       mutex_unlock(&cooling_list_lock);
+
        /* Register the notifier for first cpufreq cooling device */
-       if (list_empty(&cpufreq_dev_list))
+       if (!cpufreq_dev_count++)
                cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                          CPUFREQ_POLICY_NOTIFIER);
-       list_add(&cpufreq_dev->node, &cpufreq_dev_list);
-
        mutex_unlock(&cooling_cpufreq_lock);
 
        return cool_dev;
@@ -1013,13 +1024,17 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
                return;
 
        cpufreq_dev = cdev->devdata;
-       mutex_lock(&cooling_cpufreq_lock);
-       list_del(&cpufreq_dev->node);
 
        /* Unregister the notifier for the last cpufreq cooling device */
-       if (list_empty(&cpufreq_dev_list))
+       mutex_lock(&cooling_cpufreq_lock);
+       if (!--cpufreq_dev_count)
                cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);
+
+       mutex_lock(&cooling_list_lock);
+       list_del(&cpufreq_dev->node);
+       mutex_unlock(&cooling_list_lock);
+
        mutex_unlock(&cooling_cpufreq_lock);
 
        thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
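
With the list now under its own cooling_list_lock, list_empty() is no longer a safe "first/last device" test while holding only cooling_cpufreq_lock, so the patch counts devices explicitly: the cpufreq notifier is registered on the 0 to 1 transition and unregistered on the 1 to 0 transition. The register-on-first/unregister-on-last shape in isolation:

#include <linux/cpufreq.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(reg_lock);
static unsigned int dev_count;
static struct notifier_block nb;	/* .notifier_call set elsewhere */

static void device_added(void)
{
	mutex_lock(&reg_lock);
	if (!dev_count++)		/* 0 -> 1: first device */
		cpufreq_register_notifier(&nb, CPUFREQ_POLICY_NOTIFIER);
	mutex_unlock(&reg_lock);
}

static void device_removed(void)
{
	mutex_lock(&reg_lock);
	if (!--dev_count)		/* 1 -> 0: last device */
		cpufreq_unregister_notifier(&nb, CPUFREQ_POLICY_NOTIFIER);
	mutex_unlock(&reg_lock);
}
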
index d5dd357ba57c4c6af785b4f915d9fccba6a4fa21..b49f97c734d00ddccb50379d3131c232651e96e5 100644 (file)
@@ -405,7 +405,6 @@ static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
 static struct platform_driver hisi_thermal_driver = {
        .driver = {
                .name           = "hisi_thermal",
-               .owner          = THIS_MODULE,
                .pm             = &hisi_thermal_pm_ops,
                .of_match_table = of_hisi_thermal_match,
        },
index 4672250b329f4cec54ab243b55b41b127b1c48d0..7006860f2f3693b04ee44996c5085e2f94ceae44 100644 (file)
@@ -229,7 +229,8 @@ static int allocate_power(struct thermal_zone_device *tz,
        struct thermal_instance *instance;
        struct power_allocator_params *params = tz->governor_data;
        u32 *req_power, *max_power, *granted_power, *extra_actor_power;
-       u32 total_req_power, max_allocatable_power;
+       u32 *weighted_req_power;
+       u32 total_req_power, max_allocatable_power, total_weighted_req_power;
        u32 total_granted_power, power_range;
        int i, num_actors, total_weight, ret = 0;
        int trip_max_desired_temperature = params->trip_max_desired_temperature;
@@ -247,16 +248,17 @@ static int allocate_power(struct thermal_zone_device *tz,
        }
 
        /*
-        * We need to allocate three arrays of the same size:
-        * req_power, max_power and granted_power.  They are going to
-        * be needed until this function returns.  Allocate them all
-        * in one go to simplify the allocation and deallocation
-        * logic.
+        * We need to allocate five arrays of the same size:
+        * req_power, max_power, granted_power, extra_actor_power and
+        * weighted_req_power.  They are going to be needed until this
+        * function returns.  Allocate them all in one go to simplify
+        * the allocation and deallocation logic.
         */
        BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
        BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
        BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
-       req_power = devm_kcalloc(&tz->device, num_actors * 4,
-                                sizeof(*req_power), GFP_KERNEL);
+       BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
+       req_power = kcalloc(num_actors * 5, sizeof(*req_power),
+                           GFP_KERNEL);
        if (!req_power) {
                ret = -ENOMEM;
@@ -266,8 +268,10 @@ static int allocate_power(struct thermal_zone_device *tz,
        max_power = &req_power[num_actors];
        granted_power = &req_power[2 * num_actors];
        extra_actor_power = &req_power[3 * num_actors];
+       weighted_req_power = &req_power[4 * num_actors];
 
        i = 0;
+       total_weighted_req_power = 0;
        total_req_power = 0;
        max_allocatable_power = 0;
 
@@ -289,13 +293,14 @@ static int allocate_power(struct thermal_zone_device *tz,
                else
                        weight = instance->weight;
 
-               req_power[i] = frac_to_int(weight * req_power[i]);
+               weighted_req_power[i] = frac_to_int(weight * req_power[i]);
 
                if (power_actor_get_max_power(cdev, tz, &max_power[i]))
                        continue;
 
                total_req_power += req_power[i];
                max_allocatable_power += max_power[i];
+               total_weighted_req_power += weighted_req_power[i];
 
                i++;
        }
@@ -303,8 +308,9 @@ static int allocate_power(struct thermal_zone_device *tz,
        power_range = pid_controller(tz, current_temp, control_temp,
                                     max_allocatable_power);
 
-       divvy_up_power(req_power, max_power, num_actors, total_req_power,
-                      power_range, granted_power, extra_actor_power);
+       divvy_up_power(weighted_req_power, max_power, num_actors,
+                      total_weighted_req_power, power_range, granted_power,
+                      extra_actor_power);
 
        total_granted_power = 0;
        i = 0;
@@ -328,7 +334,7 @@ static int allocate_power(struct thermal_zone_device *tz,
                                      max_allocatable_power, current_temp,
                                      (s32)control_temp - (s32)current_temp);
 
-       devm_kfree(&tz->device, req_power);
+       kfree(req_power);
 unlock:
        mutex_unlock(&tz->lock);
 
@@ -420,7 +426,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
                return -EINVAL;
        }
 
-       params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
+       params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (!params)
                return -ENOMEM;
 
@@ -462,14 +468,14 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
        return 0;
 
 free:
-       devm_kfree(&tz->device, params);
+       kfree(params);
        return ret;
 }
 
 static void power_allocator_unbind(struct thermal_zone_device *tz)
 {
        dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
-       devm_kfree(&tz->device, tz->governor_data);
+       kfree(tz->governor_data);
        tz->governor_data = NULL;
 }
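
allocate_power() above backs five equally sized u32 arrays with a single allocation and carves them out at multiples of num_actors, so there is exactly one error path and one kfree(); the BUILD_BUG_ON lines guard the assumption that all five element types have the same size. A reduced sketch of the carving, with stand-in names:

	u32 *buf, *req, *max, *granted, *extra, *weighted;

	buf = kcalloc(n * 5, sizeof(*buf), GFP_KERNEL);	/* one allocation */
	if (!buf)
		return -ENOMEM;

	req      = buf;			/* view 0 */
	max      = buf + n;		/* view 1 */
	granted  = buf + 2 * n;		/* view 2 */
	extra    = buf + 3 * n;		/* view 3 */
	weighted = buf + 4 * n;		/* view 4 */

	/* ... use the five arrays ... */

	kfree(buf);			/* releases all five at once */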
 
index c8e35c1a43dcfd19145a6d1e24b132b25b5c6169..e0da3865e0600f8f23b6368be63fd662d5dda6d1 100644 (file)
@@ -1,6 +1,6 @@
 config EXYNOS_THERMAL
        tristate "Exynos thermal management unit driver"
-       depends on OF
+       depends on THERMAL_OF
        help
          If you say yes here you get support for the TMU (Thermal Management
          Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
index 531f4b179871f63da7fea6f32af19b27af18e9a3..c96ff10b869efd941bfe8c32384d48b70b1c348d 100644 (file)
@@ -1296,7 +1296,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = {
 
 static int exynos_tmu_probe(struct platform_device *pdev)
 {
-       struct exynos_tmu_platform_data *pdata;
        struct exynos_tmu_data *data;
        int ret;
 
@@ -1318,8 +1317,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
        if (ret)
                goto err_sensor;
 
-       pdata = data->pdata;
-
        INIT_WORK(&data->irq_work, exynos_tmu_work);
 
        data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
@@ -1392,6 +1389,8 @@ err_clk_sec:
        if (!IS_ERR(data->clk_sec))
                clk_unprepare(data->clk_sec);
 err_sensor:
+       if (!IS_ERR_OR_NULL(data->regulator))
+               regulator_disable(data->regulator);
        thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
 
        return ret;
index 04659bfb888b73237257b75f944bf9130bc241b6..4ca211be4c0f197825be94f70be386af1c2cc33d 100644 (file)
@@ -1333,6 +1333,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
        return -ENODEV;
 
 unbind:
+       device_remove_file(&tz->device, &pos->weight_attr);
        device_remove_file(&tz->device, &pos->attr);
        sysfs_remove_link(&tz->device.kobj, pos->name);
        release_idr(&tz->idr, &tz->lock, pos->id);
index 74fea4fa41b156248ce6d7116db02b54990066a9..3ad48e1c0c57e1722311c8393ad3bd21712fa1e7 100644 (file)
@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
        },
 };
 
-module_platform_driver(ci_hdrc_driver);
+static int __init ci_hdrc_platform_register(void)
+{
+       ci_hdrc_host_driver_init();
+       return platform_driver_register(&ci_hdrc_driver);
+}
+module_init(ci_hdrc_platform_register);
+
+static void __exit ci_hdrc_platform_unregister(void)
+{
+       platform_driver_unregister(&ci_hdrc_driver);
+}
+module_exit(ci_hdrc_platform_unregister);
 
 MODULE_ALIAS("platform:ci_hdrc");
 MODULE_LICENSE("GPL v2");
index 6cf87b8b13a8a606b5ccf680e4635fcbc44874a1..7161439def19aa265c9f36530d3d97d63ecc51a7 100644 (file)
@@ -249,9 +249,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
        rdrv->name      = "host";
        ci->roles[CI_ROLE_HOST] = rdrv;
 
+       return 0;
+}
+
+void ci_hdrc_host_driver_init(void)
+{
        ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
        orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
        ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
-
-       return 0;
 }
index 5707bf379bfb4b7ce98ff4a79d585619d1c64b48..0f12f131bdd3f22671eaf170476e2511950fa1be 100644 (file)
@@ -5,6 +5,7 @@
 
 int ci_hdrc_host_init(struct ci_hdrc *ci);
 void ci_hdrc_host_destroy(struct ci_hdrc *ci);
+void ci_hdrc_host_driver_init(void);
 
 #else
 
@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
 
 }
 
+static inline void ci_hdrc_host_driver_init(void)
+{
+
+}
+
 #endif
 
 #endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
index f7f35a36c09a06eab17ef2e5af013ee3de2b5b8e..6df9715a4bcd31179cb190100df45b2dd975a0d2 100644 (file)
@@ -699,6 +699,10 @@ static inline int hidg_get_minor(void)
        int ret;
 
        ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
+       if (ret >= HIDG_MINORS) {
+               ida_simple_remove(&hidg_ida, ret);
+               ret = -ENODEV;
+       }
 
        return ret;
 }
index 44173df272739543a6b3168323073dba45239760..357f63f47b42aba69d92e24346963645227d39e2 100644 (file)
@@ -1248,7 +1248,15 @@ static struct config_item_type printer_func_type = {
 
 static inline int gprinter_get_minor(void)
 {
-       return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+       int ret;
+
+       ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+       if (ret >= PRINTER_MINORS) {
+               ida_simple_remove(&printer_ida, ret);
+               ret = -ENODEV;
+       }
+
+       return ret;
 }
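
Both hidg_get_minor() above and gprinter_get_minor() here apply the same guard: ida_simple_get() is called with end = 0, which means "no upper bound", so an ID at or beyond the number of reserved minors must be handed back and turned into -ENODEV. The shape of the pattern, with FOO_MINORS as a stand-in limit:

	static DEFINE_IDA(foo_ida);

	static int foo_get_minor(void)
	{
		int ret = ida_simple_get(&foo_ida, 0, 0, GFP_KERNEL);

		/* end == 0 places no limit, so enforce ours by hand */
		if (ret >= FOO_MINORS) {
			ida_simple_remove(&foo_ida, ret);
			ret = -ENODEV;
		}
		return ret;
	}

Passing the limit as the third argument (ida_simple_get(&foo_ida, 0, FOO_MINORS, GFP_KERNEL)) would reject out-of-range IDs up front; the explicit check keeps the call sites unchanged.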
 
 static inline void gprinter_put_minor(int minor)
index 6d3eb8b00a488446db954334e80ac12eccf0d5cf..53186154725330d4c1f710e4829d4a8b25b614cd 100644 (file)
@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
                        factor = 1000;
                } else {
                        ep_desc = &hs_epin_desc;
-                       factor = 125;
+                       factor = 8000;
                }
 
                /* pre-compute some values for iso_complete() */
                uac2->p_framesize = opts->p_ssize *
                                    num_channels(opts->p_chmask);
                rate = opts->p_srate * uac2->p_framesize;
-               uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor;
+               uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
                uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
                                        prm->max_psize);
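
The unit change is the point of the fix above: factor is now the number of (micro)frames per second (1000 for full speed, 8000 for high speed), so p_interval = factor / 2^(bInterval-1) comes out in packets per second and rate / p_interval in bytes per packet. As a worked example, assume a high-speed endpoint with bInterval = 4: the service interval is 2^3 = 8 microframes, so p_interval = 8000 / 8 = 1000 packets/s; at 48 kHz, two channels, two bytes per sample, rate = 48000 * 4 = 192000 B/s and p_pktsize = 192000 / 1000 = 192 bytes. The old code multiplied instead of dividing, yielding an interval length rather than a frequency.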
 
index b04980cf6dc42108f4861e4dfa7285dd4fe56af9..1efa61265d8d49c5116027c8e3555ae70b9cc12c 100644 (file)
@@ -779,7 +779,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
        /* The current hw dequeue pointer */
        tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
        deq_ptr_64 = tmp_32;
-       tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1));
+       tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0));
        deq_ptr_64 |= ((u64)tmp_32 << 32);
 
        /* we have the dma addr of next bd that will be fetched by hardware */
index 362ee8af5fce87df4a2e7779f743c2ec722d9a5c..89ed5e71a1991e0cd249c48b18bd04dd67bacf91 100644 (file)
@@ -323,6 +323,7 @@ err4:
 
 err3:
        put_device(&udc->dev);
+       device_del(&gadget->dev);
 
 err2:
        put_device(&gadget->dev);
index 3e442f77a2b9367c5bd90d2ab69beb2db841fd26..9a8c936cd42c18ef72a695d45c7e4ce2e893f8b0 100644 (file)
@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        int size;
        int i, j, num_ports;
 
-       del_timer_sync(&xhci->cmd_timer);
+       if (timer_pending(&xhci->cmd_timer))
+               del_timer_sync(&xhci->cmd_timer);
 
        /* Free the Event Ring Segment Table and the actual Event Ring */
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
index 6a8fc52aed5863391885ac11c2661a407f707a30..32f4d564494a9f48cfebd328e61d3c281387d252 100644 (file)
@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
                return 0;
        /* offset in TRBs */
        segment_offset = trb - seg->trbs;
-       if (segment_offset > TRBS_PER_SEGMENT)
+       if (segment_offset >= TRBS_PER_SEGMENT)
                return 0;
        return seg->dma + (segment_offset * sizeof(*trb));
 }
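
The off-by-one above follows from the valid index range: TRBs within a segment are numbered 0 through TRBS_PER_SEGMENT - 1, so an offset equal to TRBS_PER_SEGMENT already points one element past the ring and must be rejected, as for any fixed-size array:

	/* valid indices are [0, N); N itself is one past the end */
	if (index >= N)
		return 0;	/* out of range */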
index 19b85ee98a7247c46089e023676633e70eb498df..876423b8892c96f80930fa7a103f36370e519dfe 100644 (file)
@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
        { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
          .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
+       { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
+         .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
index 9c63897b3a564012ea63f99b9e5e73bc48b93d36..d156545728c2ab5b8bb81cf7537aa8a60805c08a 100644 (file)
@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x1199, 0x901c)},   /* Sierra Wireless EM7700 */
        {DEVICE_SWI(0x1199, 0x901f)},   /* Sierra Wireless EM7355 */
        {DEVICE_SWI(0x1199, 0x9040)},   /* Sierra Wireless Modem */
-       {DEVICE_SWI(0x1199, 0x9041)},   /* Sierra Wireless MC7305/MC7355 */
        {DEVICE_SWI(0x1199, 0x9051)},   /* Netgear AirCard 340U */
        {DEVICE_SWI(0x1199, 0x9053)},   /* Sierra Wireless Modem */
        {DEVICE_SWI(0x1199, 0x9054)},   /* Sierra Wireless Modem */
@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x413c, 0x81a4)},   /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a8)},   /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {DEVICE_SWI(0x413c, 0x81a9)},   /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {DEVICE_SWI(0x413c, 0x81b1)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
 
        /* Huawei devices */
        {DEVICE_HWI(0x03f0, 0x581d)},   /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
index 46179a0828ebcbad9a78c11dad8044edff27664a..07d1ecd564f79d9c51798f6941b1295d794d468c 100644 (file)
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
        },
+       { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
        /* AT&T Direct IP LTE modems */
        { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
          .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
index 658c34bb9076f813058dfb03a47907ca48c6eb0a..1aaf89300621abc811f57f549c25b9a540a21d99 100644 (file)
@@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
        int y;
        int c = scr_readw((u16 *) vc->vc_pos);
 
+       ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+
        if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
                return;
 
-       ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
        if (vc->vc_cursor_type & 0x10)
                fbcon_del_cursor_timer(info);
        else
index 2d98de535e0f7374804474c58de752ffb2848aa4..f888561568d91735a3a765a772630e8b2be0a54a 100644 (file)
@@ -298,7 +298,7 @@ config FB_ARMCLCD
 
 # Helper logic selected only by the ARM Versatile platform family.
 config PLAT_VERSATILE_CLCD
-       def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
+       def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
        depends on ARM
        depends on FB_ARMCLCD && FB=y
 
index 928ee639c0c19ba3a2346737c41b44f38e82c0b5..bf407b6ba15ca0002166a0704124eeda0fb2052e 100644 (file)
@@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent,
                        }
                        prev = port;
                } while (of_node_cmp(port->name, "port") != 0);
+
+               of_node_put(ports);
        }
 
        return port;
@@ -94,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
        if (!port)
                return NULL;
 
-       np = of_get_next_parent(port);
+       np = of_get_parent(port);
 
        for (i = 0; i < 2 && np; ++i) {
                struct property *prop;
index 86bd457d039d2ad9a85e82d3f26a3630e3134da7..50bce45e7f3d47d78163058eff366b32d4f56180 100644 (file)
@@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
                goto err_free_dma;
        }
 
-       ret = clk_enable(priv->clk);
+       ret = clk_prepare_enable(priv->clk);
        if (ret < 0) {
                dev_err(dev, "failed to enable clock\n");
                goto err_misc_deregister;
@@ -685,7 +685,7 @@ err_misc_deregister:
        misc_deregister(&priv->misc_dev);
 
 err_disable_clk:
-       clk_disable(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        return ret;
 }
index 111c2d1911d32ea38e86b11c0af753133ccfab05..b5102aa6090d111e25727f78422c8cbc183f086a 100644 (file)
@@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm,
                index = disp->native_mode;
 
        ret = videomode_from_timings(disp, vm, index);
-       if (ret)
-               return ret;
 
        display_timings_release(disp);
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(of_get_videomode);
index 60e2a16775637b778a8faeb8f2b3f84810a88716..c96944b59856c10c7d28c189b5ff86dbe4b3c932 100644 (file)
@@ -313,6 +313,7 @@ err_init_vq:
 static void virtinput_remove(struct virtio_device *vdev)
 {
        struct virtio_input *vi = vdev->priv;
+       void *buf;
        unsigned long flags;
 
        spin_lock_irqsave(&vi->lock, flags);
@@ -320,6 +321,9 @@ static void virtinput_remove(struct virtio_device *vdev)
        spin_unlock_irqrestore(&vi->lock, flags);
 
        input_unregister_device(vi->idev);
+       vdev->config->reset(vdev);
+       while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
+               kfree(buf);
        vdev->config->del_vqs(vdev);
        kfree(vi);
 }
index fd933695f2328f29c2493ee751f22230ec68cbb1..bf4a23c7c5918f6849e764a8376c3608cc591933 100644 (file)
@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 }
 
 /*
- * We avoid multiple worker processes conflicting via the balloon mutex.
+ * As this is a work item, it is guaranteed to run as a single instance only.
  * We may of course race updates of the target counts (which are protected
  * by the balloon lock), or with changes to the Xen hard limit, but we will
  * recover from these in time.
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work)
        enum bp_state state = BP_DONE;
        long credit;
 
-       mutex_lock(&balloon_mutex);
 
        do {
+               mutex_lock(&balloon_mutex);
+
                credit = current_credit();
 
                if (credit > 0) {
@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work)
 
                state = update_schedule(state);
 
-#ifndef CONFIG_PREEMPT
-               if (need_resched())
-                       schedule();
-#endif
+               mutex_unlock(&balloon_mutex);
+
+               cond_resched();
+
        } while (credit && state == BP_DONE);
 
        /* Schedule more work if there is some still to be done. */
        if (state == BP_EAGAIN)
                schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
-
-       mutex_unlock(&balloon_mutex);
 }
 
 /* Resets the Xen limit, sets new target, and kicks off processing. */
index 67b9163db7185402b0ff3811c5363c1a1022e2c7..0dbb222daaf1c694b1f073f3e206f755f5f77cc6 100644 (file)
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
 
        pr_debug("priv %p\n", priv);
 
+       mutex_lock(&priv->lock);
        while (!list_empty(&priv->maps)) {
                map = list_entry(priv->maps.next, struct grant_map, next);
                list_del(&map->next);
                gntdev_put_map(NULL /* already removed */, map);
        }
        WARN_ON(!list_empty(&priv->freeable_maps));
+       mutex_unlock(&priv->lock);
 
        if (use_ptemod)
                mmu_notifier_unregister(&priv->mn, priv->mm);
index 9ad327238ba931243967455b5790916dc6b184f1..e30353575d5da11f75e8c927ba53945a23b73d76 100644 (file)
@@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
 
        rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
                               addrs);
-       if (!rv)
+       if (!rv) {
                vunmap(vaddr);
+               free_xenballooned_pages(node->nr_handles, node->hvm.pages);
+       }
        else
                WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
                     node->nr_handles);
index e9ace099162ce14d73d04523962465ee163b30c7..8a820295657686d634e3a5ded2ee21b5dcefc026 100644 (file)
@@ -1651,6 +1651,11 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
                                /* Exclusive -> exclusive, nothing changed */
                        }
                }
+
+               /* For exclusive extent, free its reserved bytes too */
+               if (nr_old_roots == 0 && nr_new_roots == 1 &&
+                   cur_new_count == nr_new_roots)
+                       qg->reserved -= num_bytes;
                if (dirty)
                        qgroup_dirty(fs_info, qg);
        }
index dc10c9dd36c1a2ac6264ed21d3248e5f62f1e330..ddd5e94712904501db729c51b59de72cd88ddb5a 100644 (file)
@@ -1506,7 +1506,6 @@ static int __mark_caps_flushing(struct inode *inode,
 
        swap(cf, ci->i_prealloc_cap_flush);
        cf->caps = flushing;
-       cf->kick = false;
 
        spin_lock(&mdsc->cap_dirty_lock);
        list_del_init(&ci->i_dirty_item);
@@ -2123,8 +2122,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
 
 static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
                                struct ceph_mds_session *session,
-                               struct ceph_inode_info *ci,
-                               bool kick_all)
+                               struct ceph_inode_info *ci)
 {
        struct inode *inode = &ci->vfs_inode;
        struct ceph_cap *cap;
@@ -2150,9 +2148,7 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
 
                for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
                        cf = rb_entry(n, struct ceph_cap_flush, i_node);
-                       if (cf->tid < first_tid)
-                               continue;
-                       if (kick_all || cf->kick)
+                       if (cf->tid >= first_tid)
                                break;
                }
                if (!n) {
@@ -2161,7 +2157,6 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
                }
 
                cf = rb_entry(n, struct ceph_cap_flush, i_node);
-               cf->kick = false;
 
                first_tid = cf->tid + 1;
 
@@ -2181,8 +2176,6 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
 {
        struct ceph_inode_info *ci;
        struct ceph_cap *cap;
-       struct ceph_cap_flush *cf;
-       struct rb_node *n;
 
        dout("early_kick_flushing_caps mds%d\n", session->s_mds);
        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
@@ -2205,16 +2198,11 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
                if ((cap->issued & ci->i_flushing_caps) !=
                    ci->i_flushing_caps) {
                        spin_unlock(&ci->i_ceph_lock);
-                       if (!__kick_flushing_caps(mdsc, session, ci, true))
+                       if (!__kick_flushing_caps(mdsc, session, ci))
                                continue;
                        spin_lock(&ci->i_ceph_lock);
                }
 
-               for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
-                       cf = rb_entry(n, struct ceph_cap_flush, i_node);
-                       cf->kick = true;
-               }
-
                spin_unlock(&ci->i_ceph_lock);
        }
 }
@@ -2228,7 +2216,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
 
        dout("kick_flushing_caps mds%d\n", session->s_mds);
        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
-               int delayed = __kick_flushing_caps(mdsc, session, ci, false);
+               int delayed = __kick_flushing_caps(mdsc, session, ci);
                if (delayed) {
                        spin_lock(&ci->i_ceph_lock);
                        __cap_delay_requeue(mdsc, ci);
@@ -2261,7 +2249,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
 
                spin_unlock(&ci->i_ceph_lock);
 
-               delayed = __kick_flushing_caps(mdsc, session, ci, true);
+               delayed = __kick_flushing_caps(mdsc, session, ci);
                if (delayed) {
                        spin_lock(&ci->i_ceph_lock);
                        __cap_delay_requeue(mdsc, ci);
index 4347039ecc183d538c23f32019e5213da2ebf2f4..6706bde9ad1b1e16e6a283a83ea93c4f58b6b2aa 100644 (file)
@@ -287,7 +287,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
                return 0;
 
        spin_lock(&ctx->flc_lock);
-       list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+       list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
                ++seen_fcntl;
                if (seen_fcntl > num_fcntl_locks) {
                        err = -ENOSPC;
index 860cc016e70d4ff463c1f7845fc648eaf58269c4..2f2460d23a0600f8f9bf2e1cc4fe3b2286684356 100644 (file)
@@ -189,7 +189,6 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
 struct ceph_cap_flush {
        u64 tid;
        int caps;
-       bool kick;
        struct rb_node g_node; // global
        union {
                struct rb_node i_node; // inode
index 5c8ea15e73a53b6b6dbe3e9660973d2eda9c7800..9b5fe503f6cb6c8d76044bb8272084fdb0474c9b 100644 (file)
@@ -3442,22 +3442,15 @@ void __init vfs_caches_init_early(void)
        inode_init_early();
 }
 
-void __init vfs_caches_init(unsigned long mempages)
+void __init vfs_caches_init(void)
 {
-       unsigned long reserve;
-
-       /* Base hash sizes on available memory, with a reserve equal to
-           150% of current kernel size */
-
-       reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
-       mempages -= reserve;
-
        names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
        dcache_init();
        inode_init();
-       files_init(mempages);
+       files_init();
+       files_maxfiles_init();
        mnt_init();
        bdev_cache_init();
        chrdev_init();
index 7f9d407c759596f950335bd418ab0226f4a629f8..ad17e05ebf95f07888b15f2b09a7b37ac89b9710 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/hardirq.h>
 #include <linux/task_work.h>
 #include <linux/ima.h>
+#include <linux/swap.h>
 
 #include <linux/atomic.h>
 
@@ -308,19 +309,24 @@ void put_filp(struct file *file)
        }
 }
 
-void __init files_init(unsigned long mempages)
+void __init files_init(void)
 { 
-       unsigned long n;
-
        filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+       percpu_counter_init(&nr_files, 0, GFP_KERNEL);
+}
 
-       /*
-        * One file with associated inode and dcache is very roughly 1K.
-        * Per default don't use more than 10% of our memory for files. 
-        */ 
+/*
+ * One file with associated inode and dcache is very roughly 1K. By default,
+ * do not use more than 10% of our memory for files.
+ */
+void __init files_maxfiles_init(void)
+{
+       unsigned long n;
+       unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
+
+       memreserve = min(memreserve, totalram_pages - 1);
+       n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
 
-       n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
-       percpu_counter_init(&nr_files, 0, GFP_KERNEL);
 } 
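
A worked example of the sizing above, assuming 4 KiB pages and 4 GiB of RAM (totalram_pages = 1048576), with a hypothetical 262144 pages already used by the kernel at this point in boot: memreserve = 262144 * 3/2 = 393216 pages, and n = (1048576 - 393216) * (4096 / 1024) / 10 = 262144, so max_files ends up at 262144. That is consistent with the rule of thumb in the comment, since 10% of the remaining 655360 pages is 256 MiB, or 262144 files at roughly 1 KiB each.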
index 80cc1b35d46043c16bc456e0cadf61e76c281d52..ebb5e37455a07acd86f5fbf1b76d474e99b937fb 100644 (file)
@@ -2246,7 +2246,15 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
 
                        err = -EINVAL;
                        if (old) {
-                               struct fuse_dev *fud = fuse_get_dev(old);
+                               struct fuse_dev *fud = NULL;
+
+                               /*
+                                * Check against file->f_op because CUSE
+                                * uses the same ioctl handler.
+                                */
+                               if (old->f_op == file->f_op &&
+                                   old->f_cred->user_ns == file->f_cred->user_ns)
+                                       fud = fuse_get_dev(old);
 
                                if (fud) {
                                        mutex_lock(&fuse_mutex);
index 0cf74df68617b8738342a5f7be7992ccc596bf0a..973c24ce59ad3ef1b62ff3ce00113d7d85cedb68 100644 (file)
@@ -1010,6 +1010,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
        inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto out_dentry;
+       if (creat_flags == HUGETLB_SHMFS_INODE)
+               inode->i_flags |= S_PRIVATE;
 
        file = ERR_PTR(-ENOMEM);
        if (hugetlb_reserve_pages(inode, 0,
index fbbcf0993312eb9c278dc1343ea2db0fe4edf959..1c2105ed20c5ef4fb390878fb5442943ceec29ae 100644 (file)
@@ -879,7 +879,7 @@ static inline int may_follow_link(struct nameidata *nd)
                return 0;
 
        /* Allowed if parent directory not sticky and world-writable. */
-       parent = nd->path.dentry->d_inode;
+       parent = nd->inode;
        if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
                return 0;
 
index 6904213a436368e47628af85701a3beb68e0550b..ebf90e487c752b59270aa559588a8805468ad0f5 100644 (file)
@@ -212,6 +212,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
        BUG_ON(!ls->ls_file);
 
        if (nfsd4_layout_setlease(ls)) {
+               fput(ls->ls_file);
                put_nfs4_file(fp);
                kmem_cache_free(nfs4_layout_stateid_cache, ls);
                return NULL;
index 61dfb33f05593c1b19dff8a5346dee37a3539d79..95202719a1fd26bd27ea71a2fe85ec1c248e8d13 100644 (file)
@@ -4396,9 +4396,9 @@ laundromat_main(struct work_struct *laundry)
        queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
 }
 
-static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
+static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
 {
-       if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
+       if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
                return nfserr_bad_stateid;
        return nfs_ok;
 }
@@ -4601,9 +4601,6 @@ nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
 {
        __be32 status;
 
-       status = nfs4_check_fh(fhp, ols);
-       if (status)
-               return status;
        status = nfsd4_check_openowner_confirmed(ols);
        if (status)
                return status;
@@ -4690,6 +4687,9 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
                status = nfserr_bad_stateid;
                break;
        }
+       if (status)
+               goto out;
+       status = nfs4_check_fh(fhp, s);
 
 done:
        if (!status && filpp)
@@ -4798,7 +4798,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
        status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
        if (status)
                return status;
-       return nfs4_check_fh(current_fh, stp);
+       return nfs4_check_fh(current_fh, &stp->st_stid);
 }
 
 /* 
index 54633858733a8da5ac978fe1d19a0a055488e01d..75e0563c09d1911d927501ee52b53a3bd988940e 100644 (file)
@@ -2143,6 +2143,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
 #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
                              FATTR4_WORD0_RDATTR_ERROR)
 #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
+#define WORD2_ABSENT_FS_ATTRS 0
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
 static inline __be32
@@ -2171,7 +2172,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
 { return 0; }
 #endif
 
-static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
+static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
 {
        /* As per referral draft:  */
        if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
@@ -2184,6 +2185,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
        }
        *bmval0 &= WORD0_ABSENT_FS_ATTRS;
        *bmval1 &= WORD1_ABSENT_FS_ATTRS;
+       *bmval2 &= WORD2_ABSENT_FS_ATTRS;
        return 0;
 }
 
@@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
        BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
 
        if (exp->ex_fslocs.migrated) {
-               BUG_ON(bmval[2]);
-               status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
+               status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
                if (status)
                        goto out;
        }
@@ -2286,8 +2287,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
        }
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
-       if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) ||
-                       bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
+       if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
+            bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
                err = security_inode_getsecctx(d_inode(dentry),
                                                &context, &contextlen);
                contextsupport = (err == 0);
index 92e48c70f0f05542a75804fe7aac752672abd214..39ddcaf0918f145fb3f2cb916d27aa1b866a220e 100644 (file)
@@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
                                         unsigned int flags)
 {
        struct fsnotify_mark *lmark, *mark;
+       LIST_HEAD(to_free);
 
+       /*
+        * We have to be really careful here. Any time we drop mark_mutex,
+        * e.g. fsnotify_clear_marks_by_inode() can come in and free marks,
+        * even ones on our to_free list, so we have to hold mark_mutex even
+        * when accessing that list. And freeing a mark requires us to drop
+        * mark_mutex. So we can reliably free only the first mark in the
+        * list. That's why we first move the marks to free onto the to_free
+        * list in one go and then free the marks in to_free one by one.
+        */
        mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
        list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
-               if (mark->flags & flags) {
-                       fsnotify_get_mark(mark);
-                       fsnotify_destroy_mark_locked(mark, group);
-                       fsnotify_put_mark(mark);
-               }
+               if (mark->flags & flags)
+                       list_move(&mark->g_list, &to_free);
        }
        mutex_unlock(&group->mark_mutex);
+
+       while (1) {
+               mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+               if (list_empty(&to_free)) {
+                       mutex_unlock(&group->mark_mutex);
+                       break;
+               }
+               mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
+               fsnotify_get_mark(mark);
+               fsnotify_destroy_mark_locked(mark, group);
+               mutex_unlock(&group->mark_mutex);
+               fsnotify_put_mark(mark);
+       }
 }
 
 /*
index 1a35c6139656344516aacd59c7120f6fb877f2a2..0f5fd9db8194ef5d135f1896f6e2645a5f059cd8 100644 (file)
@@ -685,7 +685,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
 
        if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
                u64 s = i_size_read(inode);
-               sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) +
+               sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
                        (do_div(s, osb->s_clustersize) >> 9);
 
                ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
@@ -910,7 +910,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
                BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
 
                ret = blkdev_issue_zeroout(osb->sb->s_bdev,
-                               p_cpos << (osb->s_clustersize_bits - 9),
+                               (u64)p_cpos << (osb->s_clustersize_bits - 9),
                                zero_len_head >> 9, GFP_NOFS, false);
                if (ret < 0)
                        mlog_errno(ret);
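
Both hunks above fix the same 32-bit truncation: p_cpos is a 32-bit cluster number, so without the cast the left shift is evaluated in 32-bit arithmetic and the high bits are lost before the result is widened to sector_t. A minimal illustration, assuming 1 MiB clusters (s_clustersize_bits = 20, so a shift by 11):

	u32 p_cpos = 0x00500000;		/* a large cluster number */
	sector_t bad  = p_cpos << 11;		/* 32-bit shift: wraps to 0x80000000 */
	sector_t good = (u64)p_cpos << 11;	/* widened first: 0x280000000 */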
index 8b23aa2f52ddafe31be83b730d7219693222a1c5..23157e40dd740204bc10f9eaeb55ec08f2f0dfb4 100644 (file)
@@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
        osb->dc_work_sequence = osb->dc_wake_sequence;
 
        processed = osb->blocked_lock_count;
-       while (processed) {
-               BUG_ON(list_empty(&osb->blocked_lock_list));
-
+       /*
+        * Blocked lock processing in this loop might call iput, which can
+        * remove items from osb->blocked_lock_list. Downconvert up to
+        * 'processed' locks, but stop short if some were removed by
+        * ocfs2_mark_lockres_freeing while we were downconverting.
+        */
+       while (processed && !list_empty(&osb->blocked_lock_list)) {
                lockres = list_entry(osb->blocked_lock_list.next,
                                     struct ocfs2_lock_res, l_blocked_list);
                list_del_init(&lockres->l_blocked_list);
index 7e412ad748363489baad12cbb644b8074d78cfeb..270221fcef42cc42fcfdbc098b587b571be65a12 100644 (file)
@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
-               if (kinfo->si_code == BUS_MCEERR_AR ||
-                   kinfo->si_code == BUS_MCEERR_AO)
+               if (kinfo->si_signo == SIGBUS &&
+                   (kinfo->si_code == BUS_MCEERR_AR ||
+                    kinfo->si_code == BUS_MCEERR_AO))
                        err |= __put_user((short) kinfo->si_addr_lsb,
                                          &uinfo->ssi_addr_lsb);
 #endif
index 48db6a56975f5ebc874d19054828b70a5f4aaa15..5aa519711e0b6fcb1212a517e50be5afac8ed7d7 100644 (file)
@@ -691,7 +691,7 @@ struct drm_vblank_crtc {
        struct timer_list disable_timer;                /* delayed disable timer */
 
        /* vblank counter, protected by dev->vblank_time_lock for writes */
-       unsigned long count;
+       u32 count;
        /* vblank timestamps, protected by dev->vblank_time_lock for writes */
        struct timeval time[DRM_VBLANKTIME_RBSIZE];
 
index 57ca8cc383a615344498202384b1b814911bc766..3b4d8a4a23fb760867fc7d59ede2a3459eac2375 100644 (file)
@@ -743,8 +743,6 @@ struct drm_connector {
        uint8_t num_h_tile, num_v_tile;
        uint8_t tile_h_loc, tile_v_loc;
        uint16_t tile_h_size, tile_v_size;
-
-       struct list_head destroy_list;
 };
 
 /**
index c8fc187061de5fbd9fc8545f602a62baaa45b8cc..918aa68b5199d54501a2a9d68404e44388e9e04e 100644 (file)
@@ -168,6 +168,7 @@ struct drm_encoder_helper_funcs {
  * @get_modes: get mode list for this connector
  * @mode_valid: is this mode valid on the given connector? (optional)
  * @best_encoder: return the preferred encoder for this connector
+ * @atomic_best_encoder: atomic version of @best_encoder
  *
  * The helper operations are called by the mid-layer CRTC helper.
  */
@@ -176,6 +177,8 @@ struct drm_connector_helper_funcs {
        enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
                                           struct drm_display_mode *mode);
        struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
+       struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
+                                                  struct drm_connector_state *connector_state);
 };
 
 extern void drm_helper_disable_unused_functions(struct drm_device *dev);
index 799050198323e852c7cbb45e3c1236cc67d386ac..53c53c459b15c8207997da61234d0ab9ea2805ae 100644 (file)
@@ -347,6 +347,25 @@ static inline int drm_eld_mnl(const uint8_t *eld)
        return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
 }
 
+/**
+ * drm_eld_sad - Get ELD SAD structures.
+ * @eld: pointer to an eld memory structure with sad_count set
+ */
+static inline const uint8_t *drm_eld_sad(const uint8_t *eld)
+{
+       unsigned int ver, mnl;
+
+       ver = (eld[DRM_ELD_VER] & DRM_ELD_VER_MASK) >> DRM_ELD_VER_SHIFT;
+       if (ver != 2 && ver != 31)
+               return NULL;
+
+       mnl = drm_eld_mnl(eld);
+       if (mnl > 16)
+               return NULL;
+
+       return eld + DRM_ELD_CEA_SAD(mnl, 0);
+}
+
 /**
  * drm_eld_sad_count - Get ELD SAD count.
  * @eld: pointer to an eld memory structure with sad_count set
index 45c39a37f9249562761dc9615ffecf12ec194846..8bc073d297db2a233cf389d6c0656dec78c0445a 100644 (file)
        {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
index 6c78956aa47092440edb3a73e7b9389ac3a57558..d2992bfa17063a052a08a106b9105284ce4bfa4a 100644 (file)
@@ -385,8 +385,6 @@ enum {
        SATA_SSP                = 0x06, /* Software Settings Preservation */
        SATA_DEVSLP             = 0x09, /* Device Sleep */
 
-       SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
-
        /* feature values for SET_MAX */
        ATA_SET_MAX_ADDR        = 0x00,
        ATA_SET_MAX_PASSWD      = 0x01,
@@ -530,8 +528,6 @@ struct ata_bmdma_prd {
 #define ata_id_cdb_intr(id)    (((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
 #define ata_id_has_da(id)      ((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
 #define ata_id_has_devslp(id)  ((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
-#define ata_id_has_ncq_autosense(id) \
-                               ((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
 
 static inline bool ata_id_has_hipm(const u16 *id)
 {
@@ -720,20 +716,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
        return false;
 }
 
-static inline bool ata_id_has_sense_reporting(const u16 *id)
-{
-       if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
-               return false;
-       return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
-}
-
-static inline bool ata_id_sense_reporting_enabled(const u16 *id)
-{
-       if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
-               return false;
-       return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
-}
-
 /**
  *     ata_id_major_version    -       get ATA level of drive
  *     @id: Identify data
index cc008c338f5a9bcb66076da96e929d6104697373..84b783f277f761a0ef7b940bd1fbab37db60567e 100644 (file)
@@ -55,7 +55,8 @@ struct vm_fault;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
-extern void __init files_init(unsigned long);
+extern void __init files_init(void);
+extern void __init files_maxfiles_init(void);
 
 extern struct files_stat_struct files_stat;
 extern unsigned long get_max_files(void);
@@ -2245,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp);
 
 /* fs/dcache.c */
 extern void __init vfs_caches_init_early(void);
-extern void __init vfs_caches_init(unsigned long);
+extern void __init vfs_caches_init(void);
 
 extern struct kmem_cache *names_cachep;
 
index d9a366d24e3bb8736cc7acdaa4f541c9720747a6..6240063bdcac4c42da3f108e9b81c5800085c29b 100644 (file)
@@ -344,7 +344,7 @@ struct intel_iommu {
 
 #ifdef CONFIG_INTEL_IOMMU
        unsigned long   *domain_ids; /* bitmap of domains */
-       struct dmar_domain **domains; /* ptr to domains */
+       struct dmar_domain ***domains; /* ptr to domains */
        spinlock_t      lock; /* protect context, domain ids */
        struct root_entry *root_entry; /* virtual address */
 
index 92188b0225bb31f33eba9deacc4b9b88036c1d8e..51744bcf74eec7bee678b9e1f63c3a10f247350e 100644 (file)
@@ -484,6 +484,7 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
 extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
 extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
                                             void *vcpu_info);
+extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
 #endif
 
 /* Handling of unhandled and spurious interrupts: */
index 2e872f92dbac0cecc2c5b3a65fbe7ff8678e48d5..bf6f117fcf4d80cb7de6147e86c6ba19fa13febd 100644 (file)
@@ -1002,6 +1002,34 @@ static inline int page_mapped(struct page *page)
        return atomic_read(&(page)->_mapcount) >= 0;
 }
 
+/*
+ * Return true only if the page has been allocated with
+ * ALLOC_NO_WATERMARKS and the low watermark was not
+ * met implying that the system is under some pressure.
+ */
+static inline bool page_is_pfmemalloc(struct page *page)
+{
+       /*
+        * Page index cannot be this large so this must be
+        * a pfmemalloc page.
+        */
+       return page->index == -1UL;
+}
+
+/*
+ * Only to be called by the page allocator on a freshly allocated
+ * page.
+ */
+static inline void set_page_pfmemalloc(struct page *page)
+{
+       page->index = -1UL;
+}
+
+static inline void clear_page_pfmemalloc(struct page *page)
+{
+       page->index = 0;
+}
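+
The helpers above retire the dedicated page->pfmemalloc bool in favour of a sentinel in page->index, which occupies the same union slot; -1 is never a valid page offset, so it can unambiguously encode "allocated with ALLOC_NO_WATERMARKS". A sketch of the intended call pattern, with the allocator-side hook shown as an assumption (the exact call site lives in the page allocator's prep path):

	/* allocator side (sketch): record pressure state on a fresh page */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);

	/* consumer side: readers only ever test the sentinel */
	if (page_is_pfmemalloc(compound_head(page)))
		skb->pfmemalloc = true;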
+
 /*
  * Different kinds of faults, as returned by handle_mm_fault().
  * Used to decide whether a process gets delivered SIGBUS or
index 0038ac7466fd26e7562a026af68632573e3bb0ea..15549578d55998e5497c5da58a50fa1531e132d8 100644 (file)
@@ -63,15 +63,6 @@ struct page {
                union {
                        pgoff_t index;          /* Our offset within mapping. */
                        void *freelist;         /* sl[aou]b first free object */
-                       bool pfmemalloc;        /* If set by the page allocator,
-                                                * ALLOC_NO_WATERMARKS was set
-                                                * and the low watermark was not
-                                                * met implying that the system
-                                                * is under some pressure. The
-                                                * caller should try ensure
-                                                * this page is only used to
-                                                * free other pages.
-                                                */
                };
 
                union {
index f34e040b34e9ffbf0abe66b439571ec8902440ca..41c93844fb1d1ed5c0dbad77fe5a409557d66067 100644 (file)
@@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
         1 << PG_private | 1 << PG_private_2 | \
         1 << PG_writeback | 1 << PG_reserved | \
         1 << PG_slab    | 1 << PG_swapcache | 1 << PG_active | \
-        1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
+        1 << PG_unevictable | __PG_MLOCKED | \
         __PG_COMPOUND_LOCK)
 
 /*
  * Flags checked when a page is prepped for return by the page allocator.
- * Pages being prepped should not have any flags set.  It they are set,
+ * Pages being prepped should not have these flags set.  If they are set,
  * there has been a kernel bug or struct page corruption.
+ *
+ * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
+ * alloc-free cycle to prevent the page from being reused.
  */
-#define PAGE_FLAGS_CHECK_AT_PREP       ((1 << NR_PAGEFLAGS) - 1)
+#define PAGE_FLAGS_CHECK_AT_PREP       \
+       (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
 
 #define PAGE_FLAGS_PRIVATE                             \
        (1 << PG_private | 1 << PG_private_2)
index d6cdd6e87d53bcd1b4f390f61f73b1c91b076bdd..9b88536487e667b8727414e4131188db869711d4 100644 (file)
@@ -1602,20 +1602,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
        /*
-        * Propagate page->pfmemalloc to the skb if we can. The problem is
-        * that not all callers have unique ownership of the page. If
-        * pfmemalloc is set, we check the mapping as a mapping implies
-        * page->index is set (index and pfmemalloc share space).
-        * If it's a valid mapping, we cannot use page->pfmemalloc but we
-        * do not lose pfmemalloc information as the pages would not be
-        * allocated using __GFP_MEMALLOC.
+        * Propagate page pfmemalloc to the skb if we can. The problem is
+        * that not all callers have unique ownership of the page, so we
+        * rely on page_is_pfmemalloc() doing the right thing(tm).
         */
        frag->page.p              = page;
        frag->page_offset         = off;
        skb_frag_size_set(frag, size);
 
        page = compound_head(page);
-       if (page->pfmemalloc && !page->mapping)
+       if (page_is_pfmemalloc(page))
                skb->pfmemalloc = true;
 }
 
@@ -2263,7 +2259,7 @@ static inline struct page *dev_alloc_page(void)
 static inline void skb_propagate_pfmemalloc(struct page *page,
                                             struct sk_buff *skb)
 {
-       if (page && page->pfmemalloc)
+       if (page_is_pfmemalloc(page))
                skb->pfmemalloc = true;
 }
 
@@ -2884,11 +2880,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
  *
  * PHY drivers may accept clones of transmitted packets for
  * timestamping via their phy_driver.txtstamp method. These drivers
- * must call this function to return the skb back to the stack, with
- * or without a timestamp.
+ * must call this function to return the skb to the stack with a
+ * timestamp.
  *
  * @skb: clone of the the original outgoing packet
- * @hwtstamps: hardware time stamps, may be NULL if not available
+ * @hwtstamps: hardware time stamps
  *
  */
 void skb_complete_tx_timestamp(struct sk_buff *skb,
index 45534da57759a30d4a8c281bda094c8d4921dba3..644bdc61c387581c29a29d20c0cf1c3fc5085830 100644 (file)
@@ -74,8 +74,6 @@ enum rc_filter_type {
  * @input_dev: the input child device used to communicate events to userspace
  * @driver_type: specifies if protocol decoding is done in hardware or software
  * @idle: used to keep track of RX state
- * @encode_wakeup: wakeup filtering uses IR encode API, therefore the allowed
- *     wakeup protocols is the set of all raw encoders
  * @allowed_protocols: bitmask with the supported RC_BIT_* protocols
  * @enabled_protocols: bitmask with the enabled RC_BIT_* protocols
  * @allowed_wakeup_protocols: bitmask with the supported RC_BIT_* wakeup protocols
@@ -136,7 +134,6 @@ struct rc_dev {
        struct input_dev                *input_dev;
        enum rc_driver_type             driver_type;
        bool                            idle;
-       bool                            encode_wakeup;
        u64                             allowed_protocols;
        u64                             enabled_protocols;
        u64                             allowed_wakeup_protocols;
@@ -246,7 +243,6 @@ static inline void init_ir_raw_event(struct ir_raw_event *ev)
 #define US_TO_NS(usec)         ((usec) * 1000)
 #define MS_TO_US(msec)         ((msec) * 1000)
 #define MS_TO_NS(msec)         ((msec) * 1000 * 1000)
-#define NS_TO_US(nsec)         DIV_ROUND_UP(nsec, 1000L)
 
 void ir_raw_event_handle(struct rc_dev *dev);
 int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
@@ -254,9 +250,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type);
 int ir_raw_event_store_with_filter(struct rc_dev *dev,
                                struct ir_raw_event *ev);
 void ir_raw_event_set_idle(struct rc_dev *dev, bool idle);
-int ir_raw_encode_scancode(u64 protocols,
-                          const struct rc_scancode_filter *scancode,
-                          struct ir_raw_event *events, unsigned int max);
 
 static inline void ir_raw_event_reset(struct rc_dev *dev)
 {
index 22a44c2f596380856acb1bccf95f14d0c2144994..c192e1b46cdc27372dc56ba704aa630ff113dfd6 100644 (file)
@@ -139,6 +139,7 @@ enum vb2_io_modes {
  * @VB2_BUF_STATE_PREPARING:   buffer is being prepared in videobuf
  * @VB2_BUF_STATE_PREPARED:    buffer prepared in videobuf and by the driver
  * @VB2_BUF_STATE_QUEUED:      buffer queued in videobuf, but not in driver
+ * @VB2_BUF_STATE_REQUEUEING:  re-queue a buffer to the driver
  * @VB2_BUF_STATE_ACTIVE:      buffer queued in driver and possibly used
  *                             in a hardware operation
  * @VB2_BUF_STATE_DONE:                buffer returned from driver to videobuf, but
@@ -152,6 +153,7 @@ enum vb2_buffer_state {
        VB2_BUF_STATE_PREPARING,
        VB2_BUF_STATE_PREPARED,
        VB2_BUF_STATE_QUEUED,
+       VB2_BUF_STATE_REQUEUEING,
        VB2_BUF_STATE_ACTIVE,
        VB2_BUF_STATE_DONE,
        VB2_BUF_STATE_ERROR,
index 4942710ef720ea5716e8cc6ebf0df941e22500ba..8d1d7fa67ec48bad6872be07258066f9410eec6e 100644 (file)
@@ -28,7 +28,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
                                   u64 * info_out);
 
 extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
-extern void scsi_set_sense_information(u8 *buf, u64 info);
 
 extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
 
index 1ab2813273cd1cf7d0116ca3d64b08de18013304..8cb3a7ecd6f89a51871e830e6ca3d8e7dbc79587 100644 (file)
@@ -51,11 +51,6 @@ struct tegra_smmu_swgroup {
        unsigned int reg;
 };
 
-struct tegra_smmu_ops {
-       void (*flush_dcache)(struct page *page, unsigned long offset,
-                            size_t size);
-};
-
 struct tegra_smmu_soc {
        const struct tegra_mc_client *clients;
        unsigned int num_clients;
@@ -66,9 +61,8 @@ struct tegra_smmu_soc {
        bool supports_round_robin_arbitration;
        bool supports_request_limit;
 
+       unsigned int num_tlb_lines;
        unsigned int num_asids;
-
-       const struct tegra_smmu_ops *ops;
 };
 
 struct tegra_mc;
index 865a141b118b15874e27b0e56473bac8854c4823..427bc41df3aef3a3f931bd94830f027bafea2661 100644 (file)
@@ -141,6 +141,8 @@ struct snd_soc_tplg_ops {
        int io_ops_count;
 };
 
+#ifdef CONFIG_SND_SOC_TOPOLOGY
+
 /* gets a pointer to data from the firmware block header */
 static inline const void *snd_soc_tplg_get_data(struct snd_soc_tplg_hdr *hdr)
 {
@@ -165,4 +167,14 @@ int snd_soc_tplg_widget_bind_event(struct snd_soc_dapm_widget *w,
        const struct snd_soc_tplg_widget_events *events, int num_events,
        u16 event_type);
 
+#else
+
+static inline int snd_soc_tplg_component_remove(struct snd_soc_component *comp,
+                                               u32 index)
+{
+       return 0;
+}
+
+#endif
+
 #endif
index efe3443572baa5a3650d638806fd60ef09a90f68..413417f3707bbfde6375dfc14098bc1e099b5b70 100644 (file)
 #define PCI_MSIX_PBA           8       /* Pending Bit Array offset */
 #define  PCI_MSIX_PBA_BIR      0x00000007 /* BAR index */
 #define  PCI_MSIX_PBA_OFFSET   0xfffffff8 /* Offset into specified BAR */
+#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
 #define PCI_CAP_MSIX_SIZEOF    12      /* size of MSIX registers */
 
 /* MSI-X Table entry format */
index 785c5ca0994b5ab41e43fcfe6553197f6633dd06..247c50bd60f0d067ad8884dbe4574adf0bfcf596 100644 (file)
 #include <linux/types.h>
 #include <sound/asound.h>
 
+#ifndef __KERNEL__
+#error This API is an early revision and not enabled in the current
+#error kernel release; it will be enabled in a future kernel version
+#error with incompatible changes to what is here.
+#endif
+
 /*
  * Maximum number of channels topology kcontrol can represent.
  */
@@ -77,7 +83,7 @@
 #define SND_SOC_TPLG_NUM_TEXTS         16
 
 /* ABI version */
-#define SND_SOC_TPLG_ABI_VERSION       0x2
+#define SND_SOC_TPLG_ABI_VERSION       0x3
 
 /* Max size of TLV data */
 #define SND_SOC_TPLG_TLV_SIZE          32
 #define SND_SOC_TPLG_TYPE_PCM          7
 #define SND_SOC_TPLG_TYPE_MANIFEST     8
 #define SND_SOC_TPLG_TYPE_CODEC_LINK   9
-#define SND_SOC_TPLG_TYPE_MAX  SND_SOC_TPLG_TYPE_CODEC_LINK
+#define SND_SOC_TPLG_TYPE_PDATA                10
+#define SND_SOC_TPLG_TYPE_MAX  SND_SOC_TPLG_TYPE_PDATA
 
 /* vendor block IDs - please add new vendor types to end */
 #define SND_SOC_TPLG_TYPE_VENDOR_FW    1000
@@ -137,11 +144,19 @@ struct snd_soc_tplg_private {
 /*
  * Kcontrol TLV data.
  */
+struct snd_soc_tplg_tlv_dbscale {
+       __le32 min;
+       __le32 step;
+       __le32 mute;
+} __attribute__((packed));
+
 struct snd_soc_tplg_ctl_tlv {
-       __le32 size;    /* in bytes aligned to 4 */
-       __le32 numid;   /* control element numeric identification */
-       __le32 count;   /* number of elem in data array */
-       __le32 data[SND_SOC_TPLG_TLV_SIZE];
+       __le32 size;    /* in bytes of this structure */
+       __le32 type;    /* SNDRV_CTL_TLVT_*, type of TLV */
+       union {
+               __le32 data[SND_SOC_TPLG_TLV_SIZE];
+               struct snd_soc_tplg_tlv_dbscale scale;
+       };
 } __attribute__((packed));
 
 /*
@@ -155,9 +170,11 @@ struct snd_soc_tplg_channel {
 } __attribute__((packed));
 
 /*
- * Kcontrol Operations IDs
+ * Generic Operations IDs, for binding Kcontrol or Bytes ext ops
+ * Kcontrol ops need get/put/info.
+ * Bytes ext ops need get/put.
  */
-struct snd_soc_tplg_kcontrol_ops_id {
+struct snd_soc_tplg_io_ops {
        __le32 get;
        __le32 put;
        __le32 info;
@@ -171,8 +188,8 @@ struct snd_soc_tplg_ctl_hdr {
        __le32 type;
        char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
        __le32 access;
-       struct snd_soc_tplg_kcontrol_ops_id ops;
-       __le32 tlv_size;        /* non zero means control has TLV data */
+       struct snd_soc_tplg_io_ops ops;
+       struct snd_soc_tplg_ctl_tlv tlv;
 } __attribute__((packed));
 
 /*
@@ -238,6 +255,7 @@ struct snd_soc_tplg_manifest {
        __le32 graph_elems;     /* number of graph elements */
        __le32 dai_elems;       /* number of DAI elements */
        __le32 dai_link_elems;  /* number of DAI link elements */
+       struct snd_soc_tplg_private priv;
 } __attribute__((packed));
 
 /*
@@ -259,7 +277,6 @@ struct snd_soc_tplg_mixer_control {
        __le32 invert;
        __le32 num_channels;
        struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN];
-       struct snd_soc_tplg_ctl_tlv tlv;
        struct snd_soc_tplg_private priv;
 } __attribute__((packed));
 
@@ -303,6 +320,7 @@ struct snd_soc_tplg_bytes_control {
        __le32 mask;
        __le32 base;
        __le32 num_regs;
+       struct snd_soc_tplg_io_ops ext_ops;
        struct snd_soc_tplg_private priv;
 } __attribute__((packed));
 
@@ -347,6 +365,7 @@ struct snd_soc_tplg_dapm_widget {
        __le32 reg;             /* negative reg = no direct dapm */
        __le32 shift;           /* bits to shift */
        __le32 mask;            /* non-shifted mask */
+       __le32 subseq;          /* sort within widget type */
        __u32 invert;           /* invert the power bit */
        __u32 ignore_suspend;   /* kept enabled over suspend */
        __u16 event_flags;
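
The v3 ABI folds the TLV into the control header and gives it a typed payload, so a userspace topology generator now fills the type and the dB-scale members directly. A minimal sketch, assuming the patched uapi header is available as <sound/asoc.h>, that SNDRV_CTL_TLVT_DB_SCALE comes from <sound/tlv.h>, and a little-endian host so plain assignment stands in for explicit cpu_to_le32(); the values are illustrative:

    #include <string.h>
    #include <sound/asoc.h>
    #include <sound/tlv.h>

    static void fill_volume_tlv(struct snd_soc_tplg_ctl_tlv *tlv)
    {
            memset(tlv, 0, sizeof(*tlv));
            tlv->size = sizeof(*tlv);              /* in bytes of this structure */
            tlv->type = SNDRV_CTL_TLVT_DB_SCALE;   /* selects the .scale member  */
            tlv->scale.min  = -6000;               /* -60.00 dB, 0.01 dB units   */
            tlv->scale.step = 50;                  /*   0.50 dB per step         */
            tlv->scale.mute = 1;                   /* lowest step mutes          */
    }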
index c5d5626289cee3a46f7e1b7d2441ca0496ba4471..56506553d4d80dff814b75f45db6db280fd0dea7 100644 (file)
@@ -656,7 +656,7 @@ asmlinkage __visible void __init start_kernel(void)
        key_init();
        security_init();
        dbg_late_init();
-       vfs_caches_init(totalram_pages);
+       vfs_caches_init();
        signals_init();
        /* rootfs populating might need page-writeback */
        page_writeback_init();
index a24ba9fe5bb8892dfaa7452fe78f9ef68d1d97fc..161a1807e6efb0fe8e773c41dafc8b4a76b38f71 100644 (file)
@@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
                if (!leaf)
                        return -ENOMEM;
                INIT_LIST_HEAD(&leaf->msg_list);
-               info->qsize += sizeof(*leaf);
        }
        leaf->priority = msg->m_type;
        rb_link_node(&leaf->rb_node, parent, p);
@@ -187,7 +186,6 @@ try_again:
                             "lazy leaf delete!\n");
                rb_erase(&leaf->rb_node, &info->msg_tree);
                if (info->node_cache) {
-                       info->qsize -= sizeof(*leaf);
                        kfree(leaf);
                } else {
                        info->node_cache = leaf;
@@ -200,7 +198,6 @@ try_again:
                if (list_empty(&leaf->msg_list)) {
                        rb_erase(&leaf->rb_node, &info->msg_tree);
                        if (info->node_cache) {
-                               info->qsize -= sizeof(*leaf);
                                kfree(leaf);
                        } else {
                                info->node_cache = leaf;
@@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
-               info->qsize += sizeof(*new_leaf);
                new_leaf = NULL;
        } else {
                kfree(new_leaf);
@@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
-               info->qsize += sizeof(*new_leaf);
        } else {
                kfree(new_leaf);
        }
index bc3d530cb23efacb2e5695ad85a9bd3898524fa2..b471e5a3863ddbca70f2bf4dee22f40df0345fbe 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -252,6 +252,16 @@ static void sem_rcu_free(struct rcu_head *head)
        ipc_rcu_free(head);
 }
 
+/*
+ * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
+ * are only control barriers.
+ * The code must pair with spin_unlock(&sem->lock) or
+ * spin_unlock(&sem_perm.lock), so a control barrier alone is insufficient.
+ *
+ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+ */
+#define ipc_smp_acquire__after_spin_is_unlocked()      smp_rmb()
+
 /*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
                sem = sma->sem_base + i;
                spin_unlock_wait(&sem->lock);
        }
+       ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                /* Then check that the global lock is free */
                if (!spin_is_locked(&sma->sem_perm.lock)) {
                        /*
-                        * The ipc object lock check must be visible on all
-                        * cores before rechecking the complex count.  Otherwise
-                        * we can race with  another thread that does:
+                        * We need a memory barrier with acquire semantics,
+                        * otherwise we can race with another thread that does:
                         *      complex_count++;
                         *      spin_unlock(sem_perm.lock);
                         */
-                       smp_rmb();
+                       ipc_smp_acquire__after_spin_is_unlocked();
 
                        /*
                         * Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
                rcu_read_lock();
                un = list_entry_rcu(ulp->list_proc.next,
                                    struct sem_undo, list_proc);
-               if (&un->list_proc == &ulp->list_proc)
-                       semid = -1;
-                else
-                       semid = un->semid;
+               if (&un->list_proc == &ulp->list_proc) {
+                       /*
+                        * We must wait for freeary() before freeing this ulp,
+                        * in case we raced with the last sem_undo. There is
+                        * a small window in which we exit before freeary()
+                        * has finished unlocking sem_undo_list.
+                        */
+                       spin_unlock_wait(&ulp->lock);
+                       rcu_read_unlock();
+                       break;
+               }
+               spin_lock(&ulp->lock);
+               semid = un->semid;
+               spin_unlock(&ulp->lock);
 
+               /* exit_sem raced with IPC_RMID, nothing to do */
                if (semid == -1) {
                        rcu_read_unlock();
-                       break;
+                       continue;
                }
 
-               sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
+               sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
                /* exit_sem raced with IPC_RMID, nothing to do */
                if (IS_ERR(sma)) {
                        rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
                ipc_assert_locked_object(&sma->sem_perm);
                list_del(&un->list_id);
 
-               spin_lock(&ulp->lock);
+               /* we are the last process using this ulp, acquiring ulp->lock
+                * isn't required. Besides that, we are also protected against
+                * IPC_RMID as we hold sma->sem_perm lock now
+                */
                list_del_rcu(&un->list_proc);
-               spin_unlock(&ulp->lock);
 
                /* perform adjustments registered in un */
                for (i = 0; i < sma->sem_nsems; i++) {
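
The pairing the new ipc_smp_acquire__after_spin_is_unlocked() macro documents, reduced to its shape (illustrative, not literal sem.c code):

    /*
     *   CPU 0 (writer)                  CPU 1 (lock-free reader)
     *   --------------                  ------------------------
     *   complex_count++;                if (!spin_is_locked(&lock)) {
     *   spin_unlock(&lock);                     smp_rmb();
     *                                           re-read complex_count;
     *                                   }
     *
     * Without the smp_rmb(), CPU 1 may observe the lock as free yet still
     * load a stale complex_count: !spin_is_locked() is only a control
     * dependency, which constrains later stores but not later loads.
     */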
index 06e5cf2fe019faee43aa9f8ca9f17cad4973b74d..4aef24d91b633e12275cea64a380df4543fc796b 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
                if  ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = VM_NORESERVE;
-               file = shmem_file_setup(name, size, acctflag);
+               file = shmem_kernel_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
index ee14e3a35a2994399edf176e7775e778c395e592..f0acff0f66c91380412dcbc1c899c94b1d3236b0 100644 (file)
@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
        spin_unlock_irq(&callback_lock);
 
        /* use trialcs->mems_allowed as a temp variable */
-       update_nodemasks_hier(cs, &cs->mems_allowed);
+       update_nodemasks_hier(cs, &trialcs->mems_allowed);
 done:
        return retval;
 }
index d3dae3419b99566c127f1682b29f39bb184bbdb1..e6feb51141340a99a248fea0ad1dc17402b0dbdb 100644 (file)
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
 
        perf_pmu_disable(event->pmu);
 
-       event->tstamp_running += tstamp - event->tstamp_stopped;
-
        perf_set_shadow_time(event, ctx, tstamp);
 
        perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
                goto out;
        }
 
+       event->tstamp_running += tstamp - event->tstamp_stopped;
+
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
        if (!ctx->nr_active++)
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
                perf_event_for_each_child(sibling, func);
 }
 
-static int perf_event_period(struct perf_event *event, u64 __user *arg)
-{
-       struct perf_event_context *ctx = event->ctx;
-       int ret = 0, active;
+struct period_event {
+       struct perf_event *event;
        u64 value;
+};
 
-       if (!is_sampling_event(event))
-               return -EINVAL;
-
-       if (copy_from_user(&value, arg, sizeof(value)))
-               return -EFAULT;
-
-       if (!value)
-               return -EINVAL;
+static int __perf_event_period(void *info)
+{
+       struct period_event *pe = info;
+       struct perf_event *event = pe->event;
+       struct perf_event_context *ctx = event->ctx;
+       u64 value = pe->value;
+       bool active;
 
-       raw_spin_lock_irq(&ctx->lock);
+       raw_spin_lock(&ctx->lock);
        if (event->attr.freq) {
-               if (value > sysctl_perf_event_sample_rate) {
-                       ret = -EINVAL;
-                       goto unlock;
-               }
-
                event->attr.sample_freq = value;
        } else {
                event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
                event->pmu->start(event, PERF_EF_RELOAD);
                perf_pmu_enable(ctx->pmu);
        }
+       raw_spin_unlock(&ctx->lock);
 
-unlock:
+       return 0;
+}
+
+static int perf_event_period(struct perf_event *event, u64 __user *arg)
+{
+       struct period_event pe = { .event = event, };
+       struct perf_event_context *ctx = event->ctx;
+       struct task_struct *task;
+       u64 value;
+
+       if (!is_sampling_event(event))
+               return -EINVAL;
+
+       if (copy_from_user(&value, arg, sizeof(value)))
+               return -EFAULT;
+
+       if (!value)
+               return -EINVAL;
+
+       if (event->attr.freq && value > sysctl_perf_event_sample_rate)
+               return -EINVAL;
+
+       task = ctx->task;
+       pe.value = value;
+
+       if (!task) {
+               cpu_function_call(event->cpu, __perf_event_period, &pe);
+               return 0;
+       }
+
+retry:
+       if (!task_function_call(task, __perf_event_period, &pe))
+               return 0;
+
+       raw_spin_lock_irq(&ctx->lock);
+       if (ctx->is_active) {
+               raw_spin_unlock_irq(&ctx->lock);
+               task = ctx->task;
+               goto retry;
+       }
+
+       __perf_event_period(&pe);
        raw_spin_unlock_irq(&ctx->lock);
 
-       return ret;
+       return 0;
 }
 
 static const struct file_operations perf_fops;
@@ -4740,12 +4775,20 @@ static const struct file_operations perf_fops = {
  * to user-space before waking everybody up.
  */
 
+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+       /* only the parent has fasync state */
+       if (event->parent)
+               event = event->parent;
+       return &event->fasync;
+}
+
 void perf_event_wakeup(struct perf_event *event)
 {
        ring_buffer_wakeup(event);
 
        if (event->pending_kill) {
-               kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+               kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
                event->pending_kill = 0;
        }
 }
@@ -6124,7 +6167,7 @@ static int __perf_event_overflow(struct perf_event *event,
        else
                perf_event_output(event, data, regs);
 
-       if (event->fasync && event->pending_kill) {
+       if (*perf_event_fasync(event) && event->pending_kill) {
                event->pending_wakeup = 1;
                irq_work_queue(&event->pending);
        }
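
The perf_event_period() rewrite above follows the usual perf idiom for mutating a live event: run the update on the CPU that currently owns the context, retrying when the target task gets scheduled in between the check and the call. A hedged sketch of the shape, with __do_update standing in for __perf_event_period(); cpu_function_call() and task_function_call() are the file-local helpers the patch itself uses:

    static int update_on_owner(struct perf_event *event, void *info)
    {
            struct perf_event_context *ctx = event->ctx;

            if (!ctx->task) {
                    /* CPU-bound context: run on that CPU via IPI */
                    cpu_function_call(event->cpu, __do_update, info);
                    return 0;
            }
    again:
            if (!task_function_call(ctx->task, __do_update, info))
                    return 0;               /* ran while the task was on-CPU */

            raw_spin_lock_irq(&ctx->lock);
            if (ctx->is_active) {
                    /* task scheduled in under us: drop the lock and retry */
                    raw_spin_unlock_irq(&ctx->lock);
                    goto again;
            }
            __do_update(info);              /* context inactive: update locally */
            raw_spin_unlock_irq(&ctx->lock);
            return 0;
    }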
index b2be01b1aa9dcb7a70792fa381c264b229a106d0..c8aa3f75bc4db8ad7a2242aae6406bfd6f86f8c5 100644 (file)
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
                rb->aux_priv = NULL;
        }
 
-       for (pg = 0; pg < rb->aux_nr_pages; pg++)
-               rb_free_aux_page(rb, pg);
+       if (rb->aux_nr_pages) {
+               for (pg = 0; pg < rb->aux_nr_pages; pg++)
+                       rb_free_aux_page(rb, pg);
 
-       kfree(rb->aux_pages);
-       rb->aux_nr_pages = 0;
+               kfree(rb->aux_pages);
+               rb->aux_nr_pages = 0;
+       }
 }
 
 void rb_free_aux(struct ring_buffer *rb)
index 27f4332c7f84ea8b3d7ec8bb3466a64f225a8487..ae216824e8ca9224c4b76f225ef58d664a3e1726 100644 (file)
@@ -984,6 +984,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
        return -ENOSYS;
 }
 
+/**
+ * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
+ * @data:      Pointer to interrupt specific data
+ * @type:      IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
+ *
+ * Conditional, as the underlying parent chip might not implement it.
+ */
+int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
+{
+       data = data->parent_data;
+
+       if (data->chip->irq_set_type)
+               return data->chip->irq_set_type(data, type);
+
+       return -ENOSYS;
+}
+
 /**
  * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
  * @data:      Pointer to interrupt specific data
@@ -997,7 +1014,7 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data)
                if (data->chip && data->chip->irq_retrigger)
                        return data->chip->irq_retrigger(data);
 
-       return -ENOSYS;
+       return 0;
 }
 
 /**
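
With irq_chip_set_type_parent() available, a child irqchip in a hierarchical domain can forward trigger-type configuration straight to its parent by plugging the helper into its ops, alongside the existing parent helpers. A sketch (the chip name is illustrative):

    static struct irq_chip my_child_chip = {
            .name          = "MYCHIP",
            .irq_mask      = irq_chip_mask_parent,
            .irq_unmask    = irq_chip_unmask_parent,
            .irq_retrigger = irq_chip_retrigger_hierarchy,
            .irq_set_type  = irq_chip_set_type_parent,      /* new helper */
    };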
index 10e489c448fe4e934e2c203ca2aa7a8d0679bb5e..fdea0bee7b5a4d5e2fcf43ee3b92e1a37dea6c71 100644 (file)
@@ -97,6 +97,7 @@ bool kthread_should_park(void)
 {
        return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
 }
+EXPORT_SYMBOL_GPL(kthread_should_park);
 
 /**
  * kthread_freezable_should_stop - should this freezable kthread return now?
@@ -171,6 +172,7 @@ void kthread_parkme(void)
 {
        __kthread_parkme(to_kthread(current));
 }
+EXPORT_SYMBOL_GPL(kthread_parkme);
 
 static int kthread(void *_create)
 {
@@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k)
        if (kthread)
                __kthread_unpark(k, kthread);
 }
+EXPORT_SYMBOL_GPL(kthread_unpark);
 
 /**
  * kthread_park - park a thread created by kthread_create().
@@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k)
        }
        return ret;
 }
+EXPORT_SYMBOL_GPL(kthread_park);
 
 /**
  * kthread_stop - stop a thread created by kthread_create().
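
With the park API exported, a module's worker thread can follow the canonical parkable-loop shape. A sketch (do_one_work_item() is a hypothetical helper):

    static int my_worker(void *data)
    {
            while (!kthread_should_stop()) {
                    if (kthread_should_park())
                            kthread_parkme();       /* blocks until unparked */
                    do_one_work_item(data);
            }
            return 0;
    }

A controller would call kthread_park(task) before, say, a hotplug-style transition and kthread_unpark(task) once it is safe to run again.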
index 04ab18151cc8fa174a5859124ee07c144f33d505..df19ae4debd09c134d438b57e4ead7c71462c2b6 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/hash.h>
 #include <linux/bootmem.h>
+#include <linux/debug_locks.h>
 
 /*
  * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
        struct __qspinlock *l = (void *)lock;
        struct pv_node *node;
+       u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
 
        /*
         * We must not unlock if SLOW, because in that case we must first
         * unhash. Otherwise it would be possible to have multiple @lock
         * entries, which would be BAD.
         */
-       if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+       if (likely(lockval == _Q_LOCKED_VAL))
                return;
 
+       if (unlikely(lockval != _Q_SLOW_VAL)) {
+               if (debug_locks_silent)
+                       return;
+               WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
+               return;
+       }
+
        /*
         * Since the above failed to release, this must be the SLOW path.
         * Therefore start by looking up the blocked node and unhashing it.
index 4d2b82e610e2a48f429a700f0529d6dc5942700a..b86b7bf1be388d72fe92fb6038b4a67b4710df1f 100644 (file)
@@ -602,13 +602,16 @@ const struct kernel_symbol *find_symbol(const char *name,
 }
 EXPORT_SYMBOL_GPL(find_symbol);
 
-/* Search for module by name: must hold module_mutex. */
+/*
+ * Search for module by name: must hold module_mutex (or preempt disabled
+ * for read-only access).
+ */
 static struct module *find_module_all(const char *name, size_t len,
                                      bool even_unformed)
 {
        struct module *mod;
 
-       module_assert_mutex();
+       module_assert_mutex_or_preempt();
 
        list_for_each_entry(mod, &modules, list) {
                if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
@@ -621,6 +624,7 @@ static struct module *find_module_all(const char *name, size_t len,
 
 struct module *find_module(const char *name)
 {
+       module_assert_mutex();
        return find_module_all(name, strlen(name), false);
 }
 EXPORT_SYMBOL_GPL(find_module);
index 836df8dac6ccd1230f21d20dc610429538cc59a1..0f6bbbe77b46c092d0de31e0c9eec8a0f17e6791 100644 (file)
@@ -2748,12 +2748,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
                 * Other callers might not initialize the si_lsb field,
                 * so check explicitly for the right codes here.
                 */
-               if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+               if (from->si_signo == SIGBUS &&
+                   (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
                        err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
 #endif
 #ifdef SEGV_BNDERR
-               err |= __put_user(from->si_lower, &to->si_lower);
-               err |= __put_user(from->si_upper, &to->si_upper);
+               if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
+                       err |= __put_user(from->si_lower, &to->si_lower);
+                       err |= __put_user(from->si_upper, &to->si_upper);
+               }
 #endif
                break;
        case __SI_CHLD:
@@ -3017,7 +3020,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
                        int, sig,
                        struct compat_siginfo __user *, uinfo)
 {
-       siginfo_t info;
+       siginfo_t info = {};
        int ret = copy_siginfo_from_user32(&info, uinfo);
        if (unlikely(ret))
                return ret;
@@ -3061,7 +3064,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
                        int, sig,
                        struct compat_siginfo __user *, uinfo)
 {
-       siginfo_t info;
+       siginfo_t info = {};
 
        if (copy_siginfo_from_user32(&info, uinfo))
                return -EFAULT;
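
Why the "= {}" matters: copy_siginfo_from_user32() only writes the fields visible through the 32-bit layout, so without zero-initialization the remaining bytes of the on-stack siginfo keep old kernel stack contents, which the delivery path could later copy out. Illustrative:

    siginfo_t leaky;        /* uninitialized: stack garbage in padding and
                             * in fields the partial copy never writes     */
    siginfo_t safe = {};    /* fully zeroed before the partial user copy   */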
index 5e097fa9faf7016470b8283931023a15d20ed97d..84190f02b521c9fc77cee6e6a6722000939ee470 100644 (file)
@@ -807,8 +807,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
-                       timer->flags &= ~TIMER_BASEMASK;
-                       timer->flags |= base->cpu;
+                       WRITE_ONCE(timer->flags,
+                                  (timer->flags & ~TIMER_BASEMASK) | base->cpu);
                }
        }
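
Collapsing the two read-modify-write steps into one WRITE_ONCE() publishes the final flags word in a single store, so a lockless reader can never observe the window in which the base bits were cleared. The matching reader side must use READ_ONCE(); a hedged sketch of the pairing, with hypothetical helper names:

    static void timer_publish_base(struct timer_list *timer, u32 cpu)
    {
            /* one store: readers never see TIMER_BASEMASK momentarily zero */
            WRITE_ONCE(timer->flags, (timer->flags & ~TIMER_BASEMASK) | cpu);
    }

    static u32 timer_read_flags(struct timer_list *timer)
    {
            /* lockless reader side of the pairing */
            return READ_ONCE(timer->flags);
    }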
 
index df30632f0bef9ec1c36a48d83a6eb87cd18ee405..ff19f66d3f7fbd635a44cc03614b5fbe4485cc56 100644 (file)
@@ -119,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
        unsigned long align_mask = 0;
 
        if (align_order > 0)
-               align_mask = 0xffffffffffffffffl >> (64 - align_order);
+               align_mask = ~0ul >> (BITS_PER_LONG - align_order);
 
        /* Sanity check */
        if (unlikely(npages == 0)) {
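
A worked example of the mask computation above (illustrative):

    /*
     * With align_order == 5:
     *
     *   64-bit build:  ~0ul >> (64 - 5)  = 0x1f   (low five bits set)
     *   32-bit build:  ~0ul >> (32 - 5)  = 0x1f   (same mask)
     *
     * Writing BITS_PER_LONG instead of a hard-coded 64 keeps the shift
     * within the width of unsigned long on every architecture, instead of
     * relying on a 64-bit literal being truncated correctly.
     */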
index 1132d733556dbc330d32eda5460f55e6e067b627..17c75a4246c8bbab8b56fe4d562cd85ea670a21f 100644 (file)
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,7 +16,7 @@ struct cma {
 extern struct cma cma_areas[MAX_CMA_AREAS];
 extern unsigned cma_area_count;
 
-static unsigned long cma_bitmap_maxno(struct cma *cma)
+static inline unsigned long cma_bitmap_maxno(struct cma *cma)
 {
        return cma->count >> cma->order_per_bit;
 }
index c107094f79bae9ee895bd6bf30976d900f16c141..097c7a4bfbd9f13f4845acae80d73aa7b0e66fb2 100644 (file)
@@ -1676,12 +1676,7 @@ static void __split_huge_page_refcount(struct page *page,
                /* after clearing PageTail the gup refcount can be released */
                smp_mb__after_atomic();
 
-               /*
-                * retain hwpoison flag of the poisoned tail page:
-                *   fix for the unsuitable process killed on Guest Machine(KVM)
-                *   by the memory-failure.
-                */
-               page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
+               page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
                page_tail->flags |= (page->flags &
                                     ((1L << PG_referenced) |
                                      (1L << PG_swapbacked) |
index 6c513a63ea84c3c7ffd41201b7a419ff7b6dfd5d..7b28e9cdf1c7686428fe49802fced44088043555 100644 (file)
@@ -2,7 +2,7 @@
  * This file contains shadow memory manipulation code.
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  *
  * Some of code borrowed from https://github.com/xairy/linux by
  *        Andrey Konovalov <adech.fo@gmail.com>
index 680ceedf810ab4c9cd08c9929f5d445de9f5aa6a..e07c94fbd0ac5a141ecf95ab7d39d046fea13e67 100644 (file)
@@ -2,7 +2,7 @@
  * This file contains error reporting code.
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  *
  * Some of code borrowed from https://github.com/xairy/linux by
  *        Andrey Konovalov <adech.fo@gmail.com>
index c53543d892828e75796239d6ce36afa90203085b..1f4446a90cef07c67ee1082b83f0ca87ebfefea1 100644 (file)
@@ -909,6 +909,18 @@ int get_hwpoison_page(struct page *page)
         * directly for tail pages.
         */
        if (PageTransHuge(head)) {
+               /*
+                * A non-anonymous thp exists only transiently during
+                * allocation/free; we can't handle that correctly, so give
+                * up on it. That beats triggering a BUG_ON when the kernel
+                * later touches the "partially handled" page.
+                */
+               if (!PageAnon(head)) {
+                       pr_err("MCE: %#lx: non anonymous thp\n",
+                               page_to_pfn(page));
+                       return 0;
+               }
+
                if (get_page_unless_zero(head)) {
                        if (PageTail(page))
                                get_page(page);
@@ -1134,17 +1146,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
        }
 
        if (!PageHuge(p) && PageTransHuge(hpage)) {
-               if (!PageAnon(hpage)) {
-                       pr_err("MCE: %#lx: non anonymous thp\n", pfn);
-                       if (TestClearPageHWPoison(p))
-                               atomic_long_sub(nr_pages, &num_poisoned_pages);
-                       put_page(p);
-                       if (p != hpage)
-                               put_page(hpage);
-                       return -EBUSY;
-               }
-               if (unlikely(split_huge_page(hpage))) {
-                       pr_err("MCE: %#lx: thp split failed\n", pfn);
+               if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
+                       if (!PageAnon(hpage))
+                               pr_err("MCE: %#lx: non anonymous thp\n", pfn);
+                       else
+                               pr_err("MCE: %#lx: thp split failed\n", pfn);
                        if (TestClearPageHWPoison(p))
                                atomic_long_sub(nr_pages, &num_poisoned_pages);
                        put_page(p);
@@ -1209,9 +1215,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
        if (!PageHWPoison(p)) {
                printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
                atomic_long_sub(nr_pages, &num_poisoned_pages);
+               unlock_page(hpage);
                put_page(hpage);
-               res = 0;
-               goto out;
+               return 0;
        }
        if (hwpoison_filter(p)) {
                if (TestClearPageHWPoison(p))
@@ -1535,6 +1541,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
                 */
                ret = __get_any_page(page, pfn, 0);
                if (!PageLRU(page)) {
+                       /* Drop page reference which is from __get_any_page() */
+                       put_page(page);
                        pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
                                pfn, page->flags);
                        return -EIO;
@@ -1564,13 +1572,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
        unlock_page(hpage);
 
        ret = isolate_huge_page(hpage, &pagelist);
-       if (ret) {
-               /*
-                * get_any_page() and isolate_huge_page() takes a refcount each,
-                * so need to drop one here.
-                */
-               put_page(hpage);
-       } else {
+       /*
+        * get_any_page() and isolate_huge_page() take a refcount each,
+        * so we need to drop one here.
+        */
+       put_page(hpage);
+       if (!ret) {
                pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
                return -EBUSY;
        }
@@ -1656,6 +1663,8 @@ static int __soft_offline_page(struct page *page, int flags)
                inc_zone_page_state(page, NR_ISOLATED_ANON +
                                        page_is_file_cache(page));
                list_add(&page->lru, &pagelist);
+               if (!TestSetPageHWPoison(page))
+                       atomic_long_inc(&num_poisoned_pages);
                ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
                                        MIGRATE_SYNC, MR_MEMORY_FAILURE);
                if (ret) {
@@ -1670,9 +1679,8 @@ static int __soft_offline_page(struct page *page, int flags)
                                pfn, ret, page->flags);
                        if (ret > 0)
                                ret = -EIO;
-               } else {
-                       SetPageHWPoison(page);
-                       atomic_long_inc(&num_poisoned_pages);
+                       if (TestClearPageHWPoison(page))
+                               atomic_long_dec(&num_poisoned_pages);
                }
        } else {
                pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
index 26fbba7d888f887c3383c1bb5829cb4f204e2040..6da82bcb0a8b66b7326c1a021a7eac3b476cd85e 100644 (file)
@@ -446,7 +446,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
        int nr_pages = PAGES_PER_SECTION;
        int nid = pgdat->node_id;
        int zone_type;
-       unsigned long flags;
+       unsigned long flags, pfn;
        int ret;
 
        zone_type = zone - pgdat->node_zones;
@@ -461,6 +461,14 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
        memmap_init_zone(nr_pages, nid, zone_type,
                         phys_start_pfn, MEMMAP_HOTPLUG);
+
+       /* online_page_range is called later and expects pages reserved */
+       for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
+               if (!pfn_valid(pfn))
+                       continue;
+
+               SetPageReserved(pfn_to_page(pfn));
+       }
        return 0;
 }
 
@@ -1269,6 +1277,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
 
        /* create new memmap entry */
        firmware_map_add_hotplug(start, start + size, "System RAM");
+       memblock_add_node(start, size, nid);
 
        goto out;
 
@@ -2005,6 +2014,8 @@ void __ref remove_memory(int nid, u64 start, u64 size)
 
        /* remove memmap entry */
        firmware_map_remove(start, start + size, "System RAM");
+       memblock_free(start, size);
+       memblock_remove(start, size);
 
        arch_remove_memory(start, size);
 
index ee401e4e5ef187c92247d03dd6d2ea0893092d1c..eb4267107d1fee9fa2a55e4076c014500e3b1edb 100644 (file)
@@ -880,7 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
        /* Establish migration ptes or remove ptes */
        if (page_mapped(page)) {
                try_to_unmap(page,
-                       TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+                       TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
+                       TTU_IGNORE_HWPOISON);
                page_was_mapped = 1;
        }
 
@@ -950,7 +951,10 @@ out:
                list_del(&page->lru);
                dec_zone_page_state(page, NR_ISOLATED_ANON +
                                page_is_file_cache(page));
-               if (reason != MR_MEMORY_FAILURE)
+               /* Soft-offlined page shouldn't go through lru cache list */
+               if (reason == MR_MEMORY_FAILURE)
+                       put_page(page);
+               else
                        putback_lru_page(page);
        }
 
index 22cddd3e5de8433952e99438d3260ae9ff20bd8d..5cccc127ef81f1d64ca46f9ce9ad50f519d4ea9f 100644 (file)
@@ -2063,10 +2063,10 @@ static struct notifier_block ratelimit_nb = {
  */
 void __init page_writeback_init(void)
 {
+       BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
+
        writeback_set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);
-
-       BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
 }
 
 /**
index ef19f22b2b7de1728fb4ed8d6451d29bb993a928..5b5240b7f642de179efa3552245fbf9326d7a206 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/interrupt.h>
-#include <linux/rwsem.h>
 #include <linux/pagemap.h>
 #include <linux/jiffies.h>
 #include <linux/bootmem.h>
@@ -981,21 +980,21 @@ static void __init __free_pages_boot_core(struct page *page,
 
 #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
        defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
-/* Only safe to use early in boot when initialisation is single-threaded */
+
 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
 
 int __meminit early_pfn_to_nid(unsigned long pfn)
 {
+       static DEFINE_SPINLOCK(early_pfn_lock);
        int nid;
 
-       /* The system will behave unpredictably otherwise */
-       BUG_ON(system_state != SYSTEM_BOOTING);
-
+       spin_lock(&early_pfn_lock);
        nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
-       if (nid >= 0)
-               return nid;
-       /* just returns 0 */
-       return 0;
+       if (nid < 0)
+               nid = 0;
+       spin_unlock(&early_pfn_lock);
+
+       return nid;
 }
 #endif
 
@@ -1060,7 +1059,15 @@ static void __init deferred_free_range(struct page *page,
                __free_pages_boot_core(page, pfn, 0);
 }
 
-static __initdata DECLARE_RWSEM(pgdat_init_rwsem);
+/* Completion tracking for deferred_init_memmap() threads */
+static atomic_t pgdat_init_n_undone __initdata;
+static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
+
+static inline void __init pgdat_init_report_one_done(void)
+{
+       if (atomic_dec_and_test(&pgdat_init_n_undone))
+               complete(&pgdat_init_all_done_comp);
+}
 
 /* Initialise remaining memory on a node */
 static int __init deferred_init_memmap(void *data)
@@ -1077,7 +1084,7 @@ static int __init deferred_init_memmap(void *data)
        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
        if (first_init_pfn == ULONG_MAX) {
-               up_read(&pgdat_init_rwsem);
+               pgdat_init_report_one_done();
                return 0;
        }
 
@@ -1177,7 +1184,8 @@ free_range:
 
        pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
                                        jiffies_to_msecs(jiffies - start));
-       up_read(&pgdat_init_rwsem);
+
+       pgdat_init_report_one_done();
        return 0;
 }
 
@@ -1185,14 +1193,17 @@ void __init page_alloc_init_late(void)
 {
        int nid;
 
+       /* There will be num_node_state(N_MEMORY) threads */
+       atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
        for_each_node_state(nid, N_MEMORY) {
-               down_read(&pgdat_init_rwsem);
                kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
        }
 
        /* Block until all are initialised */
-       down_write(&pgdat_init_rwsem);
-       up_write(&pgdat_init_rwsem);
+       wait_for_completion(&pgdat_init_all_done_comp);
+
+       /* Reinit limits that are based on free pages after the kernel is up */
+       files_maxfiles_init();
 }
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
@@ -1285,6 +1296,10 @@ static inline int check_new_page(struct page *page)
                bad_reason = "non-NULL mapping";
        if (unlikely(atomic_read(&page->_count) != 0))
                bad_reason = "nonzero _count";
+       if (unlikely(page->flags & __PG_HWPOISON)) {
+               bad_reason = "HWPoisoned (hardware-corrupted)";
+               bad_flags = __PG_HWPOISON;
+       }
        if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
                bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
                bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
@@ -1328,12 +1343,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
        set_page_owner(page, order, gfp_flags);
 
        /*
-        * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
+        * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
         * allocate the page. The expectation is that the caller is taking
         * steps that will free more memory. The caller should avoid the page
         * being used for !PFMEMALLOC purposes.
         */
-       page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
+       if (alloc_flags & ALLOC_NO_WATERMARKS)
+               set_page_pfmemalloc(page);
+       else
+               clear_page_pfmemalloc(page);
 
        return 0;
 }
@@ -3330,7 +3348,7 @@ refill:
                atomic_add(size - 1, &page->_count);
 
                /* reset page count bias and offset to start of new frag */
-               nc->pfmemalloc = page->pfmemalloc;
+               nc->pfmemalloc = page_is_pfmemalloc(page);
                nc->pagecnt_bias = size;
                nc->offset = size;
        }
@@ -5045,6 +5063,10 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
 {
        unsigned long zone_start_pfn, zone_end_pfn;
 
+       /* When hot-adding a new node, the node should be empty */
+       if (!node_start_pfn && !node_end_pfn)
+               return 0;
+
        /* Get the start and end of the zone */
        zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
        zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
@@ -5108,6 +5130,10 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
        unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
        unsigned long zone_start_pfn, zone_end_pfn;
 
+       /* When hot-adding a new node, the node should be empty */
+       if (!node_start_pfn && !node_end_pfn)
+               return 0;
+
        zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
        zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
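
The rwsem trick in page_alloc_init_late() is replaced above by the standard "N workers, one waiter" completion pattern. A minimal sketch of the pattern in isolation; worker_fn is hypothetical, and each worker must call report_one_done() exactly once before returning, as deferred_init_memmap() now does:

    static atomic_t n_undone;
    static DECLARE_COMPLETION(all_done);

    static void report_one_done(void)
    {
            if (atomic_dec_and_test(&n_undone))
                    complete(&all_done);    /* last worker wakes the waiter */
    }

    static void run_and_wait(int nr_workers)
    {
            int i;

            atomic_set(&n_undone, nr_workers);
            for (i = 0; i < nr_workers; i++)
                    kthread_run(worker_fn, NULL, "worker/%d", i);
            wait_for_completion(&all_done); /* until every worker reported */
    }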
 
index 4caf8ed24d6586e32ab910f28f945c01cef6373b..dbe0c1e8349c72ac569a58289da702a841104951 100644 (file)
@@ -3363,8 +3363,8 @@ put_path:
  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
  *     kernel internal.  There will be NO LSM permission checks against the
  *     underlying inode.  So users of this interface must do LSM checks at a
- *     higher layer.  The one user is the big_key implementation.  LSM checks
- *     are provided at the key level rather than the inode level.
+ *     higher layer.  The users are the big_key and shm implementations.  LSM
+ *     checks are provided at the key or shm level rather than the inode.
  * @name: name for dentry (to be seen in /proc/<pid>/maps
  * @size: size to be set for the file
  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
index 200e22412a161fc7a2232bcb923f07bc117362b8..bbd0b47dc6a97eecea7650ce6b351e88d5a17295 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1603,7 +1603,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
        }
 
        /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
-       if (unlikely(page->pfmemalloc))
+       if (page_is_pfmemalloc(page))
                pfmemalloc_active = true;
 
        nr_pages = (1 << cachep->gfporder);
@@ -1614,7 +1614,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
                add_zone_page_state(page_zone(page),
                        NR_SLAB_UNRECLAIMABLE, nr_pages);
        __SetPageSlab(page);
-       if (page->pfmemalloc)
+       if (page_is_pfmemalloc(page))
                SetPageSlabPfmemalloc(page);
 
        if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
index 3e5f8f29c28640e44af5f5f9d1c3553986064588..86831105a09f44ffae37c074a6e5587c5b7056ce 100644 (file)
@@ -37,8 +37,7 @@ struct kmem_cache *kmem_cache;
                SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
                SLAB_FAILSLAB)
 
-#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-               SLAB_CACHE_DMA | SLAB_NOTRACK)
+#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
index 816df0016555ad8a5cf03c8020e0b39b75b0a498..f68c0e50f3c083abe295a1dcd60668321a8f9232 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
        inc_slabs_node(s, page_to_nid(page), page->objects);
        page->slab_cache = s;
        __SetPageSlab(page);
-       if (page->pfmemalloc)
+       if (page_is_pfmemalloc(page))
                SetPageSlabPfmemalloc(page);
 
        start = page_address(page);
index e61445dce04e3cc83e9704e84f3d5bf9074b31db..8286938c70ded6b82d4268174c92669a90eeb674 100644 (file)
@@ -973,22 +973,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 *    caller can stall after page list has been processed.
                 *
                 * 2) Global or new memcg reclaim encounters a page that is
-                *    not marked for immediate reclaim or the caller does not
-                *    have __GFP_IO. In this case mark the page for immediate
+                *    not marked for immediate reclaim, or the caller does not
+                *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
+                *    not to fs). In this case mark the page for immediate
                 *    reclaim and continue scanning.
                 *
-                *    __GFP_IO is checked  because a loop driver thread might
+                *    Require may_enter_fs because we would wait on fs, which
+                *    may not have submitted IO yet. And the loop driver might
                 *    enter reclaim, and deadlock if it waits on a page for
                 *    which it is needed to do the write (loop masks off
                 *    __GFP_IO|__GFP_FS for this reason); but more thought
                 *    would probably show more reasons.
                 *
-                *    Don't require __GFP_FS, since we're not going into the
-                *    FS, just waiting on its writeback completion. Worryingly,
-                *    ext4 gfs2 and xfs allocate pages with
-                *    grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
-                *    may_enter_fs here is liable to OOM on them.
-                *
                 * 3) Legacy memcg encounters a page that is not already marked
                 *    PageReclaim. memcg does not have any dirty pages
                 *    throttling so we could easily OOM just because too many
@@ -1005,7 +1001,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                        /* Case 2 above */
                        } else if (sane_reclaim(sc) ||
-                           !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+                           !PageReclaim(page) || !may_enter_fs) {
                                /*
                                 * This is slightly racy - end_page_writeback()
                                 * might have just cleared PageReclaim, then
index 498454b3c06c3ddf0e8c3989e23bef9360587544..ea79ee9a73489f43a88e57c2eb529fc11d894e5f 100644 (file)
@@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
        struct p9_client *clnt = fid->clnt;
        struct p9_req_t *req;
        int total = 0;
+       *err = 0;
 
        p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
                   fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
@@ -1620,6 +1621,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
        struct p9_client *clnt = fid->clnt;
        struct p9_req_t *req;
        int total = 0;
+       *err = 0;
 
        p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
                                fid->fid, (unsigned long long) offset,
index fb54e6aed096edd267fc211e4cd2a0139fe71f8a..6d0b471eede8639f55b33c5c4d434c8fddef6ee5 100644 (file)
@@ -1138,6 +1138,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  * @hdr_size: size of the encapsulation header
+ *
+ * Returns true if the packet was snooped and consumed by DAT. False if the
+ * packet has to be delivered to the interface.
  */
 bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
                                         struct sk_buff *skb, int hdr_size)
@@ -1145,7 +1148,7 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
        uint16_t type;
        __be32 ip_src, ip_dst;
        uint8_t *hw_src, *hw_dst;
-       bool ret = false;
+       bool dropped = false;
        unsigned short vid;
 
        if (!atomic_read(&bat_priv->distributed_arp_table))
@@ -1174,12 +1177,17 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
        /* if this REPLY is directed to a client of mine, let's deliver the
         * packet to the interface
         */
-       ret = !batadv_is_my_client(bat_priv, hw_dst, vid);
+       dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
+
+       /* if this REPLY is sent on behalf of a client of mine, let's drop the
+        * packet because the client will reply by itself
+        */
+       dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
 out:
-       if (ret)
+       if (dropped)
                kfree_skb(skb);
-       /* if ret == false -> packet has to be delivered to the interface */
-       return ret;
+       /* if dropped == false -> deliver to the interface */
+       return dropped;
 }
 
 /**
index bb01586206289929f8c5e43153b75f9c279b9f85..cffa92dd98778bf2ffdf11107243fcc0f60cab6b 100644 (file)
@@ -439,6 +439,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 
        INIT_HLIST_NODE(&gw_node->list);
        gw_node->orig_node = orig_node;
+       gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
+       gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
        atomic_set(&gw_node->refcount, 1);
 
        spin_lock_bh(&bat_priv->gw.list_lock);
index c002961da75d655deb813990f5706cf37fbd6d7d..a2fc843c22432e790980fa15653cf95e6c60b384 100644 (file)
@@ -479,6 +479,9 @@ out:
  */
 void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
 {
+       if (!vlan)
+               return;
+
        if (atomic_dec_and_test(&vlan->refcount)) {
                spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
                hlist_del_rcu(&vlan->list);
index b4824951010ba6b42bf7d7c7eb62c529ed340158..5809b39c1922320e8dbb353de212b472199c796d 100644 (file)
@@ -594,6 +594,12 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 
        /* increase the refcounter of the related vlan */
        vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
+                addr, BATADV_PRINT_VID(vid))) {
+               kfree(tt_local);
+               tt_local = NULL;
+               goto out;
+       }
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@ -1034,6 +1040,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
        struct batadv_tt_local_entry *tt_local_entry;
        uint16_t flags, curr_flags = BATADV_NO_FLAGS;
        struct batadv_softif_vlan *vlan;
+       void *tt_entry_exists;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
@@ -1061,11 +1068,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
         * immediately purge it
         */
        batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
-       hlist_del_rcu(&tt_local_entry->common.hash_entry);
+
+       tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
+                                            batadv_compare_tt,
+                                            batadv_choose_tt,
+                                            &tt_local_entry->common);
+       if (!tt_entry_exists)
+               goto out;
+
+       /* extra call to free the local tt entry */
        batadv_tt_local_entry_free_ref(tt_local_entry);
 
        /* decrease the reference held for this vlan */
        vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (!vlan)
+               goto out;
+
        batadv_softif_vlan_free_ref(vlan);
        batadv_softif_vlan_free_ref(vlan);
 
@@ -1166,8 +1184,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
                        /* decrease the reference held for this vlan */
                        vlan = batadv_softif_vlan_get(bat_priv,
                                                      tt_common_entry->vid);
-                       batadv_softif_vlan_free_ref(vlan);
-                       batadv_softif_vlan_free_ref(vlan);
+                       if (vlan) {
+                               batadv_softif_vlan_free_ref(vlan);
+                               batadv_softif_vlan_free_ref(vlan);
+                       }
 
                        batadv_tt_local_entry_free_ref(tt_local);
                }
@@ -3207,8 +3227,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
 
                        /* decrease the reference held for this vlan */
                        vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
-                       batadv_softif_vlan_free_ref(vlan);
-                       batadv_softif_vlan_free_ref(vlan);
+                       if (vlan) {
+                               batadv_softif_vlan_free_ref(vlan);
+                               batadv_softif_vlan_free_ref(vlan);
+                       }
 
                        batadv_tt_local_entry_free_ref(tt_local);
                }
index 7998fb27916568da087b2734a017355158044a75..92720f3fe57370137f22ae3d2b76b390da09e9a1 100644 (file)
@@ -7820,7 +7820,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
        /* Make sure we copy only the significant bytes based on the
         * encryption key size, and set the rest of the value to zeroes.
         */
-       memcpy(ev.key.val, key->val, sizeof(key->enc_size));
+       memcpy(ev.key.val, key->val, key->enc_size);
        memset(ev.key.val + key->enc_size, 0,
               sizeof(ev.key.val) - key->enc_size);
 
index 0b39dcc65b94f0aa571dc22dc0c97afbf4e3d744..1285eaf5dc222e7cf75f4da796f2075f098105ae 100644 (file)
@@ -1591,7 +1591,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
                break;
        }
 
-       if (skb_trimmed)
+       if (skb_trimmed && skb_trimmed != skb)
                kfree_skb(skb_trimmed);
 
        return err;
@@ -1636,7 +1636,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
                break;
        }
 
-       if (skb_trimmed)
+       if (skb_trimmed && skb_trimmed != skb)
                kfree_skb(skb_trimmed);
 
        return err;
index 3da5525eb8a2dc21e5f64638881a351a72a0d300..4d74a0639c4ccd040b6371665e6a74f8dca6f800 100644 (file)
@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
                + nla_total_size(1)     /* IFLA_BRPORT_FAST_LEAVE */
                + nla_total_size(1)     /* IFLA_BRPORT_LEARNING */
                + nla_total_size(1)     /* IFLA_BRPORT_UNICAST_FLOOD */
+               + nla_total_size(1)     /* IFLA_BRPORT_PROXYARP */
+               + nla_total_size(1)     /* IFLA_BRPORT_PROXYARP_WIFI */
                + 0;
 }
 
@@ -506,6 +508,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
        [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
        [IFLA_BRPORT_LEARNING]  = { .type = NLA_U8 },
        [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
+       [IFLA_BRPORT_PROXYARP]  = { .type = NLA_U8 },
+       [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
 };
 
 /* Change the state of the port and notify spanning tree */
index 4967262b27076af66347d20eca54b65a2e61d789..617088aee21d41ba98d4ef5ebee5d6c002efe029 100644 (file)
@@ -131,12 +131,12 @@ out_noerr:
        goto out;
 }
 
-static int skb_set_peeked(struct sk_buff *skb)
+static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
 {
        struct sk_buff *nskb;
 
        if (skb->peeked)
-               return 0;
+               return skb;
 
        /* We have to unshare an skb before modifying it. */
        if (!skb_shared(skb))
@@ -144,7 +144,7 @@ static int skb_set_peeked(struct sk_buff *skb)
 
        nskb = skb_clone(skb, GFP_ATOMIC);
        if (!nskb)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        skb->prev->next = nskb;
        skb->next->prev = nskb;
@@ -157,7 +157,7 @@ static int skb_set_peeked(struct sk_buff *skb)
 done:
        skb->peeked = 1;
 
-       return 0;
+       return skb;
 }
 
 /**
@@ -229,8 +229,9 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
                                        continue;
                                }
 
-                               error = skb_set_peeked(skb);
-                               if (error)
+                               skb = skb_set_peeked(skb);
+                               error = PTR_ERR(skb);
+                               if (IS_ERR(skb))
                                        goto unlock_err;
 
                                atomic_inc(&skb->users);
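
Returning the (possibly replaced) skb and an errno through one pointer is the standard ERR_PTR idiom. A reduced sketch of both sides; maybe_replace() is hypothetical:

    static struct sk_buff *maybe_replace(struct sk_buff *skb)
    {
            struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

            if (!nskb)
                    return ERR_PTR(-ENOMEM);  /* errno encoded in the pointer */
            return nskb;                      /* success: the usable skb      */
    }

    /* caller: */
    skb = maybe_replace(skb);
    if (IS_ERR(skb)) {
            error = PTR_ERR(skb);             /* decode the errno */
            goto unlock_err;
    }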
index 1ebdf1c0d1188c309d854bc9145c9b2f5b7b58a4..1cbd209192eacd6b7ec9a13f8180a5138760fc7e 100644 (file)
@@ -3514,8 +3514,6 @@ static int pktgen_thread_worker(void *arg)
 
        set_freezable();
 
-       __set_current_state(TASK_RUNNING);
-
        while (!kthread_should_stop()) {
                pkt_dev = next_to_run(t);
 
@@ -3560,7 +3558,6 @@ static int pktgen_thread_worker(void *arg)
 
                try_to_freeze();
        }
-       set_current_state(TASK_INTERRUPTIBLE);
 
        pr_debug("%s stopping all device\n", t->tsk->comm);
        pktgen_stop(t);
index 87b22c0bc08c2f33fa31948b8b2604f48b8009bc..b42f0e26f89e4cf2e37a8329da549eb5cd1200c5 100644 (file)
@@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
                        spin_lock_bh(&queue->syn_wait_lock);
                        while ((req = lopt->syn_table[i]) != NULL) {
                                lopt->syn_table[i] = req->dl_next;
+                               /* Because of following del_timer_sync(),
+                                * we must release the spinlock here
+                                * or risk a deadlock.
+                                */
+                               spin_unlock_bh(&queue->syn_wait_lock);
                                atomic_inc(&lopt->qlen_dec);
-                               if (del_timer(&req->rsk_timer))
+                               if (del_timer_sync(&req->rsk_timer))
                                        reqsk_put(req);
                                reqsk_put(req);
+                               spin_lock_bh(&queue->syn_wait_lock);
                        }
                        spin_unlock_bh(&queue->syn_wait_lock);
                }
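
del_timer_sync() spins until a concurrently running timer callback finishes; if that callback can itself take syn_wait_lock, calling it under the lock deadlocks. The generic shape of the fix, with pick_next() hypothetical:

    spin_lock_bh(&lock);
    while ((req = pick_next(table)) != NULL) {
            spin_unlock_bh(&lock);          /* the callback may need 'lock' */
            if (del_timer_sync(&req->rsk_timer))
                    reqsk_put(req);         /* drop the timer's reference   */
            reqsk_put(req);                 /* drop the table's reference   */
            spin_lock_bh(&lock);            /* re-take before the next scan */
    }
    spin_unlock_bh(&lock);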
index b6a19ca0f99e49c7406f1fedb6c59433bbd7fd38..7b84330e5d30693cf63fe2d04b7f64f2b8893362 100644 (file)
@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
 
        if (skb && frag_size) {
                skb->head_frag = 1;
-               if (virt_to_head_page(data)->pfmemalloc)
+               if (page_is_pfmemalloc(virt_to_head_page(data)))
                        skb->pfmemalloc = 1;
        }
        return skb;
@@ -4022,8 +4022,8 @@ EXPORT_SYMBOL(skb_checksum_setup);
  * Otherwise returns the provided skb. Returns NULL in error cases
  * (e.g. transport_len exceeds skb length or out-of-memory).
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
                                               unsigned int transport_len)
@@ -4032,16 +4032,12 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
        unsigned int len = skb_transport_offset(skb) + transport_len;
        int ret;
 
-       if (skb->len < len) {
-               kfree_skb(skb);
+       if (skb->len < len)
                return NULL;
-       } else if (skb->len == len) {
+       else if (skb->len == len)
                return skb;
-       }
 
        skb_chk = skb_clone(skb, GFP_ATOMIC);
-       kfree_skb(skb);
-
        if (!skb_chk)
                return NULL;
 
@@ -4066,8 +4062,8 @@ static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
  * If the skb has data beyond the given transport length, then a
  * trimmed & cloned skb is checked and returned.
  *
- * Caller needs to set the skb transport header and release the returned skb.
- * Provided skb is consumed.
+ * Caller needs to set the skb transport header and free any returned skb if it
+ * differs from the provided skb.
  */
 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
                                     unsigned int transport_len,
@@ -4079,23 +4075,26 @@ struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
 
        skb_chk = skb_checksum_maybe_trim(skb, transport_len);
        if (!skb_chk)
-               return NULL;
+               goto err;
 
-       if (!pskb_may_pull(skb_chk, offset)) {
-               kfree_skb(skb_chk);
-               return NULL;
-       }
+       if (!pskb_may_pull(skb_chk, offset))
+               goto err;
 
        __skb_pull(skb_chk, offset);
        ret = skb_chkf(skb_chk);
        __skb_push(skb_chk, offset);
 
-       if (ret) {
-               kfree_skb(skb_chk);
-               return NULL;
-       }
+       if (ret)
+               goto err;
 
        return skb_chk;
+
+err:
+       if (skb_chk && skb_chk != skb)
+               kfree_skb(skb_chk);
+
+       return NULL;
+
 }
 EXPORT_SYMBOL(skb_checksum_trimmed);
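With the consume-on-error semantics removed, a caller written against the new contract looks roughly like the sketch below; my_validate_csum is a stand-in for the checksum callback.

    skb_chk = skb_checksum_trimmed(skb, transport_len, my_validate_csum);
    if (!skb_chk)
            return -EINVAL;         /* the original skb is still ours */

    /* ... inspect skb_chk ... */

    if (skb_chk != skb)
            kfree_skb(skb_chk);     /* free only the trimmed clone */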
 
index 0917123790eaf09b001c97a733039185fdb0a800..35c47ddd04f0ee3eb965fd99b06bab2ee4670774 100644 (file)
@@ -756,7 +756,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
                return -ENODEV;
 
        /* Use already configured phy mode */
-       p->phy_interface = p->phy->interface;
+       if (p->phy_interface == PHY_INTERFACE_MODE_NA)
+               p->phy_interface = p->phy->interface;
        phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
                           p->phy_interface);
 
index 37c4bb89a7082bbe36b40d928f7fd1d95bfe8252..b0c6258ffb79a7cbcaaf1296e4842db52876b5b2 100644 (file)
@@ -2465,7 +2465,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
                key = l->key + 1;
                iter->pos++;
 
-               if (pos-- <= 0)
+               if (--pos <= 0)
                        break;
 
                l = NULL;
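The one-character change matters because post-decrement compares the old value, so the old loop walked one route too far. In isolation:

    int pos = 1;

    if (pos-- <= 0)         /* false: tests the old value, 1 */
            ;               /* loop would keep going here */

    pos = 1;
    if (--pos <= 0)         /* true: tests the new value, 0 */
            ;               /* loop breaks on the intended entry */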
index 651cdf648ec4728bff6e709b0324b7d52ffd65ed..9fdfd9deac11dde85bc62803068fbe50e45837b8 100644 (file)
@@ -1435,33 +1435,35 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
        struct sk_buff *skb_chk;
        unsigned int transport_len;
        unsigned int len = skb_transport_offset(skb) + sizeof(struct igmphdr);
-       int ret;
+       int ret = -EINVAL;
 
        transport_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
 
-       skb_get(skb);
        skb_chk = skb_checksum_trimmed(skb, transport_len,
                                       ip_mc_validate_checksum);
        if (!skb_chk)
-               return -EINVAL;
+               goto err;
 
-       if (!pskb_may_pull(skb_chk, len)) {
-               kfree_skb(skb_chk);
-               return -EINVAL;
-       }
+       if (!pskb_may_pull(skb_chk, len))
+               goto err;
 
        ret = ip_mc_check_igmp_msg(skb_chk);
-       if (ret) {
-               kfree_skb(skb_chk);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        if (skb_trimmed)
                *skb_trimmed = skb_chk;
-       else
+       /* free the now unneeded clone */
+       else if (skb_chk != skb)
                kfree_skb(skb_chk);
 
-       return 0;
+       ret = 0;
+
+err:
+       if (ret && skb_chk && skb_chk != skb)
+               kfree_skb(skb_chk);
+
+       return ret;
 }
 
 /**
@@ -1470,7 +1472,7 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * @skb_trimmed: to store an skb pointer trimmed to IPv4 packet tail (optional)
  *
  * Checks whether an IPv4 packet is a valid IGMP packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  *  standard
@@ -1485,7 +1487,8 @@ static int __ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
  */
 int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {
index 60021d0d9326ac691dcef21e1f9c20de5f8fe7c6..134957159c27eb9180e08b73360fe891574b4742 100644 (file)
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
        }
 
        spin_unlock(&queue->syn_wait_lock);
-       if (del_timer(&req->rsk_timer))
+       if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
                reqsk_put(req);
        return found;
 }
index fe8cc183411e052f6e0ba4afefbeaef1e77313cd..95ea633e8356eb9b419e4027f9954810194aa23c 100644 (file)
@@ -226,7 +226,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+                         niph, nth, tcp_hdr_size);
 }
 
 static bool
index 433231ccfb17fc6d01179247d1d81226803d18df..0330ab2e2b6329ced120cd9b7100a5a34f50e82b 100644 (file)
@@ -41,8 +41,6 @@ static int tcp_syn_retries_min = 1;
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
-static int min_sndbuf = SOCK_MIN_SNDBUF;
-static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
 /* Update system visible IP port range */
 static void set_local_port_range(struct net *net, int range[2])
@@ -530,7 +528,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_tcp_wmem),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_sndbuf,
+               .extra1         = &one,
        },
        {
                .procname       = "tcp_notsent_lowat",
@@ -545,7 +543,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_tcp_rmem),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_rcvbuf,
+               .extra1         = &one,
        },
        {
                .procname       = "tcp_app_win",
@@ -758,7 +756,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_udp_rmem_min),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_rcvbuf,
+               .extra1         = &one
        },
        {
                .procname       = "udp_wmem_min",
@@ -766,7 +764,7 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = sizeof(sysctl_udp_wmem_min),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &min_sndbuf,
+               .extra1         = &one
        },
        { }
 };
index d7d4c2b79cf2f516f9e3f62c6fe4415e9bc137a0..0ea2e1c5d395ac979e9a301d006867d49b866ecb 100644 (file)
@@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
        req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
        if (req) {
                nsk = tcp_check_req(sk, skb, req, false);
-               if (!nsk)
+               if (!nsk || nsk == sk)
                        reqsk_put(req);
                return nsk;
        }
index 83aa604f9273c332c5a0e5399253d961ef92eb9a..1b8c5ba7d5f732ea2220ada929049c5c7cfd5e83 100644 (file)
@@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb)
 
        skb->sk = sk;
        skb->destructor = sock_efree;
-       dst = sk->sk_rx_dst;
+       dst = READ_ONCE(sk->sk_rx_dst);
 
        if (dst)
                dst = dst_check(dst, 0);
-       if (dst)
-               skb_dst_set_noref(skb, dst);
+       if (dst) {
+               /* DST_NOCACHE cannot be used without taking a reference */
+               if (dst->flags & DST_NOCACHE) {
+                       if (likely(atomic_inc_not_zero(&dst->__refcnt)))
+                               skb_dst_set(skb, dst);
+               } else {
+                       skb_dst_set_noref(skb, dst);
+               }
+       }
 }
 
 int udp_rcv(struct sk_buff *skb)
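The DST_NOCACHE branch above uses the take-if-still-alive refcount idiom: a cached-but-unreferenced pointer can race with the object being freed, so a reference is taken only while the count is still non-zero. Hedged sketch with placeholder names obj, use_obj() and obj_put():

    if (likely(atomic_inc_not_zero(&obj->refcnt))) {
            use_obj(obj);           /* safe: we hold a reference now */
            obj_put(obj);           /* drop it when done */
    } else {
            /* count already hit zero: the object is going away,
             * fall back to a fresh lookup instead.
             */
    }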
index 55d19861ab20f4a91b6b289be7ca3b0250df4531..548c6237b1e706f8ef72575a6a7dbad544b60fcc 100644 (file)
@@ -172,6 +172,8 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
                        *ppcpu_rt = NULL;
                }
        }
+
+       non_pcpu_rt->rt6i_pcpu = NULL;
 }
 
 static void rt6_release(struct rt6_info *rt)
index df8afe5ab31e4b8e75bf2fbf844f8b3e798edbba..9405b04eecc64f478960329da93f6e01d437954e 100644 (file)
@@ -143,34 +143,36 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
        struct sk_buff *skb_chk = NULL;
        unsigned int transport_len;
        unsigned int len = skb_transport_offset(skb) + sizeof(struct mld_msg);
-       int ret;
+       int ret = -EINVAL;
 
        transport_len = ntohs(ipv6_hdr(skb)->payload_len);
        transport_len -= skb_transport_offset(skb) - sizeof(struct ipv6hdr);
 
-       skb_get(skb);
        skb_chk = skb_checksum_trimmed(skb, transport_len,
                                       ipv6_mc_validate_checksum);
        if (!skb_chk)
-               return -EINVAL;
+               goto err;
 
-       if (!pskb_may_pull(skb_chk, len)) {
-               kfree_skb(skb_chk);
-               return -EINVAL;
-       }
+       if (!pskb_may_pull(skb_chk, len))
+               goto err;
 
        ret = ipv6_mc_check_mld_msg(skb_chk);
-       if (ret) {
-               kfree_skb(skb_chk);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        if (skb_trimmed)
                *skb_trimmed = skb_chk;
-       else
+       /* free the now unneeded clone */
+       else if (skb_chk != skb)
                kfree_skb(skb_chk);
 
-       return 0;
+       ret = 0;
+
+err:
+       if (ret && skb_chk && skb_chk != skb)
+               kfree_skb(skb_chk);
+
+       return ret;
 }
 
 /**
@@ -179,7 +181,7 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * @skb_trimmed: to store an skb pointer trimmed to IPv6 packet tail (optional)
  *
  * Checks whether an IPv6 packet is a valid MLD packet. If so sets
- * skb network and transport headers accordingly and returns zero.
+ * skb transport header accordingly and returns zero.
  *
  * -EINVAL: A broken packet was detected, i.e. it violates some internet
  *  standard
@@ -194,7 +196,8 @@ static int __ipv6_mc_check_mld(struct sk_buff *skb,
  * to leave the original skb and its full frame unchanged (which might be
  * desirable for layer 2 frame jugglers).
  *
- * The caller needs to release a reference count from any returned skb_trimmed.
+ * Caller needs to set the skb network header and free any returned skb if it
+ * differs from the provided skb.
  */
 int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed)
 {
index 6edb7b106de769728357174d0657c644f83e41e8..ebbb754c2111b73c87fe85aa58b3124e0a3032ef 100644 (file)
@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
 }
 
 static void
-synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+synproxy_send_tcp(const struct synproxy_net *snet,
+                 const struct sk_buff *skb, struct sk_buff *nskb,
                  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
                  struct ipv6hdr *niph, struct tcphdr *nth,
                  unsigned int tcp_hdr_size)
 {
-       struct net *net = nf_ct_net((struct nf_conn *)nfct);
+       struct net *net = nf_ct_net(snet->tmpl);
        struct dst_entry *dst;
        struct flowi6 fl6;
 
@@ -83,7 +84,8 @@ free_nskb:
 }
 
 static void
-synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+synproxy_send_client_synack(const struct synproxy_net *snet,
+                           const struct sk_buff *skb, const struct tcphdr *th,
                            const struct synproxy_options *opts)
 {
        struct sk_buff *nskb;
@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+       synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
                          niph, nth, tcp_hdr_size);
 }
 
@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+       synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
                          niph, nth, tcp_hdr_size);
 }
 
@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
 }
 
 static void
@@ -241,7 +243,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+                         niph, nth, tcp_hdr_size);
 }
 
 static bool
@@ -301,7 +304,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
                                          XT_SYNPROXY_OPT_SACK_PERM |
                                          XT_SYNPROXY_OPT_ECN);
 
-               synproxy_send_client_synack(skb, th, &opts);
+               synproxy_send_client_synack(snet, skb, th, &opts);
                return NF_DROP;
 
        } else if (th->ack && !(th->fin || th->rst || th->syn)) {
index 6090969937f8b6809f74c3d03f29a0703089eff1..d15586490cecaedcc29bc821163cd6c85544b0b8 100644 (file)
@@ -318,8 +318,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
 /* allocate dst with ip6_dst_ops */
 static struct rt6_info *__ip6_dst_alloc(struct net *net,
                                        struct net_device *dev,
-                                       int flags,
-                                       struct fib6_table *table)
+                                       int flags)
 {
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        0, DST_OBSOLETE_FORCE_CHK, flags);
@@ -336,10 +335,9 @@ static struct rt6_info *__ip6_dst_alloc(struct net *net,
 
 static struct rt6_info *ip6_dst_alloc(struct net *net,
                                      struct net_device *dev,
-                                     int flags,
-                                     struct fib6_table *table)
+                                     int flags)
 {
-       struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags, table);
+       struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
 
        if (rt) {
                rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
@@ -950,8 +948,7 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
        if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
                ort = (struct rt6_info *)ort->dst.from;
 
-       rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev,
-                            0, ort->rt6i_table);
+       rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);
 
        if (!rt)
                return NULL;
@@ -983,8 +980,7 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
        struct rt6_info *pcpu_rt;
 
        pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
-                                 rt->dst.dev, rt->dst.flags,
-                                 rt->rt6i_table);
+                                 rt->dst.dev, rt->dst.flags);
 
        if (!pcpu_rt)
                return NULL;
@@ -997,32 +993,53 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
 /* It should be called with read_lock_bh(&tb6_lock) acquired */
 static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
 {
-       struct rt6_info *pcpu_rt, *prev, **p;
+       struct rt6_info *pcpu_rt, **p;
 
        p = this_cpu_ptr(rt->rt6i_pcpu);
        pcpu_rt = *p;
 
-       if (pcpu_rt)
-               goto done;
+       if (pcpu_rt) {
+               dst_hold(&pcpu_rt->dst);
+               rt6_dst_from_metrics_check(pcpu_rt);
+       }
+       return pcpu_rt;
+}
+
+static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
+{
+       struct fib6_table *table = rt->rt6i_table;
+       struct rt6_info *pcpu_rt, *prev, **p;
 
        pcpu_rt = ip6_rt_pcpu_alloc(rt);
        if (!pcpu_rt) {
                struct net *net = dev_net(rt->dst.dev);
 
-               pcpu_rt = net->ipv6.ip6_null_entry;
-               goto done;
+               dst_hold(&net->ipv6.ip6_null_entry->dst);
+               return net->ipv6.ip6_null_entry;
        }
 
-       prev = cmpxchg(p, NULL, pcpu_rt);
-       if (prev) {
-               /* If someone did it before us, return prev instead */
+       read_lock_bh(&table->tb6_lock);
+       if (rt->rt6i_pcpu) {
+               p = this_cpu_ptr(rt->rt6i_pcpu);
+               prev = cmpxchg(p, NULL, pcpu_rt);
+               if (prev) {
+                       /* If someone did it before us, return prev instead */
+                       dst_destroy(&pcpu_rt->dst);
+                       pcpu_rt = prev;
+               }
+       } else {
+               /* rt has been removed from the fib6 tree
+                * before we have a chance to acquire the read_lock.
+                * In this case, don't bother to create a pcpu rt
+                * since rt is going away anyway.  The next
+                * dst_check() will trigger a re-lookup.
+                */
                dst_destroy(&pcpu_rt->dst);
-               pcpu_rt = prev;
+               pcpu_rt = rt;
        }
-
-done:
        dst_hold(&pcpu_rt->dst);
        rt6_dst_from_metrics_check(pcpu_rt);
+       read_unlock_bh(&table->tb6_lock);
        return pcpu_rt;
 }
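The cmpxchg() sequence kept in rt6_make_pcpu_route() is the usual publish-or-adopt idiom for lazily filling a shared slot; a stripped-down sketch, with alloc_one()/free_one() as stand-ins:

    struct thing *new, *prev;

    new = alloc_one();                      /* candidate, not yet visible */
    prev = cmpxchg(&slot, NULL, new);       /* install only if still empty */
    if (prev) {
            free_one(new);                  /* another CPU won the race */
            new = prev;                     /* adopt the published object */
    }
    /* everyone now agrees on a single object in 'slot' */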
 
@@ -1097,9 +1114,22 @@ redo_rt6_select:
                rt->dst.lastuse = jiffies;
                rt->dst.__use++;
                pcpu_rt = rt6_get_pcpu_route(rt);
-               read_unlock_bh(&table->tb6_lock);
+
+               if (pcpu_rt) {
+                       read_unlock_bh(&table->tb6_lock);
+               } else {
+                       /* We have to do the read_unlock first
+                        * because rt6_make_pcpu_route() may trigger
+                        * ip6_dst_gc() which will take the write_lock.
+                        */
+                       dst_hold(&rt->dst);
+                       read_unlock_bh(&table->tb6_lock);
+                       pcpu_rt = rt6_make_pcpu_route(rt);
+                       dst_release(&rt->dst);
+               }
 
                return pcpu_rt;
+
        }
 }
 
@@ -1555,7 +1585,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        if (unlikely(!idev))
                return ERR_PTR(-ENODEV);
 
-       rt = ip6_dst_alloc(net, dev, 0, NULL);
+       rt = ip6_dst_alloc(net, dev, 0);
        if (unlikely(!rt)) {
                in6_dev_put(idev);
                dst = ERR_PTR(-ENOMEM);
@@ -1742,7 +1772,8 @@ int ip6_route_add(struct fib6_config *cfg)
        if (!table)
                goto out;
 
-       rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
+       rt = ip6_dst_alloc(net, NULL,
+                          (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
 
        if (!rt) {
                err = -ENOMEM;
@@ -1831,6 +1862,7 @@ int ip6_route_add(struct fib6_config *cfg)
                int gwa_type;
 
                gw_addr = &cfg->fc_gateway;
+               gwa_type = ipv6_addr_type(gw_addr);
 
                /* if gw_addr is local we will fail to detect this in case
                 * address is still TENTATIVE (DAD in progress). rt6_lookup()
@@ -1838,11 +1870,12 @@ int ip6_route_add(struct fib6_config *cfg)
                 * prefix route was assigned to, which might be non-loopback.
                 */
                err = -EINVAL;
-               if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0))
+               if (ipv6_chk_addr_and_flags(net, gw_addr,
+                                           gwa_type & IPV6_ADDR_LINKLOCAL ?
+                                           dev : NULL, 0, 0))
                        goto out;
 
                rt->rt6i_gateway = *gw_addr;
-               gwa_type = ipv6_addr_type(gw_addr);
 
                if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
                        struct rt6_info *grt;
@@ -2397,7 +2430,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 {
        struct net *net = dev_net(idev->dev);
        struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
-                                           DST_NOCOUNT, NULL);
+                                           DST_NOCOUNT);
        if (!rt)
                return ERR_PTR(-ENOMEM);
 
index 6748c4277affad71cd721e3a985af10c31c047ad..7a6cea5e427414062f408cfa66f57808d48382d7 100644 (file)
@@ -943,7 +943,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
                                   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
        if (req) {
                nsk = tcp_check_req(sk, skb, req, false);
-               if (!nsk)
+               if (!nsk || nsk == sk)
                        reqsk_put(req);
                return nsk;
        }
index 247552a7f6c2f23a1e4bc89b647d8d37680bf2c3..3ece7d1034c81ae8749cada074fbebecbe06d57f 100644 (file)
@@ -92,14 +92,15 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
 static inline void
 minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
 {
-       int j = MAX_THR_RATES;
-       struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats;
+       int j;
+       struct minstrel_rate_stats *tmp_mrs;
        struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
 
-       while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) >
-              minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
-               j--;
+       for (j = MAX_THR_RATES; j > 0; --j) {
                tmp_mrs = &mi->r[tp_list[j - 1]].stats;
+               if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
+                   minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
+                       break;
        }
 
        if (j < MAX_THR_RATES - 1)
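The rewritten loop fixes an ordering bug: the old while loop reloaded tmp_mrs only after decrementing j, so each comparison after the first used the stats of the wrong list position. A reduced sketch of the corrected shape, with stats_of() and beats() as stand-in helpers:

    for (j = MAX_SLOTS; j > 0; --j) {
            incumbent = stats_of(list[j - 1]);      /* fetched fresh */
            if (!beats(candidate, incumbent))
                    break;                          /* insertion point found */
    }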
index 651039ad1681db0434cff21275f1bcbe3f8464bc..3c20d02aee738c5293a5b449f28ebff596c7232d 100644 (file)
@@ -292,7 +292,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
 {
        struct nf_conn *tmpl;
 
-       tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL);
+       tmpl = kzalloc(sizeof(*tmpl), flags);
        if (tmpl == NULL)
                return NULL;
 
@@ -303,7 +303,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
        if (zone) {
                struct nf_conntrack_zone *nf_ct_zone;
 
-               nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC);
+               nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
                if (!nf_ct_zone)
                        goto out_free;
                nf_ct_zone->id = zone;
@@ -1544,10 +1544,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
        sz = nr_slots * sizeof(struct hlist_nulls_head);
        hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                                        get_order(sz));
-       if (!hash) {
-               printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
+       if (!hash)
                hash = vzalloc(sz);
-       }
 
        if (hash && nulls)
                for (i = 0; i < nr_slots; i++)
index 71f1e9fdfa18fb9b1f2f2730ca21af42dad98eea..d7f1685279034b5e3b62748284d24c52cc1f8907 100644 (file)
@@ -353,10 +353,8 @@ static int __net_init synproxy_net_init(struct net *net)
        int err = -ENOMEM;
 
        ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
-       if (IS_ERR(ct)) {
-               err = PTR_ERR(ct);
+       if (!ct)
                goto err1;
-       }
 
        if (!nfct_seqadj_ext_add(ct))
                goto err2;
index c6630030c9121c7af27a3052ad776cd6646eb601..43ddeee404e91f97908fb9228c1e873931b75bcc 100644 (file)
@@ -202,9 +202,10 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
                goto err1;
 
        ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
-       ret = PTR_ERR(ct);
-       if (IS_ERR(ct))
+       if (!ct) {
+               ret = -ENOMEM;
                goto err2;
+       }
 
        ret = 0;
        if ((info->ct_events || info->exp_events) &&
index d8e2e3918ce2fd95637c4cba8bfc4886feb91ea6..67d2104778636c62e7d3fa94c73ce5fb2cd4cb7d 100644 (file)
@@ -1096,6 +1096,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
 
        err = __netlink_insert(table, sk);
        if (err) {
+               /* If the hashtable backend returns -EBUSY here,
+                * it must not escape to the caller.
+                */
+               if (unlikely(err == -EBUSY))
+                       err = -EOVERFLOW;
                if (err == -EEXIST)
                        err = -EADDRINUSE;
                nlk_sk(sk)->portid = 0;
index 8a8c0b8b4f63a4bd8e5ff776250189558e6fcb1e..ee34f474ad1465087da8fd506410559abe47d81e 100644 (file)
@@ -273,28 +273,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
        return 0;
 }
 
-static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
-                       __be32 *addr, __be32 new_addr)
+static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
+                                 __be32 addr, __be32 new_addr)
 {
        int transport_len = skb->len - skb_transport_offset(skb);
 
+       if (nh->frag_off & htons(IP_OFFSET))
+               return;
+
        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
-                                                *addr, new_addr, 1);
+                                                addr, new_addr, 1);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);
 
                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
-                                                        *addr, new_addr, 1);
+                                                        addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }
+}
 
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+                       __be32 *addr, __be32 new_addr)
+{
+       update_ip_l4_checksum(skb, nh, *addr, new_addr);
        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
        *addr = new_addr;
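The new frag_off test encodes a protocol fact rather than a style choice: only the first fragment of an IPv4 datagram carries the TCP/UDP header, so later fragments must not have "checksum" bytes rewritten in what is actually payload. Reduced to its essence:

    /* IP_OFFSET masks the fragment-offset bits; non-zero means a
     * later fragment whose data starts mid-segment, with no L4
     * header at the transport offset.
     */
    if (nh->frag_off & htons(IP_OFFSET))
            return;         /* leave the payload untouched */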
index 9a6b4f66187cf3e5ab533cd01344c9856834ebb7..140a44a5f7b7f1c08b3f329707b72fc75a9a81fe 100644 (file)
@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
 
        /* check for all kinds of wrapping and the like */
        start = (unsigned long)optval;
-       if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
+       if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
                ret = -EINVAL;
                goto out;
        }
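The old guard only detected the wrap after evaluating len + PAGE_SIZE - 1, which already overflows (undefined behaviour for a signed int). The replacement bounds len before any addition; the same check in isolation:

    /* Rearranged so the arithmetic happens on the constant side:
     * equivalent to 'len + PAGE_SIZE - 1 > INT_MAX' but never
     * overflows while computing it.
     */
    if (len < 0 || len > INT_MAX - PAGE_SIZE + 1)
            return -EINVAL;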
index a42a3b257226178eb5af04054a17813c04368613..268545050ddbd67245ead8394a82f44503657ea5 100644 (file)
@@ -98,6 +98,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                        return ret;
                ret = ACT_P_CREATED;
        } else {
+               if (bind)
+                       return 0;
                if (!ovr) {
                        tcf_hash_release(a, bind);
                        return -EEXIST;
index 21ca33c9f0368b21cdb00fbdbbca4851c2ad87a2..a9ba030435a2a5c2ca4bea21d62c6d631c6168f4 100644 (file)
@@ -288,10 +288,26 @@ begin:
 
 static void fq_codel_reset(struct Qdisc *sch)
 {
-       struct sk_buff *skb;
+       struct fq_codel_sched_data *q = qdisc_priv(sch);
+       int i;
 
-       while ((skb = fq_codel_dequeue(sch)) != NULL)
-               kfree_skb(skb);
+       INIT_LIST_HEAD(&q->new_flows);
+       INIT_LIST_HEAD(&q->old_flows);
+       for (i = 0; i < q->flows_cnt; i++) {
+               struct fq_codel_flow *flow = q->flows + i;
+
+               while (flow->head) {
+                       struct sk_buff *skb = dequeue_head(flow);
+
+                       qdisc_qstats_backlog_dec(sch, skb);
+                       kfree_skb(skb);
+               }
+
+               INIT_LIST_HEAD(&flow->flowchain);
+               codel_vars_init(&flow->cvars);
+       }
+       memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
+       sch->q.qlen = 0;
 }
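Resetting by walking the flow table directly, instead of looping fq_codel_dequeue(), keeps the codel control law and dequeue statistics out of teardown and also clears the per-flow backlog accounting the old loop missed. The per-flow pattern, with pop_head() as a stand-in:

    for (i = 0; i < nflows; i++) {
            while ((skb = pop_head(&flows[i])) != NULL)
                    kfree_skb(skb);                 /* just drop it */
            codel_vars_init(&flows[i].cvars);       /* fresh AQM state */
    }
    /* with every flow empty, the qdisc-wide qlen is simply zero */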
 
 static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
index 9cb8522d8d22a826caaec968e82c77516339ca01..f3d3fb42b8735e76aa54bd4c433574f7afc43b0e 100755 (executable)
@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
 my $kconfig = $ARGV[1];
 my $lsmod_file = $ENV{'LSMOD'};
 
-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
+my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
 chomp @makefiles;
 
 my %depends;
index 9ed32502470e9bc1d776d5d925ee48be587baa17..5ebb8968793670d6a23ee6e6d2b30df6243a63ca 100644 (file)
@@ -406,6 +406,7 @@ static __init int yama_init(void)
         */
        if (!security_module_enable("yama"))
                return 0;
+       yama_add_hooks();
 #endif
        pr_info("Yama: becoming mindful.\n");
 
index 7bb988fa6b6d17764e08f39ecbf45f31ee2ff2ee..2a153d260836704011b6eadddfbc9b463e1b37b3 100644 (file)
@@ -740,8 +740,9 @@ static int handle_in_packet(struct amdtp_stream *s,
            s->data_block_counter != UINT_MAX)
                data_block_counter = s->data_block_counter;
 
-       if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) ||
-           (s->data_block_counter == UINT_MAX)) {
+       if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
+            data_block_counter == s->tx_first_dbc) ||
+           s->data_block_counter == UINT_MAX) {
                lost = false;
        } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
                lost = data_block_counter != s->data_block_counter;
index 26b909329e54d96d7fcb075f9538599aaf30cb09..b2cf9e75693b643ce0ac81ebdef15458fd2d4018 100644 (file)
@@ -157,6 +157,8 @@ struct amdtp_stream {
 
        /* quirk: fixed interval of dbc between previous/current packets. */
        unsigned int tx_dbc_interval;
+       /* quirk: indicate the value of the dbc field in the first packet. */
+       unsigned int tx_first_dbc;
 
        bool callbacked;
        wait_queue_head_t callback_wait;
index c670db4eee70d42c91db2904354bf391267c22ef..c94a432f7cc653dca45316ea67bdc481b9e8e887 100644 (file)
@@ -248,10 +248,16 @@ efw_probe(struct fw_unit *unit,
        err = get_hardware_info(efw);
        if (err < 0)
                goto error;
-       if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2)
-               efw->is_af2 = true;
+       /* AudioFire8 (since 2009) and AudioFirePre8 */
        if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
                efw->is_af9 = true;
+       /* These models use the same firmware. */
+       if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
+           entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
+           entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
+           entry->model_id == MODEL_GIBSON_RIP ||
+           entry->model_id == MODEL_GIBSON_GOLDTOP)
+               efw->is_fireworks3 = true;
 
        snd_efw_proc_init(efw);
 
index c33252b7bc847501c4fc2c7b2c18d07a9ec18898..084d414b228cf425dc99440cc081d65459bbe175 100644 (file)
@@ -70,8 +70,8 @@ struct snd_efw {
        bool resp_addr_changable;
 
        /* for quirks */
-       bool is_af2;
        bool is_af9;
+       bool is_fireworks3;
        u32 firmware_version;
 
        unsigned int midi_in_ports;
index a0762dd6231e6080eb9ab86343c413db0bd64492..7e353f1f7bff359ea8845630aeff2521759e508f 100644 (file)
@@ -172,9 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
        efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
        /* Fireworks reset dbc at bus reset. */
        efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
-       /* AudioFire2 starts packets with non-zero dbc. */
-       if (efw->is_af2)
-               efw->tx_stream.flags |= CIP_SKIP_INIT_DBC_CHECK;
+       /*
+        * But recent firmware starts packets with a non-zero dbc.
+        * Driver version 5.7.6 installs firmware version 5.7.3.
+        */
+       if (efw->is_fireworks3 &&
+           (efw->firmware_version == 0x5070000 ||
+            efw->firmware_version == 0x5070300 ||
+            efw->firmware_version == 0x5080000))
+               efw->tx_stream.tx_first_dbc = 0x02;
        /* AudioFire9 always reports wrong dbs. */
        if (efw->is_af9)
                efw->tx_stream.flags |= CIP_WRONG_DBS;
index b2da19b60f4e25cf6ec858832d6f33349313ca66..358f16195483f6b51c3a84a3cff672d8696951a6 100644 (file)
@@ -44,16 +44,10 @@ int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *ebus)
 
        offset = snd_hdac_chip_readl(bus, LLCH);
 
-       if (offset < 0)
-               return -EIO;
-
        /* Lets walk the linked capabilities list */
        do {
                cur_cap = _snd_hdac_chip_read(l, bus, offset);
 
-               if (cur_cap < 0)
-                       return -EIO;
-
                dev_dbg(bus->dev, "Capability version: 0x%x\n",
                                ((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF));
 
index f8ffbdbb450d785e281bd7a0aae3c6d8a8c2a2ab..3de47dd1a76d856f95c2051b1bde6e55522f2b55 100644 (file)
@@ -299,7 +299,7 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
                if (stream->direction != substream->stream)
                        continue;
 
-               if (stream->opened) {
+               if (!stream->opened) {
                        if (!hstream->decoupled)
                                snd_hdac_ext_stream_decouple(ebus, hstream, true);
                        res = hstream;
index c456c04e0928d2c4ef9e65d114bdec3bf6b9332f..374ea53288ca25ff6093467012afd2607192f505 100644 (file)
@@ -5189,6 +5189,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+       SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5290,6 +5292,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2211, "Thinkpad W541", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
index 6492bca8c70f8bbfdf31e2fb13321904f5a5b00a..4ca12665ff730c6980e2a4366dd195e47f94aecd 100644 (file)
@@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl,
        int changed;
 
        mutex_lock(&chip->mutex);
-       changed = !value->value.integer.value[0] != chip->dac_mute;
+       changed = (!value->value.integer.value[0]) != chip->dac_mute;
        if (changed) {
                chip->dac_mute = !value->value.integer.value[0];
                chip->model.update_dac_mute(chip);
index 2ae9619443d15dbeaf128cab385347db6bf26369..1d651b8a89570404cd306f239b17e939b1d5fa81 100644 (file)
@@ -30,6 +30,9 @@ config SND_SOC_GENERIC_DMAENGINE_PCM
        bool
        select SND_DMAENGINE_PCM
 
+config SND_SOC_TOPOLOGY
+       bool
+
 # All the supported SoCs
 source "sound/soc/adi/Kconfig"
 source "sound/soc/atmel/Kconfig"
index e189903fabf42958eff143e487999f0f013b85d7..669648b41d3027adf29ead27eda5f72a6ed0aaf1 100644 (file)
@@ -1,6 +1,9 @@
 snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
 snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o
+
+ifneq ($(CONFIG_SND_SOC_TOPOLOGY),)
 snd-soc-core-objs += soc-topology.o
+endif
 
 ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
 snd-soc-core-objs += soc-generic-dmaengine-pcm.o
index d7ec4756e45bf9bab0c08059ee74c39b4ea3c812..8e36198474d94d59cd8b7bda10e6be434103fa55 100644 (file)
@@ -457,14 +457,14 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
        case SND_SOC_DAIFMT_RIGHT_J:
                if (params_width(params) == 16) {
                        snd_soc_update_bits(codec, CS4265_DAC_CTL,
-                               CS4265_DAC_CTL_DIF, (1 << 5));
+                               CS4265_DAC_CTL_DIF, (2 << 4));
                        snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-                               CS4265_SPDIF_CTL2_DIF, (1 << 7));
+                               CS4265_SPDIF_CTL2_DIF, (2 << 6));
                } else {
                        snd_soc_update_bits(codec, CS4265_DAC_CTL,
-                               CS4265_DAC_CTL_DIF, (3 << 5));
+                               CS4265_DAC_CTL_DIF, (3 << 4));
                        snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-                               CS4265_SPDIF_CTL2_DIF, (1 << 7));
+                               CS4265_SPDIF_CTL2_DIF, (3 << 6));
                }
                break;
        case SND_SOC_DAIFMT_LEFT_J:
@@ -473,7 +473,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
                snd_soc_update_bits(codec, CS4265_ADC_CTL,
                        CS4265_ADC_DIF, 0);
                snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
-                       CS4265_SPDIF_CTL2_DIF, (1 << 6));
+                       CS4265_SPDIF_CTL2_DIF, 0);
 
                break;
        default:
index e9cc3aae5366d30d0522b105ed11fdb88943948f..961bd7e5877ee42c3e50f7a3e2e826a1ffab9cc8 100644 (file)
@@ -3341,6 +3341,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
                break;
 
        case RT5645_DMIC_DATA_GPIO5:
+               regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
+                       RT5645_I2S2_DAC_PIN_MASK, RT5645_I2S2_DAC_PIN_GPIO);
                regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
                        RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5);
                regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
index 0353a6a273ab4ca2bd5136c28aaedee14c6e54b2..278bb9f464c4120b681c7ad5c6086df89d6de3ab 100644 (file)
 #define RT5645_GP6_PIN_SFT                     6
 #define RT5645_GP6_PIN_GPIO6                   (0x0 << 6)
 #define RT5645_GP6_PIN_DMIC2_SDA               (0x1 << 6)
+#define RT5645_I2S2_DAC_PIN_MASK               (0x1 << 4)
+#define RT5645_I2S2_DAC_PIN_SFT                        4
+#define RT5645_I2S2_DAC_PIN_I2S                        (0x0 << 4)
+#define RT5645_I2S2_DAC_PIN_GPIO               (0x1 << 4)
 #define RT5645_GP8_PIN_MASK                    (0x1 << 3)
 #define RT5645_GP8_PIN_SFT                     3
 #define RT5645_GP8_PIN_GPIO8                   (0x0 << 3)
index 4c01bb43928d136c93963730f1149f9d9bf4a66b..5bbaa667bec1c608c35968c983eaeece020ad8db 100644 (file)
@@ -701,6 +701,8 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
        if (byt == NULL)
                return -ENOMEM;
 
+       byt->dev = dev;
+
        ipc = &byt->ipc;
        ipc->dev = dev;
        ipc->ops.tx_msg = byt_tx_msg;
index f95f271aab0ce30412954428b426af2461438dea..f6efa9d4acadd5e056ea0a0494ed51da859fbc3a 100644 (file)
@@ -2119,6 +2119,8 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
        if (hsw == NULL)
                return -ENOMEM;
 
+       hsw->dev = dev;
+
        ipc = &hsw->ipc;
        ipc->dev = dev;
        ipc->ops.tx_msg = hsw_tx_msg;
index 59ac211f8fe7c273c84dcd67ac94bb27cb760564..31068b8f3db0dd965cc2bdc6742684dc2cb8d8ea 100644 (file)
@@ -33,6 +33,7 @@
 #include <sound/soc.h>
 #include <sound/soc-dapm.h>
 #include <sound/soc-topology.h>
+#include <sound/tlv.h>
 
 /*
  * We make several passes over the data (since it won't necessarily be ordered)
@@ -534,7 +535,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
                        k->put = bops[i].put;
                if (k->get == NULL && bops[i].id == hdr->ops.get)
                        k->get = bops[i].get;
-               if (k->info == NULL && ops[i].id == hdr->ops.info)
+               if (k->info == NULL && bops[i].id == hdr->ops.info)
                        k->info = bops[i].info;
        }
 
@@ -579,28 +580,51 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
        return 0;
 }
 
+
+static int soc_tplg_create_tlv_db_scale(struct soc_tplg *tplg,
+       struct snd_kcontrol_new *kc, struct snd_soc_tplg_tlv_dbscale *scale)
+{
+       unsigned int item_len = 2 * sizeof(unsigned int);
+       unsigned int *p;
+
+       p = kzalloc(item_len + 2 * sizeof(unsigned int), GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+
+       p[0] = SNDRV_CTL_TLVT_DB_SCALE;
+       p[1] = item_len;
+       p[2] = scale->min;
+       p[3] = (scale->step & TLV_DB_SCALE_MASK)
+                       | (scale->mute ? TLV_DB_SCALE_MUTE : 0);
+
+       kc->tlv.p = (void *)p;
+       return 0;
+}
+
 static int soc_tplg_create_tlv(struct soc_tplg *tplg,
-       struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_tlv *tplg_tlv)
+       struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc)
 {
-       struct snd_ctl_tlv *tlv;
-       int size;
+       struct snd_soc_tplg_ctl_tlv *tplg_tlv;
 
-       if (tplg_tlv->count == 0)
+       if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
                return 0;
 
-       size = ((tplg_tlv->count + (sizeof(unsigned int) - 1)) &
-               ~(sizeof(unsigned int) - 1));
-       tlv = kzalloc(sizeof(*tlv) + size, GFP_KERNEL);
-       if (tlv == NULL)
-               return -ENOMEM;
-
-       dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n",
-               tplg_tlv->numid, size);
+       if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+               kc->tlv.c = snd_soc_bytes_tlv_callback;
+       } else {
+               tplg_tlv = &tc->tlv;
+               switch (tplg_tlv->type) {
+               case SNDRV_CTL_TLVT_DB_SCALE:
+                       return soc_tplg_create_tlv_db_scale(tplg, kc,
+                                       &tplg_tlv->scale);
 
-       tlv->numid = tplg_tlv->numid;
-       tlv->length = size;
-       memcpy(&tlv->tlv[0], tplg_tlv->data, size);
-       kc->tlv.p = (void *)tlv;
+               /* TODO: add support for other TLV types */
+               default:
+                       dev_dbg(tplg->dev, "Unsupported TLV type %d\n",
+                                       tplg_tlv->type);
+                       return -EINVAL;
+               }
+       }
 
        return 0;
 }
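The four words written above follow the standard ALSA dB-scale TLV layout (type, byte length, minimum, step with optional mute flag). The same blob can be produced statically with the stock macro from sound/tlv.h; the values below are illustrative only:

    #include <sound/tlv.h>

    /* -64.00 dB minimum, 1.00 dB steps, lowest value mutes; expands
     * to the identical { type, length, min, step|mute } words.
     */
    static const DECLARE_TLV_DB_SCALE(example_db_scale, -6400, 100, 1);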
@@ -772,7 +796,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
                }
 
                /* create any TLV data */
-               soc_tplg_create_tlv(tplg, &kc, &mc->tlv);
+               soc_tplg_create_tlv(tplg, &kc, &mc->hdr);
 
                /* register control here */
                err = soc_tplg_add_kcontrol(tplg, &kc,
@@ -1350,6 +1374,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
        template.reg = w->reg;
        template.shift = w->shift;
        template.mask = w->mask;
+       template.subseq = w->subseq;
        template.on_val = w->invert ? 0 : 1;
        template.off_val = w->invert ? 1 : 0;
        template.ignore_suspend = w->ignore_suspend;
index 1fab9778807a0015f2578f0504306ed852eb4932..0450593980fd3525a65feca17dccea0e9940506e 100644 (file)
@@ -638,7 +638,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
        int err = -ENODEV;
 
        down_read(&chip->shutdown_rwsem);
-       if (chip->probing && chip->in_pm)
+       if (chip->probing || chip->in_pm)
                err = 0;
        else if (!chip->shutdown)
                err = usb_autopm_get_interface(chip->pm_intf);
index de165a1b92402ac7a6267bd0a0c5aa30a0053c92..20b56eb987f89f21fe20e53decf8ee1fbe275a2e 100644 (file)
@@ -521,6 +521,15 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                goto out_child;
        }
 
+       /*
+        * Normally perf_session__new would do this, but it doesn't have the
+        * evlist.
+        */
+       if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
+               pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
+               rec->tool.ordered_events = false;
+       }
+
        if (!rec->evlist->nr_groups)
                perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
 
@@ -965,9 +974,11 @@ static struct record record = {
        .tool = {
                .sample         = process_sample_event,
                .fork           = perf_event__process_fork,
+               .exit           = perf_event__process_exit,
                .comm           = perf_event__process_comm,
                .mmap           = perf_event__process_mmap,
                .mmap2          = perf_event__process_mmap2,
+               .ordered_events = true,
        },
 };
 
index ecf319728f25d649768e33b3e1f274d04432f3fc..6135cc07213cb0d3379d58033ea87aa8dc580878 100644 (file)
@@ -601,8 +601,8 @@ static void display_sig(int sig __maybe_unused)
 
 static void display_setup_sig(void)
 {
-       signal(SIGSEGV, display_sig);
-       signal(SIGFPE,  display_sig);
+       signal(SIGSEGV, sighandler_dump_stack);
+       signal(SIGFPE, sighandler_dump_stack);
        signal(SIGINT,  display_sig);
        signal(SIGQUIT, display_sig);
        signal(SIGTERM, display_sig);
index 094ddaee104c73d7caae22d851d79629c4715cd3..d31fac19c30b2d298ab2cf3a710b9b27c5764144 100644 (file)
@@ -638,7 +638,7 @@ ifndef DESTDIR
 prefix ?= $(HOME)
 endif
 bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+bindir = $(abspath $(prefix)/$(bindir_relative))
 mandir = share/man
 infodir = share/info
 perfexecdir = libexec/perf-core
index 7ff682770fdb16e71b368af16efad0edd9443d87..f1a4c833121e306a364851328f6409cb22d779eb 100644 (file)
@@ -1387,6 +1387,24 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
                                                        event->fork.ptid);
        int err = 0;
 
+       if (dump_trace)
+               perf_event__fprintf_task(event, stdout);
+
+       /*
+        * There may be an existing thread that is not actually the parent,
+        * either because we are processing events out of order, or because the
+        * (fork) event that would have removed the thread was lost. Assume the
+        * latter case and continue on as best we can.
+        */
+       if (parent->pid_ != (pid_t)event->fork.ppid) {
+               dump_printf("removing erroneous parent thread %d/%d\n",
+                           parent->pid_, parent->tid);
+               machine__remove_thread(machine, parent);
+               thread__put(parent);
+               parent = machine__findnew_thread(machine, event->fork.ppid,
+                                                event->fork.ptid);
+       }
+
        /* if a thread currently exists for the thread id remove it */
        if (thread != NULL) {
                machine__remove_thread(machine, thread);
@@ -1395,8 +1413,6 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
 
        thread = machine__findnew_thread(machine, event->fork.pid,
                                         event->fork.tid);
-       if (dump_trace)
-               perf_event__fprintf_task(event, stdout);
 
        if (thread == NULL || parent == NULL ||
            thread__fork(thread, parent, sample->time) < 0) {
index 53e8bb7bc8521a09f1347d48de2a0dd1eeb5e0bc..2a5d8d7698aedb8c82bdf8488f1fb62ded5b438a 100644 (file)
@@ -85,7 +85,7 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
        else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
                update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
-               update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+               update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, TRANSACTION_START))
                update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, ELISION_START))
@@ -398,20 +398,18 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
                                " #   %5.2f%% aborted cycles         ",
                                100.0 * ((total2-avg) / total));
        } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
-                  avg > 0 &&
                   runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
 
-               if (total)
+               if (avg)
                        ratio = total / avg;
 
                fprintf(out, " # %8.0f cycles / transaction   ", ratio);
        } else if (perf_stat_evsel__is(evsel, ELISION_START) &&
-                  avg > 0 &&
                   runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
                total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
 
-               if (total)
+               if (avg)
                        ratio = total / avg;
 
                fprintf(out, " # %8.0f cycles / elision       ", ratio);
index 28c4b746baa19bef9830814c4fe7c69f1be0b06b..0a9ae8014729c085ffd2872e2bc13871f79c9908 100644 (file)
@@ -191,6 +191,12 @@ static int thread__clone_map_groups(struct thread *thread,
        if (thread->pid_ == parent->pid_)
                return 0;
 
+       if (thread->mg == parent->mg) {
+               pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
+                        thread->pid_, thread->tid, parent->pid_, parent->tid);
+               return 0;
+       }
+
        /* But this one is new process, copy maps. */
        for (i = 0; i < MAP__NR_TYPES; ++i)
                if (map_groups__clone(thread->mg, parent->mg, i) < 0)