]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
authorDavid S. Miller <davem@davemloft.net>
Sat, 28 Jan 2017 15:33:06 +0000 (10:33 -0500)
committerDavid S. Miller <davem@davemloft.net>
Sat, 28 Jan 2017 15:33:06 +0000 (10:33 -0500)
Two trivial overlapping change conflicts in MPLS and mlx5.

Signed-off-by: David S. Miller <davem@davemloft.net>
485 files changed:
Documentation/devicetree/bindings/net/mediatek-net.txt
Documentation/devicetree/bindings/net/phy.txt
Documentation/devicetree/bindings/spi/sh-msiof.txt
Documentation/filesystems/proc.txt
Documentation/power/states.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/include/asm/cache.h
arch/arc/include/asm/entry-arcv2.h
arch/arc/include/asm/module.h
arch/arc/include/asm/ptrace.h
arch/arc/include/asm/setup.h
arch/arc/kernel/intc-arcv2.c
arch/arc/kernel/intc-compact.c
arch/arc/kernel/mcip.c
arch/arc/kernel/module.c
arch/arc/mm/cache.c
arch/arc/mm/init.c
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-icev2.dts
arch/arm/boot/dts/bcm-nsp.dtsi
arch/arm/boot/dts/da850-evm.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra72-evm-revc.dts
arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/qcom-mdm9615.dtsi
arch/arm/boot/dts/sun6i-a31-hummingbird.dts
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/s3c2410_defconfig
arch/arm/include/asm/cputype.h
arch/arm/include/asm/ftrace.h
arch/arm/include/asm/virt.h
arch/arm/include/uapi/asm/types.h [moved from arch/arm/include/asm/types.h with 94% similarity]
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/smp_tlb.c
arch/arm/kvm/arm.c
arch/arm/mach-omap1/dma.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mach-ux500/pm.c
arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
arch/arm64/boot/dts/exynos/exynos5433.dtsi
arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
arch/arm64/boot/dts/xilinx/zynqmp.dtsi
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/virt.h
arch/arm64/include/uapi/asm/ptrace.h
arch/arm64/kernel/entry.S
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/traps.c
arch/arm64/mm/init.c
arch/frv/include/asm/atomic.h
arch/mn10300/include/asm/switch_to.h
arch/powerpc/include/asm/book3s/64/hash-4k.h
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/include/asm/hugetlb.h
arch/powerpc/include/asm/nohash/pgtable.h
arch/powerpc/include/asm/page.h
arch/powerpc/include/asm/perf_event_server.h
arch/powerpc/include/asm/pgtable-be-types.h
arch/powerpc/include/asm/pgtable-types.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugetlbpage-hash64.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/init-common.c
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/power9-events-list.h
arch/powerpc/perf/power9-pmu.c
arch/powerpc/sysdev/xics/icp-opal.c
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/defconfig
arch/s390/include/asm/ctl_reg.h
arch/s390/kernel/ptrace.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/pgtable.c
arch/tile/kernel/ptrace.c
arch/x86/events/amd/ibs.c
arch/x86/events/intel/core.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kvm/x86.c
arch/x86/pci/acpi.c
block/blk-mq.c
drivers/acpi/acpica/tbdata.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/sleep.c
drivers/acpi/video_detect.c
drivers/base/memory.c
drivers/block/nbd.c
drivers/block/xen-blkfront.c
drivers/char/virtio_console.c
drivers/clk/samsung/clk-exynos5420.c
drivers/clocksource/exynos_mct.c
drivers/cpufreq/intel_pstate.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
drivers/gpu/drm/ast/ast_drv.h
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_post.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/cirrus/Kconfig
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/gvt/reg.h
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_gem.c
drivers/gpu/drm/vc4/vc4_render_cl.c
drivers/gpu/drm/virtio/virtgpu_fb.c
drivers/hid/hid-corsair.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/umem.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/qedr_cm.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_qp.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/isdn/hardware/eicon/message.c
drivers/media/cec/cec-adap.c
drivers/media/dvb-core/dvb_net.c
drivers/media/i2c/Kconfig
drivers/media/i2c/smiapp/smiapp-core.c
drivers/media/i2c/tvp5150.c
drivers/media/i2c/tvp5150_reg.h
drivers/media/pci/cobalt/cobalt-driver.c
drivers/media/pci/cobalt/cobalt-driver.h
drivers/media/usb/dvb-usb/pctv452e.c
drivers/memstick/core/memstick.c
drivers/mmc/host/dw_mmc.c
drivers/net/can/c_can/c_can_pci.c
drivers/net/can/ti_hecc.c
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-pci.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_ll2.h
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/gtp.c
drivers/net/macvtap.c
drivers/net/phy/bcm63xx.c
drivers/net/phy/dp83848.c
drivers/net/phy/marvell.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/phy/phy_led_triggers.c
drivers/net/tun.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pmem.c
drivers/nvme/host/fc.c
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/nvme/target/fc.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/rdma.c
drivers/pci/host/pci-xgene-msi.c
drivers/pci/host/pcie-designware.c
drivers/pci/probe.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-broxton.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/meson/pinctrl-meson-gxbb.c
drivers/pinctrl/meson/pinctrl-meson-gxl.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel_mid_powerbtn.c
drivers/platform/x86/mlx-platform.c
drivers/platform/x86/surface3-wmi.c
drivers/s390/virtio/virtio_ccw.c
drivers/scsi/bfa/bfad_bsg.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_nx2.c
drivers/scsi/qla2xxx/qla_nx2.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/qla_tmpl.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/qla2xxx/tcm_qla2xxx.h
drivers/scsi/sd.c
drivers/scsi/ses.c
drivers/soc/ti/wkup_m3_ipc.c
drivers/spi/Kconfig
drivers/spi/spi-armada-3700.c
drivers/spi/spi-axi-spi-engine.c
drivers/spi/spi-davinci.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-dw.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-sh-msiof.c
drivers/thermal/rockchip_thermal.c
drivers/thermal/thermal_core.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/params.c
drivers/usb/dwc3/dwc3-exynos.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/atmel_usba_udc.h
drivers/usb/host/xhci-plat.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vhost/scsi.c
drivers/vhost/vsock.c
drivers/video/fbdev/core/fbcmap.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_ring.c
drivers/xen/platform-pci.c
drivers/xen/swiotlb-xen.c
fs/Kconfig
fs/block_dev.c
fs/btrfs/inode.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/dax.c
fs/ext2/Kconfig
fs/ext4/Kconfig
fs/fuse/dev.c
fs/fuse/dir.c
fs/overlayfs/namei.c
fs/proc/base.c
fs/romfs/super.c
fs/ubifs/Kconfig
fs/ubifs/dir.c
fs/ubifs/ioctl.c
fs/ubifs/journal.c
fs/ubifs/tnc.c
fs/userfaultfd.c
fs/xfs/libxfs/xfs_ag_resv.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_dir2.c
fs/xfs/libxfs/xfs_dir2.h
fs/xfs/libxfs/xfs_ialloc_btree.c
fs/xfs/libxfs/xfs_ialloc_btree.h
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/libxfs/xfs_sb.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_linux.h
fs/xfs/xfs_mount.h
fs/xfs/xfs_qm.c
include/drm/drm_atomic.h
include/drm/drm_mode_config.h
include/kvm/arm_arch_timer.h
include/linux/bpf.h
include/linux/cpuhotplug.h
include/linux/gpio/driver.h
include/linux/kernel.h
include/linux/memory_hotplug.h
include/linux/micrel_phy.h
include/linux/mmzone.h
include/linux/nmi.h
include/linux/phy.h
include/linux/phy_led_triggers.h
include/linux/rcupdate.h
include/linux/suspend.h
include/linux/virtio_net.h
include/net/ipv6.h
include/net/lwtunnel.h
include/net/netfilter/nf_tables.h
include/net/netfilter/nft_fib.h
include/rdma/ib_verbs.h
include/scsi/libfc.h
include/uapi/linux/cec-funcs.h
include/uapi/linux/netfilter/nf_log.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/rdma/Kbuild
include/uapi/rdma/cxgb3-abi.h
kernel/bpf/arraymap.c
kernel/bpf/hashtab.c
kernel/bpf/stackmap.c
kernel/bpf/syscall.c
kernel/cpu.c
kernel/module.c
kernel/panic.c
kernel/power/suspend.c
kernel/rcu/rcu.h
kernel/rcu/tiny.c
kernel/rcu/tiny_plugin.h
kernel/rcu/tree.c
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h
kernel/rcu/update.c
kernel/sysctl.c
kernel/ucount.c
kernel/watchdog.c
kernel/watchdog_hld.c
lib/ioremap.c
lib/radix-tree.c
mm/huge_memory.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/page_alloc.c
mm/slub.c
net/batman-adv/fragmentation.c
net/bridge/br_netlink.c
net/ceph/crypto.c
net/core/dev.c
net/core/ethtool.c
net/core/lwt_bpf.c
net/core/lwtunnel.c
net/dccp/ipv6.c
net/dsa/slave.c
net/ipv4/fib_frontend.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel_core.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/netfilter/nf_reject_ipv4.c
net/ipv4/netfilter/nft_fib_ipv4.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv6/addrconf.c
net/ipv6/ila/ila_lwt.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/netfilter/ip6t_rpfilter.c
net/ipv6/netfilter/nf_reject_ipv6.c
net/ipv6/netfilter/nft_fib_ipv6.c
net/ipv6/route.c
net/ipv6/seg6.c
net/ipv6/seg6_iptunnel.c
net/ipv6/tcp_ipv6.c
net/mac80211/rate.c
net/mpls/af_mpls.c
net/mpls/mpls_iptunnel.c
net/netfilter/Kconfig
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_log.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_dynset.c
net/netfilter/nft_log.c
net/netfilter/nft_lookup.c
net/netfilter/nft_objref.c
net/netfilter/nft_set_hash.c
net/netfilter/nft_set_rbtree.c
net/packet/af_packet.c
net/sctp/ipv6.c
net/sctp/offload.c
net/sctp/socket.c
net/tipc/node.c
net/tipc/server.c
net/tipc/subscr.c
net/tipc/subscr.h
net/unix/af_unix.c
samples/bpf/tc_l2_redirect_kern.c
samples/bpf/xdp_tx_iptunnel_kern.c
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/probe-finder.h
tools/testing/selftests/bpf/test_lru_map.c
tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
tools/virtio/ringtest/main.h
tools/virtio/ringtest/run-on-all.sh
virt/kvm/arm/arch_timer.c
virt/kvm/arm/hyp/timer-sr.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic-v3.c

index c010fafc66a8e4d6f0b1543104ac352f82ec992f..c7194e87d5f4b7d9b61927a430479e39f52ddca8 100644 (file)
@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
 * Ethernet controller node
 
 Required properties:
-- compatible: Should be "mediatek,mt7623-eth"
+- compatible: Should be "mediatek,mt2701-eth"
 - reg: Address and length of the register set for the device
 - interrupts: Should contain the three frame engines interrupts in numeric
        order. These are fe_int0, fe_int1 and fe_int2.
index ff1bc4b1bb3b5e1d91a1747fcc73c24d587cd1a8..fb5056b22685c249c9bf812a1ada038070d8c03d 100644 (file)
@@ -19,8 +19,9 @@ Optional Properties:
   specifications. If neither of these are specified, the default is to
   assume clause 22.
 
-  If the phy's identifier is known then the list may contain an entry
-  of the form: "ethernet-phy-idAAAA.BBBB" where
+  If the PHY reports an incorrect ID (or none at all) then the
+  "compatible" list may contain an entry with the correct PHY ID in the
+  form: "ethernet-phy-idAAAA.BBBB" where
      AAAA - The value of the 16 bit Phy Identifier 1 register as
             4 hex digits. This is the chip vendor OUI bits 3:18
      BBBB - The value of the 16 bit Phy Identifier 2 register as
index da6614c6379604bb209156cea87a4e3b27a7d1b8..dc975064fa273c3600eee7d822c94eb5fea73502 100644 (file)
@@ -1,17 +1,23 @@
 Renesas MSIOF spi controller
 
 Required properties:
-- compatible           : "renesas,msiof-<soctype>" for SoCs,
-                        "renesas,sh-msiof" for SuperH, or
-                        "renesas,sh-mobile-msiof" for SH Mobile series.
-                        Examples with soctypes are:
-                        "renesas,msiof-r8a7790" (R-Car H2)
+- compatible           : "renesas,msiof-r8a7790" (R-Car H2)
                         "renesas,msiof-r8a7791" (R-Car M2-W)
                         "renesas,msiof-r8a7792" (R-Car V2H)
                         "renesas,msiof-r8a7793" (R-Car M2-N)
                         "renesas,msiof-r8a7794" (R-Car E2)
                         "renesas,msiof-r8a7796" (R-Car M3-W)
                         "renesas,msiof-sh73a0" (SH-Mobile AG5)
+                        "renesas,sh-mobile-msiof" (generic SH-Mobile compatibile device)
+                        "renesas,rcar-gen2-msiof" (generic R-Car Gen2 compatible device)
+                        "renesas,rcar-gen3-msiof" (generic R-Car Gen3 compatible device)
+                        "renesas,sh-msiof"      (deprecated)
+
+                        When compatible with the generic version, nodes
+                        must list the SoC-specific version corresponding
+                        to the platform first followed by the generic
+                        version.
+
 - reg                  : A list of offsets and lengths of the register sets for
                         the device.
                         If only one register set is present, it is to be used
@@ -61,7 +67,8 @@ Documentation/devicetree/bindings/pinctrl/renesas,*.
 Example:
 
        msiof0: spi@e6e20000 {
-               compatible = "renesas,msiof-r8a7791";
+               compatible = "renesas,msiof-r8a7791",
+                            "renesas,rcar-gen2-msiof";
                reg = <0 0xe6e20000 0 0x0064>;
                interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
index 72624a16b79284c0f2484741144e6b90ac084064..c94b4675d021ffd374de22d7d83df61dbb6c34dd 100644 (file)
@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise
 snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
 It's slow but very precise.
 
-Table 1-2: Contents of the status files (as of 4.1)
+Table 1-2: Contents of the status files (as of 4.8)
 ..............................................................................
  Field                       Content
  Name                        filename of the executable
+ Umask                       file mode creation mask
  State                       state (R is running, S is sleeping, D is sleeping
                              in an uninterruptible wait, Z is zombie,
                             T is traced or stopped)
@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1)
  TracerPid                   PID of process tracing this process (0 if not)
  Uid                         Real, effective, saved set, and  file system UIDs
  Gid                         Real, effective, saved set, and  file system GIDs
- Umask                       file mode creation mask
  FDSize                      number of file descriptor slots currently allocated
  Groups                      supplementary group list
  NStgid                      descendant namespace thread group ID hierarchy
@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1)
  VmPeak                      peak virtual memory size
  VmSize                      total program size
  VmLck                       locked memory size
+ VmPin                       pinned memory size
  VmHWM                       peak resident set size ("high water mark")
  VmRSS                       size of memory portions. It contains the three
                              following parts (VmRSS = RssAnon + RssFile + RssShmem)
index 8a39ce45d8a01c298ec3b62b878e2623cba5a958..008ecb588317bc1d354bb5f50513605d6154237c 100644 (file)
@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
 The default suspend mode (ie. the one to be used without writing anything into
 /sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
 "s2idle", but it can be overridden by the value of the "mem_sleep_default"
-parameter in the kernel command line.  On some ACPI-based systems, depending on
-the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
-is supported.
+parameter in the kernel command line.
 
 The properties of all of the sleep states are described below.
 
index d76fccd092662679ff6b693c0589316fe4a80237..cc106f71a9b830c1be00445bfd1ac791058635e7 100644 (file)
@@ -976,6 +976,7 @@ M:  Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.armlinux.org.uk/
 S:     Maintained
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git
 F:     arch/arm/
 
 ARM SUB-ARCHITECTURES
@@ -1153,6 +1154,7 @@ ARM/CLKDEV SUPPORT
 M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git clkdev
 F:     arch/arm/include/asm/clkdev.h
 F:     drivers/clk/clkdev.c
 
@@ -1688,6 +1690,7 @@ M:        Krzysztof Kozlowski <krzk@kernel.org>
 R:     Javier Martinez Canillas <javier@osg.samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+Q:     https://patchwork.kernel.org/project/linux-samsung-soc/list/
 S:     Maintained
 F:     arch/arm/boot/dts/s3c*
 F:     arch/arm/boot/dts/s5p*
@@ -3570,7 +3573,7 @@ F:        drivers/infiniband/hw/cxgb3/
 F:     include/uapi/rdma/cxgb3-abi.h
 
 CXGB4 ETHERNET DRIVER (CXGB4)
-M:     Hariprasad S <hariprasad@chelsio.com>
+M:     Ganesh Goudar <ganeshgr@chelsio.com>
 L:     netdev@vger.kernel.org
 W:     http://www.chelsio.com
 S:     Supported
@@ -4103,12 +4106,18 @@ F:      drivers/gpu/drm/bridge/
 
 DRM DRIVER FOR BOCHS VIRTUAL GPU
 M:     Gerd Hoffmann <kraxel@redhat.com>
-S:     Odd Fixes
+L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
+S:     Maintained
 F:     drivers/gpu/drm/bochs/
 
 DRM DRIVER FOR QEMU'S CIRRUS DEVICE
 M:     Dave Airlie <airlied@redhat.com>
-S:     Odd Fixes
+M:     Gerd Hoffmann <kraxel@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
+S:     Obsolete
+W:     https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
 F:     drivers/gpu/drm/cirrus/
 
 RADEON and AMDGPU DRM DRIVERS
@@ -4150,7 +4159,7 @@ F:        Documentation/gpu/i915.rst
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:      Zhenyu Wang <zhenyuw@linux.intel.com>
 M:      Zhi Wang <zhi.a.wang@intel.com>
-L:      igvt-g-dev@lists.01.org
+L:      intel-gvt-dev@lists.freedesktop.org
 L:      intel-gfx@lists.freedesktop.org
 W:      https://01.org/igvt-g
 T:      git https://github.com/01org/gvt-linux.git
@@ -4301,7 +4310,10 @@ F:       Documentation/devicetree/bindings/display/renesas,du.txt
 
 DRM DRIVER FOR QXL VIRTUAL GPU
 M:     Dave Airlie <airlied@redhat.com>
-S:     Odd Fixes
+M:     Gerd Hoffmann <kraxel@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
+S:     Maintained
 F:     drivers/gpu/drm/qxl/
 F:     include/uapi/drm/qxl_drm.h
 
@@ -7703,8 +7715,10 @@ F:       drivers/net/dsa/mv88e6xxx/
 F:     Documentation/devicetree/bindings/net/dsa/marvell.txt
 
 MARVELL ARMADA DRM SUPPORT
-M:     Russell King <rmk+kernel@armlinux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-devel
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-fixes
 F:     drivers/gpu/drm/armada/
 F:     include/uapi/drm/armada_drm.h
 F:     Documentation/devicetree/bindings/display/armada/
@@ -8909,8 +8923,10 @@ S:       Supported
 F:     drivers/nfc/nxp-nci
 
 NXP TDA998X DRM DRIVER
-M:     Russell King <rmk+kernel@armlinux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Supported
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
 F:     drivers/gpu/drm/i2c/tda998x_drv.c
 F:     include/drm/i2c/tda998x.h
 
@@ -13105,6 +13121,7 @@ M:      David Airlie <airlied@linux.ie>
 M:     Gerd Hoffmann <kraxel@redhat.com>
 L:     dri-devel@lists.freedesktop.org
 L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
 S:     Maintained
 F:     drivers/gpu/drm/virtio/
 F:     include/uapi/linux/virtio_gpu.h
@@ -13456,6 +13473,7 @@ F:      arch/x86/
 
 X86 PLATFORM DRIVERS
 M:     Darren Hart <dvhart@infradead.org>
+M:     Andy Shevchenko <andy@infradead.org>
 L:     platform-driver-x86@vger.kernel.org
 T:     git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
 S:     Maintained
@@ -13627,6 +13645,7 @@ F:      drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
 M:     Seth Jennings <sjenning@redhat.com>
+M:     Dan Streetman <ddstreet@ieee.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zbud.c
@@ -13682,6 +13701,7 @@ F:      Documentation/vm/zsmalloc.txt
 
 ZSWAP COMPRESSED SWAP CACHING
 M:     Seth Jennings <sjenning@redhat.com>
+M:     Dan Streetman <ddstreet@ieee.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zswap.c
index 96e2352d10a8cc830812b819cfebb888603c4038..098840012b9bb4604d82c5269e53274170ae656f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
-NAME = Roaring Lionus
+EXTRAVERSION = -rc5
+NAME = Anniversary Edition
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
index c75d29077e4a654276219883629444deec89c955..283099c9560aa296377d588e0402d5e0b49884b4 100644 (file)
@@ -29,7 +29,7 @@ config ARC
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_MEMBLOCK
-       select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+       select HAVE_MOD_ARCH_SPECIFIC
        select HAVE_OPROFILE
        select HAVE_PERF_EVENTS
        select HANDLE_DOMAIN_IRQ
index b3410ff6a62dbcc589ffa411f326d6954c8ba80c..5008021fba9894c0ba44251ddfd41a716d6c6cca 100644 (file)
@@ -67,7 +67,7 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_IC_PTAG_HI     0x1F
 
 /* Bit val in IC_CTRL */
-#define IC_CTRL_CACHE_DISABLE   0x1
+#define IC_CTRL_DIS            0x1
 
 /* Data cache related Auxiliary registers */
 #define ARC_REG_DC_BCR         0x72    /* Build Config reg */
@@ -80,8 +80,9 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_DC_PTAG_HI     0x5F
 
 /* Bit val in DC_CTRL */
-#define DC_CTRL_INV_MODE_FLUSH  0x40
-#define DC_CTRL_FLUSH_STATUS    0x100
+#define DC_CTRL_DIS            0x001
+#define DC_CTRL_INV_MODE_FLUSH 0x040
+#define DC_CTRL_FLUSH_STATUS   0x100
 
 /*System-level cache (L2 cache) related Auxiliary registers */
 #define ARC_REG_SLC_CFG                0x901
@@ -92,8 +93,8 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_SLC_RGN_END    0x916
 
 /* Bit val in SLC_CONTROL */
+#define SLC_CTRL_DIS           0x001
 #define SLC_CTRL_IM            0x040
-#define SLC_CTRL_DISABLE       0x001
 #define SLC_CTRL_BUSY          0x100
 #define SLC_CTRL_RGN_OP_INV    0x200
 
index b5ff87e6f4b71352fc8990624114c025b6c5ccb0..aee1a77934cf694e37ae579347a37bc167e43762 100644 (file)
@@ -16,6 +16,7 @@
        ;
        ; Now manually save: r12, sp, fp, gp, r25
 
+       PUSH    r30
        PUSH    r12
 
        ; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
        POPAX   AUX_USER_SP
 1:
        POP     r12
+       POP     r30
 
 .endm
 
index 6e91d8b339c3616b59d7b389353c477acba8a418..567590ea8f6c9166d7cef4643529ae621910ddce 100644 (file)
 
 #include <asm-generic/module.h>
 
-#ifdef CONFIG_ARC_DW2_UNWIND
 struct mod_arch_specific {
+#ifdef CONFIG_ARC_DW2_UNWIND
        void *unw_info;
        int unw_sec_idx;
+#endif
        const char *secstr;
 };
-#endif
 
 #define MODULE_PROC_FAMILY "ARC700"
 
index 69095da1fcfd1e35f16234aaf473896194064d38..47111d565a959d117ab9e2c7c9eea3b852137971 100644 (file)
@@ -84,7 +84,7 @@ struct pt_regs {
        unsigned long fp;
        unsigned long sp;       /* user/kernel sp depending on where we came from  */
 
-       unsigned long r12;
+       unsigned long r12, r30;
 
        /*------- Below list auto saved by h/w -----------*/
        unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
index cb954cdab07087bc6b49e72c8d117c77b905595c..c568a9df82b1a397d73d3fa3c0c523804a2856f2 100644 (file)
@@ -31,6 +31,7 @@ extern int root_mountflags, end_mem;
 
 void setup_processor(void);
 void __init setup_arch_memory(void);
+long __init arc_get_mem_sz(void);
 
 /* Helpers used in arc_*_mumbojumbo routines */
 #define IS_AVAIL1(v, s)                ((v) ? s : "")
index 994dca7014db645b32cfb22753cb25bae4c46566..ecef0fb0b66c37d814b2adc2cc207ef178903bef 100644 (file)
@@ -77,20 +77,20 @@ void arc_init_IRQ(void)
 
 static void arcv2_irq_mask(struct irq_data *data)
 {
-       write_aux_reg(AUX_IRQ_SELECT, data->irq);
+       write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
        write_aux_reg(AUX_IRQ_ENABLE, 0);
 }
 
 static void arcv2_irq_unmask(struct irq_data *data)
 {
-       write_aux_reg(AUX_IRQ_SELECT, data->irq);
+       write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
        write_aux_reg(AUX_IRQ_ENABLE, 1);
 }
 
 void arcv2_irq_enable(struct irq_data *data)
 {
        /* set default priority */
-       write_aux_reg(AUX_IRQ_SELECT, data->irq);
+       write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
        write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
 
        /*
index ce9deb953ca90e1a0309ccd69f9608e8fd32cf70..8c1fd5c007822409ff9bc07e7dbec4eaa9fdad09 100644 (file)
@@ -57,7 +57,7 @@ static void arc_irq_mask(struct irq_data *data)
        unsigned int ienb;
 
        ienb = read_aux_reg(AUX_IENABLE);
-       ienb &= ~(1 << data->irq);
+       ienb &= ~(1 << data->hwirq);
        write_aux_reg(AUX_IENABLE, ienb);
 }
 
@@ -66,7 +66,7 @@ static void arc_irq_unmask(struct irq_data *data)
        unsigned int ienb;
 
        ienb = read_aux_reg(AUX_IENABLE);
-       ienb |= (1 << data->irq);
+       ienb |= (1 << data->hwirq);
        write_aux_reg(AUX_IENABLE, ienb);
 }
 
index 560c4afc2af4882f7e64191350be90efef808754..9274f8ade8c7ce58cdb6b943c9efed3bd995a3bb 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/spinlock.h>
 #include <soc/arc/mcip.h>
 #include <asm/irqflags-arcv2.h>
@@ -221,10 +222,13 @@ static irq_hw_number_t idu_first_hwirq;
 static void idu_cascade_isr(struct irq_desc *desc)
 {
        struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+       struct irq_chip *core_chip = irq_desc_get_chip(desc);
        irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
        irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
 
+       chained_irq_enter(core_chip, desc);
        generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
+       chained_irq_exit(core_chip, desc);
 }
 
 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
index 42e964db29677877438f8b9bc8e6225cc5f64174..3d99a60913325d1ac5229b7b6e5933caddb0cbbc 100644 (file)
@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 #ifdef CONFIG_ARC_DW2_UNWIND
        mod->arch.unw_sec_idx = 0;
        mod->arch.unw_info = NULL;
-       mod->arch.secstr = secstr;
 #endif
+       mod->arch.secstr = secstr;
        return 0;
 }
 
@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 
        }
 
+#ifdef CONFIG_ARC_DW2_UNWIND
        if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
                module->arch.unw_sec_idx = tgtsec;
+#endif
 
        return 0;
 
index ec86ac0e33213b889cd6100e10e95fda8f3c31e4..d408fa21a07c9937a0e2956a6e12a7895ffef684 100644 (file)
@@ -23,7 +23,7 @@
 
 static int l2_line_sz;
 static int ioc_exists;
-int slc_enable = 1, ioc_enable = 0;
+int slc_enable = 1, ioc_enable = 1;
 unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
 unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
 
@@ -271,7 +271,11 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
 
 /*
  * For ARC700 MMUv3 I-cache and D-cache flushes
- * Also reused for HS38 aliasing I-cache configuration
+ *  - ARC700 programming model requires paddr and vaddr be passed in seperate
+ *    AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
+ *    caches actually alias or not.
+ * -  For HS38, only the aliasing I-cache configuration uses the PTAG reg
+ *    (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
  */
 static inline
 void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
@@ -458,6 +462,21 @@ static inline void __dc_entire_op(const int op)
        __after_dc_op(op);
 }
 
+static inline void __dc_disable(void)
+{
+       const int r = ARC_REG_DC_CTRL;
+
+       __dc_entire_op(OP_FLUSH_N_INV);
+       write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
+}
+
+static void __dc_enable(void)
+{
+       const int r = ARC_REG_DC_CTRL;
+
+       write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
+}
+
 /* For kernel mappings cache operation: index is same as paddr */
 #define __dc_line_op_k(p, sz, op)      __dc_line_op(p, p, sz, op)
 
@@ -483,6 +502,8 @@ static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
 #else
 
 #define __dc_entire_op(op)
+#define __dc_disable()
+#define __dc_enable()
 #define __dc_line_op(paddr, vaddr, sz, op)
 #define __dc_line_op_k(paddr, sz, op)
 
@@ -597,6 +618,40 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
 #endif
 }
 
+noinline static void slc_entire_op(const int op)
+{
+       unsigned int ctrl, r = ARC_REG_SLC_CTRL;
+
+       ctrl = read_aux_reg(r);
+
+       if (!(op & OP_FLUSH))           /* i.e. OP_INV */
+               ctrl &= ~SLC_CTRL_IM;   /* clear IM: Disable flush before Inv */
+       else
+               ctrl |= SLC_CTRL_IM;
+
+       write_aux_reg(r, ctrl);
+
+       write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+
+       /* Important to wait for flush to complete */
+       while (read_aux_reg(r) & SLC_CTRL_BUSY);
+}
+
+static inline void arc_slc_disable(void)
+{
+       const int r = ARC_REG_SLC_CTRL;
+
+       slc_entire_op(OP_FLUSH_N_INV);
+       write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
+}
+
+static inline void arc_slc_enable(void)
+{
+       const int r = ARC_REG_SLC_CTRL;
+
+       write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
+}
+
 /***********************************************************
  * Exported APIs
  */
@@ -923,21 +978,54 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
        return 0;
 }
 
-void arc_cache_init(void)
+/*
+ * IO-Coherency (IOC) setup rules:
+ *
+ * 1. Needs to be at system level, so only once by Master core
+ *    Non-Masters need not be accessing caches at that time
+ *    - They are either HALT_ON_RESET and kick started much later or
+ *    - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
+ *      doesn't perturb caches or coherency unit
+ *
+ * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
+ *    otherwise any straggler data might behave strangely post IOC enabling
+ *
+ * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
+ *    Coherency transactions
+ */
+noinline void __init arc_ioc_setup(void)
 {
-       unsigned int __maybe_unused cpu = smp_processor_id();
-       char str[256];
+       unsigned int ap_sz;
 
-       printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+       /* Flush + invalidate + disable L1 dcache */
+       __dc_disable();
+
+       /* Flush + invalidate SLC */
+       if (read_aux_reg(ARC_REG_SLC_BCR))
+               slc_entire_op(OP_FLUSH_N_INV);
+
+       /* IOC Aperture start: TDB: handle non default CONFIG_LINUX_LINK_BASE */
+       write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
 
        /*
-        * Only master CPU needs to execute rest of function:
-        *  - Assume SMP so all cores will have same cache config so
-        *    any geomtry checks will be same for all
-        *  - IOC setup / dma callbacks only need to be setup once
+        * IOC Aperture size:
+        *   decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
+        * TBD: fix for PGU + 1GB of low mem
+        * TBD: fix for PAE
         */
-       if (cpu)
-               return;
+       ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
+       write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);
+
+       write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
+       write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+
+       /* Re-enable L1 dcache */
+       __dc_enable();
+}
+
+void __init arc_cache_init_master(void)
+{
+       unsigned int __maybe_unused cpu = smp_processor_id();
 
        if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
                struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
@@ -985,30 +1073,14 @@ void arc_cache_init(void)
                }
        }
 
-       if (is_isa_arcv2() && l2_line_sz && !slc_enable) {
-
-               /* IM set : flush before invalidate */
-               write_aux_reg(ARC_REG_SLC_CTRL,
-                       read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);
+       /* Note that SLC disable not formally supported till HS 3.0 */
+       if (is_isa_arcv2() && l2_line_sz && !slc_enable)
+               arc_slc_disable();
 
-               write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
-
-               /* Important to wait for flush to complete */
-               while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
-               write_aux_reg(ARC_REG_SLC_CTRL,
-                       read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
-       }
+       if (is_isa_arcv2() && ioc_enable)
+               arc_ioc_setup();
 
        if (is_isa_arcv2() && ioc_enable) {
-               /* IO coherency base - 0x8z */
-               write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
-               /* IO coherency aperture size - 512Mb: 0x8z-0xAz */
-               write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
-               /* Enable partial writes */
-               write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
-               /* Enable IO coherency */
-               write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
-
                __dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
                __dma_cache_inv = __dma_cache_inv_ioc;
                __dma_cache_wback = __dma_cache_wback_ioc;
@@ -1022,3 +1094,20 @@ void arc_cache_init(void)
                __dma_cache_wback = __dma_cache_wback_l1;
        }
 }
+
+void __ref arc_cache_init(void)
+{
+       unsigned int __maybe_unused cpu = smp_processor_id();
+       char str[256];
+
+       printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+       /*
+        * Only master CPU needs to execute rest of function:
+        *  - Assume SMP so all cores will have same cache config so
+        *    any geomtry checks will be same for all
+        *  - IOC setup / dma callbacks only need to be setup once
+        */
+       if (!cpu)
+               arc_cache_init_master();
+}
index 399e2f223d25303f1294c35b52862e584fbcb1eb..8c9415ed62804d0a8b4a35df176f1b43e6c1be44 100644 (file)
@@ -40,6 +40,11 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 #endif
 
+long __init arc_get_mem_sz(void)
+{
+       return low_mem_sz;
+}
+
 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
 static int __init setup_mem_sz(char *str)
 {
index 7327250f0bb66e716dacd07d35019c858f38cf68..f10fe8526239552a676df4a4bfb1ae6a21b1aa41 100644 (file)
@@ -846,6 +846,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
        sun8i-a83t-allwinner-h8homlet-v2.dtb \
        sun8i-a83t-cubietruck-plus.dtb \
        sun8i-h3-bananapi-m2-plus.dtb \
+       sun8i-h3-nanopi-m1.dtb  \
        sun8i-h3-nanopi-neo.dtb \
        sun8i-h3-orangepi-2.dtb \
        sun8i-h3-orangepi-lite.dtb \
index 1463df3b5b195544f7fafc97f3a7dc62efa01f4f..8ed46f9d79b75f1f64e466eba59beb254dbd1f48 100644 (file)
                        AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */
                        AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */
                        AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */
-                       AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE5) /* (C15) spi0_cs1.mmc0_sdcd */
                >;
        };
 
index b6142bda661e1a701c66c6590970553ce5255fcb..15f07f9af3b3df082f8fd0676ebc843c5b097ab5 100644 (file)
 
        axi {
                compatible = "simple-bus";
-               ranges = <0x00000000 0x18000000 0x0011c40a>;
+               ranges = <0x00000000 0x18000000 0x0011c40c>;
                #address-cells = <1>;
                #size-cells = <1>;
 
index 41de15fe15a2f0dfa241748b2b49ef90be48b323..78492a0bbbab94ffec665bc36a205e48c21e0070 100644 (file)
@@ -99,6 +99,7 @@
                                #size-cells = <1>;
                                compatible = "m25p64";
                                spi-max-frequency = <30000000>;
+                               m25p,fast-read;
                                reg = <0>;
                                partition@0 {
                                        label = "U-Boot-SPL";
index 3a8579cb8dd91e522ffe366dbcf0134c6018ae10..3e1f75026eac467992644e51f43fcfd824b58499 100644 (file)
                        phy-names = "sata-phy";
                        clocks = <&sata_ref_clk>;
                        ti,hwmods = "sata";
+                       ports-implemented = <0x1>;
                };
 
                rtc: rtc@48838000 {
index c3d939c9666cabb03752806cab94c002073a4d65..3f808a47df03dfad2b892fd6f833d57f2b18b439 100644 (file)
@@ -75,6 +75,6 @@
                ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
                ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
                ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
-               ti,min-output-imepdance;
+               ti,min-output-impedance;
        };
 };
index 34887a10c5f1712e62c0b953405ef330676a6940..47ba97229a48f23b247a0bd8bc2e874fa5c14c73 100644 (file)
                compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
                             "fsl,imx-audio-sgtl5000";
                model = "imx6q-nitrogen6_max-sgtl5000";
-               pinctrl-names = "default";
-               pinctrl-0 = <&pinctrl_sgtl5000>;
                ssi-controller = <&ssi1>;
                audio-codec = <&codec>;
                audio-routing =
 
        codec: sgtl5000@0a {
                compatible = "fsl,sgtl5000";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_sgtl5000>;
                reg = <0x0a>;
                clocks = <&clks IMX6QDL_CLK_CKO>;
                VDDA-supply = <&reg_2p5v>;
index d80f21abea62b5bfbd15c4999b01f282b269e973..31d4cc62dbc71bc3b5a8e6aa4a327d9b356fbfed 100644 (file)
                compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000",
                             "fsl,imx-audio-sgtl5000";
                model = "imx6q-nitrogen6_som2-sgtl5000";
-               pinctrl-names = "default";
-               pinctrl-0 = <&pinctrl_sgtl5000>;
                ssi-controller = <&ssi1>;
                audio-codec = <&codec>;
                audio-routing =
 
        codec: sgtl5000@0a {
                compatible = "fsl,sgtl5000";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_sgtl5000>;
                reg = <0x0a>;
                clocks = <&clks IMX6QDL_CLK_CKO>;
                VDDA-supply = <&reg_2p5v>;
index da8598402ab8b33137a74fac30a69432155fac17..38faa90007d7f0c7e3042a90aef486321cc4bfa0 100644 (file)
 &mmc1 {
        interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
        pinctrl-names = "default";
-       pinctrl-0 = <&mmc1_pins &mmc1_cd>;
+       pinctrl-0 = <&mmc1_pins>;
        wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>;                /* gpio_126 */
        cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>;              /* gpio_110 */
        vmmc-supply = <&vmmc1>;
                        OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0)        /* sdmmc1_dat1.sdmmc1_dat1 */
                        OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0)        /* sdmmc1_dat2.sdmmc1_dat2 */
                        OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0)        /* sdmmc1_dat3.sdmmc1_dat3 */
-                       OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 sdmmc1_wp*/
+                       OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 */
+                       OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
                >;
        };
 
                        OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4)       /* sys_boot6.gpio_8 */
                >;
        };
-
-       mmc1_cd: pinmux_mmc1_cd {
-               pinctrl-single,pins = <
-                       OMAP3_WKUP_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4)  /* cam_d11.gpio_110 */
-               >;
-       };
 };
 
 
index 7cd92babc41a688cf8bb474972d053ea7b8b33d6..0844737b72b27c53b0c8ed88940abf5d2937d637 100644 (file)
                        phy-names = "sata-phy";
                        clocks = <&sata_ref_clk>;
                        ti,hwmods = "sata";
+                       ports-implemented = <0x1>;
                };
 
                dss: dss@58000000 {
index 5ae4ec59e6ea4533ec35485a0a0bb361acdfffaf..c852b69229c977281a6f7e7192d88c3275e80817 100644 (file)
                };
 
                amba {
-                       compatible = "arm,amba-bus";
+                       compatible = "simple-bus";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
index 735914f6ae446a4766f484ab888826efc99eefb1..7cae328398b12f962421cdaa774b5892246032a6 100644 (file)
        cpu-supply = <&reg_dcdc3>;
 };
 
+&de {
+       status = "okay";
+};
+
 &ehci0 {
        status = "okay";
 };
index 2b26175d55d1b7e3cc27611eb5883f4bc913d575..e78faaf9243c862d6eba52d43851f9451cfd0476 100644 (file)
        de: display-engine {
                compatible = "allwinner,sun6i-a31-display-engine";
                allwinner,pipelines = <&fe0>;
+               status = "disabled";
        };
 
        soc@01c00000 {
index 5ea4915f6d75b93eb06c974ab1b6abd009f7f769..10d307408f237f21d15fdbe2bf84de5b4db40f0a 100644 (file)
@@ -56,7 +56,7 @@
 };
 
 &pio {
-       mmc2_pins_nrst: mmc2@0 {
+       mmc2_pins_nrst: mmc2-rst-pin {
                allwinner,pins = "PC16";
                allwinner,function = "gpio_out";
                allwinner,drive = <SUN4I_PINCTRL_10_MA>;
index 64f4419f14e85dbb5efed880d25fc2b34009169e..b416abcbacd8080b4c666436340e3bb120c3e04a 100644 (file)
@@ -472,7 +472,7 @@ CONFIG_MESON_WATCHDOG=y
 CONFIG_DW_WATCHDOG=y
 CONFIG_DIGICOLOR_WATCHDOG=y
 CONFIG_BCM2835_WDT=y
-CONFIG_BCM47XX_WATCHDOG=y
+CONFIG_BCM47XX_WDT=y
 CONFIG_BCM7038_WDT=m
 CONFIG_BCM_KONA_WDT=y
 CONFIG_MFD_ACT8945A=y
@@ -894,7 +894,7 @@ CONFIG_BCM2835_MBOX=y
 CONFIG_RASPBERRYPI_FIRMWARE=y
 CONFIG_EFI_VARS=m
 CONFIG_EFI_CAPSULE_LOADER=m
-CONFIG_CONFIG_BCM47XX_NVRAM=y
+CONFIG_BCM47XX_NVRAM=y
 CONFIG_BCM47XX_SPROM=y
 CONFIG_EXT4_FS=y
 CONFIG_AUTOFS4_FS=y
index 4364040ed69689dcc1d4c9a25aab9c8308e1b77c..1e6c48dd7b11830acc9a03a8bd700a47f8dbcc01 100644 (file)
@@ -86,9 +86,9 @@ CONFIG_IPV6_TUNNEL=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
index 522b5feb4eaa34dcbd0e66b7f417a6e857076db8..b62eaeb147aa9a0b8caa73bf96a73a1d02e5f708 100644 (file)
@@ -94,6 +94,9 @@
 #define ARM_CPU_XSCALE_ARCH_V2         0x4000
 #define ARM_CPU_XSCALE_ARCH_V3         0x6000
 
+/* Qualcomm implemented cores */
+#define ARM_CPU_PART_SCORPION          0x510002d0
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
index bfe2a2f5a644e80a9f80f71b49d727604c41e728..22b73112b75f2070e440068184f9655cff781afe 100644 (file)
@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
 
 #define ftrace_return_address(n) return_address(n)
 
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+                                              const char *name)
+{
+       if (!strcmp(sym, "sys_mmap2"))
+               sym = "sys_mmap_pgoff";
+       else if (!strcmp(sym, "sys_statfs64_wrapper"))
+               sym = "sys_statfs64";
+       else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
+               sym = "sys_fstatfs64";
+       else if (!strcmp(sym, "sys_arm_fadvise64_64"))
+               sym = "sys_fadvise64_64";
+
+       /* Ignore case since sym may start with "SyS" instead of "sys" */
+       return !strcasecmp(sym, name);
+}
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* _ASM_ARM_FTRACE */
index a2e75b84e2ae6b4d3f37d7879bc024832c17d3c9..6dae1956c74d761beb8f1cb5f12dc1a8e912c4b5 100644 (file)
@@ -80,6 +80,11 @@ static inline bool is_kernel_in_hyp_mode(void)
        return false;
 }
 
+static inline bool has_vhe(void)
+{
+       return false;
+}
+
 /* The section containing the hypervisor idmap text */
 extern char __hyp_idmap_text_start[];
 extern char __hyp_idmap_text_end[];
similarity index 94%
rename from arch/arm/include/asm/types.h
rename to arch/arm/include/uapi/asm/types.h
index a53cdb8f068c2f3a16ea4645d4a846fa2acb9ea0..9435a42f575e02c76199f861b9e0c96ffb77e123 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_TYPES_H
-#define _ASM_TYPES_H
+#ifndef _UAPI_ASM_TYPES_H
+#define _UAPI_ASM_TYPES_H
 
 #include <asm-generic/int-ll64.h>
 
@@ -37,4 +37,4 @@
 #define __UINTPTR_TYPE__       unsigned long
 #endif
 
-#endif /* _ASM_TYPES_H */
+#endif /* _UAPI_ASM_TYPES_H */
index 188180b5523de09f55647d2d31547684c450a8c7..be3b3fbd382fbbd4a4ef4baa34b5d3f906ab3562 100644 (file)
@@ -1063,6 +1063,22 @@ static int __init arch_hw_breakpoint_init(void)
                return 0;
        }
 
+       /*
+        * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
+        * whenever a WFI is issued, even if the core is not powered down, in
+        * violation of the architecture.  When DBGPRSR.SPD is set, accesses to
+        * breakpoint and watchpoint registers are treated as undefined, so
+        * this results in boot time and runtime failures when these are
+        * accessed and we unexpectedly take a trap.
+        *
+        * It's not clear if/how this can be worked around, so we blacklist
+        * Scorpion CPUs to avoid these issues.
+       */
+       if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
+               pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
+               return 0;
+       }
+
        has_ossr = core_has_os_save_restore();
 
        /* Determine how many BRPs/WRPs are available. */
index 22313cb5336257cffa870b15e21279a1b4684e99..9af0701f7094be972b0f8c60a0a6c8417ed844eb 100644 (file)
@@ -9,6 +9,7 @@
  */
 #include <linux/preempt.h>
 #include <linux/smp.h>
+#include <linux/uaccess.h>
 
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
 static inline void ipi_flush_tlb_page(void *arg)
 {
        struct tlb_args *ta = (struct tlb_args *)arg;
+       unsigned int __ua_flags = uaccess_save_and_enable();
 
        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+
+       uaccess_restore(__ua_flags);
 }
 
 static inline void ipi_flush_tlb_kernel_page(void *arg)
@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
 static inline void ipi_flush_tlb_range(void *arg)
 {
        struct tlb_args *ta = (struct tlb_args *)arg;
+       unsigned int __ua_flags = uaccess_save_and_enable();
 
        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+
+       uaccess_restore(__ua_flags);
 }
 
 static inline void ipi_flush_tlb_kernel_range(void *arg)
index 11676787ad49042d5049ccd376d153d47ef7bd06..9d7446456e0c4217e0931f3640eb006cb0fb83d7 100644 (file)
@@ -1099,6 +1099,9 @@ static void cpu_init_hyp_mode(void *dummy)
        __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
        __cpu_init_stage2();
 
+       if (is_kernel_in_hyp_mode())
+               kvm_timer_init_vhe();
+
        kvm_arm_init_debug();
 }
 
index f6ba589cd312ecb257829d81f7f04c967b60e216..c821c1d5610ef25c56e1c0873452a019a602380d 100644 (file)
@@ -32,7 +32,6 @@
 #include "soc.h"
 
 #define OMAP1_DMA_BASE                 (0xfffed800)
-#define OMAP1_LOGICAL_DMA_CH_COUNT     17
 
 static u32 enable_1510_mode;
 
@@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void)
                goto exit_iounmap;
        }
 
-       d->lch_count            = OMAP1_LOGICAL_DMA_CH_COUNT;
-
        /* Valid attributes for omap1 plus processors */
        if (cpu_is_omap15xx())
                d->dev_caps = ENABLE_1510_MODE;
@@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void)
        d->dev_caps             |= CLEAR_CSR_ON_READ;
        d->dev_caps             |= IS_WORD_16;
 
-       if (cpu_is_omap15xx())
-               d->chan_count = 9;
-       else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
-               if (!(d->dev_caps & ENABLE_1510_MODE))
-                       d->chan_count = 16;
+       /* available logical channels */
+       if (cpu_is_omap15xx()) {
+               d->lch_count = 9;
+       } else {
+               if (d->dev_caps & ENABLE_1510_MODE)
+                       d->lch_count = 9;
                else
-                       d->chan_count = 9;
+                       d->lch_count = 16;
        }
 
        p = dma_plat_info;
index 477910a48448d653b2de22339d60c61b967aa8e3..70c004794880e0efd567e269eebc0f7484ca5140 100644 (file)
@@ -161,7 +161,7 @@ static struct ti_st_plat_data wilink7_pdata = {
        .nshutdown_gpio = 162,
        .dev_name = "/dev/ttyO1",
        .flow_cntrl = 1,
-       .baud_rate = 300000,
+       .baud_rate = 3000000,
 };
 
 static struct platform_device wl128x_device = {
index 8538910db202ab6a13e0e3588f27b2a3157c4bc2..a970e7fcba9e02fe6e2651cd5cfca76321d26314 100644 (file)
@@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
  */
 bool prcmu_is_cpu_in_wfi(int cpu)
 {
-       return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
-                    PRCM_ARM_WFI_STANDBY_WFI0;
+       return readl(PRCM_ARM_WFI_STANDBY) &
+               (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
 }
 
 /*
index 238fbeacd330ab7a8bf7a2a6fdf7f1a6e0365499..5d28e1cdc9986a18de73efc9c3a988c4556f1d88 100644 (file)
        };
 };
 
+&scpi_clocks {
+       status = "disabled";
+};
+
 &uart_AO {
        status = "okay";
        pinctrl-0 = <&uart_ao_a_pins>;
index 596240c38a9cdd7720077a8f97a5c0111366e550..b35307321b63981cc7c8dee4042d4c0ba99b7ee9 100644 (file)
@@ -55,7 +55,7 @@
                mboxes = <&mailbox 1 &mailbox 2>;
                shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
 
-               clocks {
+               scpi_clocks: clocks {
                        compatible = "arm,scpi-clocks";
 
                        scpi_dvfs: scpi_clocks@0 {
index 64226d5ae4715172a12c8509022fb6f13814d318..135890cd8a859708c9adf6d3e184bd7ec093a3e0 100644 (file)
                };
 
                amba {
-                       compatible = "arm,amba-bus";
+                       compatible = "simple-bus";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
index 358089687a69b5946851ef7c5ac5c0a776d9d161..ef1b9e573af0f5bd2ed6792ba5c9450a64ecaa45 100644 (file)
@@ -27,7 +27,7 @@
                stdout-path = "serial0:115200n8";
        };
 
-       memory {
+       memory@0 {
                device_type = "memory";
                reg = <0x0 0x0 0x0 0x40000000>;
        };
index 68a908334c7b12846e74c16d5c7168d9276747dd..54dc28351c8cb85a0abbd4bbad554d56dbcbac79 100644 (file)
@@ -72,7 +72,7 @@
                             <1 10 0xf08>;
        };
 
-       amba_apu {
+       amba_apu: amba_apu@0 {
                compatible = "simple-bus";
                #address-cells = <2>;
                #size-cells = <1>;
                };
 
                i2c0: i2c@ff020000 {
-                       compatible = "cdns,i2c-r1p10";
+                       compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
                        status = "disabled";
                        interrupt-parent = <&gic>;
                        interrupts = <0 17 4>;
                };
 
                i2c1: i2c@ff030000 {
-                       compatible = "cdns,i2c-r1p10";
+                       compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
                        status = "disabled";
                        interrupt-parent = <&gic>;
                        interrupts = <0 18 4>;
index bfe632808d7724c0a51562efb60501e67f6bf157..90c39a6623797dd2c5309b8d70e7975cb938524c 100644 (file)
@@ -222,7 +222,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define _virt_addr_valid(kaddr)        pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 #else
 #define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(page)   (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(kaddr)  (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
 
 #define page_to_virt(page)     ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
 #define virt_to_page(vaddr)    ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
index fea10736b11fbd1204602c3b695da34ea255bf78..439f6b5d31f67576e57e67cdf1e8284bf57c08a7 100644 (file)
@@ -47,6 +47,7 @@
 #include <asm/ptrace.h>
 #include <asm/sections.h>
 #include <asm/sysreg.h>
+#include <asm/cpufeature.h>
 
 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
@@ -80,6 +81,14 @@ static inline bool is_kernel_in_hyp_mode(void)
        return read_sysreg(CurrentEL) == CurrentEL_EL2;
 }
 
+static inline bool has_vhe(void)
+{
+       if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
+               return true;
+
+       return false;
+}
+
 #ifdef CONFIG_ARM64_VHE
 extern void verify_cpu_run_el(void);
 #else
index b5c3933ed44163b2fb489a00553195afc1ff6806..d1ff83dfe5deae580cdb0ebf93f25b32a63f5a74 100644 (file)
@@ -77,6 +77,7 @@ struct user_fpsimd_state {
        __uint128_t     vregs[32];
        __u32           fpsr;
        __u32           fpcr;
+       __u32           __reserved[2];
 };
 
 struct user_hwdebug_state {
index 923841ffe4a981669be9bea76d09540244550a1f..43512d4d7df219b4b7093927fc1d9107004b6775 100644 (file)
@@ -683,7 +683,7 @@ el0_inv:
        mov     x0, sp
        mov     x1, #BAD_SYNC
        mov     x2, x25
-       bl      bad_mode
+       bl      bad_el0_sync
        b       ret_to_user
 ENDPROC(el0_sync)
 
index fc35e06ccaaca863ad680ff09166c3f2ba034c54..a22161ccf4470afa644850cd47c18b561da0d0e1 100644 (file)
@@ -551,6 +551,8 @@ static int hw_break_set(struct task_struct *target,
        /* (address, ctrl) registers */
        limit = regset->n * regset->size;
        while (count && offset < limit) {
+               if (count < PTRACE_HBP_ADDR_SZ)
+                       return -EINVAL;
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
                                         offset, offset + PTRACE_HBP_ADDR_SZ);
                if (ret)
@@ -560,6 +562,8 @@ static int hw_break_set(struct task_struct *target,
                        return ret;
                offset += PTRACE_HBP_ADDR_SZ;
 
+               if (!count)
+                       break;
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
                                         offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
@@ -596,7 +600,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
                   const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       struct user_pt_regs newregs;
+       struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
        if (ret)
@@ -626,7 +630,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                   const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       struct user_fpsimd_state newstate;
+       struct user_fpsimd_state newstate =
+               target->thread.fpsimd_state.user_fpsimd;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
        if (ret)
@@ -650,7 +655,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
                   const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       unsigned long tls;
+       unsigned long tls = target->thread.tp_value;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
        if (ret)
@@ -676,7 +681,8 @@ static int system_call_set(struct task_struct *target,
                           unsigned int pos, unsigned int count,
                           const void *kbuf, const void __user *ubuf)
 {
-       int syscallno, ret;
+       int syscallno = task_pt_regs(target)->syscallno;
+       int ret;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
        if (ret)
@@ -948,7 +954,7 @@ static int compat_tls_set(struct task_struct *target,
                          const void __user *ubuf)
 {
        int ret;
-       compat_ulong_t tls;
+       compat_ulong_t tls = target->thread.tp_value;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
        if (ret)
index 5b830be79c0120a331e32bdf417b45f4c0bb8911..659b2e6b6cf767ff94d2512ad0897aa236469139 100644 (file)
@@ -604,17 +604,34 @@ const char *esr_get_class_string(u32 esr)
 }
 
 /*
- * bad_mode handles the impossible case in the exception vector.
+ * bad_mode handles the impossible case in the exception vector. This is always
+ * fatal.
  */
 asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 {
-       siginfo_t info;
-       void __user *pc = (void __user *)instruction_pointer(regs);
        console_verbose();
 
        pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
                handler[reason], smp_processor_id(), esr,
                esr_get_class_string(esr));
+
+       die("Oops - bad mode", regs, 0);
+       local_irq_disable();
+       panic("bad mode");
+}
+
+/*
+ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
+ * exceptions taken from EL0. Unlike bad_mode, this returns.
+ */
+asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+{
+       siginfo_t info;
+       void __user *pc = (void __user *)instruction_pointer(regs);
+       console_verbose();
+
+       pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
+               smp_processor_id(), esr, esr_get_class_string(esr));
        __show_regs(regs);
 
        info.si_signo = SIGILL;
@@ -622,7 +639,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = pc;
 
-       arm64_notify_die("Oops - bad mode", regs, &info, 0);
+       current->thread.fault_address = 0;
+       current->thread.fault_code = 0;
+
+       force_sig_info(info.si_signo, &info, current);
 }
 
 void __pte_error(const char *file, int line, unsigned long val)
index 716d1226ba6925babc28bc6c14dc175e739c7f57..380ebe70509347f8c9329433a625527b65442aa6 100644 (file)
@@ -404,6 +404,8 @@ void __init mem_init(void)
        if (swiotlb_force == SWIOTLB_FORCE ||
            max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
                swiotlb_init(1);
+       else
+               swiotlb_force = SWIOTLB_NO_FORCE;
 
        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
index 1c2a5e264fc71cfd52f2acb0b24ddb1aff792be7..e93c9494503ac8fc3cfaa8167ea3523abb3e2925 100644 (file)
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
 #define atomic64_sub_and_test(i,v)     (atomic64_sub_return((i), (v)) == 0)
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v)       (atomic64_inc_return((v)) == 0)
-
+#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
 
 #define atomic_cmpxchg(v, old, new)    (cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new)            (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
        return c;
 }
 
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+       long long c, old;
+
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == u))
+                       break;
+               old = atomic64_cmpxchg(v, c, c + i);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+       long long c, old, dec;
+
+       c = atomic64_read(v);
+       for (;;) {
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+               old = atomic64_cmpxchg((v), c, dec);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+               return dec;
+}
+
 #define ATOMIC_OP(op)                                                  \
 static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
 {                                                                      \
index 393d311735c8b573bd5702eac1dcaaac1103600e..67e333aa7629c406745564cb24acc5903733ec41 100644 (file)
@@ -16,7 +16,7 @@
 struct task_struct;
 struct thread_struct;
 
-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
 struct fpu_state_struct;
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
 #define switch_fpu(prev, next)                                         \
index 1c64bc6330bc0b9f70b71038e678fd110e6e1456..0c4e470571ca0faa74d3e9fa38fa57a384cab4bf 100644 (file)
 #ifdef CONFIG_HUGETLB_PAGE
 static inline int hash__hugepd_ok(hugepd_t hpd)
 {
+       unsigned long hpdval = hpd_val(hpd);
        /*
         * if it is not a pte and have hugepd shift mask
         * set, then it is a hugepd directory pointer
         */
-       if (!(hpd.pd & _PAGE_PTE) &&
-           ((hpd.pd & HUGEPD_SHIFT_MASK) != 0))
+       if (!(hpdval & _PAGE_PTE) &&
+           ((hpdval & HUGEPD_SHIFT_MASK) != 0))
                return true;
        return false;
 }
index f61cad3de4e69ec093674355332f7d3ee093cf2b..4c935f7504f783532d3521d04142dfdb5831f943 100644 (file)
@@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
                                              unsigned long phys);
 extern void hash__vmemmap_remove_mapping(unsigned long start,
                                     unsigned long page_size);
+
+int hash__create_section_mapping(unsigned long start, unsigned long end);
+int hash__remove_section_mapping(unsigned long start, unsigned long end);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
index ede215167d1ad4f37f4d732c0eeaeee5cbd58bd4..7f4025a6c69ea5b71b340989b35197d8f9d17b91 100644 (file)
@@ -21,12 +21,12 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
         * We have only four bits to encode, MMU page size
         */
        BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
-       return __va(hpd.pd & HUGEPD_ADDR_MASK);
+       return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
 }
 
 static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
 {
-       return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
+       return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
 }
 
 static inline unsigned int hugepd_shift(hugepd_t hpd)
@@ -52,18 +52,20 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
 {
        BUG_ON(!hugepd_ok(hpd));
 #ifdef CONFIG_PPC_8xx
-       return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
+       return (pte_t *)__va(hpd_val(hpd) &
+                            ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
 #else
-       return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+       return (pte_t *)((hpd_val(hpd) &
+                         ~HUGEPD_SHIFT_MASK) | PD_HUGE);
 #endif
 }
 
 static inline unsigned int hugepd_shift(hugepd_t hpd)
 {
 #ifdef CONFIG_PPC_8xx
-       return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
+       return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
 #else
-       return hpd.pd & HUGEPD_SHIFT_MASK;
+       return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
 #endif
 }
 
index 172849727054e179ea1bd58e24e2d85118ed8f91..0cd8a3852763292eabe905b33960f888e875c978 100644 (file)
@@ -227,9 +227,10 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 static inline int hugepd_ok(hugepd_t hpd)
 {
 #ifdef CONFIG_PPC_8xx
-       return ((hpd.pd & 0x4) != 0);
+       return ((hpd_val(hpd) & 0x4) != 0);
 #else
-       return (hpd.pd > 0);
+       /* We clear the top bit to indicate hugepd */
+       return ((hpd_val(hpd) & PD_HUGE) ==  0);
 #endif
 }
 
index 56398e7e61004d7766f8e77627df54e73f7e75bf..47120bf2670c49f224744445c5dc154f0de60b31 100644 (file)
@@ -294,15 +294,12 @@ extern long long virt_phys_offset;
 #include <asm/pgtable-types.h>
 #endif
 
-typedef struct { signed long pd; } hugepd_t;
 
 #ifndef CONFIG_HUGETLB_PAGE
 #define is_hugepd(pdep)                (0)
 #define pgd_huge(pgd)          (0)
 #endif /* CONFIG_HUGETLB_PAGE */
 
-#define __hugepd(x) ((hugepd_t) { (x) })
-
 struct page;
 extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
 extern void copy_user_page(void *to, void *from, unsigned long vaddr,
index e157489ee7a1220156868099903d45509bedd7e6..ae0a23091a9b347d3e31ed96330772bc9caa3020 100644 (file)
@@ -65,6 +65,7 @@ struct power_pmu {
 #define PPMU_HAS_SSLOT         0x00000020 /* Has sampled slot in MMCRA */
 #define PPMU_HAS_SIER          0x00000040 /* Has SIER */
 #define PPMU_ARCH_207S         0x00000080 /* PMC is architecture v2.07S */
+#define PPMU_NO_SIAR           0x00000100 /* Do not use SIAR */
 
 /*
  * Values for flags to get_alternatives()
index 49c0a5a80efa2948764e247d3cbd76fc6fdf06a0..9c0f5db5cf461a92e72185701b4cbc1df168dfcc 100644 (file)
@@ -104,4 +104,12 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
        return pmd_raw(old) == prev;
 }
 
+typedef struct { __be64 pdbe; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
+
+static inline unsigned long hpd_val(hugepd_t x)
+{
+       return be64_to_cpu(x.pdbe);
+}
+
 #endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
index e7f4f3e0fcde94ba237fa2a269c32d40458cc8f2..8bd3b13fe2fb2e8bd1c5762c4e080c9cd921edaa 100644 (file)
@@ -66,4 +66,11 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
 }
 #endif
 
+typedef struct { unsigned long pd; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline unsigned long hpd_val(hugepd_t x)
+{
+       return x.pd;
+}
+
 #endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
index c56ea8c84abb1771ff65f66ba91ffff02bff5fae..c4ced1d01d579fe0b7aeb13a71b21eb2ffb7072c 100644 (file)
 #define PPC_INST_MCRXR                 0x7c000400
 #define PPC_INST_MCRXR_MASK            0xfc0007fe
 #define PPC_INST_MFSPR_PVR             0x7c1f42a6
-#define PPC_INST_MFSPR_PVR_MASK                0xfc1fffff
+#define PPC_INST_MFSPR_PVR_MASK                0xfc1ffffe
 #define PPC_INST_MFTMR                 0x7c0002dc
 #define PPC_INST_MSGSND                        0x7c00019c
 #define PPC_INST_MSGCLR                        0x7c0001dc
 #define PPC_INST_RFDI                  0x4c00004e
 #define PPC_INST_RFMCI                 0x4c00004c
 #define PPC_INST_MFSPR_DSCR            0x7c1102a6
-#define PPC_INST_MFSPR_DSCR_MASK       0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_MASK       0xfc1ffffe
 #define PPC_INST_MTSPR_DSCR            0x7c1103a6
-#define PPC_INST_MTSPR_DSCR_MASK       0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_MASK       0xfc1ffffe
 #define PPC_INST_MFSPR_DSCR_USER       0x7c0302a6
-#define PPC_INST_MFSPR_DSCR_USER_MASK  0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER_MASK  0xfc1ffffe
 #define PPC_INST_MTSPR_DSCR_USER       0x7c0303a6
-#define PPC_INST_MTSPR_DSCR_USER_MASK  0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER_MASK  0xfc1ffffe
 #define PPC_INST_MFVSRD                        0x7c000066
 #define PPC_INST_MTVSRD                        0x7c000166
 #define PPC_INST_SLBFEE                        0x7c0007a7
index 8180bfd7ab931c5b6d16a1c1b7cb73689206571b..9de7f79e702b1d755bdc855a2da82876959eeade 100644 (file)
@@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
         *
         * For pHyp, we have to enable IO for log retrieval. Otherwise,
         * 0xFF's is always returned from PCI config space.
+        *
+        * When the @severity is EEH_LOG_PERM, the PE is going to be
+        * removed. Prior to that, the drivers for devices included in
+        * the PE will be closed. The drivers rely on working IO path
+        * to bring the devices to quiet state. Otherwise, PCI traffic
+        * from those devices after they are removed is like to cause
+        * another unexpected EEH error.
         */
        if (!(pe->type & EEH_PE_PHB)) {
-               if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+               if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
+                   severity == EEH_LOG_PERM)
                        eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
 
                /*
index e4744ff38a1782de7aa60305fc3f72dc9c109cba..925a4ef9055932174b4dc5a8f0424b330149132b 100644 (file)
@@ -463,6 +463,10 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 
        flush_fp_to_thread(target);
 
+       for (i = 0; i < 32 ; i++)
+               buf[i] = target->thread.TS_FPR(i);
+       buf[32] = target->thread.fp_state.fpscr;
+
        /* copy to local buffer then write that out */
        i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
        if (i)
@@ -672,6 +676,9 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
        flush_altivec_to_thread(target);
        flush_vsx_to_thread(target);
 
+       for (i = 0; i < 32 ; i++)
+               buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 buf, 0, 32 * sizeof(double));
        if (!ret)
@@ -1019,6 +1026,10 @@ static int tm_cfpr_set(struct task_struct *target,
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);
 
+       for (i = 0; i < 32; i++)
+               buf[i] = target->thread.TS_CKFPR(i);
+       buf[32] = target->thread.ckfp_state.fpscr;
+
        /* copy to local buffer then write that out */
        i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
        if (i)
@@ -1283,6 +1294,9 @@ static int tm_cvsx_set(struct task_struct *target,
        flush_altivec_to_thread(target);
        flush_vsx_to_thread(target);
 
+       for (i = 0; i < 32 ; i++)
+               buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
+
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 buf, 0, 32 * sizeof(double));
        if (!ret)
index 80334937e14fdf8519236a9930e73707ef373d86..67e19a0821be25c8e5f458857e9fda31ca42adb6 100644 (file)
@@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int create_section_mapping(unsigned long start, unsigned long end)
+int hash__create_section_mapping(unsigned long start, unsigned long end)
 {
        int rc = htab_bolt_mapping(start, end, __pa(start),
                                   pgprot_val(PAGE_KERNEL), mmu_linear_psize,
@@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
        return rc;
 }
 
-int remove_section_mapping(unsigned long start, unsigned long end)
+int hash__remove_section_mapping(unsigned long start, unsigned long end)
 {
        int rc = htab_remove_mapping(start, end, mmu_linear_psize,
                                     mmu_kernel_ssize);
index d5026f3800b6129bc4c05ef303b4c37d80065cc0..37b5f91e381b77d545b5c336e60aecb0194dcd7d 100644 (file)
@@ -125,11 +125,14 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 int hugepd_ok(hugepd_t hpd)
 {
        bool is_hugepd;
+       unsigned long hpdval;
+
+       hpdval = hpd_val(hpd);
 
        /*
         * We should not find this format in page directory, warn otherwise.
         */
-       is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+       is_hugepd = (((hpdval & 0x3) == 0x0) && ((hpdval & HUGEPD_SHIFT_MASK) != 0));
        WARN(is_hugepd, "Found wrong page directory format\n");
        return 0;
 }
index 289df38fb7e08bcde6228276fb9d8415042bdd30..8c3389cbcd12216ef7b71884163322255f76ea68 100644 (file)
@@ -53,7 +53,7 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
 static unsigned nr_gpages;
 #endif
 
-#define hugepd_none(hpd)       ((hpd).pd == 0)
+#define hugepd_none(hpd)       (hpd_val(hpd) == 0)
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
@@ -103,24 +103,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
-               else
+               else {
 #ifdef CONFIG_PPC_BOOK3S_64
-                       hpdp->pd = __pa(new) |
-                                  (shift_to_mmu_psize(pshift) << 2);
+                       *hpdp = __hugepd(__pa(new) |
+                                        (shift_to_mmu_psize(pshift) << 2));
 #elif defined(CONFIG_PPC_8xx)
-                       hpdp->pd = __pa(new) |
-                                  (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
-                                                             _PMD_PAGE_512K) |
-                                  _PMD_PRESENT;
+                       *hpdp = __hugepd(__pa(new) |
+                                        (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
+                                         _PMD_PAGE_512K) | _PMD_PRESENT);
 #else
                        /* We use the old format for PPC_FSL_BOOK3E */
-                       hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
+                       *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
 #endif
+               }
        }
        /* If we bailed from the for loop early, an error occurred, clean up */
        if (i < num_hugepd) {
                for (i = i - 1 ; i >= 0; i--, hpdp--)
-                       hpdp->pd = 0;
+                       *hpdp = __hugepd(0);
                kmem_cache_free(cachep, new);
        }
        spin_unlock(&mm->page_table_lock);
@@ -454,7 +454,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
                return;
 
        for (i = 0; i < num_hugepd; i++, hpdp++)
-               hpdp->pd = 0;
+               *hpdp = __hugepd(0);
 
        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
@@ -810,12 +810,8 @@ static int __init hugetlbpage_init(void)
                 * if we have pdshift and shift value same, we don't
                 * use pgt cache for hugepd.
                 */
-               if (pdshift > shift) {
+               if (pdshift > shift)
                        pgtable_cache_add(pdshift - shift, NULL);
-                       if (!PGT_CACHE(pdshift - shift))
-                               panic("hugetlbpage_init(): could not create "
-                                     "pgtable cache for %d bit pagesize\n", shift);
-               }
 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
                else if (!hugepte_cache) {
                        /*
@@ -852,9 +848,6 @@ static int __init hugetlbpage_init(void)
        else if (mmu_psize_defs[MMU_PAGE_2M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
 #endif
-       else
-               panic("%s: Unable to set default huge page size\n", __func__);
-
        return 0;
 }
 
index a175cd82ae8c5f2807508e5a2ee2daf34e7fc05a..f2108c40e697dfa5128dc7b8f742e8a2db8063fd 100644 (file)
@@ -78,8 +78,12 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
        align = max_t(unsigned long, align, minalign);
        name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
        new = kmem_cache_create(name, table_size, align, 0, ctor);
+       if (!new)
+               panic("Could not allocate pgtable cache for order %d", shift);
+
        kfree(name);
        pgtable_cache[shift - 1] = new;
+
        pr_debug("Allocated pgtable cache for order %d\n", shift);
 }
 
@@ -88,7 +92,7 @@ void pgtable_cache_init(void)
 {
        pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
 
-       if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
+       if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX))
                pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
        /*
         * In all current configs, when the PUD index exists it's the
@@ -97,11 +101,4 @@ void pgtable_cache_init(void)
         */
        if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
                pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
-
-       if (!PGT_CACHE(PGD_INDEX_SIZE))
-               panic("Couldn't allocate pgd cache");
-       if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
-               panic("Couldn't allocate pmd pgtable caches");
-       if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
-               panic("Couldn't allocate pud pgtable caches");
 }
index ebf9782bacf97dffb1a88a71416b7235c840638e..653ff6c74ebe3d112ed13a70006032d3e7a5dd23 100644 (file)
@@ -126,3 +126,21 @@ void mmu_cleanup_all(void)
        else if (mmu_hash_ops.hpte_clear_all)
                mmu_hash_ops.hpte_clear_all();
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int create_section_mapping(unsigned long start, unsigned long end)
+{
+       if (radix_enabled())
+               return -ENODEV;
+
+       return hash__create_section_mapping(start, end);
+}
+
+int remove_section_mapping(unsigned long start, unsigned long end)
+{
+       if (radix_enabled())
+               return -ENODEV;
+
+       return hash__remove_section_mapping(start, end);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
index fd3e4034c04d2207a30cc82d6c65dffc6094c603..270eb9b74e2e13eff5f37b441f76a37a019ff73e 100644 (file)
@@ -295,6 +295,8 @@ static inline void perf_read_regs(struct pt_regs *regs)
         */
        if (TRAP(regs) != 0xf00)
                use_siar = 0;
+       else if ((ppmu->flags & PPMU_NO_SIAR))
+               use_siar = 0;
        else if (marked)
                use_siar = 1;
        else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
index 6447dc1c3d896cea18615d3b5bacc4bb6285fbb1..929b56d47ad9bf6bc79f7ae9dd0ab27b2142427d 100644 (file)
@@ -16,7 +16,7 @@ EVENT(PM_CYC,                                 0x0001e)
 EVENT(PM_ICT_NOSLOT_CYC,                       0x100f8)
 EVENT(PM_CMPLU_STALL,                          0x1e054)
 EVENT(PM_INST_CMPL,                            0x00002)
-EVENT(PM_BRU_CMPL,                             0x40060)
+EVENT(PM_BRU_CMPL,                             0x10012)
 EVENT(PM_BR_MPRED_CMPL,                                0x400f6)
 
 /* All L1 D cache load references counted at finish, gated by reject */
index 346010e8d463d36d2c411839ecb550d9534f61b1..7332634e18c95212f1448c578f2726b8ebf3aee0 100644 (file)
@@ -384,7 +384,7 @@ static struct power_pmu power9_isa207_pmu = {
        .bhrb_filter_map        = power9_bhrb_filter_map,
        .get_constraint         = isa207_get_constraint,
        .disable_pmc            = isa207_disable_pmc,
-       .flags                  = PPMU_HAS_SIER | PPMU_ARCH_207S,
+       .flags                  = PPMU_NO_SIAR | PPMU_ARCH_207S,
        .n_generic              = ARRAY_SIZE(power9_generic_events),
        .generic_events         = power9_generic_events,
        .cache_events           = &power9_cache_events,
index d38e86fd5720f181ac10cc869b35e8b093ca5803..60c57657c772fef576e5c4703dfb2a17203978a7 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/xics.h>
 #include <asm/io.h>
 #include <asm/opal.h>
+#include <asm/kvm_ppc.h>
 
 static void icp_opal_teardown_cpu(void)
 {
@@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void)
         * Should we be flagging idle loop instead?
         * Or creating some task to be scheduled?
         */
-       opal_int_eoi((0x00 << 24) | XICS_IPI);
+       if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
+               force_external_irq_replay();
+}
+
+static unsigned int icp_opal_get_xirr(void)
+{
+       unsigned int kvm_xirr;
+       __be32 hw_xirr;
+       int64_t rc;
+
+       /* Handle an interrupt latched by KVM first */
+       kvm_xirr = kvmppc_get_xics_latch();
+       if (kvm_xirr)
+               return kvm_xirr;
+
+       /* Then ask OPAL */
+       rc = opal_int_get_xirr(&hw_xirr, false);
+       if (rc < 0)
+               return 0;
+       return be32_to_cpu(hw_xirr);
 }
 
 static unsigned int icp_opal_get_irq(void)
@@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void)
        unsigned int xirr;
        unsigned int vec;
        unsigned int irq;
-       int64_t rc;
 
-       rc = opal_int_get_xirr(&xirr, false);
-       if (rc < 0)
-               return 0;
-       xirr = be32_to_cpu(xirr);
+       xirr = icp_opal_get_xirr();
        vec = xirr & 0x00ffffff;
        if (vec == XICS_IRQ_SPURIOUS)
                return 0;
@@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void)
        xics_mask_unknown_vec(vec);
 
        /* We might learn about it later, so EOI it */
-       opal_int_eoi(xirr);
+       if (opal_int_eoi(xirr) > 0)
+               force_external_irq_replay();
 
        return 0;
 }
index e659daffe368836baf4db7908cdb4cb83575f0d1..e00975361fec00fb89ad916f1b8a15539a449a6c 100644 (file)
@@ -69,7 +69,7 @@ CONFIG_CMA=y
 CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_MEM_SOFT_DIRTY=y
-CONFIG_ZPOOL=m
+CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
@@ -141,8 +141,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -159,13 +157,12 @@ CONFIG_NF_TABLES=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -219,7 +216,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -258,7 +254,6 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -436,7 +431,6 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -480,6 +474,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -592,14 +587,12 @@ CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
-CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_DEBUG_CREDENTIALS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
 CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_FAULT_INJECTION=y
 CONFIG_FAILSLAB=y
@@ -618,6 +611,7 @@ CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
 CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
@@ -630,6 +624,7 @@ CONFIG_TEST_STRING_HELPERS=y
 CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -640,16 +635,18 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_IMA=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -673,11 +670,13 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
index 95ceac50bc6512313cf2d7cda8090c88596ae865..f05d2d6e10872a417cfb67a9624d7d74f56e5cc6 100644 (file)
@@ -12,6 +12,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
+# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
@@ -54,8 +55,9 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
+CONFIG_LIVEPATCH=y
 CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=256
+CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
@@ -65,6 +67,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
@@ -136,8 +139,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +155,12 @@ CONFIG_NF_TABLES=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +214,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +252,6 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -430,7 +428,6 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -460,6 +457,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
@@ -473,6 +471,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -495,6 +494,7 @@ CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
@@ -551,25 +551,27 @@ CONFIG_FRAME_WARN=1024
 CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-# CONFIG_KPROBE_EVENT is not set
+CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
-CONFIG_RBTREE_TEST=m
-CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
@@ -577,18 +579,25 @@ CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -598,6 +607,7 @@ CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -612,10 +622,13 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
@@ -624,9 +637,6 @@ CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
 CONFIG_CORDIC=m
index bc7b176f57950d1f4f9d17cfe8721229fe4e34c2..2cf87343b59030f76267e47dc88672e536bf6e9b 100644 (file)
@@ -65,6 +65,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
@@ -136,8 +137,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +153,12 @@ CONFIG_NF_TABLES=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +212,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +250,6 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -430,7 +426,6 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -474,6 +469,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -496,6 +492,7 @@ CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
@@ -563,12 +560,16 @@ CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
 CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
@@ -576,18 +577,25 @@ CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -597,6 +605,7 @@ CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -611,10 +620,13 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
@@ -623,9 +635,6 @@ CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
 CONFIG_CORDIC=m
index 2d40ef0a6295d9a93c3527d6592eba2780736a2e..d00e368fb5e6ef949b6fb070321fa3a6dc9c8501 100644 (file)
@@ -38,7 +38,6 @@ CONFIG_JUMP_LABEL=y
 CONFIG_STATIC_KEYS_SELFTEST=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
 CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
@@ -130,8 +129,11 @@ CONFIG_DUMMY=m
 CONFIG_EQUALIZER=m
 CONFIG_TUN=m
 CONFIG_VIRTIO_NET=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
+CONFIG_DEVKMEM=y
 CONFIG_RAW_DRIVER=m
 CONFIG_VIRTIO_BALLOON=y
 CONFIG_EXT4_FS=y
@@ -183,7 +185,6 @@ CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_S390_PTDUMP=y
 CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
index d7697ab802f6c94813a27394baa255fa26a93ddc..8e136b88cdf4f13460b960f8db9d02e1ae88324f 100644 (file)
@@ -15,7 +15,9 @@
        BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
        asm volatile(                                                   \
                "       lctlg   %1,%2,%0\n"                             \
-               : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+               :                                                       \
+               : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high)    \
+               : "memory");                                            \
 }
 
 #define __ctl_store(array, low, high) {                                        \
index 7447ba509c30eb0b409598d062ea408603f313af..12020b55887bfd258e6545e687ec4a9de4fdb214 100644 (file)
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
        if (target == current)
                save_fpu_regs();
 
+       if (MACHINE_HAS_VX)
+               convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+       else
+               memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+
        /* If setting FPC, must validate it first. */
        if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
                u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
        if (target == current)
                save_fpu_regs();
 
+       for (i = 0; i < __NUM_VXRS_LOW; i++)
+               vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
+
        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
        if (rc == 0)
                for (i = 0; i < __NUM_VXRS_LOW; i++)
index bec71e902be3f8030697084540130227ecb55088..6484a250021e2717c448eb8f4f0b987169c264f1 100644 (file)
@@ -916,7 +916,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
        memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
-              S390_ARCH_FAC_LIST_SIZE_BYTE);
+              sizeof(S390_lowcore.stfle_fac_list));
        if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
                ret = -EFAULT;
        kfree(mach);
@@ -1437,7 +1437,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        /* Populate the facility mask initially. */
        memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
-              S390_ARCH_FAC_LIST_SIZE_BYTE);
+              sizeof(S390_lowcore.stfle_fac_list));
        for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
                if (i < kvm_s390_fac_list_mask_size())
                        kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
index 7a1897c51c5495f3f2b13d86ba8f6344e2234788..d56ef26d46816b834068609ceb940ce01901d731 100644 (file)
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
        return pgste;
 }
 
-static inline void ptep_xchg_commit(struct mm_struct *mm,
+static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
                                    unsigned long addr, pte_t *ptep,
                                    pgste_t pgste, pte_t old, pte_t new)
 {
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
        } else {
                *ptep = new;
        }
+       return old;
 }
 
 pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_direct(mm, addr, ptep);
-       ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+       old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
 }
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_lazy(mm, addr, ptep);
-       ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+       old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
 }
index d89b7011667cb4f1a6f3ad55238d2c815e229c41..e279572824b15e07616b98215fb51c1fa65f4c9f 100644 (file)
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
                          const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       struct pt_regs regs;
+       struct pt_regs regs = *task_pt_regs(target);
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
                                 sizeof(regs));
index 05612a2529c8bba1e9aa9131a4cabaebe96d5736..496e60391fac68e231ebac4fae9ff74ae867ffc0 100644 (file)
@@ -1010,7 +1010,7 @@ static __init int amd_ibs_init(void)
         * all online cpus.
         */
        cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
-                         "perf/x86/amd/ibs:STARTING",
+                         "perf/x86/amd/ibs:starting",
                          x86_pmu_amd_ibs_starting_cpu,
                          x86_pmu_amd_ibs_dying_cpu);
 
index d611cab214a6050555eb4c5f6f5638ca46b4659e..eb1484c86bb4b4611450c49df01aab1a18d8fa5f 100644 (file)
@@ -3176,13 +3176,16 @@ static void intel_pmu_cpu_starting(int cpu)
 
        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
+                       struct cpu_hw_events *sibling;
                        struct intel_excl_cntrs *c;
 
-                       c = per_cpu(cpu_hw_events, i).excl_cntrs;
+                       sibling = &per_cpu(cpu_hw_events, i);
+                       c = sibling->excl_cntrs;
                        if (c && c->core_id == core_id) {
                                cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
                                cpuc->excl_cntrs = c;
-                               cpuc->excl_thread_id = 1;
+                               if (!sibling->excl_thread_id)
+                                       cpuc->excl_thread_id = 1;
                                break;
                        }
                }
index 945e512a112a321771797869b39c496d7f5c2b8f..1e35dd06b090ee91189cb5a52fdf026f2ca5e74b 100644 (file)
@@ -1875,6 +1875,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
        .irq_ack                = irq_chip_ack_parent,
        .irq_eoi                = ioapic_ack_level,
        .irq_set_affinity       = ioapic_set_affinity,
+       .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -1886,6 +1887,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
        .irq_ack                = irq_chip_ack_parent,
        .irq_eoi                = ioapic_ir_ack_level,
        .irq_set_affinity       = ioapic_set_affinity,
+       .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
index 57d8a856cdc5ce938efb73fd8fae144867abde76..d153be8929a68440ae5e5894497cbb7fa1ab9913 100644 (file)
@@ -6171,7 +6171,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 
        kvm_x86_ops->patch_hypercall(vcpu, instruction);
 
-       return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
+       return emulator_write_emulated(ctxt, rip, instruction, 3,
+               &ctxt->exception);
 }
 
 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
index 3cd69832d7f4c6f3743bbb3a190c39672c89461d..3961103e91760a14d24eec1ad3caffa5ad1f2adb 100644 (file)
@@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
                        DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
                },
        },
+       /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
+       {
+               .callback = set_nouse_crs,
+               .ident = "Supermicro X8DTH",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
+                       DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
+               },
+       },
 
        /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
        {
index a8e67a155d04f6d937e890473b1647fbe95d210f..c3400b5444a7da9842622cb4b0c94b2f1b5ddd64 100644 (file)
@@ -912,7 +912,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
 {
        LIST_HEAD(rq_list);
-       LIST_HEAD(driver_list);
 
        if (unlikely(blk_mq_hctx_stopped(hctx)))
                return;
index 82b0b571097960919ce6ed36a703402422ac3cc7..b0399e8f6d27df774b175cb2a3aa7c8f3cce7189 100644 (file)
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
 
        ACPI_FUNCTION_TRACE(tb_install_and_load_table);
 
-       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
        /* Install the table and load it into the namespace */
 
        status = acpi_tb_install_standard_table(address, flags, TRUE,
                                                override, &i);
        if (ACPI_FAILURE(status)) {
-               goto unlock_and_exit;
+               goto exit;
        }
 
-       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
        status = acpi_tb_load_table(i, acpi_gbl_root_node);
-       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
-unlock_and_exit:
+exit:
        *table_index = i;
-       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
        return_ACPI_STATUS(status);
 }
 
index 5fdf251a9f9797a2a00f479880d6449dfa6dd940..01e1b3d63fc0dc8ae0e0b17767dae535be2bec68 100644 (file)
@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                goto release_and_exit;
        }
 
+       /* Acquire the table lock */
+
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
        if (reload) {
                /*
                 * Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                                         new_table_desc.signature.integer));
 
                        status = AE_BAD_SIGNATURE;
-                       goto release_and_exit;
+                       goto unlock_and_exit;
                }
 
                /* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                                /* Table is still loaded, this is an error */
 
                                status = AE_ALREADY_EXISTS;
-                               goto release_and_exit;
+                               goto unlock_and_exit;
                        } else {
                                /*
                                 * Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                                 * indicate the re-installation.
                                 */
                                acpi_tb_uninstall_table(&new_table_desc);
+                               (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
                                *table_index = i;
                                return_ACPI_STATUS(AE_OK);
                        }
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 
        /* Invoke table handler if present */
 
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
        if (acpi_gbl_table_handler) {
                (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
                                             new_table_desc.pointer,
                                             acpi_gbl_table_handler_context);
        }
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+unlock_and_exit:
+
+       /* Release the table lock */
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 
 release_and_exit:
 
index 9b6cebe227a08562985ce304463dcbe1f1ee1154..54abb26b736639ca54aa7051ae742d6657a501bc 100644 (file)
@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void)
                if (acpi_sleep_state_supported(i))
                        sleep_states[i] = 1;
 
-       /*
-        * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
-        * the default suspend mode was not selected from the command line.
-        */
-       if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
-           mem_sleep_default > PM_SUSPEND_MEM)
-               mem_sleep_default = PM_SUSPEND_FREEZE;
-
        suspend_set_ops(old_suspend_ordering ?
                &acpi_suspend_ops_old : &acpi_suspend_ops);
        freeze_set_ops(&acpi_freeze_ops);
index 02ded25c82e4a06e1e79bf2f0a4855aa933b3df1..7f48156cbc0c0b47a22943b60bf374d8a86ea6e3 100644 (file)
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
                },
        },
-       {
-       /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
-       /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
-       .callback = video_detect_force_native,
-       .ident = "HP Pavilion dv6",
-       .matches = {
-               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-               DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
-               },
-       },
-
        { },
 };
 
index 8ab8ea1253e62310a68d9e6bf039d8d866ee4019..dacb6a8418aa927e8d75a86470b35b414bf48598 100644 (file)
@@ -408,14 +408,14 @@ static ssize_t show_valid_zones(struct device *dev,
        sprintf(buf, "%s", zone->name);
 
        /* MMOP_ONLINE_KERNEL */
-       zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+       zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
        }
 
        /* MMOP_ONLINE_MOVABLE */
-       zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+       zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
index 50a2020b5b724fd657c01b6d1a27c33a68a27fd0..9fd06eeb1a17b3880e2ad574f4ca081903a67961 100644 (file)
@@ -271,7 +271,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index,
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
        struct request *req = blk_mq_rq_from_pdu(cmd);
-       int result, flags;
+       int result;
        struct nbd_request request;
        unsigned long size = blk_rq_bytes(req);
        struct bio *bio;
@@ -310,7 +310,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
        if (type != NBD_CMD_WRITE)
                return 0;
 
-       flags = 0;
        bio = req->bio;
        while (bio) {
                struct bio *next = bio->bi_next;
@@ -319,9 +318,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 
                bio_for_each_segment(bvec, bio, iter) {
                        bool is_last = !next && bio_iter_last(bvec, iter);
+                       int flags = is_last ? 0 : MSG_MORE;
 
-                       if (is_last)
-                               flags = MSG_MORE;
                        dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
                                cmd, bvec.bv_len);
                        result = sock_send_bvec(nbd, index, &bvec, flags);
index b2bdfa81f9297cd588ffcb1168f7db1cd5a48228..265f1a7072e9a1fed8212520d509adaecdbef04b 100644 (file)
@@ -197,13 +197,13 @@ struct blkfront_info
        /* Number of pages per ring buffer. */
        unsigned int nr_ring_pages;
        struct request_queue *rq;
-       unsigned int feature_flush;
-       unsigned int feature_fua;
+       unsigned int feature_flush:1;
+       unsigned int feature_fua:1;
        unsigned int feature_discard:1;
        unsigned int feature_secdiscard:1;
+       unsigned int feature_persistent:1;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
-       unsigned int feature_persistent:1;
        /* Number of 4KB segments handled */
        unsigned int max_indirect_segments;
        int is_ready;
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
        }
        else
                grants = info->max_indirect_segments;
-       psegs = grants / GRANTS_PER_PSEG;
+       psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
 
        err = fill_grant_buffer(rinfo,
                                (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
                blkfront_setup_discard(info);
 
        info->feature_persistent =
-               xenbus_read_unsigned(info->xbdev->otherend,
-                                    "feature-persistent", 0);
+               !!xenbus_read_unsigned(info->xbdev->otherend,
+                                      "feature-persistent", 0);
 
        indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
                                        "feature-max-indirect-segments", 0);
-       info->max_indirect_segments = min(indirect_segments,
-                                         xen_blkif_max_segments);
+       if (indirect_segments > xen_blkif_max_segments)
+               indirect_segments = xen_blkif_max_segments;
+       if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
+               indirect_segments = 0;
+       info->max_indirect_segments = indirect_segments;
 }
 
 /*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
        if (!xen_domain())
                return -ENODEV;
 
+       if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
+               xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
        if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
                pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
                        xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
index 8b00e79c2683a4812126fb73c7134ea157554ef2..17857beb489294b2c1b5bbd068a18ba013ab3fae 100644 (file)
@@ -1862,7 +1862,7 @@ static void config_work_handler(struct work_struct *work)
 {
        struct ports_device *portdev;
 
-       portdev = container_of(work, struct ports_device, control_work);
+       portdev = container_of(work, struct ports_device, config_work);
        if (!use_multiport(portdev)) {
                struct virtio_device *vdev;
                struct port *port;
index 8c8b495cbf0d502daa51c5c1403125b4e7ea15f3..cdc092a1d9effd7815fbd43314977059a52e0ec8 100644 (file)
@@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
        GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
                                GATE_BUS_TOP, 24, 0, 0),
        GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
-                               GATE_BUS_TOP, 27, 0, 0),
+                               GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
 };
 
 static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
        GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
 
        GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
-                       GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
+                       GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
        GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
                        GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
 
        GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
                        GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
        GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
-                       GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
+                       GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
        GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
                        GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
        GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
                        GATE_BUS_TOP, 5, 0, 0),
        GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
-                       GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
+                       GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
        GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
                        GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
        GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
@@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
        GATE(0, "aclk166", "mout_user_aclk166",
                        GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
-                       GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
+                       GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
        GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
                        GATE_BUS_TOP, 16, 0, 0),
        GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
                        GATE_BUS_TOP, 17, 0, 0),
        GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
-                       GATE_BUS_TOP, 18, 0, 0),
+                       GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
        GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
                        GATE_BUS_TOP, 28, 0, 0),
        GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
                        GATE_BUS_TOP, 29, 0, 0),
 
        GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
-                       SRC_MASK_TOP2, 24, 0, 0),
+                       SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
 
        GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
                        SRC_MASK_TOP7, 20, 0, 0),
index 4da1dc2278bd7fc34caa9e00d29f71ec1ebd015f..670ff0f25b6712ea8e2875a5ecc2a4c46cb1c554 100644 (file)
@@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
        if (mct_int_type == MCT_INT_SPI) {
                if (evt->irq != -1)
                        disable_irq_nosync(evt->irq);
+               exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
        } else {
                disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
        }
index f91c25718d164c9d9339acf671d67937995fe076..a54d65aa776d064f025036cf9aa35790d83d82e8 100644 (file)
@@ -2005,7 +2005,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                        limits = &performance_limits;
                        perf_limits = limits;
                }
-               if (policy->max >= policy->cpuinfo.max_freq) {
+               if (policy->max >= policy->cpuinfo.max_freq &&
+                   !limits->no_turbo) {
                        pr_debug("set performance\n");
                        intel_pstate_set_performance_limits(perf_limits);
                        goto out;
@@ -2047,6 +2048,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
            policy->policy != CPUFREQ_POLICY_PERFORMANCE)
                return -EINVAL;
 
+       /* When per-CPU limits are used, sysfs limits are not used */
+       if (!per_cpu_limits) {
+               unsigned int max_freq, min_freq;
+
+               max_freq = policy->cpuinfo.max_freq *
+                                               limits->max_sysfs_pct / 100;
+               min_freq = policy->cpuinfo.max_freq *
+                                               limits->min_sysfs_pct / 100;
+               cpufreq_verify_within_limits(policy, min_freq, max_freq);
+       }
+
        return 0;
 }
 
index 86bf3b84ada56d42758c2b3c57db91351b9f811b..a07ae9e37930767643302ccbec4a7284275a0f25 100644 (file)
@@ -1723,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
 }
 
 /**
- * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip
+ * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
  * @gpiochip: the gpiochip to add the irqchip to
  * @irqchip: the irqchip to add to the gpiochip
  * @first_irq: if not dynamically assigned, the base (first) IRQ to
@@ -1749,13 +1749,13 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
  * the pins on the gpiochip can generate a unique IRQ. Everything else
  * need to be open coded.
  */
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
-                         struct irq_chip *irqchip,
-                         unsigned int first_irq,
-                         irq_flow_handler_t handler,
-                         unsigned int type,
-                         bool nested,
-                         struct lock_class_key *lock_key)
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+                            struct irq_chip *irqchip,
+                            unsigned int first_irq,
+                            irq_flow_handler_t handler,
+                            unsigned int type,
+                            bool nested,
+                            struct lock_class_key *lock_key)
 {
        struct device_node *of_node;
        bool irq_base_set = false;
@@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
 
 #else /* CONFIG_GPIOLIB_IRQCHIP */
 
index 29d6d84d1c28b1e847715c08b6fac8f9d2a9aac3..41e41f90265df07af3299f65022feaf731e99ce7 100644 (file)
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
                }
                break;
        }
+
+       if (!(*out_ring && (*out_ring)->adev)) {
+               DRM_ERROR("Ring %d is not initialized on IP %d\n",
+                         ring, ip_type);
+               return -EINVAL;
+       }
+
        return 0;
 }
 
index 9999dc71b998599f909464c32e98ca8cc1be5ddb..ccb5e02e7b20ffb3f8cb1776ea6a67a1b879cb33 100644 (file)
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                      int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v10_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v10_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
                dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                             amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v10_0_show_cursor(crtc);
 
                dce_v10_0_lock_cursor(crtc, false);
index 2006abbbfb6216d34c3fb3e18573e17f37561f18..a7af5b33a5e30e1279619c096295e12e415acafe 100644 (file)
@@ -2532,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -2557,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                      int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -2598,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v11_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -2607,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v11_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v11_0_show_cursor(crtc);
@@ -2640,7 +2637,6 @@ unpin:
 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v11_0_lock_cursor(crtc, true);
@@ -2648,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
                dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                             amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v11_0_show_cursor(crtc);
 
                dce_v11_0_lock_cursor(crtc, false);
index b4e4ec630e8cfd5d38d55b61199f3cc60269e0c2..39df6a50637f5a9de0d98099c12e78188dc3ef4a 100644 (file)
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
        struct amdgpu_device *adev = crtc->dev->dev_private;
        int xorigin = 0, yorigin = 0;
 
+       int w = amdgpu_crtc->cursor_width;
+
        amdgpu_crtc->cursor_x = x;
        amdgpu_crtc->cursor_y = y;
 
@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                     int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v6_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v6_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
                dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                            amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v6_0_show_cursor(crtc);
                dce_v6_0_lock_cursor(crtc, false);
        }
index 584abe834a3ce4658de0e54f6aae416729ad868f..28102bb1704d0b618318a9bcf4b9668bce899150 100644 (file)
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                     int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v8_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v8_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
                dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                            amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v8_0_show_cursor(crtc);
 
                dce_v8_0_lock_cursor(crtc, false);
index 762f8e82ceb7465f56aa8cfcd7124ce0fc28acda..e9a176891e13319d77e10b643a44b9e893e562ac 100644 (file)
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
 
 static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
 {
-       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
-       kfree(amdgpu_encoder->enc_priv);
        drm_encoder_cleanup(encoder);
-       kfree(amdgpu_encoder);
+       kfree(encoder);
 }
 
 static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
index 45a573e63d4ae9778cff0b022ef53e22163e9a7b..e2b0b1646f995fd94d12e17a8cb1258bec34061f 100644 (file)
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
 MODULE_FIRMWARE("radeon/verde_mc.bin");
 MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/si58_mc.bin");
 
 #define MC_SEQ_MISC0__MT__MASK   0xf0000000
 #define MC_SEQ_MISC0__MT__GDDR1  0x10000000
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
        const char *chip_name;
        char fw_name[30];
        int err;
+       bool is_58_fw = false;
 
        DRM_DEBUG("\n");
 
@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+       /* this memory configuration requires special firmware */
+       if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+               is_58_fw = true;
+
+       if (is_58_fw)
+               snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+       else
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
        err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -463,19 +472,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
        WREG32(mmVM_CONTEXT1_CNTL,
               VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
               (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
-              ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
-              VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
+              ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+               gmc_v6_0_set_fault_enable_default(adev, false);
+       else
+               gmc_v6_0_set_fault_enable_default(adev, true);
 
        gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -754,7 +755,10 @@ static int gmc_v6_0_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+       if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+               return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+       else
+               return 0;
 }
 
 static int gmc_v6_0_sw_init(void *handle)
index 10bedfac27b8118dae6735a65ecebcbdd4109d2b..6e150db8f380417870cea60283ecacaf1c14fa7d 100644 (file)
@@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
 MODULE_FIRMWARE("radeon/oland_k_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
 
 union power_info {
        struct _ATOM_POWERPLAY_INFO info;
@@ -3487,17 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                    (adev->pdev->device == 0x6817) ||
                    (adev->pdev->device == 0x6806))
                        max_mclk = 120000;
-       } else if (adev->asic_type == CHIP_OLAND) {
-               if ((adev->pdev->revision == 0xC7) ||
-                   (adev->pdev->revision == 0x80) ||
-                   (adev->pdev->revision == 0x81) ||
-                   (adev->pdev->revision == 0x83) ||
-                   (adev->pdev->revision == 0x87) ||
-                   (adev->pdev->device == 0x6604) ||
-                   (adev->pdev->device == 0x6605)) {
-                       max_sclk = 75000;
-                       max_mclk = 80000;
-               }
        } else if (adev->asic_type == CHIP_HAINAN) {
                if ((adev->pdev->revision == 0x81) ||
                    (adev->pdev->revision == 0x83) ||
@@ -3506,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                    (adev->pdev->device == 0x6665) ||
                    (adev->pdev->device == 0x6667)) {
                        max_sclk = 75000;
-                       max_mclk = 80000;
                }
        }
        /* Apply dpm quirks */
@@ -7713,10 +7702,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
                        ((adev->pdev->device == 0x6660) ||
                        (adev->pdev->device == 0x6663) ||
                        (adev->pdev->device == 0x6665) ||
-                       (adev->pdev->device == 0x6667))) ||
-                   ((adev->pdev->revision == 0xc3) &&
-                       (adev->pdev->device == 0x6665)))
+                        (adev->pdev->device == 0x6667))))
                        chip_name = "hainan_k";
+               else if ((adev->pdev->revision == 0xc3) &&
+                        (adev->pdev->device == 0x6665))
+                       chip_name = "banks_k_2";
                else
                        chip_name = "hainan";
                break;
index 96444e4d862af3f011c3b3de8481aafe39529414..7fb9137dd89b1c2bc064c3ea516c243721b5dafe 100644 (file)
 #include "smu/smu_7_0_1_sh_mask.h"
 
 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
 static int uvd_v4_2_start(struct amdgpu_device *adev);
 static void uvd_v4_2_stop(struct amdgpu_device *adev);
 static int uvd_v4_2_set_clockgating_state(void *handle,
                                enum amd_clockgating_state state);
+static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
+                            bool sw_mode);
 /**
  * uvd_v4_2_ring_get_rptr - get read pointer
  *
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle)
 
        return r;
 }
-
+static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
+                                bool enable);
 /**
  * uvd_v4_2_hw_init - start and test UVD block
  *
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle)
        uint32_t tmp;
        int r;
 
-       uvd_v4_2_init_cg(adev);
-       uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+       uvd_v4_2_enable_mgcg(adev, true);
        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
        r = uvd_v4_2_start(adev);
        if (r)
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz;
        int i, j, r;
-
        /* disable byte swapping */
        u32 lmi_swap_cntl = 0;
        u32 mp_swap_cntl = 0;
 
+       WREG32(mmUVD_CGC_GATE, 0);
+       uvd_v4_2_set_dcm(adev, true);
+
        uvd_v4_2_mc_resume(adev);
 
        /* disable interupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)
 
        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+       uvd_v4_2_set_dcm(adev, false);
 }
 
 /**
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
        WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
 }
 
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
-{
-       bool hw_mode = true;
-
-       if (hw_mode) {
-               uvd_v4_2_set_dcm(adev, false);
-       } else {
-               u32 tmp = RREG32(mmUVD_CGC_CTRL);
-               tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
-               WREG32(mmUVD_CGC_CTRL, tmp);
-       }
-}
-
 static bool uvd_v4_2_is_idle(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
 static int uvd_v4_2_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
 {
-       bool gate = false;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-               return 0;
-
-       if (state == AMD_CG_STATE_GATE)
-               gate = true;
-
-       uvd_v4_2_enable_mgcg(adev, gate);
-
        return 0;
 }
 
@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
-               return 0;
-
        if (state == AMD_PG_STATE_GATE) {
                uvd_v4_2_stop(adev);
                return 0;
index 5fb0b7f5c065121218ea4befe1a9da8def2e3035..37ca685e5a9a9e358eaab6d32b9d5758fec90565 100644 (file)
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT    0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK      0x10
+#define GRBM_GFX_INDEX__VCE_ALL_PIPE           0x07
+
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0        0x8616
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1        0x8617
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2        0x8618
+#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
+
 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK  0x02
 
 #define VCE_V3_0_FW_SIZE       (384 * 1024)
@@ -54,6 +58,9 @@
 
 #define FW_52_8_3      ((52 << 24) | (8 << 16) | (3 << 8))
 
+#define GET_VCE_INSTANCE(i)  ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
+                                       | GRBM_GFX_INDEX__VCE_ALL_PIPE)
+
 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
                WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
 
                data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-               data &= ~0xffc00000;
+               data &= ~0x3ff;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
 
                data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
-               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
                vce_v3_0_mc_resume(adev, idx);
                WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
                }
        }
 
-       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
-               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
 
                if (adev->asic_type >= CHIP_STONEY)
                        WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
                        vce_v3_0_set_vce_sw_clock_gating(adev, false);
        }
 
-       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
@@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
         * VCE team suggest use bit 3--bit 6 for busy status check
         */
        mutex_lock(&adev->grbm_idx_mutex);
-       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+       WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
        if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
        }
-       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+       WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
        if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
        }
-       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+       WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
        mutex_unlock(&adev->grbm_idx_mutex);
 
        if (srbm_soft_reset) {
@@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
                if (adev->vce.harvest_config & (1 << i))
                        continue;
 
-               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
 
                if (enable) {
                        /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
                vce_v3_0_set_vce_sw_clock_gating(adev, enable);
        }
 
-       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
index b0c63c5f54c9ca95bc29eb069191455d1b56c0cd..6bb79c94cb9ffb5d7bec4fb104f9e32b7c3b3046 100644 (file)
@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                                cgs_set_clockgating_state(
                                                        hwmgr->device,
                                                        AMD_IP_BLOCK_TYPE_VCE,
-                                                       AMD_CG_STATE_UNGATE);
+                                                       AMD_CG_STATE_GATE);
                                cgs_set_powergating_state(
                                                        hwmgr->device,
                                                        AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                                cgs_set_clockgating_state(
                                                        hwmgr->device,
                                                        AMD_IP_BLOCK_TYPE_VCE,
-                                                       AMD_PG_STATE_GATE);
+                                                       AMD_PG_STATE_UNGATE);
                                cz_dpm_update_vce_dpm(hwmgr);
                                cz_enable_disable_vce_dpm(hwmgr, true);
                                return 0;
index 4b14f259a147039e8e0eacc92da77f6266a2571e..0fb4e8c8f5e13866120de7325dd801c59f1d940f 100644 (file)
@@ -1402,14 +1402,22 @@ int  cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
                                             cz_hwmgr->vce_dpm.hard_min_clk,
                                                PPSMC_MSG_SetEclkHardMin));
        } else {
-               /*EPR# 419220 -HW limitation to to */
-               cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
-               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                           PPSMC_MSG_SetEclkHardMin,
-                                           cz_get_eclk_level(hwmgr,
-                                    cz_hwmgr->vce_dpm.hard_min_clk,
-                                         PPSMC_MSG_SetEclkHardMin));
-
+               /*Program HardMin based on the vce_arbiter.ecclk */
+               if (hwmgr->vce_arbiter.ecclk == 0) {
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                           PPSMC_MSG_SetEclkHardMin, 0);
+               /* disable ECLK DPM 0. Otherwise VCE could hang if
+                * switching SCLK from DPM 0 to 6/7 */
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                       PPSMC_MSG_SetEclkSoftMin, 1);
+               } else {
+                       cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                               PPSMC_MSG_SetEclkHardMin,
+                                               cz_get_eclk_level(hwmgr,
+                                               cz_hwmgr->vce_dpm.hard_min_clk,
+                                               PPSMC_MSG_SetEclkHardMin));
+               }
        }
        return 0;
 }
index 908011d2c8f5200e92cc8db772b85315d5c16343..7abda94fc2cf3bb43ec686ac4e699a43b77a11d0 100644 (file)
@@ -113,6 +113,7 @@ struct ast_private {
        struct ttm_bo_kmap_obj cache_kmap;
        int next_cursor;
        bool support_wide_screen;
+       bool DisableP2A;
 
        enum ast_tx_chip tx_chip_type;
        u8 dp501_maxclk;
index f75c6421db6239c9435ed39dc7d6244d13894920..533e762d036dc272afbdf4d2bce146b6f1b450d9 100644 (file)
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
        } else
                *need_post = false;
 
+       /* Check P2A Access */
+       ast->DisableP2A = true;
+       data = ast_read32(ast, 0xf004);
+       if (data != 0xFFFFFFFF)
+               ast->DisableP2A = false;
+
        /* Check if we support wide screen */
        switch (ast->chip) {
        case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
                        ast->support_wide_screen = true;
                else {
                        ast->support_wide_screen = false;
-                       /* Read SCU7c (silicon revision register) */
-                       ast_write32(ast, 0xf004, 0x1e6e0000);
-                       ast_write32(ast, 0xf000, 0x1);
-                       data = ast_read32(ast, 0x1207c);
-                       data &= 0x300;
-                       if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
-                               ast->support_wide_screen = true;
-                       if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
-                               ast->support_wide_screen = true;
+                       if (ast->DisableP2A == false) {
+                               /* Read SCU7c (silicon revision register) */
+                               ast_write32(ast, 0xf004, 0x1e6e0000);
+                               ast_write32(ast, 0xf000, 0x1);
+                               data = ast_read32(ast, 0x1207c);
+                               data &= 0x300;
+                               if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+                                       ast->support_wide_screen = true;
+                               if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+                                       ast->support_wide_screen = true;
+                       }
                }
                break;
        }
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
        uint32_t data, data2;
        uint32_t denum, num, div, ref_pll;
 
-       ast_write32(ast, 0xf004, 0x1e6e0000);
-       ast_write32(ast, 0xf000, 0x1);
-
-
-       ast_write32(ast, 0x10000, 0xfc600309);
-
-       do {
-               if (pci_channel_offline(dev->pdev))
-                       return -EIO;
-       } while (ast_read32(ast, 0x10000) != 0x01);
-       data = ast_read32(ast, 0x10004);
-
-       if (data & 0x40)
+       if (ast->DisableP2A)
+       {
                ast->dram_bus_width = 16;
+               ast->dram_type = AST_DRAM_1Gx16;
+               ast->mclk = 396;
+       }
        else
-               ast->dram_bus_width = 32;
+       {
+               ast_write32(ast, 0xf004, 0x1e6e0000);
+               ast_write32(ast, 0xf000, 0x1);
+               data = ast_read32(ast, 0x10004);
+
+               if (data & 0x40)
+                       ast->dram_bus_width = 16;
+               else
+                       ast->dram_bus_width = 32;
+
+               if (ast->chip == AST2300 || ast->chip == AST2400) {
+                       switch (data & 0x03) {
+                       case 0:
+                               ast->dram_type = AST_DRAM_512Mx16;
+                               break;
+                       default:
+                       case 1:
+                               ast->dram_type = AST_DRAM_1Gx16;
+                               break;
+                       case 2:
+                               ast->dram_type = AST_DRAM_2Gx16;
+                               break;
+                       case 3:
+                               ast->dram_type = AST_DRAM_4Gx16;
+                               break;
+                       }
+               } else {
+                       switch (data & 0x0c) {
+                       case 0:
+                       case 4:
+                               ast->dram_type = AST_DRAM_512Mx16;
+                               break;
+                       case 8:
+                               if (data & 0x40)
+                                       ast->dram_type = AST_DRAM_1Gx16;
+                               else
+                                       ast->dram_type = AST_DRAM_512Mx32;
+                               break;
+                       case 0xc:
+                               ast->dram_type = AST_DRAM_1Gx32;
+                               break;
+                       }
+               }
 
-       if (ast->chip == AST2300 || ast->chip == AST2400) {
-               switch (data & 0x03) {
-               case 0:
-                       ast->dram_type = AST_DRAM_512Mx16;
-                       break;
-               default:
-               case 1:
-                       ast->dram_type = AST_DRAM_1Gx16;
-                       break;
-               case 2:
-                       ast->dram_type = AST_DRAM_2Gx16;
-                       break;
+               data = ast_read32(ast, 0x10120);
+               data2 = ast_read32(ast, 0x10170);
+               if (data2 & 0x2000)
+                       ref_pll = 14318;
+               else
+                       ref_pll = 12000;
+
+               denum = data & 0x1f;
+               num = (data & 0x3fe0) >> 5;
+               data = (data & 0xc000) >> 14;
+               switch (data) {
                case 3:
-                       ast->dram_type = AST_DRAM_4Gx16;
-                       break;
-               }
-       } else {
-               switch (data & 0x0c) {
-               case 0:
-               case 4:
-                       ast->dram_type = AST_DRAM_512Mx16;
+                       div = 0x4;
                        break;
-               case 8:
-                       if (data & 0x40)
-                               ast->dram_type = AST_DRAM_1Gx16;
-                       else
-                               ast->dram_type = AST_DRAM_512Mx32;
+               case 2:
+               case 1:
+                       div = 0x2;
                        break;
-               case 0xc:
-                       ast->dram_type = AST_DRAM_1Gx32;
+               default:
+                       div = 0x1;
                        break;
                }
+               ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
        }
-
-       data = ast_read32(ast, 0x10120);
-       data2 = ast_read32(ast, 0x10170);
-       if (data2 & 0x2000)
-               ref_pll = 14318;
-       else
-               ref_pll = 12000;
-
-       denum = data & 0x1f;
-       num = (data & 0x3fe0) >> 5;
-       data = (data & 0xc000) >> 14;
-       switch (data) {
-       case 3:
-               div = 0x4;
-               break;
-       case 2:
-       case 1:
-               div = 0x2;
-               break;
-       default:
-               div = 0x1;
-               break;
-       }
-       ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
        return 0;
 }
 
index 810c51d92b99f81a441f37d02e319ed25b7768e5..5331ee1df086e7ae3950e33efc38975b544abdd4 100644 (file)
@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
        ast_open_key(ast);
        ast_set_def_ext_reg(dev);
 
-       if (ast->chip == AST2300 || ast->chip == AST2400)
-               ast_init_dram_2300(dev);
-       else
-               ast_init_dram_reg(dev);
+       if (ast->DisableP2A == false)
+       {
+               if (ast->chip == AST2300 || ast->chip == AST2400)
+                       ast_init_dram_2300(dev);
+               else
+                       ast_init_dram_reg(dev);
 
-       ast_init_3rdtx(dev);
+               ast_init_3rdtx(dev);
+       }
+       else
+       {
+               if (ast->tx_chip_type != AST_TX_NONE)
+                       ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);        /* Enable DVO */
+       }
 }
 
 /* AST 2300 DRAM settings */
index eb9bf8786c249744418b601bae38bb78ae04f2a7..18eefdcbf1ba9f39bb225b371ac9b822bb277485 100644 (file)
@@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
 
        pm_runtime_enable(dev);
 
+       pm_runtime_get_sync(dev);
        phy_power_on(dp->phy);
 
        analogix_dp_init_dp(dp);
@@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
                goto err_disable_pm_runtime;
        }
 
+       phy_power_off(dp->phy);
+       pm_runtime_put(dev);
+
        return 0;
 
 err_disable_pm_runtime:
+
+       phy_power_off(dp->phy);
+       pm_runtime_put(dev);
        pm_runtime_disable(dev);
 
        return ret;
index 04b3c161dfae6fc9c643229f2a312eb44abca62a..7f4cc6e172abaeab8b4007497daf309058c5e554 100644 (file)
@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU
         This is a KMS driver for emulated cirrus device in qemu.
         It is *NOT* intended for real cirrus devices. This requires
         the modesetting userspace X.org driver.
+
+        Cirrus is obsolete, the hardware was designed in the 90ies
+        and can't keep up with todays needs.  More background:
+        https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+        Better alternatives are:
+          - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+          - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+          - virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
index 60697482b94c8136ea2720dbf3b9f81c94e1d823..50f5cf7b69d1dc55fd427efa61e7e8e5eeff80d0 100644 (file)
@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
 
 static void set_out_fence_for_crtc(struct drm_atomic_state *state,
-                                  struct drm_crtc *crtc, s64 __user *fence_ptr)
+                                  struct drm_crtc *crtc, s32 __user *fence_ptr)
 {
        state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
 }
 
-static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
                                          struct drm_crtc *crtc)
 {
-       s64 __user *fence_ptr;
+       s32 __user *fence_ptr;
 
        fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
        state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                state->color_mgmt_changed |= replaced;
                return ret;
        } else if (property == config->prop_out_fence_ptr) {
-               s64 __user *fence_ptr = u64_to_user_ptr(val);
+               s32 __user *fence_ptr = u64_to_user_ptr(val);
 
                if (!fence_ptr)
                        return 0;
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
  */
 
 struct drm_out_fence_state {
-       s64 __user *out_fence_ptr;
+       s32 __user *out_fence_ptr;
        struct sync_file *sync_file;
        int fd;
 };
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
                return 0;
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               u64 __user *fence_ptr;
+               s32 __user *fence_ptr;
 
                fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
 
index ac6a35212501eb56784cf14c25867fbfeda88f47..e6b19bc9021ae0634eb524af3e9f30d8b5f445ab 100644 (file)
@@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
                return NULL;
 
        mode->type |= DRM_MODE_TYPE_USERDEF;
+       /* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */
+       if (cmd->xres == 1366 && mode->hdisplay == 1368) {
+               mode->hdisplay = 1366;
+               mode->hsync_start--;
+               mode->hsync_end--;
+               drm_mode_set_name(mode);
+       }
        drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
        return mode;
 }
index ac953f037be7efb7edac1c67d43e46b20b3be365..cf8f0128c161ed6e1034322066c49d53a80b9e92 100644 (file)
@@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
        }
 
        if (dev->mode_config.delayed_event) {
+               /*
+                * FIXME:
+                *
+                * Use short (1s) delay to handle the initial delayed event.
+                * This delay should not be needed, but Optimus/nouveau will
+                * fail in a mysterious way if the delayed event is handled as
+                * soon as possible like it is done in
+                * drm_helper_probe_single_connector_modes() in case the poll
+                * was enabled before.
+                */
                poll = true;
-               delay = 0;
+               delay = HZ;
        }
 
        if (poll)
index 169ac96e8f0861f9648e0e3ca3292ca1da61556c..fe0e85b41310a8fa24ba0f6caaa598edc669a99c 100644 (file)
@@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                struct list_head list;
                bool found;
 
+               /*
+                * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
+                * drm_mm into giving out a low IOVA after address space
+                * rollover. This needs a proper fix.
+                */
                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
                        size, 0, mmu->last_iova, ~0UL,
-                       DRM_MM_SEARCH_DEFAULT);
+                       mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
 
                if (ret != -ENOSPC)
                        break;
index 6ca1f3117fe8d524fbb4dfed89692c527c5bd41c..75eeb831ed6a1d5d00fa6659d967f9b0b2ad34ff 100644 (file)
@@ -46,7 +46,8 @@ enum decon_flag_bits {
        BIT_CLKS_ENABLED,
        BIT_IRQS_ENABLED,
        BIT_WIN_UPDATED,
-       BIT_SUSPENDED
+       BIT_SUSPENDED,
+       BIT_REQUEST_UPDATE
 };
 
 struct decon_context {
@@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
                m->crtc_vsync_end = m->crtc_vsync_start + 1;
        }
 
-       decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0);
-
-       /* enable clock gate */
-       val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
-       writel(val, ctx->addr + DECON_CMU);
-
        if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
                decon_setup_trigger(ctx);
 
@@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
 
        /* window enable */
        decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
+       set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
 }
 
 static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
                return;
 
        decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
+       set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
 }
 
 static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
        for (i = ctx->first_win; i < WINDOWS_NR; i++)
                decon_shadow_protect_win(ctx, i, false);
 
-       /* standalone update */
-       decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+       if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
+               decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 
        if (ctx->out_type & IFTYPE_I80)
                set_bit(BIT_WIN_UPDATED, &ctx->flags);
index 0d41ebc4aea63a6a7e101b5439739a8faad989fd..f7bce8603958da2760080fe2bd2b30ff83553d9d 100644 (file)
 #include "i915_drv.h"
 #include "gvt.h"
 
-#define MB_TO_BYTES(mb) ((mb) << 20ULL)
-#define BYTES_TO_MB(b) ((b) >> 20ULL)
-
-#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
-#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
-#define HOST_FENCE 4
-
 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 {
        struct intel_gvt *gvt = vgpu->gvt;
@@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
        POSTING_READ(fence_reg_lo);
 }
 
+static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
+{
+       int i;
+
+       for (i = 0; i < vgpu_fence_sz(vgpu); i++)
+               intel_vgpu_write_fence(vgpu, i, 0);
+}
+
 static void free_vgpu_fence(struct intel_vgpu *vgpu)
 {
        struct intel_gvt *gvt = vgpu->gvt;
@@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
        intel_runtime_pm_get(dev_priv);
 
        mutex_lock(&dev_priv->drm.struct_mutex);
+       _clear_vgpu_fence(vgpu);
        for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
                reg = vgpu->fence.regs[i];
-               intel_vgpu_write_fence(vgpu, i, 0);
                list_add_tail(&reg->link,
                              &dev_priv->mm.fence_list);
        }
@@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
                        continue;
                list_del(pos);
                vgpu->fence.regs[i] = reg;
-               intel_vgpu_write_fence(vgpu, i, 0);
                if (++i == vgpu_fence_sz(vgpu))
                        break;
        }
        if (i != vgpu_fence_sz(vgpu))
                goto out_free_fence;
 
+       _clear_vgpu_fence(vgpu);
+
        mutex_unlock(&dev_priv->drm.struct_mutex);
        intel_runtime_pm_put(dev_priv);
        return 0;
@@ -313,6 +315,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
        free_resource(vgpu);
 }
 
+/**
+ * intel_vgpu_reset_resource - reset resource state owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to reset resource state owned by a vGPU.
+ *
+ */
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+       intel_runtime_pm_get(dev_priv);
+       _clear_vgpu_fence(vgpu);
+       intel_runtime_pm_put(dev_priv);
+}
+
 /**
  * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
  * @vgpu: vGPU
index 711c31c8d8b46c3c51e4f93a741daecf4b28ce31..4a6a2ed65732e1fde39457148165274deda52db6 100644 (file)
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
        }
        return 0;
 }
+
+/**
+ * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU
+ *
+ * @vgpu: a vGPU
+ * @primary: is the vGPU presented as primary
+ *
+ */
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+                              bool primary)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       const struct intel_gvt_device_info *info = &gvt->device_info;
+       u16 *gmch_ctl;
+       int i;
+
+       memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+              info->cfg_space_size);
+
+       if (!primary) {
+               vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
+                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
+               vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
+                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
+       }
+
+       /* Show guest that there isn't any stolen memory.*/
+       gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
+       *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
+
+       intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
+                                gvt_aperture_pa_base(gvt), true);
+
+       vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
+                                            | PCI_COMMAND_MEMORY
+                                            | PCI_COMMAND_MASTER);
+       /*
+        * Clear the bar upper 32bit and let guest to assign the new value
+        */
+       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+       memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
+
+       for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+               vgpu->cfg_space.bar[i].size = pci_resource_len(
+                                             gvt->dev_priv->drm.pdev, i * 2);
+               vgpu->cfg_space.bar[i].tracked = false;
+       }
+}
+
+/**
+ * intel_vgpu_reset_cfg_space - reset vGPU configuration space
+ *
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
+{
+       u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
+       bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
+                               INTEL_GVT_PCI_CLASS_VGA_OTHER;
+
+       if (cmd & PCI_COMMAND_MEMORY) {
+               trap_gttmmio(vgpu, false);
+               map_aperture(vgpu, false);
+       }
+
+       /**
+        * Currently we only do such reset when vGPU is not
+        * owned by any VM, so we simply restore entire cfg
+        * space to default value.
+        */
+       intel_vgpu_init_cfg_space(vgpu, primary);
+}
index d26a092c70e8c8fe2a14df28dd22e4253f739a16..e4563984cb1e8106cda73e8d0e12f12d10f6a1d2 100644 (file)
@@ -481,7 +481,6 @@ struct parser_exec_state {
        (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
 
 static unsigned long bypass_scan_mask = 0;
-static bool bypass_batch_buffer_scan = true;
 
 /* ring ALL, type = 0 */
 static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
        struct intel_gvt *gvt = s->vgpu->gvt;
 
-       if (bypass_batch_buffer_scan)
-               return 0;
-
        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
                /* BDW decides privilege based on address space */
                if (cmd_val(s, 0) & (1 << 8))
index f32bb6f6495ce0aafddf35d920298c5d315af9bc..34083731669d8cbe55b94de2e5b3585aa73a7039 100644 (file)
@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
 #define get_desc_from_elsp_dwords(ed, i) \
        ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-
-#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
-#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
-static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
-                            unsigned long add, int gmadr_bytes)
-{
-       if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
-               return -1;
-
-       *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
-               BATCH_BUFFER_ADDR_MASK;
-       if (gmadr_bytes == 8) {
-               *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
-                       add & BATCH_BUFFER_ADDR_HIGH_MASK;
-       }
-
-       return 0;
-}
-
 static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
-       int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+       const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+       struct intel_shadow_bb_entry *entry_obj;
 
        /* pin the gem object to ggtt */
-       if (!list_empty(&workload->shadow_bb)) {
-               struct intel_shadow_bb_entry *entry_obj =
-                       list_first_entry(&workload->shadow_bb,
-                                        struct intel_shadow_bb_entry,
-                                        list);
-               struct intel_shadow_bb_entry *temp;
+       list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
+               struct i915_vma *vma;
 
-               list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
-                               list) {
-                       struct i915_vma *vma;
-
-                       vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
-                                                      4, 0);
-                       if (IS_ERR(vma)) {
-                               gvt_err("Cannot pin\n");
-                               return;
-                       }
-
-                       /* FIXME: we are not tracking our pinned VMA leaving it
-                        * up to the core to fix up the stray pin_count upon
-                        * free.
-                        */
-
-                       /* update the relocate gma with shadow batch buffer*/
-                       set_gma_to_bb_cmd(entry_obj,
-                                         i915_ggtt_offset(vma),
-                                         gmadr_bytes);
+               vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
+               if (IS_ERR(vma)) {
+                       gvt_err("Cannot pin\n");
+                       return;
                }
+
+               /* FIXME: we are not tracking our pinned VMA leaving it
+                * up to the core to fix up the stray pin_count upon
+                * free.
+                */
+
+               /* update the relocate gma with shadow batch buffer*/
+               entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
+               if (gmadr_bytes == 8)
+                       entry_obj->bb_start_cmd_va[2] = 0;
        }
 }
 
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
                INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
        }
 
-       vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+       vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
                        sizeof(struct intel_vgpu_workload), 0,
                        SLAB_HWCACHE_ALIGN,
                        NULL);
index 6c5fdf5b2ce2a9d407839a3a28a7e067a5630d8d..47dec4acf7ff12951eb592e2b115953e961f6bdf 100644 (file)
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
 {
        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
-       u64 pte;
 
-#ifdef readq
-       pte = readq(addr);
-#else
-       pte = ioread32(addr);
-       pte |= (u64)ioread32(addr + 4) << 32;
-#endif
-       return pte;
+       return readq(addr);
 }
 
 static void write_pte64(struct drm_i915_private *dev_priv,
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
 {
        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
 
-#ifdef writeq
        writeq(pte, addr);
-#else
-       iowrite32((u32)pte, addr);
-       iowrite32(pte >> 32, addr + 4);
-#endif
+
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
                        info->gtt_entry_size;
                mem = kzalloc(mm->has_shadow_page_table ?
                        mm->page_table_entry_size * 2
-                               : mm->page_table_entry_size,
-                       GFP_ATOMIC);
+                               : mm->page_table_entry_size, GFP_KERNEL);
                if (!mem)
                        return -ENOMEM;
                mm->virtual_page_table = mem;
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
        struct intel_vgpu_mm *mm;
        int ret;
 
-       mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+       mm = kzalloc(sizeof(*mm), GFP_KERNEL);
        if (!mm) {
                ret = -ENOMEM;
                goto fail;
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        int page_entry_num = GTT_PAGE_SIZE >>
                                vgpu->gvt->device_info.gtt_entry_size_shift;
-       struct page *scratch_pt;
+       void *scratch_pt;
        unsigned long mfn;
        int i;
-       void *p;
 
        if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
                return -EINVAL;
 
-       scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+       scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
        if (!scratch_pt) {
                gvt_err("fail to allocate scratch page\n");
                return -ENOMEM;
        }
 
-       p = kmap_atomic(scratch_pt);
-       mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+       mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
        if (mfn == INTEL_GVT_INVALID_ADDR) {
-               gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
-               kunmap_atomic(p);
-               __free_page(scratch_pt);
+               gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
+               free_page((unsigned long)scratch_pt);
                return -EFAULT;
        }
        gtt->scratch_pt[type].page_mfn = mfn;
-       gtt->scratch_pt[type].page = scratch_pt;
+       gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
        gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
                        vgpu->id, type, mfn);
 
@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
         * scratch_pt[type] indicate the scratch pt/scratch page used by the
         * 'type' pt.
         * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
-        * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self
+        * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self
         * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
         */
        if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
                        se.val64 |= PPAT_CACHED_INDEX;
 
                for (i = 0; i < page_entry_num; i++)
-                       ops->set_entry(p, &se, i, false, 0, vgpu);
+                       ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
        }
 
-       kunmap_atomic(p);
-
        return 0;
 }
 
@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
        int ret;
-       void *page_addr;
+       void *page;
 
        gvt_dbg_core("init gtt\n");
 
@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
                return -ENODEV;
        }
 
-       gvt->gtt.scratch_ggtt_page =
-               alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
-       if (!gvt->gtt.scratch_ggtt_page) {
+       page = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!page) {
                gvt_err("fail to allocate scratch ggtt page\n");
                return -ENOMEM;
        }
+       gvt->gtt.scratch_ggtt_page = virt_to_page(page);
 
-       page_addr = page_address(gvt->gtt.scratch_ggtt_page);
-
-       gvt->gtt.scratch_ggtt_mfn =
-               intel_gvt_hypervisor_virt_to_mfn(page_addr);
+       gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
        if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
                gvt_err("fail to translate scratch ggtt page\n");
                __free_page(gvt->gtt.scratch_ggtt_page);
@@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
        for (offset = 0; offset < num_entries; offset++)
                ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
 }
+
+/**
+ * intel_vgpu_reset_gtt - reset the all GTT related status
+ * @vgpu: a vGPU
+ * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
+ *
+ * This function is called from vfio core to reset reset all
+ * GTT related status, including GGTT, PPGTT, scratch page.
+ *
+ */
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
+{
+       int i;
+
+       ppgtt_free_all_shadow_page(vgpu);
+       if (!dmlr)
+               return;
+
+       intel_vgpu_reset_ggtt(vgpu);
+
+       /* clear scratch page for security */
+       for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+               if (vgpu->gtt.scratch_pt[i].page != NULL)
+                       memset(page_address(vgpu->gtt.scratch_pt[i].page),
+                               0, PAGE_SIZE);
+       }
+}
index b315ab3593ec37f2e73faf564a6d6c9fee9e7c81..f88eb5e89bea09f7b6e8aba2e521748d54d28b77 100644 (file)
@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
 
 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
index 398877c3d2fd98a19ba8f45712ef444a4fcb0896..e6bf5c533fbe5c795a7cef6baef2815aea797ce7 100644 (file)
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
        intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
        intel_gvt_clean_vgpu_types(gvt);
 
+       idr_destroy(&gvt->vgpu_idr);
+
        kfree(dev_priv->gvt);
        dev_priv->gvt = NULL;
 }
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
        gvt_dbg_core("init gvt device\n");
 
+       idr_init(&gvt->vgpu_idr);
+
        mutex_init(&gvt->lock);
        gvt->dev_priv = dev_priv;
 
@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
        ret = intel_gvt_setup_mmio_info(gvt);
        if (ret)
-               return ret;
+               goto out_clean_idr;
 
        ret = intel_gvt_load_firmware(gvt);
        if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
        intel_gvt_free_firmware(gvt);
 out_clean_mmio_info:
        intel_gvt_clean_mmio_info(gvt);
+out_clean_idr:
+       idr_destroy(&gvt->vgpu_idr);
        kfree(gvt);
        return ret;
 }
index 0af17016f33f24f40d338715e5c78bfac8058e92..e227caf5859ebdfd2c420bc994d42a5734ba4272 100644 (file)
@@ -323,6 +323,7 @@ struct intel_vgpu_creation_params {
 
 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
                              struct intel_vgpu_creation_params *param);
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
        u32 fence, u64 value);
@@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
                                         struct intel_vgpu_type *type);
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+                                unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
 
 
@@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
                             unsigned long *g_index);
 
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+               bool primary);
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
+
 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes);
 
@@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
 
 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
-int setup_vgpu_mmio(struct intel_vgpu *vgpu);
 void populate_pvinfo_page(struct intel_vgpu *vgpu);
 
 struct intel_gvt_ops {
index 522809710312c25767209656133651971dd6a01f..ab2ea157da4cd6ab0a39fbba8bae196806b843c6 100644 (file)
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
 static int new_mmio_info(struct intel_gvt *gvt,
                u32 offset, u32 flags, u32 size,
                u32 addr_mask, u32 ro_mask, u32 device,
-               void *read, void *write)
+               int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
+               int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
 {
        struct intel_gvt_mmio_info *info, *p;
        u32 start, end, i;
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
                default:
                        /*should not hit here*/
                        gvt_err("invalid forcewake offset 0x%x\n", offset);
-                       return 1;
+                       return -EINVAL;
                }
        } else {
                ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
-static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes, unsigned long bitmap)
-{
-       struct intel_gvt_workload_scheduler *scheduler =
-               &vgpu->gvt->scheduler;
-
-       vgpu->resetting = true;
-
-       intel_vgpu_stop_schedule(vgpu);
-       /*
-        * The current_vgpu will set to NULL after stopping the
-        * scheduler when the reset is triggered by current vgpu.
-        */
-       if (scheduler->current_vgpu == NULL) {
-               mutex_unlock(&vgpu->gvt->lock);
-               intel_gvt_wait_vgpu_idle(vgpu);
-               mutex_lock(&vgpu->gvt->lock);
-       }
-
-       intel_vgpu_reset_execlist(vgpu, bitmap);
-
-       /* full GPU reset */
-       if (bitmap == 0xff) {
-               mutex_unlock(&vgpu->gvt->lock);
-               intel_vgpu_clean_gtt(vgpu);
-               mutex_lock(&vgpu->gvt->lock);
-               setup_vgpu_mmio(vgpu);
-               populate_pvinfo_page(vgpu);
-               intel_vgpu_init_gtt(vgpu);
-       }
-
-       vgpu->resetting = false;
-
-       return 0;
-}
-
 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes)
+                           void *p_data, unsigned int bytes)
 {
+       unsigned int engine_mask = 0;
        u32 data;
-       u64 bitmap = 0;
 
        write_vreg(vgpu, offset, p_data, bytes);
        data = vgpu_vreg(vgpu, offset);
 
        if (data & GEN6_GRDOM_FULL) {
                gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
-               bitmap = 0xff;
-       }
-       if (data & GEN6_GRDOM_RENDER) {
-               gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
-               bitmap |= (1 << RCS);
-       }
-       if (data & GEN6_GRDOM_MEDIA) {
-               gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
-               bitmap |= (1 << VCS);
-       }
-       if (data & GEN6_GRDOM_BLT) {
-               gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
-               bitmap |= (1 << BCS);
-       }
-       if (data & GEN6_GRDOM_VECS) {
-               gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
-               bitmap |= (1 << VECS);
-       }
-       if (data & GEN8_GRDOM_MEDIA2) {
-               gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
-               if (HAS_BSD2(vgpu->gvt->dev_priv))
-                       bitmap |= (1 << VCS2);
+               engine_mask = ALL_ENGINES;
+       } else {
+               if (data & GEN6_GRDOM_RENDER) {
+                       gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
+                       engine_mask |= (1 << RCS);
+               }
+               if (data & GEN6_GRDOM_MEDIA) {
+                       gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
+                       engine_mask |= (1 << VCS);
+               }
+               if (data & GEN6_GRDOM_BLT) {
+                       gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
+                       engine_mask |= (1 << BCS);
+               }
+               if (data & GEN6_GRDOM_VECS) {
+                       gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
+                       engine_mask |= (1 << VECS);
+               }
+               if (data & GEN8_GRDOM_MEDIA2) {
+                       gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
+                       if (HAS_BSD2(vgpu->gvt->dev_priv))
+                               engine_mask |= (1 << VCS2);
+               }
        }
-       return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
+
+       intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
+
+       return 0;
 }
 
 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
        return 0;
 }
 
-static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
        u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
-       int rc = 0;
        unsigned int id = 0;
 
        write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
                id = VECS;
                break;
        default:
-               rc = -EINVAL;
-               break;
+               return -EINVAL;
        }
        set_bit(id, (void *)vgpu->tlb_handle_pending);
 
-       return rc;
+       return 0;
 }
 
 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
index faaae07ae487277973533bbf907b6eefc2632a48..3f656e3a6e5a79a598934381d7f3a59e09eb8cd7 100644 (file)
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
        return NULL;
 }
 
-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
-               char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+                                       struct device *dev, char *buf)
 {
        struct intel_vgpu_type *type;
        unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
                                type->fence);
 }
 
-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
 static MDEV_TYPE_ATTR_RO(device_api);
 static MDEV_TYPE_ATTR_RO(description);
 
 static struct attribute *type_attrs[] = {
-       &mdev_type_attr_available_instance.attr,
+       &mdev_type_attr_available_instances.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_description.attr,
        NULL,
@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
        struct intel_vgpu_type *type;
        struct device *pdev;
        void *gvt;
+       int ret;
 
        pdev = mdev_parent_dev(mdev);
        gvt = kdev_to_i915(pdev)->gvt;
@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
        if (!type) {
                gvt_err("failed to find type %s to create\n",
                                                kobject_name(kobj));
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        vgpu = intel_gvt_ops->vgpu_create(gvt, type);
        if (IS_ERR_OR_NULL(vgpu)) {
-               gvt_err("create intel vgpu failed\n");
-               return -EINVAL;
+               ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
+               gvt_err("failed to create intel vgpu: %d\n", ret);
+               goto out;
        }
 
        INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 
        gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
                     dev_name(mdev_dev(mdev)));
-       return 0;
+       ret = 0;
+
+out:
+       return ret;
 }
 
 static int intel_vgpu_remove(struct mdev_device *mdev)
index 09c9450a19462e940eb4df2e71af1e10974ef878..4df078bc5d042b1f4fc411fbb0f98c83a3cba729 100644 (file)
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
        if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
                goto err;
 
-       mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
-       if (!mmio && !vgpu->mmio.disable_warn_untrack) {
-               gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
-                               vgpu->id, offset, bytes, *(u32 *)p_data);
-
-               if (offset == 0x206c) {
-                       gvt_err("------------------------------------------\n");
-                       gvt_err("vgpu%d: likely triggers a gfx reset\n",
-                       vgpu->id);
-                       gvt_err("------------------------------------------\n");
-                       vgpu->mmio.disable_warn_untrack = true;
-               }
-       }
-
        if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, bytes)))
                        goto err;
        }
 
+       mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
        if (mmio) {
                if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
                        if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                                goto err;
                }
                ret = mmio->read(vgpu, offset, p_data, bytes);
-       } else
+       } else {
                ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 
+               if (!vgpu->mmio.disable_warn_untrack) {
+                       gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
+                               vgpu->id, offset, bytes, *(u32 *)p_data);
+
+                       if (offset == 0x206c) {
+                               gvt_err("------------------------------------------\n");
+                               gvt_err("vgpu%d: likely triggers a gfx reset\n",
+                                       vgpu->id);
+                               gvt_err("------------------------------------------\n");
+                               vgpu->mmio.disable_warn_untrack = true;
+                       }
+               }
+       }
+
        if (ret)
                goto err;
 
@@ -302,3 +303,56 @@ err:
        mutex_unlock(&gvt->lock);
        return ret;
 }
+
+
+/**
+ * intel_vgpu_reset_mmio - reset virtual MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       const struct intel_gvt_device_info *info = &gvt->device_info;
+
+       memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
+       memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+
+       vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+
+       /* set the bit 0:2(Core C-State ) to C0 */
+       vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+}
+
+/**
+ * intel_vgpu_init_mmio - init MMIO  space
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
+{
+       const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+
+       vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+       if (!vgpu->mmio.vreg)
+               return -ENOMEM;
+
+       vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
+
+       intel_vgpu_reset_mmio(vgpu);
+
+       return 0;
+}
+
+/**
+ * intel_vgpu_clean_mmio - clean MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
+{
+       vfree(vgpu->mmio.vreg);
+       vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+}
index 87d5b5e366a3c97e7b2da79c1b8e66d97823e916..3bc620f56f351e774dc8658c9f06c79d0b24446b 100644 (file)
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
        *offset; \
 })
 
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
+
 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
 
 int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
index 81cd921770c6db7748ad8e880180c3fb6f4c448a..d9fb41ab71198cb19b1ade4796f687af49444c80 100644 (file)
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
                        vgpu->id))
                return -EINVAL;
 
-       vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
-                       GFP_DMA32 | __GFP_ZERO,
-                       INTEL_GVT_OPREGION_PORDER);
+       vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
+                       __GFP_ZERO,
+                       get_order(INTEL_GVT_OPREGION_SIZE));
 
        if (!vgpu_opregion(vgpu)->va)
                return -ENOMEM;
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
        if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
                map_vgpu_opregion(vgpu, false);
                free_pages((unsigned long)vgpu_opregion(vgpu)->va,
-                               INTEL_GVT_OPREGION_PORDER);
+                               get_order(INTEL_GVT_OPREGION_SIZE));
 
                vgpu_opregion(vgpu)->va = NULL;
        }
index 0dfe789d8f02b64ade88381b2a69fbca09f2862a..fbd023a16f18163d6dcb52bcf795675e3c16a4f7 100644 (file)
@@ -50,8 +50,7 @@
 #define INTEL_GVT_OPREGION_PARM                   0x204
 
 #define INTEL_GVT_OPREGION_PAGES       2
-#define INTEL_GVT_OPREGION_PORDER      1
-#define INTEL_GVT_OPREGION_SIZE                (2 * 4096)
+#define INTEL_GVT_OPREGION_SIZE                (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
 
 #define VGT_SPRSTRIDE(pipe)    _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
 
index 4db24225023520b879b12fce1a924cc236384bd5..e91885dffeff8d76d73f1942a9e3c91b5087639c 100644 (file)
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 {
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload;
+       struct intel_vgpu *vgpu;
        int event;
 
        mutex_lock(&gvt->lock);
 
        workload = scheduler->current_workload[ring_id];
+       vgpu = workload->vgpu;
 
-       if (!workload->status && !workload->vgpu->resetting) {
+       if (!workload->status && !vgpu->resetting) {
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));
 
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
                for_each_set_bit(event, workload->pending_events,
                                 INTEL_GVT_EVENT_MAX)
-                       intel_vgpu_trigger_virtual_event(workload->vgpu,
-                                       event);
+                       intel_vgpu_trigger_virtual_event(vgpu, event);
        }
 
        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
        scheduler->current_workload[ring_id] = NULL;
 
-       atomic_dec(&workload->vgpu->running_workload_num);
-
        list_del_init(&workload->list);
        workload->complete(workload);
 
+       atomic_dec(&vgpu->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);
        mutex_unlock(&gvt->lock);
 }
@@ -459,11 +459,11 @@ complete:
                gvt_dbg_sched("will complete workload %p\n, status: %d\n",
                                workload, workload->status);
 
-               complete_current_workload(gvt, ring_id);
-
                if (workload->req)
                        i915_gem_request_put(fetch_and_zero(&workload->req));
 
+               complete_current_workload(gvt, ring_id);
+
                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);
index 3b30c28bff515f0fd1ce7e385ea89d6f17ccc834..2833dfa8c9aed8e9b6c8f86bbd2ad9a45c461b0c 100644 (file)
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
        struct drm_i915_gem_object *obj;
        void *va;
        unsigned long len;
-       void *bb_start_cmd_va;
+       u32 *bb_start_cmd_va;
 };
 
 #define workload_q_head(vgpu, ring_id) \
index 536d2b9d577732f57a1775f54bdb4a7bce8a7e39..7295bc8e12fb240eeaf6f9434d2bba713453ad3c 100644 (file)
 #include "gvt.h"
 #include "i915_pvinfo.h"
 
-static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
-{
-       vfree(vgpu->mmio.vreg);
-       vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
-}
-
-int setup_vgpu_mmio(struct intel_vgpu *vgpu)
-{
-       struct intel_gvt *gvt = vgpu->gvt;
-       const struct intel_gvt_device_info *info = &gvt->device_info;
-
-       if (vgpu->mmio.vreg)
-               memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
-       else {
-               vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
-               if (!vgpu->mmio.vreg)
-                       return -ENOMEM;
-       }
-
-       vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
-       memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
-       memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
-
-       vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
-
-       /* set the bit 0:2(Core C-State ) to C0 */
-       vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
-       return 0;
-}
-
-static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
-       struct intel_vgpu_creation_params *param)
-{
-       struct intel_gvt *gvt = vgpu->gvt;
-       const struct intel_gvt_device_info *info = &gvt->device_info;
-       u16 *gmch_ctl;
-       int i;
-
-       memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
-              info->cfg_space_size);
-
-       if (!param->primary) {
-               vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
-                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
-               vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
-                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
-       }
-
-       /* Show guest that there isn't any stolen memory.*/
-       gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
-       *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
-
-       intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
-                                gvt_aperture_pa_base(gvt), true);
-
-       vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
-                                            | PCI_COMMAND_MEMORY
-                                            | PCI_COMMAND_MASTER);
-       /*
-        * Clear the bar upper 32bit and let guest to assign the new value
-        */
-       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
-       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
-       memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
-
-       for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
-               vgpu->cfg_space.bar[i].size = pci_resource_len(
-                                             gvt->dev_priv->drm.pdev, i * 2);
-               vgpu->cfg_space.bar[i].tracked = false;
-       }
-}
-
 void populate_pvinfo_page(struct intel_vgpu *vgpu)
 {
        /* setup the ballooning information */
@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
                if (low_avail / min_low == 0)
                        break;
                gvt->types[i].low_gm_size = min_low;
-               gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+               gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
                gvt->types[i].fence = 4;
                gvt->types[i].max_instance = low_avail / min_low;
                gvt->types[i].avail_instance = gvt->types[i].max_instance;
@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
         */
        low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
                gvt->gm.vgpu_allocated_low_gm_size;
-       high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+       high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
                gvt->gm.vgpu_allocated_high_gm_size;
        fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
                gvt->fence.vgpu_allocated_fence_num;
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        intel_vgpu_clean_gtt(vgpu);
        intel_gvt_hypervisor_detach_vgpu(vgpu);
        intel_vgpu_free_resource(vgpu);
-       clean_vgpu_mmio(vgpu);
+       intel_vgpu_clean_mmio(vgpu);
        vfree(vgpu);
 
        intel_gvt_update_vgpu_types(gvt);
@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        vgpu->gvt = gvt;
        bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
 
-       setup_vgpu_cfg_space(vgpu, param);
+       intel_vgpu_init_cfg_space(vgpu, param->primary);
 
-       ret = setup_vgpu_mmio(vgpu);
+       ret = intel_vgpu_init_mmio(vgpu);
        if (ret)
-               goto out_free_vgpu;
+               goto out_clean_idr;
 
        ret = intel_vgpu_alloc_resource(vgpu, param);
        if (ret)
@@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu:
 out_clean_vgpu_resource:
        intel_vgpu_free_resource(vgpu);
 out_clean_vgpu_mmio:
-       clean_vgpu_mmio(vgpu);
+       intel_vgpu_clean_mmio(vgpu);
+out_clean_idr:
+       idr_remove(&gvt->vgpu_idr, vgpu->id);
 out_free_vgpu:
        vfree(vgpu);
        mutex_unlock(&gvt->lock);
@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 }
 
 /**
- * intel_gvt_reset_vgpu - reset a virtual GPU
+ * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
+ * @vgpu: virtual GPU
+ * @dmlr: vGPU Device Model Level Reset or GT Reset
+ * @engine_mask: engines to reset for GT reset
+ *
+ * This function is called when user wants to reset a virtual GPU through
+ * device model reset or GT reset. The caller should hold the gvt lock.
+ *
+ * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
+ * the whole vGPU to default state as when it is created. This vGPU function
+ * is required both for functionary and security concerns.The ultimate goal
+ * of vGPU FLR is that reuse a vGPU instance by virtual machines. When we
+ * assign a vGPU to a virtual machine we must isse such reset first.
+ *
+ * Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines
+ * (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec.
+ * Unlike the FLR, GT reset only reset particular resource of a vGPU per
+ * the reset request. Guest driver can issue a GT reset by programming the
+ * virtual GDRST register to reset specific virtual GPU engine or all
+ * engines.
+ *
+ * The parameter dev_level is to identify if we will do DMLR or GT reset.
+ * The parameter engine_mask is to specific the engines that need to be
+ * resetted. If value ALL_ENGINES is given for engine_mask, it means
+ * the caller requests a full GT reset that we will reset all virtual
+ * GPU engines. For FLR, engine_mask is ignored.
+ */
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+                                unsigned int engine_mask)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+       gvt_dbg_core("------------------------------------------\n");
+       gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
+                    vgpu->id, dmlr, engine_mask);
+       vgpu->resetting = true;
+
+       intel_vgpu_stop_schedule(vgpu);
+       /*
+        * The current_vgpu will set to NULL after stopping the
+        * scheduler when the reset is triggered by current vgpu.
+        */
+       if (scheduler->current_vgpu == NULL) {
+               mutex_unlock(&gvt->lock);
+               intel_gvt_wait_vgpu_idle(vgpu);
+               mutex_lock(&gvt->lock);
+       }
+
+       intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+
+       /* full GPU reset or device model level reset */
+       if (engine_mask == ALL_ENGINES || dmlr) {
+               intel_vgpu_reset_gtt(vgpu, dmlr);
+               intel_vgpu_reset_resource(vgpu);
+               intel_vgpu_reset_mmio(vgpu);
+               populate_pvinfo_page(vgpu);
+
+               if (dmlr)
+                       intel_vgpu_reset_cfg_space(vgpu);
+       }
+
+       vgpu->resetting = false;
+       gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
+       gvt_dbg_core("------------------------------------------\n");
+}
+
+/**
+ * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
  * @vgpu: virtual GPU
  *
  * This function is called when user wants to reset a virtual GPU.
@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
  */
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
 {
+       mutex_lock(&vgpu->gvt->lock);
+       intel_gvt_reset_vgpu_locked(vgpu, true, 0);
+       mutex_unlock(&vgpu->gvt->lock);
 }
index 445fec9c2841ad61282e538d4cfdbe1436e04fb3..b2c4a0b8a627e39c5828922083b78dfd667ad047 100644 (file)
@@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev)
 
        assert_forcewakes_inactive(dev_priv);
 
-       if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                intel_hpd_poll_init(dev_priv);
 
        DRM_DEBUG_KMS("Device suspended\n");
index 243224aeabf82f111ab14c4d6995ce868c0d36c1..69bc3b0c43905eccf19ad142cf32c7064a09feb0 100644 (file)
@@ -1977,6 +1977,11 @@ struct drm_i915_private {
 
        struct i915_frontbuffer_tracking fb_tracking;
 
+       struct intel_atomic_helper {
+               struct llist_head free_list;
+               struct work_struct free_work;
+       } atomic_helper;
+
        u16 orig_clock;
 
        bool mchbar_need_disable;
index 3dd7fc662859a90803b142a8adf7f542f29c5948..4b23a78147135d2b056fb904eb0c2a2d9359f975 100644 (file)
@@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file)
 {
-       struct drm_device *dev = obj->base.dev;
        void *vaddr = obj->phys_handle->vaddr + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);
-       int ret;
 
        /* We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
-       ret = i915_gem_object_wait(obj,
-                                  I915_WAIT_INTERRUPTIBLE |
-                                  I915_WAIT_LOCKED |
-                                  I915_WAIT_ALL,
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  to_rps_client(file));
-       if (ret)
-               return ret;
-
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-       if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-               unsigned long unwritten;
-
-               /* The physical object once assigned is fixed for the lifetime
-                * of the obj, so we can safely drop the lock and continue
-                * to access vaddr.
-                */
-               mutex_unlock(&dev->struct_mutex);
-               unwritten = copy_from_user(vaddr, user_data, args->size);
-               mutex_lock(&dev->struct_mutex);
-               if (unwritten) {
-                       ret = -EFAULT;
-                       goto out;
-               }
-       }
+       if (copy_from_user(vaddr, user_data, args->size))
+               return -EFAULT;
 
        drm_clflush_virt_range(vaddr, args->size);
-       i915_gem_chipset_flush(to_i915(dev));
+       i915_gem_chipset_flush(to_i915(obj->base.dev));
 
-out:
        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
-       return ret;
+       return 0;
 }
 
 void *i915_gem_object_alloc(struct drm_device *dev)
index bd08814b015cb238c639e2becec6917901890d4e..d534a316a16ee412420c7e5cc01fb37c03755480 100644 (file)
@@ -199,6 +199,7 @@ found:
        }
 
        /* Unbinding will emit any required flushes */
+       ret = 0;
        while (!list_empty(&eviction_list)) {
                vma = list_first_entry(&eviction_list,
                                       struct i915_vma,
index a792dcb902b51d337f46f2c1ad7ad1ea737f4c8a..e924a95160796d1c8aa708e1c9ac0312b1015eb8 100644 (file)
@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                        return ret;
        }
 
+       trace_i915_vma_bind(vma, bind_flags);
        ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;
index 86ecec5601d42dd59f937f7c8c2203b3aeb105bc..588470eb8d395df2719fe2e172f93f414ba88e92 100644 (file)
@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
        struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
        struct edid *edid;
        struct i2c_adapter *i2c;
+       bool ret = false;
 
        BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
                 */
                if (!is_digital) {
                        DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-                       return true;
+                       ret = true;
+               } else {
+                       DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
                }
-
-               DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
        } else {
                DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
        }
 
        kfree(edid);
 
-       return false;
+       return ret;
 }
 
 static enum drm_connector_status
index 3dc8724df4004842c78bc985606da52a06e449c0..77f7b1d849a4e3fe28f321c133efc44383a2e0b7 100644 (file)
@@ -2585,8 +2585,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
                         * We only keep the x/y offsets, so push all of the
                         * gtt offset into the x/y offsets.
                         */
-                       _intel_adjust_tile_offset(&x, &y, tile_size,
-                                                 tile_width, tile_height, pitch_tiles,
+                       _intel_adjust_tile_offset(&x, &y,
+                                                 tile_width, tile_height,
+                                                 tile_size, pitch_tiles,
                                                  gtt_offset_rotated * tile_size, 0);
 
                        gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -2967,6 +2968,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
        unsigned int rotation = plane_state->base.rotation;
        int ret;
 
+       if (!plane_state->base.visible)
+               return 0;
+
        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->base.src,
@@ -6846,6 +6850,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
        }
 
        state = drm_atomic_state_alloc(crtc->dev);
+       if (!state) {
+               DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+                             crtc->base.id, crtc->name);
+               return;
+       }
+
        state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
 
        /* Everything's already locked, -EDEADLK can't happen. */
@@ -11243,6 +11253,7 @@ found:
        }
 
        old->restore_state = restore_state;
+       drm_atomic_state_put(state);
 
        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -14512,8 +14523,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
                break;
 
        case FENCE_FREE:
-               drm_atomic_state_put(&state->base);
-               break;
+               {
+                       struct intel_atomic_helper *helper =
+                               &to_i915(state->base.dev)->atomic_helper;
+
+                       if (llist_add(&state->freed, &helper->free_list))
+                               schedule_work(&helper->free_work);
+                       break;
+               }
        }
 
        return NOTIFY_DONE;
@@ -16392,6 +16409,18 @@ fail:
        drm_modeset_acquire_fini(&ctx);
 }
 
+static void intel_atomic_helper_free_state(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+       struct intel_atomic_state *state, *next;
+       struct llist_node *freed;
+
+       freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+       llist_for_each_entry_safe(state, next, freed, freed)
+               drm_atomic_state_put(&state->base);
+}
+
 int intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16411,6 +16440,9 @@ int intel_modeset_init(struct drm_device *dev)
 
        dev->mode_config.funcs = &intel_mode_funcs;
 
+       INIT_WORK(&dev_priv->atomic_helper.free_work,
+                 intel_atomic_helper_free_state);
+
        intel_init_quirks(dev);
 
        intel_init_pm(dev_priv);
@@ -17024,7 +17056,8 @@ void intel_display_resume(struct drm_device *dev)
 
        if (ret)
                DRM_ERROR("Restoring old state failed with %i\n", ret);
-       drm_atomic_state_put(state);
+       if (state)
+               drm_atomic_state_put(state);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -17094,6 +17127,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
 
+       flush_work(&dev_priv->atomic_helper.free_work);
+       WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+
        intel_disable_gt_powersave(dev_priv);
 
        /*
index cd132c216a67decc86e4b1fbc22d544a8230b0ad..cd72ae171eeb673de11f1ca6dcc6f1ab06fde81c 100644 (file)
@@ -370,6 +370,8 @@ struct intel_atomic_state {
        struct skl_wm_values wm_results;
 
        struct i915_sw_fence commit_ready;
+
+       struct llist_node freed;
 };
 
 struct intel_plane_state {
index beb08982dc0b5c3ea9711de7220c383e6ea41b92..8cf2d80f22540a35dc4245a842d8473ce7d852d9 100644 (file)
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
 {
        struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
+       if (!ifbdev)
+               return;
+
        ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
 }
 
index d4961fa20c73d0e2d390673889ae5fa82f04dd07..beabc17e7c8af1a08e7dff250186b32e49f717a5 100644 (file)
@@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
                                                uint32_t *batch,
                                                uint32_t index)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
        uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
 
-       /*
-        * WaDisableLSQCROPERFforOCL:kbl
-        * This WA is implemented in skl_init_clock_gating() but since
-        * this batch updates GEN8_L3SQCREG4 with default value we need to
-        * set this bit here to retain the WA during flush.
-        */
-       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
-               l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
-
        wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
index aeb637dc1fdf490caeb87f7f1938357916082ab8..91cb4c422ad5d52a5d7475e408dff744feba42d4 100644 (file)
@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
                WA_SET_BIT_MASKED(HDC_CHICKEN0,
                                  HDC_FENCE_DEST_SLM_DISABLE);
 
-       /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
-        * involving this register should also be added to WA batch as required.
-        */
-       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
-               /* WaDisableLSQCROPERFforOCL:kbl */
-               I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-                          GEN8_LQSC_RO_PERF_DIS);
-
        /* WaToEnableHwFixForPushConstHWBug:kbl */
        if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
index 14ff87686a36ffb2580353bde1f0126964c2f8dc..686a580c711a99bfe61b5867e5adc75891e5e0a0 100644 (file)
@@ -345,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 {
        struct adreno_platform_config *config = pdev->dev.platform_data;
        struct msm_gpu *gpu = &adreno_gpu->base;
-       struct msm_mmu *mmu;
        int ret;
 
        adreno_gpu->funcs = funcs;
@@ -385,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                return ret;
        }
 
-       mmu = gpu->aspace->mmu;
-       if (mmu) {
+       if (gpu->aspace && gpu->aspace->mmu) {
+               struct msm_mmu *mmu = gpu->aspace->mmu;
                ret = mmu->funcs->attach(mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
                if (ret)
index 5f6cd8745dbce78d7fec38bb6fc4b37e89c57457..c396d459a9d062769471fdc84d01120c8d1e8525 100644 (file)
@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
 
 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
-       int i;
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-       struct drm_plane *plane;
-       struct drm_plane_state *plane_state;
-
-       for_each_plane_in_state(state, plane, plane_state, i)
-               mdp5_plane_complete_commit(plane, plane_state);
 
        if (mdp5_kms->smp)
                mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
index 17b0cc10117109bbc25a27de6a5e74d9b6f6c479..cdfc63d90c7b4bf4b7f1f116b410c347560663ef 100644 (file)
@@ -104,8 +104,6 @@ struct mdp5_plane_state {
 
        /* assigned by crtc blender */
        enum mdp_mixer_stage_id stage;
-
-       bool pending : 1;
 };
 #define to_mdp5_plane_state(x) \
                container_of(x, struct mdp5_plane_state, base)
@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
 void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
 
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
-void mdp5_plane_complete_commit(struct drm_plane *plane,
-       struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
 
index c099da7bc212d52f9bdf2d42f50da98e359cf53f..25d9d0a97156765918643b7cdd440b6108d3c5ed 100644 (file)
@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
        drm_printf(p, "\tzpos=%u\n", pstate->zpos);
        drm_printf(p, "\talpha=%u\n", pstate->alpha);
        drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
-       drm_printf(p, "\tpending=%u\n", pstate->pending);
 }
 
 static void mdp5_plane_reset(struct drm_plane *plane)
@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
        if (mdp5_state && mdp5_state->base.fb)
                drm_framebuffer_reference(mdp5_state->base.fb);
 
-       mdp5_state->pending = false;
-
        return &mdp5_state->base;
 }
 
@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
        DBG("%s: check (%d -> %d)", plane->name,
                        plane_enabled(old_state), plane_enabled(state));
 
-       /* We don't allow faster-than-vblank updates.. if we did add this
-        * some day, we would need to disallow in cases where hwpipe
-        * changes
-        */
-       if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
-               return -EBUSY;
-
        max_width = config->hw->lm.max_width << 16;
        max_height = config->hw->lm.max_height << 16;
 
@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
                                     struct drm_plane_state *old_state)
 {
        struct drm_plane_state *state = plane->state;
-       struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
 
        DBG("%s: update", plane->name);
 
-       mdp5_state->pending = true;
-
        if (plane_enabled(state)) {
                int ret;
 
@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
        return pstate->hwpipe->flush_mask;
 }
 
-/* called after vsync in thread context */
-void mdp5_plane_complete_commit(struct drm_plane *plane,
-       struct drm_plane_state *state)
-{
-       struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
-
-       pstate->pending = false;
-}
-
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
 {
index d8bc59c7e26142b377623a6736c575b30f6eeea2..8098677a39167f51f5582893adf3694023c85768 100644 (file)
@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj)
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+               if (!priv->aspace[id])
+                       continue;
                msm_gem_unmap_vma(priv->aspace[id],
                                &msm_obj->domain[id], msm_obj->sgt);
        }
index cef08da1da4e0bcb6a22fa73033562cdeea55006..6a157763dfc38f672f2b9384792679faf83b95d4 100644 (file)
@@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev)
                return ret;
 
        /* enable polling for external displays */
-       drm_kms_helper_poll_enable(dev);
+       if (!dev->mode_config.poll_enabled)
+               drm_kms_helper_poll_enable(dev);
 
        /* enable hotplug interrupts */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
index 59348fc41c77b0edf725e01655fe3ef1722a4342..bc85a45f91cde756d9763e34108ab53c711b9bd4 100644 (file)
@@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
        pci_set_master(pdev);
 
        ret = nouveau_do_resume(drm_dev, true);
-       drm_kms_helper_poll_enable(drm_dev);
+
+       if (!drm_dev->mode_config.poll_enabled)
+               drm_kms_helper_poll_enable(drm_dev);
+
        /* do magic */
        nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
        vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
index 8d5ed5bfdacb1d6c59e9db9df47a74be41462a9a..42c1fa53d4314f195b637c3de4323c86b095ae0f 100644 (file)
@@ -165,6 +165,8 @@ struct nouveau_drm {
        struct backlight_device *backlight;
        struct list_head bl_connectors;
        struct work_struct hpd_work;
+       struct work_struct fbcon_work;
+       int fbcon_new_state;
 #ifdef CONFIG_ACPI
        struct notifier_block acpi_nb;
 #endif
index 2f2a3dcd4ad777addbdcbfbb8cdc14922c7082eb..fa2d0a978cccbaac7a5640b5b6911779b6e8328f 100644 (file)
@@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
        .fb_probe = nouveau_fbcon_create,
 };
 
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+       struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+       int state = READ_ONCE(drm->fbcon_new_state);
+
+       if (state == FBINFO_STATE_RUNNING)
+               pm_runtime_get_sync(drm->dev->dev);
+
+       console_lock();
+       if (state == FBINFO_STATE_RUNNING)
+               nouveau_fbcon_accel_restore(drm->dev);
+       drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+       if (state != FBINFO_STATE_RUNNING)
+               nouveau_fbcon_accel_save_disable(drm->dev);
+       console_unlock();
+
+       if (state == FBINFO_STATE_RUNNING) {
+               pm_runtime_mark_last_busy(drm->dev->dev);
+               pm_runtime_put_sync(drm->dev->dev);
+       }
+}
+
 void
 nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon) {
-               console_lock();
-               if (state == FBINFO_STATE_RUNNING)
-                       nouveau_fbcon_accel_restore(dev);
-               drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
-               if (state != FBINFO_STATE_RUNNING)
-                       nouveau_fbcon_accel_save_disable(dev);
-               console_unlock();
-       }
+
+       if (!drm->fbcon)
+               return;
+
+       drm->fbcon_new_state = state;
+       /* Since runtime resume can happen as a result of a sysfs operation,
+        * it's possible we already have the console locked. So handle fbcon
+        * init/deinit from a seperate work thread
+        */
+       schedule_work(&drm->fbcon_work);
 }
 
 int
@@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev)
                return -ENOMEM;
 
        drm->fbcon = fbcon;
+       INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
 
        drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
index 00ea0002b539b9e9b5b0a063f62deb3b7638fd56..e0c143b865f39cb36074b5e524638530cadeede9 100644 (file)
@@ -366,11 +366,10 @@ static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
        /* if we are running in a VM, make sure the device
-        * torn down properly on reboot/shutdown.
-        * unfortunately we can't detect certain
-        * hypervisors so just do this all the time.
+        * torn down properly on reboot/shutdown
         */
-       radeon_pci_remove(pdev);
+       if (radeon_device_is_virtual())
+               radeon_pci_remove(pdev);
 }
 
 static int radeon_pmops_suspend(struct device *dev)
index e8a38d29685547a69fb1d657e833cd9cf5637cff..414776811e71e0a4e9f7c97309b6c173c8f502a3 100644 (file)
@@ -114,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin");
 MODULE_FIRMWARE("radeon/hainan_rlc.bin");
 MODULE_FIRMWARE("radeon/hainan_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+
+MODULE_FIRMWARE("radeon/si58_mc.bin");
 
 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
 static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1650,6 +1653,8 @@ static int si_init_microcode(struct radeon_device *rdev)
        int err;
        int new_fw = 0;
        bool new_smc = false;
+       bool si58_fw = false;
+       bool banks2_fw = false;
 
        DRM_DEBUG("\n");
 
@@ -1727,10 +1732,11 @@ static int si_init_microcode(struct radeon_device *rdev)
                     ((rdev->pdev->device == 0x6660) ||
                      (rdev->pdev->device == 0x6663) ||
                      (rdev->pdev->device == 0x6665) ||
-                     (rdev->pdev->device == 0x6667))) ||
-                   ((rdev->pdev->revision == 0xc3) &&
-                    (rdev->pdev->device == 0x6665)))
+                     (rdev->pdev->device == 0x6667))))
                        new_smc = true;
+               else if ((rdev->pdev->revision == 0xc3) &&
+                        (rdev->pdev->device == 0x6665))
+                       banks2_fw = true;
                new_chip_name = "hainan";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1742,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev)
        default: BUG();
        }
 
+       /* this memory configuration requires special firmware */
+       if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+               si58_fw = true;
+
        DRM_INFO("Loading %s Microcode\n", new_chip_name);
 
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
@@ -1845,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev)
                }
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
+       if (si58_fw)
+               snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+       else
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
        err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
        if (err) {
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
@@ -1876,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev)
                }
        }
 
-       if (new_smc)
+       if (banks2_fw)
+               snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
+       else if (new_smc)
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
        else
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
index 13ba73fd9b68849bd34d27207eb282eb6ff62793..2944916f7102ae0395d11157f366c199d398076d 100644 (file)
@@ -3008,17 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                    (rdev->pdev->device == 0x6817) ||
                    (rdev->pdev->device == 0x6806))
                        max_mclk = 120000;
-       } else if (rdev->family == CHIP_OLAND) {
-               if ((rdev->pdev->revision == 0xC7) ||
-                   (rdev->pdev->revision == 0x80) ||
-                   (rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->revision == 0x83) ||
-                   (rdev->pdev->revision == 0x87) ||
-                   (rdev->pdev->device == 0x6604) ||
-                   (rdev->pdev->device == 0x6605)) {
-                       max_sclk = 75000;
-                       max_mclk = 80000;
-               }
        } else if (rdev->family == CHIP_HAINAN) {
                if ((rdev->pdev->revision == 0x81) ||
                    (rdev->pdev->revision == 0x83) ||
@@ -3027,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                    (rdev->pdev->device == 0x6665) ||
                    (rdev->pdev->device == 0x6667)) {
                        max_sclk = 75000;
-                       max_mclk = 80000;
                }
        }
        /* Apply dpm quirks */
index a0fd3e66bc4b39c1f5092c340ad2c705bbf431f3..7aadce1f7e7a0e56b302bded53ad8c0d5977cc22 100644 (file)
@@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
 
        }
 
-       __drm_atomic_helper_crtc_destroy_state(state);
+       drm_atomic_helper_crtc_destroy_state(crtc, state);
 }
 
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
index db920771bfb5641c9d5d8e53a8f73cf7ee501bb7..ab3016982466c3ca35ba479050ee107d26eb50ac 100644 (file)
@@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
                                          args->shader_rec_count);
        struct vc4_bo *bo;
 
-       if (uniforms_offset < shader_rec_offset ||
+       if (shader_rec_offset < args->bin_cl_size ||
+           uniforms_offset < shader_rec_offset ||
            exec_size < uniforms_offset ||
            args->shader_rec_count >= (UINT_MAX /
                                          sizeof(struct vc4_shader_state)) ||
            temp_size < exec_size) {
                DRM_ERROR("overflow in exec arguments\n");
+               ret = -EINVAL;
                goto fail;
        }
 
index 08886a3097577242f5c9e025fd6446d81bcc6dec..5cdd003605f57c99faf31832e3f3dd38a75b7402 100644 (file)
@@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
                }
 
                ret = vc4_full_res_bounds_check(exec, *obj, surf);
-               if (!ret)
+               if (ret)
                        return ret;
 
                return 0;
index dd21f950e129d4f96d71120d64e3ab499645333d..cde9f37581064828809555bb4808c38f7497d94e 100644 (file)
@@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
        info->fbops = &virtio_gpufb_ops;
        info->pixmap.flags = FB_PIXMAP_SYSTEM;
 
-       info->screen_base = obj->vmap;
+       info->screen_buffer = obj->vmap;
        info->screen_size = obj->gem_base.size;
        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(info, &vfbdev->helper,
index 717704e9ae07bd97d64f38321d2d49f07de62b83..c0303f61c26a94f1998f6883d42a0fc8cb41f432 100644 (file)
@@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
        struct usb_interface *usbif = to_usb_interface(dev->parent);
        struct usb_device *usbdev = interface_to_usbdev(usbif);
        int brightness;
-       char data[8];
+       char *data;
+
+       data = kmalloc(8, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
 
        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                              K90_REQUEST_STATUS,
                              USB_DIR_IN | USB_TYPE_VENDOR |
                              USB_RECIP_DEVICE, 0, 0, data, 8,
                              USB_CTRL_SET_TIMEOUT);
-       if (ret < 0) {
+       if (ret < 5) {
                dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
                         ret);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
        brightness = data[4];
        if (brightness < 0 || brightness > 3) {
                dev_warn(dev,
                         "Read invalid backlight brightness: %02hhx.\n",
                         data[4]);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
-       return brightness;
+       ret = brightness;
+out:
+       kfree(data);
+
+       return ret;
 }
 
 static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev)
@@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev,
        struct usb_interface *usbif = to_usb_interface(dev->parent);
        struct usb_device *usbdev = interface_to_usbdev(usbif);
        const char *macro_mode;
-       char data[8];
+       char *data;
+
+       data = kmalloc(2, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
 
        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                              K90_REQUEST_GET_MODE,
                              USB_DIR_IN | USB_TYPE_VENDOR |
                              USB_RECIP_DEVICE, 0, 0, data, 2,
                              USB_CTRL_SET_TIMEOUT);
-       if (ret < 0) {
+       if (ret < 1) {
                dev_warn(dev, "Failed to get K90 initial mode (error %d).\n",
                         ret);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
        switch (data[0]) {
@@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev,
        default:
                dev_warn(dev, "K90 in unknown mode: %02hhx.\n",
                         data[0]);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+       ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+out:
+       kfree(data);
+
+       return ret;
 }
 
 static ssize_t k90_store_macro_mode(struct device *dev,
@@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev,
        struct usb_interface *usbif = to_usb_interface(dev->parent);
        struct usb_device *usbdev = interface_to_usbdev(usbif);
        int current_profile;
-       char data[8];
+       char *data;
+
+       data = kmalloc(8, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
 
        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                              K90_REQUEST_STATUS,
                              USB_DIR_IN | USB_TYPE_VENDOR |
                              USB_RECIP_DEVICE, 0, 0, data, 8,
                              USB_CTRL_SET_TIMEOUT);
-       if (ret < 0) {
+       if (ret < 8) {
                dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
                         ret);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
        current_profile = data[7];
        if (current_profile < 1 || current_profile > 3) {
                dev_warn(dev, "Read invalid current profile: %02hhx.\n",
                         data[7]);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+       ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+out:
+       kfree(data);
+
+       return ret;
 }
 
 static ssize_t k90_store_current_profile(struct device *dev,
index b9779bcbd1403f00114f9565c543df79583baa38..8aeca038cc7331244eeeb5dc0468b22ef66e7d44 100644 (file)
@@ -740,6 +740,11 @@ static int wacom_add_shared_data(struct hid_device *hdev)
                return retval;
        }
 
+       if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
+               wacom_wac->shared->touch = hdev;
+       else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
+               wacom_wac->shared->pen = hdev;
+
 out:
        mutex_unlock(&wacom_udev_list_lock);
        return retval;
@@ -2036,10 +2041,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
        if (error)
                goto fail;
 
-       error = wacom_add_shared_data(hdev);
-       if (error)
-               goto fail;
-
        /*
         * Bamboo Pad has a generic hid handling for the Pen, and we switch it
         * into debug mode for the touch part.
@@ -2080,10 +2081,9 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
 
        wacom_update_name(wacom, wireless ? " (WL)" : "");
 
-       if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
-               wacom_wac->shared->touch = hdev;
-       else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
-               wacom_wac->shared->pen = hdev;
+       error = wacom_add_shared_data(hdev);
+       if (error)
+               goto fail;
 
        if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
             (features->quirks & WACOM_QUIRK_BATTERY)) {
index b1a9a3ca6d564c72d3f445e663b196af87743ed1..0884dc9554fdf632e684aa3689292368d5fb7e3b 100644 (file)
@@ -2187,6 +2187,16 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
 
        wacom_report_events(hdev, report);
 
+       /*
+        * Non-input reports may be sent prior to the device being
+        * completely initialized. Since only their events need
+        * to be processed, exit after 'wacom_report_events' has
+        * been called to prevent potential crashes in the report-
+        * processing functions.
+        */
+       if (report->type != HID_INPUT_REPORT)
+               return;
+
        if (WACOM_PAD_FIELD(field)) {
                wacom_wac_pad_battery_report(hdev, report);
                if (wacom->wacom_wac.pad_input)
index e7dcfac877ca2eb7b86601a9115b4c2fc27f4c01..3e70a9c5d79d5a50ba3be228cb4174d1f66c98d7 100644 (file)
@@ -2811,7 +2811,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
        if (!src_addr || !src_addr->sa_family) {
                src_addr = (struct sockaddr *) &id->route.addr.src_addr;
                src_addr->sa_family = dst_addr->sa_family;
-               if (dst_addr->sa_family == AF_INET6) {
+               if (IS_ENABLED(CONFIG_IPV6) &&
+                   dst_addr->sa_family == AF_INET6) {
                        struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
                        struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
                        src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
index 1e62a5f0cb28203e0732b9915840fd9f02d45701..4609b921f899c9d7481b86825f18fe076a6f732c 100644 (file)
@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
 
        if (access & IB_ACCESS_ON_DEMAND) {
+               put_pid(umem->pid);
                ret = ib_umem_odp_get(context, umem);
                if (ret) {
                        kfree(umem);
@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list) {
+               put_pid(umem->pid);
                kfree(umem);
                return ERR_PTR(-ENOMEM);
        }
index 9d5fe1853da46e54e9d635c5d101b688f4a8119a..6262dc035f3cea4c9613d96f67ec13e76a18643e 100644 (file)
@@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev,
 
        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
-       if (netdev->mtu >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
index f1510cc76d2dbe7027e81f5495f53bd7b6588536..9398143d7c5e93a01d5b35c67ae0fdd4e0126483 100644 (file)
@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
        skb_trim(skb, dlen);
        mutex_lock(&ep->com.mutex);
 
-       /* update RX credits */
-       update_rx_credits(ep, dlen);
-
        switch (ep->com.state) {
        case MPA_REQ_SENT:
+               update_rx_credits(ep, dlen);
                ep->rcv_seq += dlen;
                disconnect = process_mpa_reply(ep, skb);
                break;
        case MPA_REQ_WAIT:
+               update_rx_credits(ep, dlen);
                ep->rcv_seq += dlen;
                disconnect = process_mpa_request(ep, skb);
                break;
        case FPDU_MODE: {
                struct c4iw_qp_attributes attrs;
+
+               update_rx_credits(ep, dlen);
                BUG_ON(!ep->com.qp);
                if (status)
                        pr_err("%s Unexpected streaming data." \
index 19c6477af19f1416d17c15363e239307542b438d..bec82a600d77c7990432c756ecb8124df6a52852 100644 (file)
@@ -504,6 +504,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                goto skip_cqe;
        }
 
+       /*
+        * Special cqe for drain WR completions...
+        */
+       if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+               *cookie = CQE_DRAIN_COOKIE(hw_cqe);
+               *cqe = *hw_cqe;
+               goto skip_cqe;
+       }
+
        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                                c4iw_invalidate_mr(qhp->rhp,
                                                   CQE_WRID_FR_STAG(&cqe));
                        break;
+               case C4IW_DRAIN_OPCODE:
+                       wc->opcode = IB_WC_SEND;
+                       break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
@@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                }
        }
 out:
-       if (wq) {
-               if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
-                       if (t4_sq_empty(wq))
-                               complete(&qhp->sq_drained);
-                       if (t4_rq_empty(wq))
-                               complete(&qhp->rq_drained);
-               }
+       if (wq)
                spin_unlock(&qhp->lock);
-       }
        return ret;
 }
 
index 516b0ae6dc3f6d061cc3c7d8ba412196785a599c..40c0e7b9fc6e4e671eb4fb03682f9130594c90cb 100644 (file)
@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
                }
        }
 
+       rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
+       if (!rdev->free_workq) {
+               err = -ENOMEM;
+               goto err_free_status_page;
+       }
+
        rdev->status_page->db_off = 0;
 
        return 0;
+err_free_status_page:
+       free_page((unsigned long)rdev->status_page);
 destroy_ocqp_pool:
        c4iw_ocqp_pool_destroy(rdev);
 destroy_rqtpool:
@@ -862,6 +870,7 @@ destroy_resource:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
+       destroy_workqueue(rdev->free_workq);
        kfree(rdev->wr_log);
        free_page((unsigned long)rdev->status_page);
        c4iw_pblpool_destroy(rdev);
index 4788e1a46fdee23cce2956cc17ba8d09b0f3eb56..8cd4d054a87ed0ea27f323d47f82fdc7b3e10bff 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/kref.h>
 #include <linux/timer.h>
 #include <linux/io.h>
+#include <linux/workqueue.h>
 
 #include <asm/byteorder.h>
 
@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
        struct list_head qpids;
        struct list_head cqids;
        struct mutex lock;
+       struct kref kref;
 };
 
 enum c4iw_rdev_flags {
@@ -183,6 +185,7 @@ struct c4iw_rdev {
        atomic_t wr_log_idx;
        struct wr_log_entry *wr_log;
        int wr_log_size;
+       struct workqueue_struct *free_workq;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -480,8 +483,8 @@ struct c4iw_qp {
        wait_queue_head_t wait;
        struct timer_list timer;
        int sq_sig_all;
-       struct completion rq_drained;
-       struct completion sq_drained;
+       struct work_struct free_work;
+       struct c4iw_ucontext *ucontext;
 };
 
 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -495,6 +498,7 @@ struct c4iw_ucontext {
        u32 key;
        spinlock_t mmap_lock;
        struct list_head mmaps;
+       struct kref kref;
 };
 
 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
        return container_of(c, struct c4iw_ucontext, ibucontext);
 }
 
+void _c4iw_free_ucontext(struct kref *kref);
+
+static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
+{
+       kref_put(&ucontext->kref, _c4iw_free_ucontext);
+}
+
+static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
+{
+       kref_get(&ucontext->kref);
+}
+
 struct c4iw_mm_entry {
        struct list_head entry;
        u64 addr;
@@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
        return IB_QPS_ERR;
 }
 
+#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
+
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -997,8 +1015,6 @@ extern int c4iw_wr_log;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
-void c4iw_drain_rq(struct ib_qp *qp);
-void c4iw_drain_sq(struct ib_qp *qp);
 void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
 
 #endif
index 49b51b7e0fd786bf49dc2187e6c661f14087f3ae..3345e1c312f771cfaa8e31858624ca9892267467 100644 (file)
@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
        return -ENOSYS;
 }
 
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+void _c4iw_free_ucontext(struct kref *kref)
 {
-       struct c4iw_dev *rhp = to_c4iw_dev(context->device);
-       struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+       struct c4iw_ucontext *ucontext;
+       struct c4iw_dev *rhp;
        struct c4iw_mm_entry *mm, *tmp;
 
-       PDBG("%s context %p\n", __func__, context);
+       ucontext = container_of(kref, struct c4iw_ucontext, kref);
+       rhp = to_c4iw_dev(ucontext->ibucontext.device);
+
+       PDBG("%s ucontext %p\n", __func__, ucontext);
        list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
                kfree(mm);
        c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
        kfree(ucontext);
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+       struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+
+       PDBG("%s context %p\n", __func__, context);
+       c4iw_put_ucontext(ucontext);
        return 0;
 }
 
@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);
+       kref_init(&context->kref);
 
        if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
                if (!warned++)
@@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
 
        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
-       if (netdev->mtu >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
@@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
        dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
        dev->ibdev.get_port_immutable = c4iw_port_immutable;
        dev->ibdev.get_dev_fw_str = get_dev_fw_str;
-       dev->ibdev.drain_sq = c4iw_drain_sq;
-       dev->ibdev.drain_rq = c4iw_drain_rq;
 
        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
index cda5542e13a206347447a49f18f9e8cb930e7c8c..04c1c382dedb42bda631f3c62c80395df848a3c9 100644 (file)
@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
        return 0;
 }
 
-static void _free_qp(struct kref *kref)
+static void free_qp_work(struct work_struct *work)
+{
+       struct c4iw_ucontext *ucontext;
+       struct c4iw_qp *qhp;
+       struct c4iw_dev *rhp;
+
+       qhp = container_of(work, struct c4iw_qp, free_work);
+       ucontext = qhp->ucontext;
+       rhp = qhp->rhp;
+
+       PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+       destroy_qp(&rhp->rdev, &qhp->wq,
+                  ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+       if (ucontext)
+               c4iw_put_ucontext(ucontext);
+       kfree(qhp);
+}
+
+static void queue_qp_free(struct kref *kref)
 {
        struct c4iw_qp *qhp;
 
        qhp = container_of(kref, struct c4iw_qp, kref);
        PDBG("%s qhp %p\n", __func__, qhp);
-       kfree(qhp);
+       queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
 }
 
 void c4iw_qp_add_ref(struct ib_qp *qp)
@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
        PDBG("%s ib_qp %p\n", __func__, qp);
-       kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
+       kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
 }
 
 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
        return 0;
 }
 
+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+       struct t4_cqe cqe = {};
+       struct c4iw_cq *schp;
+       unsigned long flag;
+       struct t4_cq *cq;
+
+       schp = to_c4iw_cq(qhp->ibqp.send_cq);
+       cq = &schp->cq;
+
+       cqe.u.drain_cookie = wr->wr_id;
+       cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_TYPE_V(1) |
+                                CQE_SWCQE_V(1) |
+                                CQE_QPID_V(qhp->wq.sq.qid));
+
+       spin_lock_irqsave(&schp->lock, flag);
+       cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+       cq->sw_queue[cq->sw_pidx] = cqe;
+       t4_swcq_produce(cq);
+       spin_unlock_irqrestore(&schp->lock, flag);
+
+       spin_lock_irqsave(&schp->comp_handler_lock, flag);
+       (*schp->ibcq.comp_handler)(&schp->ibcq,
+                                  schp->ibcq.cq_context);
+       spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+       struct t4_cqe cqe = {};
+       struct c4iw_cq *rchp;
+       unsigned long flag;
+       struct t4_cq *cq;
+
+       rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+       cq = &rchp->cq;
+
+       cqe.u.drain_cookie = wr->wr_id;
+       cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_TYPE_V(0) |
+                                CQE_SWCQE_V(1) |
+                                CQE_QPID_V(qhp->wq.sq.qid));
+
+       spin_lock_irqsave(&rchp->lock, flag);
+       cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+       cq->sw_queue[cq->sw_pidx] = cqe;
+       t4_swcq_produce(cq);
+       spin_unlock_irqrestore(&rchp->lock, flag);
+
+       spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+       (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+                                  rchp->ibcq.cq_context);
+       spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr)
 {
@@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               *bad_wr = wr;
-               return -EINVAL;
+               complete_sq_drain_wr(qhp, wr);
+               return err;
        }
        num_wrs = t4_sq_avail(&qhp->wq);
        if (num_wrs == 0) {
@@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               *bad_wr = wr;
-               return -EINVAL;
+               complete_rq_drain_wr(qhp, wr);
+               return err;
        }
        num_wrs = t4_rq_avail(&qhp->wq);
        if (num_wrs == 0) {
@@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                }
                break;
        case C4IW_QP_STATE_CLOSING:
-               if (!internal) {
+
+               /*
+                * Allow kernel users to move to ERROR for qp draining.
+                */
+               if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+                                 C4IW_QP_STATE_ERROR)) {
                        ret = -EINVAL;
                        goto out;
                }
@@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        struct c4iw_dev *rhp;
        struct c4iw_qp *qhp;
        struct c4iw_qp_attributes attrs;
-       struct c4iw_ucontext *ucontext;
 
        qhp = to_c4iw_qp(ib_qp);
        rhp = qhp->rhp;
@@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        spin_unlock_irq(&rhp->lock);
        free_ird(rhp, qhp->attr.max_ird);
 
-       ucontext = ib_qp->uobject ?
-                  to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
-       destroy_qp(&rhp->rdev, &qhp->wq,
-                  ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
        c4iw_qp_rem_ref(ib_qp);
 
        PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
@@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        qhp->attr.max_ird = 0;
        qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
        spin_lock_init(&qhp->lock);
-       init_completion(&qhp->sq_drained);
-       init_completion(&qhp->rq_drained);
        mutex_init(&qhp->mutex);
        init_waitqueue_head(&qhp->wait);
        kref_init(&qhp->kref);
+       INIT_WORK(&qhp->free_work, free_qp_work);
 
        ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
        if (ret)
@@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                        ma_sync_key_mm->len = PAGE_SIZE;
                        insert_mmap(ucontext, ma_sync_key_mm);
                }
+
+               c4iw_get_ucontext(ucontext);
+               qhp->ucontext = ucontext;
        }
        qhp->ibqp.qp_num = qhp->wq.sq.qid;
        init_timer(&(qhp->timer));
@@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
        return 0;
 }
-
-static void move_qp_to_err(struct c4iw_qp *qp)
-{
-       struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
-
-       (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-}
-
-void c4iw_drain_sq(struct ib_qp *ibqp)
-{
-       struct c4iw_qp *qp = to_c4iw_qp(ibqp);
-       unsigned long flag;
-       bool need_to_wait;
-
-       move_qp_to_err(qp);
-       spin_lock_irqsave(&qp->lock, flag);
-       need_to_wait = !t4_sq_empty(&qp->wq);
-       spin_unlock_irqrestore(&qp->lock, flag);
-
-       if (need_to_wait)
-               wait_for_completion(&qp->sq_drained);
-}
-
-void c4iw_drain_rq(struct ib_qp *ibqp)
-{
-       struct c4iw_qp *qp = to_c4iw_qp(ibqp);
-       unsigned long flag;
-       bool need_to_wait;
-
-       move_qp_to_err(qp);
-       spin_lock_irqsave(&qp->lock, flag);
-       need_to_wait = !t4_rq_empty(&qp->wq);
-       spin_unlock_irqrestore(&qp->lock, flag);
-
-       if (need_to_wait)
-               wait_for_completion(&qp->rq_drained);
-}
index 862381aa83c824bb8712e408df39e49f47f11975..640d22148a3eeb86fdc2d5c795dcf02271bf57fb 100644 (file)
@@ -179,6 +179,7 @@ struct t4_cqe {
                        __be32 wrid_hi;
                        __be32 wrid_low;
                } gen;
+               u64 drain_cookie;
        } u;
        __be64 reserved;
        __be64 bits_type_ts;
@@ -238,6 +239,7 @@ struct t4_cqe {
 /* generic accessor macros */
 #define CQE_WRID_HI(x)         (be32_to_cpu((x)->u.gen.wrid_hi))
 #define CQE_WRID_LOW(x)                (be32_to_cpu((x)->u.gen.wrid_low))
+#define CQE_DRAIN_COOKIE(x)    ((x)->u.drain_cookie)
 
 /* macros for flit 3 of the cqe */
 #define CQE_GENBIT_S   63
index 29e97df9e1a7f87c784ebf33f4ebccfae217f433..4c000d60d5c6f865ae17aa28654497e3dbbb913c 100644 (file)
@@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
        memset(props, 0, sizeof(*props));
 
        props->max_mtu = IB_MTU_4096;
-       if (netdev->mtu >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        props->lid = 1;
        if (netif_carrier_ok(iwdev->netdev))
index aff9fb14768be9006e05145b2bdd7b6f7dbee8ff..5a31f3c6a4211d507cc4634c49df53021bba505b 100644 (file)
@@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
        memset(props, 0, sizeof(*props));
 
        props->max_mtu = IB_MTU_4096;
-
-       if (netdev->mtu  >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu  >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu  >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu  >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        props->lid = 1;
        props->lmc = 0;
index 7b74d09a8217ca0f30de8d5065bb4b829100c56f..3ac8aa5ef37de2c5242125077eef78035d565901 100644 (file)
@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
        return 0;
 }
 
-void qedr_unaffiliated_event(void *context,
-                            u8 event_code)
+void qedr_unaffiliated_event(void *context, u8 event_code)
 {
        pr_err("unaffiliated event not implemented yet\n");
 }
@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
                if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
                        goto sysfs_err;
 
+       if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
        return dev;
 
@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
        ib_dealloc_device(&dev->ibdev);
 }
 
-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
 {
-       qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
-       return 0;
+       if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
 }
 
 static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
        qedr_remove(dev);
 }
 
+static void qedr_open(struct qedr_dev *dev)
+{
+       if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
 static void qedr_mac_address_change(struct qedr_dev *dev)
 {
        union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
 
        ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
 
-       qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+       qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
 
        if (rc)
                DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
 {
        switch (event) {
        case QEDE_UP:
-               qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+               qedr_open(dev);
                break;
        case QEDE_DOWN:
                qedr_close(dev);
index 620badd7d4fbd7f6ceaa61aff95ef1e41161f0be..bb32e4792ec9f022d201c0585bcce7a7cbae179c 100644 (file)
@@ -113,6 +113,8 @@ struct qedr_device_attr {
        struct qed_rdma_events events;
 };
 
+#define QEDR_ENET_STATE_BIT    (0)
+
 struct qedr_dev {
        struct ib_device        ibdev;
        struct qed_dev          *cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
        struct qedr_cq          *gsi_sqcq;
        struct qedr_cq          *gsi_rqcq;
        struct qedr_qp          *gsi_qp;
+
+       unsigned long enet_state;
 };
 
 #define QEDR_MAX_SQ_PBL                        (0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
 #define QEDR_ROCE_MAX_CNQ_SIZE         (0x4000)
 
 #define QEDR_MAX_PORT                  (1)
+#define QEDR_PORT                      (1)
 
 #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
 
@@ -251,9 +256,6 @@ struct qedr_cq {
 
        u16 icid;
 
-       /* Lock to protect completion handler */
-       spinlock_t comp_handler_lock;
-
        /* Lock to protect multiplem CQ's */
        spinlock_t cq_lock;
        u8 arm_flags;
index 63890ebb72bdff1c525e87786f59b124b2ea8d3f..a9a8d8745d2e7f9ca20a5c849e042fdc6af261d6 100644 (file)
@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
        qedr_inc_sw_gsi_cons(&qp->sq);
        spin_unlock_irqrestore(&qp->q_lock, flags);
 
-       if (cq->ibcq.comp_handler) {
-               spin_lock_irqsave(&cq->comp_handler_lock, flags);
+       if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-               spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-       }
 }
 
 void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
 
        spin_unlock_irqrestore(&qp->q_lock, flags);
 
-       if (cq->ibcq.comp_handler) {
-               spin_lock_irqsave(&cq->comp_handler_lock, flags);
+       if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-               spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-       }
 }
 
 static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
        }
 
        if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
-               packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
-       else
                packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+       else
+               packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
 
        packet->roce_mode = roce_mode;
        memcpy(packet->header.vaddr, ud_header_buffer, header_size);
index 57c8de2080773b161272774a69eaebf02cc411ed..c7d6c9a783bd615627e720eb8f043444c99c5f3c 100644 (file)
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
                            struct ib_ucontext *context, struct ib_udata *udata)
 {
        struct qedr_dev *dev = get_qedr_dev(ibdev);
-       struct qedr_ucontext *uctx = NULL;
-       struct qedr_alloc_pd_uresp uresp;
        struct qedr_pd *pd;
        u16 pd_id;
        int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
        if (!pd)
                return ERR_PTR(-ENOMEM);
 
-       dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+       rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+       if (rc)
+               goto err;
 
-       uresp.pd_id = pd_id;
        pd->pd_id = pd_id;
 
        if (udata && context) {
+               struct qedr_alloc_pd_uresp uresp;
+
+               uresp.pd_id = pd_id;
+
                rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
-               if (rc)
+               if (rc) {
                        DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
-               uctx = get_qedr_ucontext(context);
-               uctx->pd = pd;
-               pd->uctx = uctx;
+                       dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+                       goto err;
+               }
+
+               pd->uctx = get_qedr_ucontext(context);
+               pd->uctx->pd = pd;
        }
 
        return &pd->ibpd;
+
+err:
+       kfree(pd);
+       return ERR_PTR(rc);
 }
 
 int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1600,7 +1610,7 @@ err0:
        return ERR_PTR(-EFAULT);
 }
 
-enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
 {
        switch (qp_state) {
        case QED_ROCE_QP_STATE_RESET:
@@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
        return IB_QPS_ERR;
 }
 
-enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+static enum qed_roce_qp_state qedr_get_state_from_ibqp(
+                                       enum ib_qp_state qp_state)
 {
        switch (qp_state) {
        case IB_QPS_RESET:
@@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
        int status = 0;
 
        if (new_state == qp->state)
-               return 1;
+               return 0;
 
        switch (qp->state) {
        case QED_ROCE_QP_STATE_RESET:
@@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
                /* ERR->XXX */
                switch (new_state) {
                case QED_ROCE_QP_STATE_RESET:
+                       if ((qp->rq.prod != qp->rq.cons) ||
+                           (qp->sq.prod != qp->sq.cons)) {
+                               DP_NOTICE(dev,
+                                         "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+                                         qp->rq.prod, qp->rq.cons, qp->sq.prod,
+                                         qp->sq.cons);
+                               status = -EINVAL;
+                       }
                        break;
                default:
                        status = -EINVAL;
@@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                         qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
                DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
                         qp_params.remote_mac_addr);
-;
 
                qp_params.mtu = qp->mtu;
                qp_params.lb_indication = false;
@@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
 
        qp_attr->qp_state = qedr_get_ibqp_state(params.state);
        qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
-       qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+       qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
        qp_attr->path_mig_state = IB_MIG_MIGRATED;
        qp_attr->rq_psn = params.rq_psn;
        qp_attr->sq_psn = params.sq_psn;
@@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
        qp_attr->cap.max_recv_wr = qp->rq.max_wr;
        qp_attr->cap.max_send_sge = qp->sq.max_sges;
        qp_attr->cap.max_recv_sge = qp->rq.max_sges;
-       qp_attr->cap.max_inline_data = qp->max_inline_data;
+       qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
        qp_init_attr->cap = qp_attr->cap;
 
        memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
        return rc;
 }
 
-struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
+                                      int max_page_list_len)
 {
        struct qedr_pd *pd = get_qedr_pd(ibpd);
        struct qedr_dev *dev = get_qedr_dev(ibpd->device);
@@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
        return 0;
 }
 
-enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
 {
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
@@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
        }
 }
 
-inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
 {
        int wq_is_full, err_wr, pbl_is_full;
        struct qedr_dev *dev = qp->dev;
@@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
        return true;
 }
 
-int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                     struct ib_send_wr **bad_wr)
 {
        struct qedr_dev *dev = get_qedr_dev(ibqp->device);
@@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
                                  IB_WC_SUCCESS, 0);
                break;
        case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
-               DP_ERR(dev,
-                      "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
-                      cq->icid, qp->icid);
+               if (qp->state != QED_ROCE_QP_STATE_ERR)
+                       DP_ERR(dev,
+                              "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                              cq->icid, qp->icid);
                cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
                                  IB_WC_WR_FLUSH_ERR, 1);
                break;
index 231a1ce1f4bec845d6ecfc58f53cd0d4ea5762a1..bd8fbd3d2032d390cc41268e7d32544ff0c98308 100644 (file)
@@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
        if (ret) {
                dev_err(&pdev->dev, "failed to allocate interrupts\n");
                ret = -ENOMEM;
-               goto err_netdevice;
+               goto err_free_cq_ring;
        }
 
        /* Allocate UAR table. */
@@ -1092,8 +1092,6 @@ err_free_uar_table:
 err_free_intrs:
        pvrdma_free_irq(dev);
        pvrdma_disable_msi_all(dev);
-err_netdevice:
-       unregister_netdevice_notifier(&dev->nb_netdev);
 err_free_cq_ring:
        pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
 err_free_async_ring:
index 54891370d18a5beef151816c882b40fb705a9ec5..c2aa52638dcb81ea4539b61c43d61f55edefb2b3 100644 (file)
@@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
        struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
-       struct pvrdma_alloc_ucontext_resp uresp;
+       struct pvrdma_alloc_ucontext_resp uresp = {0};
        int ret;
        void *ptr;
 
index 342e78163613dfdc719b171e1396d01fd44432eb..4abdeb359fb4f52cbacde5c3e1f488296c6064c5 100644 (file)
@@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
        }
 
        spin_lock_bh(&dev_list_lock);
-       list_add_tail(&rxe_dev_list, &rxe->list);
+       list_add_tail(&rxe->list, &rxe_dev_list);
        spin_unlock_bh(&dev_list_lock);
        return rxe;
 }
index 486d576e55bc016dda1f8ddad6b8f00941f66727..44b2108253bd988ec1f5222da999575ed37d3bed 100644 (file)
@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
        del_timer_sync(&qp->rnr_nak_timer);
 
        rxe_cleanup_task(&qp->req.task);
-       if (qp_type(qp) == IB_QPT_RC)
-               rxe_cleanup_task(&qp->comp.task);
+       rxe_cleanup_task(&qp->comp.task);
 
        /* flush out any receive wr's or pending requests */
        __rxe_do_task(&qp->req.task);
index 9104e6b8cac9f7b66322d7ebc573ab6a32fa525a..e71af717e71b0f5e6d82eb36b65d887c6005dc09 100644 (file)
@@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                                                   SHOST_DIX_GUARD_CRC);
                }
 
-               /*
-                * Limit the sg_tablesize and max_sectors based on the device
-                * max fastreg page list length.
-                */
-               shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
-                       ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-
                if (iscsi_host_add(shost,
                                   ib_conn->device->ib_device->dma_device)) {
                        mutex_unlock(&iser_conn->state_mutex);
@@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
        max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
        shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
 
+       iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
+                iser_conn, shost->sg_tablesize,
+                shost->max_sectors);
+
        if (cmds_max > max_cmds) {
                iser_info("cmds_max changed from %u to %u\n",
                          cmds_max, max_cmds);
index 0be6a7c5ddb5aea294be7c10a8247895adbc2dde..9d0b22ad58c15759c3b92472083da15af5c42bac 100644 (file)
@@ -496,7 +496,6 @@ struct ib_conn {
  * @rx_descs:         rx buffers array (cyclic buffer)
  * @num_rx_descs:     number of rx descriptors
  * @scsi_sg_tablesize: scsi host sg_tablesize
- * @scsi_max_sectors: scsi host max sectors
  */
 struct iser_conn {
        struct ib_conn               ib_conn;
@@ -519,7 +518,6 @@ struct iser_conn {
        struct iser_rx_desc          *rx_descs;
        u32                          num_rx_descs;
        unsigned short               scsi_sg_tablesize;
-       unsigned int                 scsi_max_sectors;
        bool                         snd_w_inv;
 };
 
index 8ae7a3beddb728ee22825e28382b37bc467bbe94..6a9d1cb548ee8f7f34cfe1a1f9ad7c54de133271 100644 (file)
@@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
        sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
                                 device->ib_device->attrs.max_fast_reg_page_list_len);
 
-       if (sg_tablesize > sup_sg_tablesize) {
-               sg_tablesize = sup_sg_tablesize;
-               iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
-       } else {
-               iser_conn->scsi_max_sectors = max_sectors;
-       }
-
-       iser_conn->scsi_sg_tablesize = sg_tablesize;
-
-       iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
-                iser_conn, iser_conn->scsi_sg_tablesize,
-                iser_conn->scsi_max_sectors);
+       iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
 }
 
 /**
index 8ddc071231931157ec459a6db4a0947d7b05f539..79bf48477ddb104097471a7a6040bcef2dfa0533 100644 (file)
@@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
        struct srp_fr_desc *d;
        struct ib_mr *mr;
        int i, ret = -EINVAL;
+       enum ib_mr_type mr_type;
 
        if (pool_size <= 0)
                goto err;
@@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free_list);
 
+       if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+               mr_type = IB_MR_TYPE_SG_GAPS;
+       else
+               mr_type = IB_MR_TYPE_MEM_REG;
+
        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
-               mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
-                                max_page_list_len);
+               mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
                if (IS_ERR(mr)) {
                        ret = PTR_ERR(mr);
                        if (ret == -ENOMEM)
@@ -3694,6 +3699,12 @@ static int __init srp_init_module(void)
                indirect_sg_entries = cmd_sg_entries;
        }
 
+       if (indirect_sg_entries > SG_MAX_SEGMENTS) {
+               pr_warn("Clamping indirect_sg_entries to %u\n",
+                       SG_MAX_SEGMENTS);
+               indirect_sg_entries = SG_MAX_SEGMENTS;
+       }
+
        srp_remove_wq = create_workqueue("srp_remove");
        if (!srp_remove_wq) {
                ret = -ENOMEM;
index 5dcfa2913ceb4afd4fc53186af4ff3565a283042..3b11422b1ccead13edafb6b5406910f2eda50c87 100644 (file)
@@ -11297,7 +11297,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
                                ((CAPI_MSG *) msg)->header.ncci = 0;
                                ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
                                ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
-                               PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
+                               ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
+                               ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
                                ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
                                w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
                                if (w != _QUEUE_FULL)
index 0ea4efb3de6683ee2dca71b3b7e20bc7f5f1e761..ebb5e391b800e0bbbca2fd9da926a4ff191839e0 100644 (file)
@@ -30,8 +30,9 @@
 
 #include "cec-priv.h"
 
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx);
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx);
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+                                        struct cec_msg *msg,
+                                        unsigned int la_idx);
 
 /*
  * 400 ms is the time it takes for one 16 byte message to be
@@ -288,10 +289,10 @@ static void cec_data_cancel(struct cec_data *data)
 
        /* Mark it as an error */
        data->msg.tx_ts = ktime_get_ns();
-       data->msg.tx_status = CEC_TX_STATUS_ERROR |
-                             CEC_TX_STATUS_MAX_RETRIES;
+       data->msg.tx_status |= CEC_TX_STATUS_ERROR |
+                              CEC_TX_STATUS_MAX_RETRIES;
+       data->msg.tx_error_cnt++;
        data->attempts = 0;
-       data->msg.tx_error_cnt = 1;
        /* Queue transmitted message for monitoring purposes */
        cec_queue_msg_monitor(data->adap, &data->msg, 1);
 
@@ -851,7 +852,7 @@ static const u8 cec_msg_size[256] = {
        [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
        [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
        [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
-       [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST,
+       [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
        [CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
 };
 
@@ -1250,30 +1251,49 @@ configured:
                for (i = 1; i < las->num_log_addrs; i++)
                        las->log_addr[i] = CEC_LOG_ADDR_INVALID;
        }
+       for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+               las->log_addr[i] = CEC_LOG_ADDR_INVALID;
        adap->is_configured = true;
        adap->is_configuring = false;
        cec_post_state_event(adap);
-       mutex_unlock(&adap->lock);
 
+       /*
+        * Now post the Report Features and Report Physical Address broadcast
+        * messages. Note that these are non-blocking transmits, meaning that
+        * they are just queued up and once adap->lock is unlocked the main
+        * thread will kick in and start transmitting these.
+        *
+        * If after this function is done (but before one or more of these
+        * messages are actually transmitted) the CEC adapter is unconfigured,
+        * then any remaining messages will be dropped by the main thread.
+        */
        for (i = 0; i < las->num_log_addrs; i++) {
+               struct cec_msg msg = {};
+
                if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
                    (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
                        continue;
 
-               /*
-                * Report Features must come first according
-                * to CEC 2.0
-                */
-               if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED)
-                       cec_report_features(adap, i);
-               cec_report_phys_addr(adap, i);
+               msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;
+
+               /* Report Features must come first according to CEC 2.0 */
+               if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
+                   adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
+                       cec_fill_msg_report_features(adap, &msg, i);
+                       cec_transmit_msg_fh(adap, &msg, NULL, false);
+               }
+
+               /* Report Physical Address */
+               cec_msg_report_physical_addr(&msg, adap->phys_addr,
+                                            las->primary_device_type[i]);
+               dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
+                       las->log_addr[i],
+                       cec_phys_addr_exp(adap->phys_addr));
+               cec_transmit_msg_fh(adap, &msg, NULL, false);
        }
-       for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
-               las->log_addr[i] = CEC_LOG_ADDR_INVALID;
-       mutex_lock(&adap->lock);
        adap->kthread_config = NULL;
-       mutex_unlock(&adap->lock);
        complete(&adap->config_completion);
+       mutex_unlock(&adap->lock);
        return 0;
 
 unconfigure:
@@ -1526,52 +1546,32 @@ EXPORT_SYMBOL_GPL(cec_s_log_addrs);
 
 /* High-level core CEC message handling */
 
-/* Transmit the Report Features message */
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx)
+/* Fill in the Report Features message */
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+                                        struct cec_msg *msg,
+                                        unsigned int la_idx)
 {
-       struct cec_msg msg = { };
        const struct cec_log_addrs *las = &adap->log_addrs;
        const u8 *features = las->features[la_idx];
        bool op_is_dev_features = false;
        unsigned int idx;
 
-       /* This is 2.0 and up only */
-       if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
-               return 0;
-
        /* Report Features */
-       msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
-       msg.len = 4;
-       msg.msg[1] = CEC_MSG_REPORT_FEATURES;
-       msg.msg[2] = adap->log_addrs.cec_version;
-       msg.msg[3] = las->all_device_types[la_idx];
+       msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
+       msg->len = 4;
+       msg->msg[1] = CEC_MSG_REPORT_FEATURES;
+       msg->msg[2] = adap->log_addrs.cec_version;
+       msg->msg[3] = las->all_device_types[la_idx];
 
        /* Write RC Profiles first, then Device Features */
        for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
-               msg.msg[msg.len++] = features[idx];
+               msg->msg[msg->len++] = features[idx];
                if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
                        if (op_is_dev_features)
                                break;
                        op_is_dev_features = true;
                }
        }
-       return cec_transmit_msg(adap, &msg, false);
-}
-
-/* Transmit the Report Physical Address message */
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
-{
-       const struct cec_log_addrs *las = &adap->log_addrs;
-       struct cec_msg msg = { };
-
-       /* Report Physical Address */
-       msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
-       cec_msg_report_physical_addr(&msg, adap->phys_addr,
-                                    las->primary_device_type[la_idx]);
-       dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
-               las->log_addr[la_idx],
-                       cec_phys_addr_exp(adap->phys_addr));
-       return cec_transmit_msg(adap, &msg, false);
 }
 
 /* Transmit the Feature Abort message */
@@ -1777,9 +1777,10 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
        }
 
        case CEC_MSG_GIVE_FEATURES:
-               if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
-                       return cec_report_features(adap, la_idx);
-               return 0;
+               if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
+                       return cec_feature_abort(adap, msg);
+               cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
+               return cec_transmit_msg(adap, &tx_cec_msg, false);
 
        default:
                /*
index bc5e8cfe7ca235134cfcaa5723f27fa146d5d0c5..8f11d7e459931bb5a8ed74569781dc83d4bd6422 100644 (file)
@@ -719,6 +719,9 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
                skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
                                          ETH_ALEN);
                skb_pull(h->priv->ule_skb, ETH_ALEN);
+       } else {
+               /* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
+               eth_zero_addr(dest_addr);
        }
 
        /* Handle ULE Extension Headers. */
@@ -750,16 +753,8 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
        if (!h->priv->ule_bridged) {
                skb_push(h->priv->ule_skb, ETH_HLEN);
                h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
-               if (!h->priv->ule_dbit) {
-                       /*
-                        * dest_addr buffer is only valid if
-                        * h->priv->ule_dbit == 0
-                        */
-                       memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
-                       eth_zero_addr(h->ethh->h_source);
-               } else /* zeroize source and dest */
-                       memset(h->ethh, 0, ETH_ALEN * 2);
-
+               memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
+               eth_zero_addr(h->ethh->h_source);
                h->ethh->h_proto = htons(h->priv->ule_sndu_type);
        }
        /* else:  skb is in correct state; nothing to do. */
index b31fa6fae009171a8edda132b224a52631a24b39..b979ea148251deab48fbfca7a0141aa408010f5e 100644 (file)
@@ -655,6 +655,7 @@ config VIDEO_S5K6A3
 config VIDEO_S5K4ECGX
         tristate "Samsung S5K4ECGX sensor support"
         depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       select CRC32
         ---help---
           This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
           camera sensor with an embedded SoC image signal processor.
index 59872b31f832cb7983337e1ce1290a5eea1aa36a..f4e92bdfe1926cb71be5a9dc29de7b7608903950 100644 (file)
@@ -2741,9 +2741,7 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
  * I2C Driver
  */
 
-#ifdef CONFIG_PM
-
-static int smiapp_suspend(struct device *dev)
+static int __maybe_unused smiapp_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2768,7 +2766,7 @@ static int smiapp_suspend(struct device *dev)
        return 0;
 }
 
-static int smiapp_resume(struct device *dev)
+static int __maybe_unused smiapp_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2783,13 +2781,6 @@ static int smiapp_resume(struct device *dev)
        return rval;
 }
 
-#else
-
-#define smiapp_suspend NULL
-#define smiapp_resume  NULL
-
-#endif /* CONFIG_PM */
-
 static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
 {
        struct smiapp_hwconfig *hwcfg;
@@ -2913,13 +2904,9 @@ static int smiapp_probe(struct i2c_client *client,
        if (IS_ERR(sensor->xshutdown))
                return PTR_ERR(sensor->xshutdown);
 
-       pm_runtime_enable(&client->dev);
-
-       rval = pm_runtime_get_sync(&client->dev);
-       if (rval < 0) {
-               rval = -ENODEV;
-               goto out_power_off;
-       }
+       rval = smiapp_power_on(&client->dev);
+       if (rval < 0)
+               return rval;
 
        rval = smiapp_identify_module(sensor);
        if (rval) {
@@ -3100,6 +3087,9 @@ static int smiapp_probe(struct i2c_client *client,
        if (rval < 0)
                goto out_media_entity_cleanup;
 
+       pm_runtime_set_active(&client->dev);
+       pm_runtime_get_noresume(&client->dev);
+       pm_runtime_enable(&client->dev);
        pm_runtime_set_autosuspend_delay(&client->dev, 1000);
        pm_runtime_use_autosuspend(&client->dev);
        pm_runtime_put_autosuspend(&client->dev);
@@ -3113,8 +3103,7 @@ out_cleanup:
        smiapp_cleanup(sensor);
 
 out_power_off:
-       pm_runtime_put(&client->dev);
-       pm_runtime_disable(&client->dev);
+       smiapp_power_off(&client->dev);
 
        return rval;
 }
@@ -3127,8 +3116,10 @@ static int smiapp_remove(struct i2c_client *client)
 
        v4l2_async_unregister_subdev(subdev);
 
-       pm_runtime_suspend(&client->dev);
        pm_runtime_disable(&client->dev);
+       if (!pm_runtime_status_suspended(&client->dev))
+               smiapp_power_off(&client->dev);
+       pm_runtime_set_suspended(&client->dev);
 
        for (i = 0; i < sensor->ssds_used; i++) {
                v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
index 3a0fe8cc64e94c5199730e00a6ce0e6fcb41ffb2..48646a7f3fb00c2e35944e656089a9031b2eaf01 100644 (file)
@@ -291,8 +291,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
        tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode);
        tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input);
 
-       /* Svideo should enable YCrCb output and disable GPCL output
-        * For Composite and TV, it should be the reverse
+       /*
+        * Setup the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For
+        * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK
+        * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the
+        * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set
+        * INTREQ/GPCL/VBLK to logic 1.
         */
        val = tvp5150_read(sd, TVP5150_MISC_CTL);
        if (val < 0) {
@@ -301,9 +305,9 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
        }
 
        if (decoder->input == TVP5150_SVIDEO)
-               val = (val & ~0x40) | 0x10;
+               val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK;
        else
-               val = (val & ~0x10) | 0x40;
+               val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL;
        tvp5150_write(sd, TVP5150_MISC_CTL, val);
 };
 
@@ -455,7 +459,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = {
        },{     /* Automatic offset and AGC enabled */
                TVP5150_ANAL_CHL_CTL, 0x15
        },{     /* Activate YCrCb output 0x9 or 0xd ? */
-               TVP5150_MISC_CTL, 0x6f
+               TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL |
+                                 TVP5150_MISC_CTL_INTREQ_OE |
+                                 TVP5150_MISC_CTL_YCBCR_OE |
+                                 TVP5150_MISC_CTL_SYNC_OE |
+                                 TVP5150_MISC_CTL_VBLANK |
+                                 TVP5150_MISC_CTL_CLOCK_OE,
        },{     /* Activates video std autodetection for all standards */
                TVP5150_AUTOSW_MSK, 0x0
        },{     /* Default format: 0x47. For 4:2:2: 0x40 */
@@ -861,8 +870,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
 
        f = &format->format;
 
-       tvp5150_reset(sd, 0);
-
        f->width = decoder->rect.width;
        f->height = decoder->rect.height / 2;
 
@@ -1051,21 +1058,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = {
 static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
 {
        struct tvp5150 *decoder = to_tvp5150(sd);
-       /* Output format: 8-bit ITU-R BT.656 with embedded syncs */
-       int val = 0x09;
-
-       /* Output format: 8-bit 4:2:2 YUV with discrete sync */
-       if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
-               val = 0x0d;
+       int val;
 
-       /* Initializes TVP5150 to its default values */
-       /* # set PCLK (27MHz) */
-       tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00);
+       /* Enable or disable the video output signals. */
+       val = tvp5150_read(sd, TVP5150_MISC_CTL);
+       if (val < 0)
+               return val;
+
+       val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
+                TVP5150_MISC_CTL_CLOCK_OE);
+
+       if (enable) {
+               /*
+                * Enable the YCbCr and clock outputs. In discrete sync mode
+                * (non-BT.656) additionally enable the the sync outputs.
+                */
+               val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE;
+               if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
+                       val |= TVP5150_MISC_CTL_SYNC_OE;
+       }
 
-       if (enable)
-               tvp5150_write(sd, TVP5150_MISC_CTL, val);
-       else
-               tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
+       tvp5150_write(sd, TVP5150_MISC_CTL, val);
 
        return 0;
 }
@@ -1524,7 +1537,6 @@ static int tvp5150_probe(struct i2c_client *c,
                res = core->hdl.error;
                goto err;
        }
-       v4l2_ctrl_handler_setup(&core->hdl);
 
        /* Default is no cropping */
        core->rect.top = 0;
@@ -1535,6 +1547,8 @@ static int tvp5150_probe(struct i2c_client *c,
        core->rect.left = 0;
        core->rect.width = TVP5150_H_MAX;
 
+       tvp5150_reset(sd, 0);   /* Calls v4l2_ctrl_handler_setup() */
+
        res = v4l2_async_register_subdev(sd);
        if (res < 0)
                goto err;
index 25a994944918703f064eee85a6017ca7010dca4b..30a48c28d05ab5d46d9eff505005ebf3f90e6409 100644 (file)
@@ -9,6 +9,15 @@
 #define TVP5150_ANAL_CHL_CTL         0x01 /* Analog channel controls */
 #define TVP5150_OP_MODE_CTL          0x02 /* Operation mode controls */
 #define TVP5150_MISC_CTL             0x03 /* Miscellaneous controls */
+#define TVP5150_MISC_CTL_VBLK_GPCL     BIT(7)
+#define TVP5150_MISC_CTL_GPCL          BIT(6)
+#define TVP5150_MISC_CTL_INTREQ_OE     BIT(5)
+#define TVP5150_MISC_CTL_HVLK          BIT(4)
+#define TVP5150_MISC_CTL_YCBCR_OE      BIT(3)
+#define TVP5150_MISC_CTL_SYNC_OE       BIT(2)
+#define TVP5150_MISC_CTL_VBLANK                BIT(1)
+#define TVP5150_MISC_CTL_CLOCK_OE      BIT(0)
+
 #define TVP5150_AUTOSW_MSK           0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */
 
 /* Reserved 05h */
index 979634000597f79124befabb48af909d4b8f6a6e..d5c911c09e2b792e767970f32c463862617cec19 100644 (file)
@@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev)
 static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev)
 {
        free_irq(pci_dev->irq, (void *)cobalt);
-
-       if (cobalt->msi_enabled)
-               pci_disable_msi(pci_dev);
+       pci_free_irq_vectors(pci_dev);
 }
 
 static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
@@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
           from being generated. */
        cobalt_set_interrupt(cobalt, false);
 
-       if (pci_enable_msi_range(pci_dev, 1, 1) < 1) {
+       if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) {
                cobalt_err("Could not enable MSI\n");
-               cobalt->msi_enabled = false;
                ret = -EIO;
                goto err_release;
        }
        msi_config_show(cobalt, pci_dev);
-       cobalt->msi_enabled = true;
 
        /* Register IRQ */
        if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED,
index ed00dc9d93995e03ddf128dfb476dbc73ae086a3..00f773ec359ad954a15859329fb541fd5ab2a848 100644 (file)
@@ -287,8 +287,6 @@ struct cobalt {
        u32 irq_none;
        u32 irq_full_fifo;
 
-       bool msi_enabled;
-
        /* omnitek dma */
        int dma_channels;
        int first_fifo_channel;
index 07fa08be9e994a3f7d5952251b73f871fe4ecdca..d54ebe7e02150f7240f58da51a32d025026bdf34 100644 (file)
@@ -97,14 +97,13 @@ struct pctv452e_state {
        u8 c;      /* transaction counter, wraps around...  */
        u8 initialized; /* set to 1 if 0x15 has been sent */
        u16 last_rc_key;
-
-       unsigned char data[80];
 };
 
 static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
                         unsigned int write_len, unsigned int read_len)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+       u8 *buf;
        u8 id;
        unsigned int rlen;
        int ret;
@@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
                return -EIO;
        }
 
-       mutex_lock(&state->ca_mutex);
+       buf = kmalloc(64, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
        id = state->c++;
 
-       state->data[0] = SYNC_BYTE_OUT;
-       state->data[1] = id;
-       state->data[2] = cmd;
-       state->data[3] = write_len;
+       buf[0] = SYNC_BYTE_OUT;
+       buf[1] = id;
+       buf[2] = cmd;
+       buf[3] = write_len;
 
-       memcpy(state->data + 4, data, write_len);
+       memcpy(buf + 4, data, write_len);
 
        rlen = (read_len > 0) ? 64 : 0;
-       ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
-                                 state->data, rlen, /* delay_ms */ 0);
+       ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
+                                 buf, rlen, /* delay_ms */ 0);
        if (0 != ret)
                goto failed;
 
        ret = -EIO;
-       if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+       if (SYNC_BYTE_IN != buf[0] || id != buf[1])
                goto failed;
 
-       memcpy(data, state->data + 4, read_len);
+       memcpy(data, buf + 4, read_len);
 
-       mutex_unlock(&state->ca_mutex);
+       kfree(buf);
        return 0;
 
 failed:
        err("CI error %d; %02X %02X %02X -> %*ph.",
-            ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
+            ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
 
-       mutex_unlock(&state->ca_mutex);
+       kfree(buf);
        return ret;
 }
 
@@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
                                u8 *rcv_buf, u8 rcv_len)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+       u8 *buf;
        u8 id;
        int ret;
 
-       mutex_lock(&state->ca_mutex);
+       buf = kmalloc(64, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
        id = state->c++;
 
        ret = -EINVAL;
        if (snd_len > 64 - 7 || rcv_len > 64 - 7)
                goto failed;
 
-       state->data[0] = SYNC_BYTE_OUT;
-       state->data[1] = id;
-       state->data[2] = PCTV_CMD_I2C;
-       state->data[3] = snd_len + 3;
-       state->data[4] = addr << 1;
-       state->data[5] = snd_len;
-       state->data[6] = rcv_len;
+       buf[0] = SYNC_BYTE_OUT;
+       buf[1] = id;
+       buf[2] = PCTV_CMD_I2C;
+       buf[3] = snd_len + 3;
+       buf[4] = addr << 1;
+       buf[5] = snd_len;
+       buf[6] = rcv_len;
 
-       memcpy(state->data + 7, snd_buf, snd_len);
+       memcpy(buf + 7, snd_buf, snd_len);
 
-       ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
-                                 state->data, /* rcv_len */ 64,
+       ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
+                                 buf, /* rcv_len */ 64,
                                  /* delay_ms */ 0);
        if (ret < 0)
                goto failed;
 
        /* TT USB protocol error. */
        ret = -EIO;
-       if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+       if (SYNC_BYTE_IN != buf[0] || id != buf[1])
                goto failed;
 
        /* I2C device didn't respond as expected. */
        ret = -EREMOTEIO;
-       if (state->data[5] < snd_len || state->data[6] < rcv_len)
+       if (buf[5] < snd_len || buf[6] < rcv_len)
                goto failed;
 
-       memcpy(rcv_buf, state->data + 7, rcv_len);
-       mutex_unlock(&state->ca_mutex);
+       memcpy(rcv_buf, buf + 7, rcv_len);
 
+       kfree(buf);
        return rcv_len;
 
 failed:
        err("I2C error %d; %02X %02X  %02X %02X %02X -> %*ph",
             ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
-            7, state->data);
+            7, buf);
 
-       mutex_unlock(&state->ca_mutex);
+       kfree(buf);
        return ret;
 }
 
@@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
 static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
-       u8 *rx;
+       u8 *b0, *rx;
        int ret;
 
        info("%s: %d\n", __func__, i);
@@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
        if (state->initialized)
                return 0;
 
-       rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
-       if (!rx)
+       b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL);
+       if (!b0)
                return -ENOMEM;
 
-       mutex_lock(&state->ca_mutex);
+       rx = b0 + 5;
+
        /* hmm where shoud this should go? */
        ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
        if (ret != 0)
@@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
                        __func__, ret);
 
        /* this is a one-time initialization, dont know where to put */
-       state->data[0] = 0xaa;
-       state->data[1] = state->c++;
-       state->data[2] = PCTV_CMD_RESET;
-       state->data[3] = 1;
-       state->data[4] = 0;
+       b0[0] = 0xaa;
+       b0[1] = state->c++;
+       b0[2] = PCTV_CMD_RESET;
+       b0[3] = 1;
+       b0[4] = 0;
        /* reset board */
-       ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+       ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
        if (ret)
                goto ret;
 
-       state->data[1] = state->c++;
-       state->data[4] = 1;
+       b0[1] = state->c++;
+       b0[4] = 1;
        /* reset board (again?) */
-       ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+       ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
        if (ret)
                goto ret;
 
        state->initialized = 1;
 
 ret:
-       mutex_unlock(&state->ca_mutex);
-       kfree(rx);
+       kfree(b0);
        return ret;
 }
 
 static int pctv452e_rc_query(struct dvb_usb_device *d)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+       u8 *b, *rx;
        int ret, i;
        u8 id;
 
-       mutex_lock(&state->ca_mutex);
+       b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL);
+       if (!b)
+               return -ENOMEM;
+
+       rx = b + CMD_BUFFER_SIZE;
+
        id = state->c++;
 
        /* prepare command header  */
-       state->data[0] = SYNC_BYTE_OUT;
-       state->data[1] = id;
-       state->data[2] = PCTV_CMD_IR;
-       state->data[3] = 0;
+       b[0] = SYNC_BYTE_OUT;
+       b[1] = id;
+       b[2] = PCTV_CMD_IR;
+       b[3] = 0;
 
        /* send ir request */
-       ret = dvb_usb_generic_rw(d, state->data, 4,
-                                state->data, PCTV_ANSWER_LEN, 0);
+       ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
        if (ret != 0)
                goto ret;
 
        if (debug > 3) {
-               info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
-               for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
-                       info(" %02x", state->data[i + 3]);
+               info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
+               for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
+                       info(" %02x", rx[i+3]);
 
                info("\n");
        }
 
-       if ((state->data[3] == 9) &&  (state->data[12] & 0x01)) {
+       if ((rx[3] == 9) &&  (rx[12] & 0x01)) {
                /* got a "press" event */
-               state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
+               state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
                if (debug > 2)
                        info("%s: cmd=0x%02x sys=0x%02x\n",
-                               __func__, state->data[6], state->data[7]);
+                               __func__, rx[6], rx[7]);
 
                rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
        } else if (state->last_rc_key) {
@@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d)
                state->last_rc_key = 0;
        }
 ret:
-       mutex_unlock(&state->ca_mutex);
+       kfree(b);
        return ret;
 }
 
index a0547dbf980645104d862fdc39e2ad740107ad15..76382c858c35435b98e061a7dda49d5dacad9585 100644 (file)
@@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
        struct ms_id_register id_reg;
 
        if (!(*mrq)) {
-               memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL,
+               memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
                                  sizeof(struct ms_id_register));
                *mrq = &card->current_mrq;
                return 0;
index b44306b886cb6d7a383abae4d985cd18ea1d48e3..73db08558e4dd6d100d44e04cd58649e63ee94e8 100644 (file)
@@ -3354,10 +3354,11 @@ int dw_mci_runtime_resume(struct device *dev)
 
                if (!slot)
                        continue;
-               if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
+               if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
                        dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
-                       dw_mci_setup_bus(slot, true);
-               }
+
+               /* Force setup bus to guarantee available clock output */
+               dw_mci_setup_bus(slot, true);
        }
 
        /* Now that slots are all setup, we can enable card detect */
index 7be393c96b1a0481c1f181443a7b521b647378f5..cf7c18947189a1f5c2834ad5b0d14ac615686294 100644 (file)
@@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
 
        dev->irq = pdev->irq;
        priv->base = addr;
+       priv->device = &pdev->dev;
 
        if (!c_can_pci_data->freq) {
                dev_err(&pdev->dev, "no clock frequency defined\n");
index 680d1ff07a55ddd60ceb09eb42bb98faaa42ad9f..6749b1829469411315dedac1634b7be974cf21d8 100644 (file)
@@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
        netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
                HECC_DEF_NAPI_WEIGHT);
 
-       clk_enable(priv->clk);
+       err = clk_prepare_enable(priv->clk);
+       if (err) {
+               dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
+               goto probe_exit_clk;
+       }
+
        err = register_candev(ndev);
        if (err) {
                dev_err(&pdev->dev, "register_candev() failed\n");
@@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
        struct ti_hecc_priv *priv = netdev_priv(ndev);
 
        unregister_candev(ndev);
-       clk_disable(priv->clk);
+       clk_disable_unprepare(priv->clk);
        clk_put(priv->clk);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        iounmap(priv->base);
@@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
        hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
        priv->can.state = CAN_STATE_SLEEPING;
 
-       clk_disable(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        return 0;
 }
@@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
        struct ti_hecc_priv *priv = netdev_priv(dev);
+       int err;
 
-       clk_enable(priv->clk);
+       err = clk_prepare_enable(priv->clk);
+       if (err)
+               return err;
 
        hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
        priv->can.state = CAN_STATE_ERROR_ACTIVE;
index 5b7ba25e006522a6143d6c73a9b8141a7ac76f85..8a280e7d66bddc998763288a5756b2ae6a7f70bc 100644 (file)
 #define PCS_V1_WINDOW_SELECT           0x03fc
 #define PCS_V2_WINDOW_DEF              0x9060
 #define PCS_V2_WINDOW_SELECT           0x9064
+#define PCS_V2_RV_WINDOW_DEF           0x1060
+#define PCS_V2_RV_WINDOW_SELECT                0x1064
 
 /* PCS register entry bit positions and sizes */
 #define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
index aaf0350076a90a1dcd91502b9ce8e4a81cff7174..a7d16db5c4b21d8f9d80d9801259419da8def379 100644 (file)
@@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
        offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
 
        spin_lock_irqsave(&pdata->xpcs_lock, flags);
-       XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+       XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
        mmd_data = XPCS16_IOREAD(pdata, offset);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
 
@@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
        offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
 
        spin_lock_irqsave(&pdata->xpcs_lock, flags);
-       XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+       XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
        XPCS16_IOWRITE(pdata, offset, mmd_data);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
 }
@@ -3407,8 +3407,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
 
        /* Flush Tx queues */
        ret = xgbe_flush_tx_queues(pdata);
-       if (ret)
+       if (ret) {
+               netdev_err(pdata->netdev, "error flushing TX queues\n");
                return ret;
+       }
 
        /*
         * Initialize DMA related features
index f8648e4dbca3f33f4fb64a818ee39c3369725d19..3aa457c8ca21d30f768eaf57b2f94eedeaadf40d 100644 (file)
@@ -1070,7 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
        DBGPR("-->xgbe_start\n");
 
-       hw_if->init(pdata);
+       ret = hw_if->init(pdata);
+       if (ret)
+               return ret;
 
        xgbe_napi_enable(pdata, 1);
 
index e76b7f65b805171ca81945efb75a44c3c1d7242d..c2730f15bd8b62d2e0487e4eef11f6518a476c39 100644 (file)
@@ -265,6 +265,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct xgbe_prv_data *pdata;
        struct device *dev = &pdev->dev;
        void __iomem * const *iomap_table;
+       struct pci_dev *rdev;
        unsigned int ma_lo, ma_hi;
        unsigned int reg;
        int bar_mask;
@@ -326,8 +327,20 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (netif_msg_probe(pdata))
                dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);
 
+       /* Set the PCS indirect addressing definition registers */
+       rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+       if (rdev &&
+           (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
+               pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+               pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+       } else {
+               pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+               pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+       }
+       pci_dev_put(rdev);
+
        /* Configure the PCS indirect addressing support */
-       reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
+       reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
        pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
        pdata->xpcs_window <<= 6;
        pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
index f52a9bd05baca559d7afd6e1184010d103e1c08a..00108815b55eeb1417bd83bea4c9fbb7aa101fe1 100644 (file)
@@ -955,6 +955,8 @@ struct xgbe_prv_data {
 
        /* XPCS indirect addressing lock */
        spinlock_t xpcs_lock;
+       unsigned int xpcs_window_def_reg;
+       unsigned int xpcs_window_sel_reg;
        unsigned int xpcs_window;
        unsigned int xpcs_window_size;
        unsigned int xpcs_window_mask;
index 4c80e0689db9b5085c5996e65fac15331c3ad3fa..391bb5c09a6a95e404ea5842c5987b8ae1758901 100644 (file)
@@ -685,8 +685,6 @@ static int alx_alloc_rings(struct alx_priv *alx)
                return -ENOMEM;
        }
 
-       alx_reinit_rings(alx);
-
        return 0;
 }
 
@@ -703,7 +701,7 @@ static void alx_free_rings(struct alx_priv *alx)
        if (alx->qnapi[0] && alx->qnapi[0]->rxq)
                kfree(alx->qnapi[0]->rxq->bufs);
 
-       if (!alx->descmem.virt)
+       if (alx->descmem.virt)
                dma_free_coherent(&alx->hw.pdev->dev,
                                  alx->descmem.size,
                                  alx->descmem.virt,
@@ -984,6 +982,7 @@ static int alx_realloc_resources(struct alx_priv *alx)
        alx_free_rings(alx);
        alx_free_napis(alx);
        alx_disable_advanced_intr(alx);
+       alx_init_intr(alx, false);
 
        err = alx_alloc_napis(alx);
        if (err)
@@ -1241,6 +1240,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
        if (err)
                goto out_free_rings;
 
+       /* must be called after alx_request_irq because the chip stops working
+        * if we copy the dma addresses in alx_init_ring_ptrs twice when
+        * requesting msi-x interrupts failed
+        */
+       alx_reinit_rings(alx);
+
        netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
        netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
 
index 3b14d51442280b8a399b0d9b7145bebfc1560597..c483618b57bd7ef93f8522a91814a5dd9d9b0eed 100644 (file)
@@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev)
                priv->old_link = 0;
                priv->old_duplex = -1;
                priv->old_pause = -1;
+       } else {
+               phydev = NULL;
        }
 
        /* mask all interrupts and request them */
@@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev)
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
                         ENETDMAC_IRMASK, priv->tx_chan);
 
-       if (priv->has_phy)
+       if (phydev)
                phy_start(phydev);
        else
                bcm_enet_adjust_link(dev);
@@ -1126,7 +1128,7 @@ out_freeirq:
        free_irq(dev->irq, dev);
 
 out_phy_disconnect:
-       if (priv->has_phy)
+       if (phydev)
                phy_disconnect(phydev);
 
        return ret;
index 2b46f9b09a03cd1185a1f138c017d25c33b41574..3d83b90280146c74f95c03b3644ea41d5bf5e901 100644 (file)
@@ -1096,7 +1096,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
 {
 #ifdef CONFIG_INET
        struct tcphdr *th;
-       int len, nw_off, tcp_opt_len;
+       int len, nw_off, tcp_opt_len = 0;
 
        if (tcp_ts)
                tcp_opt_len = 12;
@@ -5441,17 +5441,12 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
        if ((link_info->support_auto_speeds | diff) !=
            link_info->support_auto_speeds) {
                /* An advertised speed is no longer supported, so we need to
-                * update the advertisement settings.  See bnxt_reset() for
-                * comments about the rtnl_lock() sequence below.
+                * update the advertisement settings.  Caller holds RTNL
+                * so we can modify link settings.
                 */
-               clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
-               rtnl_lock();
                link_info->advertising = link_info->support_auto_speeds;
-               if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
-                   (link_info->autoneg & BNXT_AUTONEG_SPEED))
+               if (link_info->autoneg & BNXT_AUTONEG_SPEED)
                        bnxt_hwrm_set_link_setting(bp, true, false);
-               set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
-               rtnl_unlock();
        }
        return 0;
 }
@@ -6367,29 +6362,37 @@ bnxt_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
 
-/* Only called from bnxt_sp_task() */
-static void bnxt_reset(struct bnxt *bp, bool silent)
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
 {
-       /* bnxt_reset_task() calls bnxt_close_nic() which waits
-        * for BNXT_STATE_IN_SP_TASK to clear.
-        * If there is a parallel dev_close(), bnxt_close() may be holding
+       /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
+        * set.  If the device is being closed, bnxt_close() may be holding
         * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
         * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
         */
        clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
        rtnl_lock();
-       if (test_bit(BNXT_STATE_OPEN, &bp->state))
-               bnxt_reset_task(bp, silent);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
        set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
        rtnl_unlock();
 }
 
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+       bnxt_rtnl_lock_sp(bp);
+       if (test_bit(BNXT_STATE_OPEN, &bp->state))
+               bnxt_reset_task(bp, silent);
+       bnxt_rtnl_unlock_sp(bp);
+}
+
 static void bnxt_cfg_ntp_filters(struct bnxt *);
 
 static void bnxt_sp_task(struct work_struct *work)
 {
        struct bnxt *bp = container_of(work, struct bnxt, sp_task);
-       int rc;
 
        set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
        smp_mb__after_atomic();
@@ -6403,16 +6406,6 @@ static void bnxt_sp_task(struct work_struct *work)
 
        if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
                bnxt_cfg_ntp_filters(bp);
-       if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
-               if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
-                                      &bp->sp_event))
-                       bnxt_hwrm_phy_qcaps(bp);
-
-               rc = bnxt_update_link(bp, true);
-               if (rc)
-                       netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
-                                  rc);
-       }
        if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
                bnxt_hwrm_exec_fwd_req(bp);
        if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6433,18 +6426,39 @@ static void bnxt_sp_task(struct work_struct *work)
                bnxt_hwrm_tunnel_dst_port_free(
                        bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
        }
+       if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+               bnxt_hwrm_port_qstats(bp);
+
+       /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
+        * must be the last functions to be called before exiting.
+        */
+       if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+               int rc = 0;
+
+               if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+                                      &bp->sp_event))
+                       bnxt_hwrm_phy_qcaps(bp);
+
+               bnxt_rtnl_lock_sp(bp);
+               if (test_bit(BNXT_STATE_OPEN, &bp->state))
+                       rc = bnxt_update_link(bp, true);
+               bnxt_rtnl_unlock_sp(bp);
+               if (rc)
+                       netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+                                  rc);
+       }
+       if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
+               bnxt_rtnl_lock_sp(bp);
+               if (test_bit(BNXT_STATE_OPEN, &bp->state))
+                       bnxt_get_port_module_status(bp);
+               bnxt_rtnl_unlock_sp(bp);
+       }
        if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
                bnxt_reset(bp, false);
 
        if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
                bnxt_reset(bp, true);
 
-       if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
-               bnxt_get_port_module_status(bp);
-
-       if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
-               bnxt_hwrm_port_qstats(bp);
-
        smp_mb__before_atomic();
        clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 }
index a6e7afa878befd1abe00404b2cd4becfa174d103..c1b6716679208a69bc66f017a68e5b6e35c60064 100644 (file)
@@ -2948,7 +2948,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
        }
 
        /* try reuse page */
-       if (unlikely(page_count(page) != 1))
+       if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
                return false;
 
        /* change offset to the other half */
index c6ba75c595e07a1275d0dea6442ab7c657229e66..b618be6d14cd2589b0dd183c62cc59c1137bf71c 100644 (file)
@@ -1607,8 +1607,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        netdev->netdev_ops = &ibmveth_netdev_ops;
        netdev->ethtool_ops = &netdev_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);
-       netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
-               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+       netdev->hw_features = NETIF_F_SG;
+       if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
+               netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                      NETIF_F_RXCSUM;
+       }
 
        netdev->features |= netdev->hw_features;
 
index 25ae0c5bce3ac957066d1531b03ecddbaa33d8f0..9e757684816d48b903f62cdac2d6a1123e6c3305 100644 (file)
@@ -2515,7 +2515,7 @@ static int mtk_remove(struct platform_device *pdev)
 }
 
 const struct of_device_id of_mtk_match[] = {
-       { .compatible = "mediatek,mt7623-eth" },
+       { .compatible = "mediatek,mt2701-eth" },
        {},
 };
 MODULE_DEVICE_TABLE(of, of_mtk_match);
index d9c9f86a30df953fa555934c5406057dcaf28960..d5a9372ed84d3127cf16c07bc43666651fe2486d 100644 (file)
@@ -1732,8 +1732,6 @@ static void mlx4_en_get_channels(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
-       memset(channel, 0, sizeof(*channel));
-
        channel->max_rx = MAX_RX_RINGS;
        channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
 
@@ -1752,10 +1750,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
        int xdp_count;
        int err = 0;
 
-       if (channel->other_count || channel->combined_count ||
-           channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
-           channel->rx_count > MAX_RX_RINGS ||
-           !channel->tx_count || !channel->rx_count)
+       if (!channel->tx_count || !channel->rx_count)
                return -EINVAL;
 
        tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
index 6c1a5cb43f8cf1254a324d1976a86efb27ebb4ef..6236ce95b8e6dfdbd9c70a11e20136a82a55014e 100644 (file)
@@ -560,7 +560,6 @@ static int mlx5e_set_channels(struct net_device *dev,
                              struct ethtool_channels *ch)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       int ncv = priv->profile->max_nch(priv->mdev);
        unsigned int count = ch->combined_count;
        bool arfs_enabled;
        bool was_opened;
@@ -571,16 +570,6 @@ static int mlx5e_set_channels(struct net_device *dev,
                            __func__);
                return -EINVAL;
        }
-       if (ch->rx_count || ch->tx_count) {
-               netdev_info(dev, "%s: separate rx/tx count not supported\n",
-                           __func__);
-               return -EINVAL;
-       }
-       if (count > ncv) {
-               netdev_info(dev, "%s: count (%d) > max (%d)\n",
-                           __func__, count, ncv);
-               return -EINVAL;
-       }
 
        if (priv->params.num_channels == count)
                return 0;
index 3d2e1a1886a56adfb4d13f036c214e9889b9c74a..fd8dff6acc12dc0da3fa720861f67129928c3b25 100644 (file)
@@ -190,6 +190,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
                return false;
        }
 
+       if (unlikely(page_is_pfmemalloc(dma_info->page)))
+               return false;
+
        cache->page_cache[cache->tail] = *dma_info;
        cache->tail = tail_next;
        return true;
index 01d0efa9c5c7419b6e2fa99ed6b99562811bcbd5..9e494a446b7ea7812a409b87f92bde19a0355286 100644 (file)
@@ -1172,7 +1172,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
 
 static int
 mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
-                                 struct mlxsw_sp_nexthop_group *nh_grp)
+                                 struct mlxsw_sp_nexthop_group *nh_grp,
+                                 bool reallocate)
 {
        u32 adj_index = nh_grp->adj_index; /* base */
        struct mlxsw_sp_nexthop *nh;
@@ -1187,7 +1188,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
                        continue;
                }
 
-               if (nh->update) {
+               if (nh->update || reallocate) {
                        err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
                                                          adj_index, nh);
                        if (err)
@@ -1248,7 +1249,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
                /* Nothing was added or removed, so no need to reallocate. Just
                 * update MAC on existing adjacency indexes.
                 */
-               err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+               err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
+                                                       false);
                if (err) {
                        dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
                        goto set_trap;
@@ -1276,7 +1278,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
        nh_grp->adj_index_valid = 1;
        nh_grp->adj_index = adj_index;
        nh_grp->ecmp_size = ecmp_size;
-       err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+       err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
        if (err) {
                dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
                goto set_trap;
index 05e32f4322eb1a46eca22e5cd53f00682c0431d9..02c5d47cfc6d4d1369c82868dc4b9f34143e1342 100644 (file)
@@ -320,7 +320,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
                list_del(&p_pkt->list_entry);
                b_last_packet = list_empty(&p_tx->active_descq);
                list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
-               if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+               if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
                        struct qed_ooo_buffer *p_buffer;
 
                        p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -332,7 +332,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
                        b_last_frag =
                                p_tx->cur_completing_bd_idx == p_pkt->bd_used;
                        tx_frag = p_pkt->bds_set[0].tx_frag;
-                       if (p_ll2_conn->gsi_enable)
+                       if (p_ll2_conn->conn.gsi_enable)
                                qed_ll2b_release_tx_gsi_packet(p_hwfn,
                                                               p_ll2_conn->
                                                               my_id,
@@ -401,7 +401,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
 
                spin_unlock_irqrestore(&p_tx->lock, flags);
                tx_frag = p_pkt->bds_set[0].tx_frag;
-               if (p_ll2_conn->gsi_enable)
+               if (p_ll2_conn->conn.gsi_enable)
                        qed_ll2b_complete_tx_gsi_packet(p_hwfn,
                                                        p_ll2_conn->my_id,
                                                        p_pkt->cookie,
@@ -573,7 +573,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
 
                list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-               if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+               if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
                        struct qed_ooo_buffer *p_buffer;
 
                        p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -761,7 +761,7 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
                rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
                                               p_buffer->vlan, bd_flags,
                                               l4_hdr_offset_w,
-                                              p_ll2_conn->tx_dest, 0,
+                                              p_ll2_conn->conn.tx_dest, 0,
                                               first_frag,
                                               p_buffer->packet_length,
                                               p_buffer, true);
@@ -881,7 +881,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
        u16 buf_idx;
        int rc = 0;
 
-       if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+       if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
                return rc;
 
        if (!rx_num_ooo_buffers)
@@ -924,7 +924,7 @@ static void
 qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
                                 struct qed_ll2_info *p_ll2_conn)
 {
-       if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+       if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
                return;
 
        qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -936,7 +936,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
 {
        struct qed_ooo_buffer *p_buffer;
 
-       if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+       if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
                return;
 
        qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -968,23 +968,19 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
 {
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
-       struct qed_ll2_info *ll2_info;
+       struct qed_ll2_conn ll2_info;
        int rc;
 
-       ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
-       if (!ll2_info)
-               return -ENOMEM;
-       ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
-       ll2_info->mtu = params->mtu;
-       ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
-       ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
-       ll2_info->tx_tc = OOO_LB_TC;
-       ll2_info->tx_dest = CORE_TX_DEST_LB;
-
-       rc = qed_ll2_acquire_connection(hwfn, ll2_info,
+       ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
+       ll2_info.mtu = params->mtu;
+       ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+       ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
+       ll2_info.tx_tc = OOO_LB_TC;
+       ll2_info.tx_dest = CORE_TX_DEST_LB;
+
+       rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
                                        QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
                                        handle);
-       kfree(ll2_info);
        if (rc) {
                DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
                goto out;
@@ -1029,7 +1025,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
                                     struct qed_ll2_info *p_ll2_conn,
                                     u8 action_on_error)
 {
-       enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+       enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
        struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
        struct core_rx_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
@@ -1055,7 +1051,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
        p_ramrod->sb_index = p_rx->rx_sb_index;
        p_ramrod->complete_event_flg = 1;
 
-       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
        DMA_REGPAIR_LE(p_ramrod->bd_base,
                       p_rx->rxq_chain.p_phys_addr);
        cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
@@ -1063,8 +1059,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
                       qed_chain_get_pbl_phys(&p_rx->rcq_chain));
 
-       p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
-       p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
+       p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
+       p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
        p_ramrod->queue_id = p_ll2_conn->queue_id;
        p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
                                                                          : 1;
@@ -1079,14 +1075,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
        }
 
        p_ramrod->action_on_error.error_type = action_on_error;
-       p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+       p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
 static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
                                     struct qed_ll2_info *p_ll2_conn)
 {
-       enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+       enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
        struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
        struct core_tx_start_ramrod_data *p_ramrod = NULL;
        struct qed_spq_entry *p_ent = NULL;
@@ -1098,7 +1094,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
        if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
                return 0;
 
-       if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+       if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
                p_ll2_conn->tx_stats_en = 0;
        else
                p_ll2_conn->tx_stats_en = 1;
@@ -1119,7 +1115,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
 
        p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
        p_ramrod->sb_index = p_tx->tx_sb_index;
-       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+       p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
        p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
        p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
 
@@ -1129,7 +1125,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
        p_ramrod->pbl_size = cpu_to_le16(pbl_size);
 
        memset(&pq_params, 0, sizeof(pq_params));
-       pq_params.core.tc = p_ll2_conn->tx_tc;
+       pq_params.core.tc = p_ll2_conn->conn.tx_tc;
        pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
 
@@ -1146,7 +1142,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
                DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
        }
 
-       p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+       p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
        return qed_spq_post(p_hwfn, p_ent, NULL);
 }
 
@@ -1247,7 +1243,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
 
        DP_VERBOSE(p_hwfn, QED_MSG_LL2,
                   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
-                  p_ll2_info->conn_type, rx_num_desc);
+                  p_ll2_info->conn.conn_type, rx_num_desc);
 
 out:
        return rc;
@@ -1285,7 +1281,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
 
        DP_VERBOSE(p_hwfn, QED_MSG_LL2,
                   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
-                  p_ll2_info->conn_type, tx_num_desc);
+                  p_ll2_info->conn.conn_type, tx_num_desc);
 
 out:
        if (rc)
@@ -1296,7 +1292,7 @@ out:
 }
 
 int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
-                              struct qed_ll2_info *p_params,
+                              struct qed_ll2_conn *p_params,
                               u16 rx_num_desc,
                               u16 tx_num_desc,
                               u8 *p_connection_handle)
@@ -1325,15 +1321,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
        if (!p_ll2_info)
                return -EBUSY;
 
-       p_ll2_info->conn_type = p_params->conn_type;
-       p_ll2_info->mtu = p_params->mtu;
-       p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
-       p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
-       p_ll2_info->tx_tc = p_params->tx_tc;
-       p_ll2_info->tx_dest = p_params->tx_dest;
-       p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
-       p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
-       p_ll2_info->gsi_enable = p_params->gsi_enable;
+       p_ll2_info->conn = *p_params;
 
        rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
        if (rc)
@@ -1394,9 +1382,9 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
 
        SET_FIELD(action_on_error,
                  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
-                 p_ll2_conn->ai_err_packet_too_big);
+                 p_ll2_conn->conn.ai_err_packet_too_big);
        SET_FIELD(action_on_error,
-                 CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
+                 CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
 
        return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
 }
@@ -1623,7 +1611,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
                   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
                   p_ll2->queue_id,
                   p_ll2->cid,
-                  p_ll2->conn_type,
+                  p_ll2->conn.conn_type,
                   prod_idx,
                   first_frag_len,
                   num_of_bds,
@@ -1699,7 +1687,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
                   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
                   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
                   p_ll2_conn->queue_id,
-                  p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
+                  p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
 }
 
 int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
@@ -1840,7 +1828,7 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
                qed_ll2_rxq_flush(p_hwfn, connection_handle);
        }
 
-       if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+       if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
                qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
 
        return rc;
@@ -2016,7 +2004,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
 
 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 {
-       struct qed_ll2_info ll2_info;
+       struct qed_ll2_conn ll2_info;
        struct qed_ll2_buffer *buffer, *tmp_buffer;
        enum qed_ll2_conn_type conn_type;
        struct qed_ptt *p_ptt;
@@ -2064,6 +2052,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 
        /* Prepare the temporary ll2 information */
        memset(&ll2_info, 0, sizeof(ll2_info));
+
        ll2_info.conn_type = conn_type;
        ll2_info.mtu = params->mtu;
        ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
@@ -2143,7 +2132,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
        }
 
        ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
-
        return 0;
 
 release_terminate_all:
index c7f2975590ee578095b2854d6b4412c16f4872e4..db3e4fc78e090f223d01a2780102c66293168b33 100644 (file)
@@ -135,15 +135,8 @@ struct qed_ll2_tx_queue {
        bool b_completing_packet;
 };
 
-struct qed_ll2_info {
-       /* Lock protecting the state of LL2 */
-       struct mutex mutex;
+struct qed_ll2_conn {
        enum qed_ll2_conn_type conn_type;
-       u32 cid;
-       u8 my_id;
-       u8 queue_id;
-       u8 tx_stats_id;
-       bool b_active;
        u16 mtu;
        u8 rx_drop_ttl0_flg;
        u8 rx_vlan_removal_en;
@@ -151,10 +144,21 @@ struct qed_ll2_info {
        enum core_tx_dest tx_dest;
        enum core_error_handle ai_err_packet_too_big;
        enum core_error_handle ai_err_no_buf;
+       u8 gsi_enable;
+};
+
+struct qed_ll2_info {
+       /* Lock protecting the state of LL2 */
+       struct mutex mutex;
+       struct qed_ll2_conn conn;
+       u32 cid;
+       u8 my_id;
+       u8 queue_id;
+       u8 tx_stats_id;
+       bool b_active;
        u8 tx_stats_en;
        struct qed_ll2_rx_queue rx_queue;
        struct qed_ll2_tx_queue tx_queue;
-       u8 gsi_enable;
 };
 
 /**
@@ -172,7 +176,7 @@ struct qed_ll2_info {
  * @return 0 on success, failure otherwise
  */
 int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
-                              struct qed_ll2_info *p_params,
+                              struct qed_ll2_conn *p_params,
                               u16 rx_num_desc,
                               u16 tx_num_desc,
                               u8 *p_connection_handle);
index bd4cad2b343b79fb3763b5a743708874a6c6783a..c3c8c5018e9397a43c8f3404b2c9568315ac7c25 100644 (file)
@@ -2632,7 +2632,7 @@ static int qed_roce_ll2_start(struct qed_dev *cdev,
 {
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        struct qed_roce_ll2_info *roce_ll2;
-       struct qed_ll2_info ll2_params;
+       struct qed_ll2_conn ll2_params;
        int rc;
 
        if (!params) {
index 89ac1e3f617599238d35fa444ff66c20abd48dc6..301f48755093bb084ce7741b24091b4c14eea67b 100644 (file)
@@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = {
        .get_mdio_data = ravb_get_mdio_data,
 };
 
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+       struct ravb_private *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &priv->stats[q];
+       struct ravb_tx_desc *desc;
+       int free_num = 0;
+       int entry;
+       u32 size;
+
+       for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+               bool txed;
+
+               entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+                                            NUM_TX_DESC);
+               desc = &priv->tx_ring[q][entry];
+               txed = desc->die_dt == DT_FEMPTY;
+               if (free_txed_only && !txed)
+                       break;
+               /* Descriptor type must be checked before all other reads */
+               dma_rmb();
+               size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+               /* Free the original skb. */
+               if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+                       dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+                                        size, DMA_TO_DEVICE);
+                       /* Last packet descriptor? */
+                       if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+                               entry /= NUM_TX_DESC;
+                               dev_kfree_skb_any(priv->tx_skb[q][entry]);
+                               priv->tx_skb[q][entry] = NULL;
+                               if (txed)
+                                       stats->tx_packets++;
+                       }
+                       free_num++;
+               }
+               if (txed)
+                       stats->tx_bytes += size;
+               desc->die_dt = DT_EEMPTY;
+       }
+       return free_num;
+}
+
 /* Free skb's and DMA buffers for Ethernet AVB */
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
@@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
        kfree(priv->rx_skb[q]);
        priv->rx_skb[q] = NULL;
 
-       /* Free TX skb ringbuffer */
-       if (priv->tx_skb[q]) {
-               for (i = 0; i < priv->num_tx_ring[q]; i++)
-                       dev_kfree_skb(priv->tx_skb[q][i]);
-       }
-       kfree(priv->tx_skb[q]);
-       priv->tx_skb[q] = NULL;
-
        /* Free aligned TX buffers */
        kfree(priv->tx_align[q]);
        priv->tx_align[q] = NULL;
 
        if (priv->rx_ring[q]) {
+               for (i = 0; i < priv->num_rx_ring[q]; i++) {
+                       struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+                       if (!dma_mapping_error(ndev->dev.parent,
+                                              le32_to_cpu(desc->dptr)))
+                               dma_unmap_single(ndev->dev.parent,
+                                                le32_to_cpu(desc->dptr),
+                                                PKT_BUF_SZ,
+                                                DMA_FROM_DEVICE);
+               }
                ring_size = sizeof(struct ravb_ex_rx_desc) *
                            (priv->num_rx_ring[q] + 1);
                dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
        }
 
        if (priv->tx_ring[q]) {
+               ravb_tx_free(ndev, q, false);
+
                ring_size = sizeof(struct ravb_tx_desc) *
                            (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
                dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
                                  priv->tx_desc_dma[q]);
                priv->tx_ring[q] = NULL;
        }
+
+       /* Free TX skb ringbuffer.
+        * SKBs are freed by ravb_tx_free() call above.
+        */
+       kfree(priv->tx_skb[q]);
+       priv->tx_skb[q] = NULL;
 }
 
 /* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_device *ndev)
        return 0;
 }
 
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
-       struct ravb_private *priv = netdev_priv(ndev);
-       struct net_device_stats *stats = &priv->stats[q];
-       struct ravb_tx_desc *desc;
-       int free_num = 0;
-       int entry;
-       u32 size;
-
-       for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-               entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-                                            NUM_TX_DESC);
-               desc = &priv->tx_ring[q][entry];
-               if (desc->die_dt != DT_FEMPTY)
-                       break;
-               /* Descriptor type must be checked before all other reads */
-               dma_rmb();
-               size = le16_to_cpu(desc->ds_tagl) & TX_DS;
-               /* Free the original skb. */
-               if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-                       dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-                                        size, DMA_TO_DEVICE);
-                       /* Last packet descriptor? */
-                       if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-                               entry /= NUM_TX_DESC;
-                               dev_kfree_skb_any(priv->tx_skb[q][entry]);
-                               priv->tx_skb[q][entry] = NULL;
-                               stats->tx_packets++;
-                       }
-                       free_num++;
-               }
-               stats->tx_bytes += size;
-               desc->die_dt = DT_EEMPTY;
-       }
-       return free_num;
-}
-
 static void ravb_get_tx_tstamp(struct net_device *ndev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
                        spin_lock_irqsave(&priv->lock, flags);
                        /* Clear TX interrupt */
                        ravb_write(ndev, ~mask, TIS);
-                       ravb_tx_free(ndev, q);
+                       ravb_tx_free(ndev, q, true);
                        netif_wake_subqueue(ndev, q);
                        mmiowb();
                        spin_unlock_irqrestore(&priv->lock, flags);
@@ -1567,7 +1582,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        priv->cur_tx[q] += NUM_TX_DESC;
        if (priv->cur_tx[q] - priv->dirty_tx[q] >
-           (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+           (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+           !ravb_tx_free(ndev, q, true))
                netif_stop_subqueue(ndev, q);
 
 exit:
index 8b6810bad54b73fc90da579521e1e20050933aac..99d3df788ce81e6f423c7458466812607df68ed1 100644 (file)
@@ -69,7 +69,6 @@ struct gtp_dev {
        struct socket           *sock0;
        struct socket           *sock1u;
 
-       struct net              *net;
        struct net_device       *dev;
 
        unsigned int            hash_size;
@@ -316,7 +315,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
 
        netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
 
-       xnet = !net_eq(gtp->net, dev_net(gtp->dev));
+       xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
 
        switch (udp_sk(sk)->encap_type) {
        case UDP_ENCAP_GTP0:
@@ -612,7 +611,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
                                    pktinfo.fl4.saddr, pktinfo.fl4.daddr,
                                    pktinfo.iph->tos,
                                    ip4_dst_hoplimit(&pktinfo.rt->dst),
-                                   htons(IP_DF),
+                                   0,
                                    pktinfo.gtph_port, pktinfo.gtph_port,
                                    true, false);
                break;
@@ -658,7 +657,7 @@ static void gtp_link_setup(struct net_device *dev)
 static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
 static void gtp_hashtable_free(struct gtp_dev *gtp);
 static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-                           int fd_gtp0, int fd_gtp1, struct net *src_net);
+                           int fd_gtp0, int fd_gtp1);
 
 static int gtp_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[])
@@ -675,7 +674,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
        fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
        fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
 
-       err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
+       err = gtp_encap_enable(dev, gtp, fd0, fd1);
        if (err < 0)
                goto out_err;
 
@@ -821,7 +820,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
 }
 
 static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
-                           int fd_gtp0, int fd_gtp1, struct net *src_net)
+                           int fd_gtp0, int fd_gtp1)
 {
        struct udp_tunnel_sock_cfg tuncfg = {NULL};
        struct socket *sock0, *sock1u;
@@ -858,7 +857,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
 
        gtp->sock0 = sock0;
        gtp->sock1u = sock1u;
-       gtp->net = src_net;
 
        tuncfg.sk_user_data = gtp;
        tuncfg.encap_rcv = gtp_encap_recv;
@@ -1376,3 +1374,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
 MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
 MODULE_ALIAS_RTNL_LINK("gtp");
+MODULE_ALIAS_GENL_FAMILY("gtp");
index 5c26653eceb5660c0cd12b1a6669a58525fe7207..4026185658381df004a7d641e2be7bcb9a45b509 100644 (file)
@@ -825,7 +825,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
                        return -EINVAL;
 
                if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
-                                           macvtap_is_little_endian(q)))
+                                           macvtap_is_little_endian(q), true))
                        BUG();
 
                if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
index e741bf614c4eeca4f4c041bff53880a5ca8c7439..b0492ef2cdaa0d360928e3e65a14808e336c40ec 100644 (file)
@@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver");
 MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
 MODULE_LICENSE("GPL");
 
+static int bcm63xx_config_intr(struct phy_device *phydev)
+{
+       int reg, err;
+
+       reg = phy_read(phydev, MII_BCM63XX_IR);
+       if (reg < 0)
+               return reg;
+
+       if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+               reg &= ~MII_BCM63XX_IR_GMASK;
+       else
+               reg |= MII_BCM63XX_IR_GMASK;
+
+       err = phy_write(phydev, MII_BCM63XX_IR, reg);
+       return err;
+}
+
 static int bcm63xx_config_init(struct phy_device *phydev)
 {
        int reg, err;
@@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = {
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = bcm_phy_ack_intr,
-       .config_intr    = bcm_phy_config_intr,
+       .config_intr    = bcm63xx_config_intr,
 }, {
        /* same phy as above, with just a different OUI */
        .phy_id         = 0x002bdc00,
@@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = {
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = bcm_phy_ack_intr,
-       .config_intr    = bcm_phy_config_intr,
+       .config_intr    = bcm63xx_config_intr,
 } };
 
 module_phy_driver(bcm63xx_driver);
index 800b39f0627943343c4276de637b30be4692352f..a10d0e7fc5f7010537560552cd822e59fd2d8469 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/phy.h>
 
 #define TI_DP83848C_PHY_ID             0x20005ca0
+#define TI_DP83620_PHY_ID              0x20005ce0
 #define NS_DP83848C_PHY_ID             0x20005c90
 #define TLK10X_PHY_ID                  0x2000a210
 #define TI_DP83822_PHY_ID              0x2000a240
@@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev)
 static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
        { TI_DP83848C_PHY_ID, 0xfffffff0 },
        { NS_DP83848C_PHY_ID, 0xfffffff0 },
+       { TI_DP83620_PHY_ID, 0xfffffff0 },
        { TLK10X_PHY_ID, 0xfffffff0 },
        { TI_DP83822_PHY_ID, 0xfffffff0 },
        { }
@@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
 static struct phy_driver dp83848_driver[] = {
        DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
        DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
+       DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
        DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
        DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
 };
index b5b73ff4329ada5e98cae3dfd8ad46d9f9c8c234..a3e3733813a798073c8762216eaf919edf219655 100644 (file)
@@ -2094,6 +2094,8 @@ static struct phy_driver marvell_drivers[] = {
                .ack_interrupt = &marvell_ack_interrupt,
                .config_intr = &marvell_config_intr,
                .did_interrupt = &m88e1121_did_interrupt,
+               .get_wol = &m88e1318_get_wol,
+               .set_wol = &m88e1318_set_wol,
                .resume = &marvell_resume,
                .suspend = &marvell_suspend,
                .get_sset_count = marvell_get_sset_count,
index 9a77289109b721ed5190f214bb4d6f22d2098424..e55809c5beb71a6c1a3a0a60420cb3c263f6a33b 100644 (file)
@@ -1008,6 +1008,20 @@ static struct phy_driver ksphy_driver[] = {
        .get_stats      = kszphy_get_stats,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
+}, {
+       .phy_id         = PHY_ID_KSZ8795,
+       .phy_id_mask    = MICREL_PHY_ID_MASK,
+       .name           = "Micrel KSZ8795",
+       .features       = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+       .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+       .config_init    = kszphy_config_init,
+       .config_aneg    = ksz8873mll_config_aneg,
+       .read_status    = ksz8873mll_read_status,
+       .get_sset_count = kszphy_get_sset_count,
+       .get_strings    = kszphy_get_strings,
+       .get_stats      = kszphy_get_stats,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
 } };
 
 module_phy_driver(ksphy_driver);
index 48da6e93c3f783e07f61ae24151e3114ac8dc1ae..7cc1b7dcfe058fe48cb7209499caa1073ad2c1e9 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/mdio.h>
@@ -649,14 +650,18 @@ void phy_start_machine(struct phy_device *phydev)
  * phy_trigger_machine - trigger the state machine to run
  *
  * @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
  *
  * Description: There has been a change in state which requires that the
  *   state machine runs.
  */
 
-static void phy_trigger_machine(struct phy_device *phydev)
+static void phy_trigger_machine(struct phy_device *phydev, bool sync)
 {
-       cancel_delayed_work_sync(&phydev->state_queue);
+       if (sync)
+               cancel_delayed_work_sync(&phydev->state_queue);
+       else
+               cancel_delayed_work(&phydev->state_queue);
        queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
 }
 
@@ -693,7 +698,7 @@ static void phy_error(struct phy_device *phydev)
        phydev->state = PHY_HALTED;
        mutex_unlock(&phydev->lock);
 
-       phy_trigger_machine(phydev);
+       phy_trigger_machine(phydev, false);
 }
 
 /**
@@ -840,7 +845,7 @@ void phy_change(struct phy_device *phydev)
        }
 
        /* reschedule state queue work to run as soon as possible */
-       phy_trigger_machine(phydev);
+       phy_trigger_machine(phydev, true);
        return;
 
 ignore:
@@ -942,7 +947,7 @@ void phy_start(struct phy_device *phydev)
        if (do_resume)
                phy_resume(phydev);
 
-       phy_trigger_machine(phydev);
+       phy_trigger_machine(phydev, true);
 }
 EXPORT_SYMBOL(phy_start);
 
index fa62bdf2f52694dece215d2b62e548026e482c38..94ca42e630bbead0c4fcae0b2ef6c8b19296bb69 100644 (file)
@@ -12,6 +12,7 @@
  */
 #include <linux/leds.h>
 #include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
 #include <linux/netdevice.h>
 
 static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
@@ -102,8 +103,10 @@ int phy_led_triggers_register(struct phy_device *phy)
                                            sizeof(struct phy_led_trigger) *
                                                   phy->phy_num_led_triggers,
                                            GFP_KERNEL);
-       if (!phy->phy_led_triggers)
-               return -ENOMEM;
+       if (!phy->phy_led_triggers) {
+               err = -ENOMEM;
+               goto out_clear;
+       }
 
        for (i = 0; i < phy->phy_num_led_triggers; i++) {
                err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i],
@@ -120,6 +123,8 @@ out_unreg:
        while (i--)
                phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
        devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
+out_clear:
+       phy->phy_num_led_triggers = 0;
        return err;
 }
 EXPORT_SYMBOL_GPL(phy_led_triggers_register);
index 13890ac3cb37dc9ebe07caf432181bda9d6e038b..8a7d6b905362389ace72731c2a8e1f163c298c47 100644 (file)
@@ -1396,7 +1396,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                        return -EINVAL;
 
                if (virtio_net_hdr_from_skb(skb, &gso,
-                                           tun_is_little_endian(tun))) {
+                                           tun_is_little_endian(tun), true)) {
                        struct skb_shared_info *sinfo = skb_shinfo(skb);
                        pr_err("unexpected GSO type: "
                               "0x%x, gso_size %d, hdr_len %d\n",
index 620ba8e530b58d612f9de4447124dc5ae630ed82..f5552aaaa77a59bf558da6c22218a919bf99ec94 100644 (file)
@@ -531,6 +531,7 @@ static const struct driver_info wwan_info = {
 #define SAMSUNG_VENDOR_ID      0x04e8
 #define LENOVO_VENDOR_ID       0x17ef
 #define NVIDIA_VENDOR_ID       0x0955
+#define HP_VENDOR_ID           0x03f0
 
 static const struct usb_device_id      products[] = {
 /* BLACKLIST !!
@@ -677,6 +678,13 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* AnyDATA ADU960S - handled by qmi_wwan */
 {
        USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
index 6fe1cdb0174f6cf91f8445dbe86a3e977d327255..24d5272cdce51091a26a116205d8ca2ae5be03d9 100644 (file)
@@ -654,6 +654,13 @@ static const struct usb_device_id products[] = {
                                              USB_CDC_PROTO_NONE),
                .driver_info        = (unsigned long)&qmi_wwan_info,
        },
+       {       /* HP lt2523 (Novatel E371) */
+               USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
+                                             USB_CLASS_COMM,
+                                             USB_CDC_SUBCLASS_ETHERNET,
+                                             USB_CDC_PROTO_NONE),
+               .driver_info        = (unsigned long)&qmi_wwan_info,
+       },
        {       /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
                USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
                .driver_info = (unsigned long)&qmi_wwan_info,
index d59d7737708b1c4eb8e4d2ea0ce3137f06f7a752..986243c932ccd6fe19c592805c1c63274f5e5555 100644 (file)
@@ -32,7 +32,7 @@
 #define NETNEXT_VERSION                "08"
 
 /* Information for net */
-#define NET_VERSION            "6"
+#define NET_VERSION            "8"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
                napi_complete(napi);
                if (!list_empty(&tp->rx_done))
                        napi_schedule(napi);
+               else if (!skb_queue_empty(&tp->tx_queue) &&
+                        !list_empty(&tp->tx_free))
+                       napi_schedule(napi);
        }
 
        return work_done;
@@ -3155,10 +3158,13 @@ static void set_carrier(struct r8152 *tp)
                if (!netif_carrier_ok(netdev)) {
                        tp->rtl_ops.enable(tp);
                        set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+                       netif_stop_queue(netdev);
                        napi_disable(&tp->napi);
                        netif_carrier_on(netdev);
                        rtl_start_rx(tp);
                        napi_enable(&tp->napi);
+                       netif_wake_queue(netdev);
+                       netif_info(tp, link, netdev, "carrier on\n");
                }
        } else {
                if (netif_carrier_ok(netdev)) {
@@ -3166,6 +3172,7 @@ static void set_carrier(struct r8152 *tp)
                        napi_disable(&tp->napi);
                        tp->rtl_ops.disable(tp);
                        napi_enable(&tp->napi);
+                       netif_info(tp, link, netdev, "carrier off\n");
                }
        }
 }
@@ -3515,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
        if (!netif_running(netdev))
                return 0;
 
+       netif_stop_queue(netdev);
        napi_disable(&tp->napi);
        clear_bit(WORK_ENABLE, &tp->flags);
        usb_kill_urb(tp->intr_urb);
        cancel_delayed_work_sync(&tp->schedule);
        if (netif_carrier_ok(netdev)) {
-               netif_stop_queue(netdev);
                mutex_lock(&tp->control);
                tp->rtl_ops.disable(tp);
                mutex_unlock(&tp->control);
@@ -3545,12 +3552,17 @@ static int rtl8152_post_reset(struct usb_interface *intf)
        if (netif_carrier_ok(netdev)) {
                mutex_lock(&tp->control);
                tp->rtl_ops.enable(tp);
+               rtl_start_rx(tp);
                rtl8152_set_rx_mode(netdev);
                mutex_unlock(&tp->control);
-               netif_wake_queue(netdev);
        }
 
        napi_enable(&tp->napi);
+       netif_wake_queue(netdev);
+       usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+
+       if (!list_empty(&tp->rx_done))
+               napi_schedule(&tp->napi);
 
        return 0;
 }
@@ -3572,6 +3584,8 @@ static bool delay_autosuspend(struct r8152 *tp)
         */
        if (!sw_linking && tp->rtl_ops.in_nway(tp))
                return true;
+       else if (!skb_queue_empty(&tp->tx_queue))
+               return true;
        else
                return false;
 }
@@ -3581,10 +3595,15 @@ static int rtl8152_runtime_suspend(struct r8152 *tp)
        struct net_device *netdev = tp->netdev;
        int ret = 0;
 
+       set_bit(SELECTIVE_SUSPEND, &tp->flags);
+       smp_mb__after_atomic();
+
        if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
                u32 rcr = 0;
 
                if (delay_autosuspend(tp)) {
+                       clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+                       smp_mb__after_atomic();
                        ret = -EBUSY;
                        goto out1;
                }
@@ -3601,6 +3620,8 @@ static int rtl8152_runtime_suspend(struct r8152 *tp)
                        if (!(ocp_data & RXFIFO_EMPTY)) {
                                rxdy_gated_en(tp, false);
                                ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+                               clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+                               smp_mb__after_atomic();
                                ret = -EBUSY;
                                goto out1;
                        }
@@ -3620,8 +3641,6 @@ static int rtl8152_runtime_suspend(struct r8152 *tp)
                }
        }
 
-       set_bit(SELECTIVE_SUSPEND, &tp->flags);
-
 out1:
        return ret;
 }
@@ -3677,12 +3696,15 @@ static int rtl8152_resume(struct usb_interface *intf)
        if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                        tp->rtl_ops.autosuspend_en(tp, false);
-                       clear_bit(SELECTIVE_SUSPEND, &tp->flags);
                        napi_disable(&tp->napi);
                        set_bit(WORK_ENABLE, &tp->flags);
                        if (netif_carrier_ok(tp->netdev))
                                rtl_start_rx(tp);
                        napi_enable(&tp->napi);
+                       clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+                       smp_mb__after_atomic();
+                       if (!list_empty(&tp->rx_done))
+                               napi_schedule(&tp->napi);
                } else {
                        tp->rtl_ops.up(tp);
                        netif_carrier_off(tp->netdev);
index f9bf94887ff159b187cfe547b0601298d4e7f1f0..bd22cf306a9267cecf8c7ce03dccff8ebd16089e 100644 (file)
@@ -49,8 +49,16 @@ module_param(gso, bool, 0444);
  */
 DECLARE_EWMA(pkt_len, 1, 64)
 
+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
 /* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+                                  1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
 
 #define VIRTNET_DRIVER_VERSION "1.0.0"
 
@@ -1110,7 +1118,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
                hdr = skb_vnet_hdr(skb);
 
        if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
-                                   virtio_is_little_endian(vi->vdev)))
+                                   virtio_is_little_endian(vi->vdev), false))
                BUG();
 
        if (vi->mergeable_rx_bufs)
@@ -1710,6 +1718,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
        u16 xdp_qp = 0, curr_qp;
        int i, err;
 
+       if (prog && prog->xdp_adjust_head) {
+               netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n");
+               return -EOPNOTSUPP;
+       }
+
        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
            virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
@@ -1893,8 +1906,12 @@ static void free_receive_page_frags(struct virtnet_info *vi)
                        put_page(vi->rq[i].alloc_frag.page);
 }
 
-static bool is_xdp_queue(struct virtnet_info *vi, int q)
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
 {
+       /* For small receive mode always use kfree_skb variants */
+       if (!vi->mergeable_rx_bufs)
+               return false;
+
        if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
                return false;
        else if (q < vi->curr_queue_pairs)
@@ -1911,7 +1928,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
        for (i = 0; i < vi->max_queue_pairs; i++) {
                struct virtqueue *vq = vi->sq[i].vq;
                while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
-                       if (!is_xdp_queue(vi, i))
+                       if (!is_xdp_raw_buffer_queue(vi, i))
                                dev_kfree_skb(buf);
                        else
                                put_page(virt_to_head_page(buf));
index 19b1653e1bd66f661bc78742f0a77f756873c9ce..2e48ce22eabfef6dbd8ca3497731b42257264ba9 100644 (file)
@@ -2268,7 +2268,7 @@ static void vxlan_cleanup(unsigned long arg)
                                = container_of(p, struct vxlan_fdb, hlist);
                        unsigned long timeout;
 
-                       if (f->state & NUD_PERMANENT)
+                       if (f->state & (NUD_PERMANENT | NUD_NOARP))
                                continue;
 
                        timeout = f->used + vxlan->cfg.age_interval * HZ;
@@ -2354,7 +2354,7 @@ static int vxlan_open(struct net_device *dev)
 }
 
 /* Purge the forwarding table */
-static void vxlan_flush(struct vxlan_dev *vxlan)
+static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
 {
        unsigned int h;
 
@@ -2364,6 +2364,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
                hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
                        struct vxlan_fdb *f
                                = container_of(p, struct vxlan_fdb, hlist);
+                       if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
+                               continue;
                        /* the all_zeros_mac entry is deleted at vxlan_uninit */
                        if (!is_zero_ether_addr(f->eth_addr))
                                vxlan_fdb_destroy(vxlan, f);
@@ -2385,7 +2387,7 @@ static int vxlan_stop(struct net_device *dev)
 
        del_timer_sync(&vxlan->age_timer);
 
-       vxlan_flush(vxlan);
+       vxlan_flush(vxlan, false);
        vxlan_sock_release(vxlan);
 
        return ret;
@@ -2890,7 +2892,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
        memcpy(&vxlan->cfg, conf, sizeof(*conf));
        if (!vxlan->cfg.dst_port) {
                if (conf->flags & VXLAN_F_GPE)
-                       vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
+                       vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
                else
                        vxlan->cfg.dst_port = default_port;
        }
@@ -3058,6 +3060,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
+       vxlan_flush(vxlan, true);
+
        spin_lock(&vn->sock_lock);
        if (!hlist_unhashed(&vxlan->hlist))
                hlist_del_rcu(&vxlan->hlist);
index e30ffd29b7e913f2514c4c925979ffd474190acc..579521327b0357d87dcc1baa4e0ff801e1401fe4 100644 (file)
@@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = vif->num_queues;
        unsigned long rx_bytes = 0;
        unsigned long rx_packets = 0;
        unsigned long tx_bytes = 0;
        unsigned long tx_packets = 0;
        unsigned int index;
 
+       spin_lock(&vif->lock);
        if (vif->queues == NULL)
                goto out;
 
        /* Aggregate tx and rx stats from each queue */
-       for (index = 0; index < num_queues; ++index) {
+       for (index = 0; index < vif->num_queues; ++index) {
                queue = &vif->queues[index];
                rx_bytes += queue->stats.rx_bytes;
                rx_packets += queue->stats.rx_packets;
@@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
        }
 
 out:
+       spin_unlock(&vif->lock);
+
        vif->dev->stats.rx_bytes = rx_bytes;
        vif->dev->stats.rx_packets = rx_packets;
        vif->dev->stats.tx_bytes = tx_bytes;
index 3124eaec942745fe9eb6cd4a49ee8bc633d80e67..85b742e1c42fa75bc771db4e8b91f80f3fe68d75 100644 (file)
@@ -493,11 +493,22 @@ static int backend_create_xenvif(struct backend_info *be)
 static void backend_disconnect(struct backend_info *be)
 {
        if (be->vif) {
+               unsigned int queue_index;
+
                xen_unregister_watchers(be->vif);
 #ifdef CONFIG_DEBUG_FS
                xenvif_debugfs_delif(be->vif);
 #endif /* CONFIG_DEBUG_FS */
                xenvif_disconnect_data(be->vif);
+               for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+                       xenvif_deinit_queue(&be->vif->queues[queue_index]);
+
+               spin_lock(&be->vif->lock);
+               vfree(be->vif->queues);
+               be->vif->num_queues = 0;
+               be->vif->queues = NULL;
+               spin_unlock(&be->vif->lock);
+
                xenvif_disconnect_ctrl(be->vif);
        }
 }
@@ -1034,6 +1045,8 @@ static void connect(struct backend_info *be)
 err:
        if (be->vif->num_queues > 0)
                xenvif_disconnect_data(be->vif); /* Clean up existing queues */
+       for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+               xenvif_deinit_queue(&be->vif->queues[queue_index]);
        vfree(be->vif->queues);
        be->vif->queues = NULL;
        be->vif->num_queues = 0;
index 40f26b69beb11459f0566fc1d1d739aa75e643bf..2c7c29fa268d6eb1e7a865590edd5c01f087b4a8 100644 (file)
@@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
        queue->rx.req_prod_pvt = req_prod;
 
        /* Not enough requests? Try again later. */
-       if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+       if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
                mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
                return;
        }
index 6307088b375f2d899002f9fc9fae16c387357598..a518cb1b59d4238b675fccd695f45003af380296 100644 (file)
@@ -957,6 +957,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 {
        resource_size_t allocated = 0, available = 0;
        struct nd_region *nd_region = to_nd_region(dev->parent);
+       struct nd_namespace_common *ndns = to_ndns(dev);
        struct nd_mapping *nd_mapping;
        struct nvdimm_drvdata *ndd;
        struct nd_label_id label_id;
@@ -964,7 +965,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
        u8 *uuid = NULL;
        int rc, i;
 
-       if (dev->driver || to_ndns(dev)->claim)
+       if (dev->driver || ndns->claim)
                return -EBUSY;
 
        if (is_namespace_pmem(dev)) {
@@ -1034,20 +1035,16 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 
                nd_namespace_pmem_set_resource(nd_region, nspm,
                                val * nd_region->ndr_mappings);
-       } else if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               /*
-                * Try to delete the namespace if we deleted all of its
-                * allocation, this is not the seed device for the
-                * region, and it is not actively claimed by a btt
-                * instance.
-                */
-               if (val == 0 && nd_region->ns_seed != dev
-                               && !nsblk->common.claim)
-                       nd_device_unregister(dev, ND_ASYNC);
        }
 
+       /*
+        * Try to delete the namespace if we deleted all of its
+        * allocation, this is not the seed device for the region, and
+        * it is not actively claimed by a btt instance.
+        */
+       if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
+               nd_device_unregister(dev, ND_ASYNC);
+
        return rc;
 }
 
index 7282d7495bf1f0a1bf6685012dafb1d9cc60bfa4..5b536be5a12eb97023745a59f65283280b7b3675 100644 (file)
@@ -90,7 +90,9 @@ static int read_pmem(struct page *page, unsigned int off,
 
        rc = memcpy_from_pmem(mem + off, pmem_addr, len);
        kunmap_atomic(mem);
-       return rc;
+       if (rc)
+               return -EIO;
+       return 0;
 }
 
 static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
index fcc9dcfdf67517d1352bef388a50d2dbf6c9a129..e65041c640cbc5bad3c284b77690605f071edfc3 100644 (file)
@@ -1663,13 +1663,13 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
                return 0;
 
        freq->sg_table.sgl = freq->first_sgl;
-       ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
-                       freq->sg_table.sgl);
+       ret = sg_alloc_table_chained(&freq->sg_table,
+                       blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
        if (ret)
                return -ENOMEM;
 
        op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
-       WARN_ON(op->nents > rq->nr_phys_segments);
+       WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
        dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
                                op->nents, dir);
index 6f5074153dcd9ea921f35751bbd622c18f227e49..be8c800078e2a6cbffa43208aa0326994d8e4bf5 100644 (file)
@@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item)
 {
        struct nvmet_subsys *subsys = to_subsys(item);
 
+       nvmet_subsys_del_ctrls(subsys);
        nvmet_subsys_put(subsys);
 }
 
index b1d66ed655c9ec36261fb4d31bca06555cccca39..fc5ba2f9e15f47fe8bd13795bdb9d6caaa532b93 100644 (file)
@@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
        pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
                ctrl->cntlid, ctrl->kato);
 
-       ctrl->ops->delete_ctrl(ctrl);
+       nvmet_ctrl_fatal_error(ctrl);
 }
 
 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
        list_del(&ctrl->subsys_entry);
        mutex_unlock(&subsys->lock);
 
+       flush_work(&ctrl->async_event_work);
+       cancel_work_sync(&ctrl->fatal_err_work);
+
        ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
        nvmet_subsys_put(subsys);
 
@@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref)
        kfree(subsys);
 }
 
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
+{
+       struct nvmet_ctrl *ctrl;
+
+       mutex_lock(&subsys->lock);
+       list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+               ctrl->ops->delete_ctrl(ctrl);
+       mutex_unlock(&subsys->lock);
+}
+
 void nvmet_subsys_put(struct nvmet_subsys *subsys)
 {
        kref_put(&subsys->ref, nvmet_subsys_free);
index 173e842f19c975a4a849c4120fcfcdcee73c26c1..ba57f9852bde33b0ff3d0655d4c08313632a3a8f 100644 (file)
@@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
                        (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
        struct fcnvme_ls_disconnect_acc *acc =
                        (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
-       struct nvmet_fc_tgt_queue *queue;
+       struct nvmet_fc_tgt_queue *queue = NULL;
        struct nvmet_fc_tgt_assoc *assoc;
        int ret = 0;
        bool del_assoc = false;
@@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
                assoc = nvmet_fc_find_target_assoc(tgtport,
                                be64_to_cpu(rqst->associd.association_id));
                iod->assoc = assoc;
-               if (!assoc)
+               if (assoc) {
+                       if (rqst->discon_cmd.scope ==
+                                       FCNVME_DISCONN_CONNECTION) {
+                               queue = nvmet_fc_find_target_queue(tgtport,
+                                               be64_to_cpu(
+                                                       rqst->discon_cmd.id));
+                               if (!queue) {
+                                       nvmet_fc_tgt_a_put(assoc);
+                                       ret = VERR_NO_CONN;
+                               }
+                       }
+               } else
                        ret = VERR_NO_ASSOC;
        }
 
@@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
                        FCNVME_LS_DISCONNECT);
 
 
-       if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
-               queue = nvmet_fc_find_target_queue(tgtport,
-                                       be64_to_cpu(rqst->discon_cmd.id));
-               if (queue) {
-                       int qid = queue->qid;
+       /* are we to delete a Connection ID (queue) */
+       if (queue) {
+               int qid = queue->qid;
 
-                       nvmet_fc_delete_target_queue(queue);
+               nvmet_fc_delete_target_queue(queue);
 
-                       /* release the get taken by find_target_queue */
-                       nvmet_fc_tgt_q_put(queue);
+               /* release the get taken by find_target_queue */
+               nvmet_fc_tgt_q_put(queue);
 
-                       /* tear association down if io queue terminated */
-                       if (!qid)
-                               del_assoc = true;
-               }
+               /* tear association down if io queue terminated */
+               if (!qid)
+                       del_assoc = true;
        }
 
        /* release get taken in nvmet_fc_find_target_assoc */
index 23d5eb1c944f64c485fef8551a41a72151492f2c..cc7ad06b43a78a029dd76fac575c3f6ee57c9e92 100644 (file)
@@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
                enum nvme_subsys_type type);
 void nvmet_subsys_put(struct nvmet_subsys *subsys);
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
 
 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
 void nvmet_put_namespace(struct nvmet_ns *ns);
index 8c3760a78ac080af522afb5892d471063243bd7b..60990220bd831074bc3c8fdbd044ee3aed37a2db 100644 (file)
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
 {
        struct ib_recv_wr *bad_wr;
 
+       ib_dma_sync_single_for_device(ndev->device,
+               cmd->sge[0].addr, cmd->sge[0].length,
+               DMA_FROM_DEVICE);
+
        if (ndev->srq)
                return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
        return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
                first_wr = &rsp->send_wr;
 
        nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+       ib_dma_sync_single_for_device(rsp->queue->dev->device,
+               rsp->send_sge.addr, rsp->send_sge.length,
+               DMA_TO_DEVICE);
+
        if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
                pr_err("sending cmd response failed\n");
                nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
        cmd->n_rdma = 0;
        cmd->req.port = queue->port;
 
+
+       ib_dma_sync_single_for_cpu(queue->dev->device,
+               cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+               DMA_FROM_DEVICE);
+       ib_dma_sync_single_for_cpu(queue->dev->device,
+               cmd->send_sge.addr, cmd->send_sge.length,
+               DMA_TO_DEVICE);
+
        if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
                        &queue->nvme_sq, &nvmet_rdma_ops))
                return;
index 1f38d0836751af9f823d848218d7dc17d9dad43f..f1b633bce525f9752bcb2cba15a5e1e770c89c0f 100644 (file)
@@ -517,7 +517,7 @@ static int xgene_msi_probe(struct platform_device *pdev)
 
        rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
                               xgene_msi_hwirq_alloc, NULL);
-       if (rc)
+       if (rc < 0)
                goto err_cpuhp;
        pci_xgene_online = rc;
        rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
index bed19994c1e94d4e32c134e58133c4acd8b8bd88..af8f6e92e8851ca84b459e4587508960b225d336 100644 (file)
@@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 {
        u32 val;
 
-       /* get iATU unroll support */
-       pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
-       dev_dbg(pp->dev, "iATU unroll: %s\n",
-               pp->iatu_unroll_enabled ? "enabled" : "disabled");
-
        /* set the number of lanes */
        val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
        val &= ~PORT_LINK_MODE_MASK;
@@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
         * we should not program the ATU here.
         */
        if (!pp->ops->rd_other_conf) {
+               /* get iATU unroll support */
+               pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
+               dev_dbg(pp->dev, "iATU unroll: %s\n",
+                       pp->iatu_unroll_enabled ? "enabled" : "disabled");
+
                dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
                                          PCIE_ATU_TYPE_MEM, pp->mem_base,
                                          pp->mem_bus_addr, pp->mem_size);
index e164b5c9f0f03d953e825150977fe0c571b40690..204960e70333f1dbe49d03ffffac7393f06a37c4 100644 (file)
@@ -1169,6 +1169,7 @@ void set_pcie_port_type(struct pci_dev *pdev)
        pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
        if (!pos)
                return;
+
        pdev->pcie_cap = pos;
        pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
        pdev->pcie_flags_reg = reg16;
@@ -1176,13 +1177,14 @@ void set_pcie_port_type(struct pci_dev *pdev)
        pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
 
        /*
-        * A Root Port is always the upstream end of a Link.  No PCIe
-        * component has two Links.  Two Links are connected by a Switch
-        * that has a Port on each Link and internal logic to connect the
-        * two Ports.
+        * A Root Port or a PCI-to-PCIe bridge is always the upstream end
+        * of a Link.  No PCIe component has two Links.  Two Links are
+        * connected by a Switch that has a Port on each Link and internal
+        * logic to connect the two Ports.
         */
        type = pci_pcie_type(pdev);
-       if (type == PCI_EXP_TYPE_ROOT_PORT)
+       if (type == PCI_EXP_TYPE_ROOT_PORT ||
+           type == PCI_EXP_TYPE_PCIE_BRIDGE)
                pdev->has_secondary_link = 1;
        else if (type == PCI_EXP_TYPE_UPSTREAM ||
                 type == PCI_EXP_TYPE_DOWNSTREAM) {
index 37300634b7d2c853a05f5dcecce6b9d46d187b59..c123488266ce74883ed8ba972b43103d136bb66e 100644 (file)
@@ -1092,6 +1092,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
        enum pin_config_param param = pinconf_to_config_param(*config);
        void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
        unsigned long flags;
        u32 conf, pull, val, debounce;
        u16 arg = 0;
@@ -1128,7 +1129,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
                        return -EINVAL;
 
                raw_spin_lock_irqsave(&vg->lock, flags);
-               debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG));
+               debounce = readl(db_reg);
                raw_spin_unlock_irqrestore(&vg->lock, flags);
 
                switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
@@ -1176,6 +1177,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
        unsigned int param, arg;
        void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
        unsigned long flags;
        u32 conf, val, debounce;
        int i, ret = 0;
@@ -1238,36 +1240,40 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
 
                        break;
                case PIN_CONFIG_INPUT_DEBOUNCE:
-                       debounce = readl(byt_gpio_reg(vg, offset,
-                                                     BYT_DEBOUNCE_REG));
-                       conf &= ~BYT_DEBOUNCE_PULSE_MASK;
+                       debounce = readl(db_reg);
+                       debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
 
                        switch (arg) {
+                       case 0:
+                               conf &= BYT_DEBOUNCE_EN;
+                               break;
                        case 375:
-                               conf |= BYT_DEBOUNCE_PULSE_375US;
+                               debounce |= BYT_DEBOUNCE_PULSE_375US;
                                break;
                        case 750:
-                               conf |= BYT_DEBOUNCE_PULSE_750US;
+                               debounce |= BYT_DEBOUNCE_PULSE_750US;
                                break;
                        case 1500:
-                               conf |= BYT_DEBOUNCE_PULSE_1500US;
+                               debounce |= BYT_DEBOUNCE_PULSE_1500US;
                                break;
                        case 3000:
-                               conf |= BYT_DEBOUNCE_PULSE_3MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_3MS;
                                break;
                        case 6000:
-                               conf |= BYT_DEBOUNCE_PULSE_6MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_6MS;
                                break;
                        case 12000:
-                               conf |= BYT_DEBOUNCE_PULSE_12MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_12MS;
                                break;
                        case 24000:
-                               conf |= BYT_DEBOUNCE_PULSE_24MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_24MS;
                                break;
                        default:
                                ret = -EINVAL;
                        }
 
+                       if (!ret)
+                               writel(debounce, db_reg);
                        break;
                default:
                        ret = -ENOTSUPP;
@@ -1617,6 +1623,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
 
 static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
 {
+       struct gpio_chip *gc = &vg->chip;
+       struct device *dev = &vg->pdev->dev;
        void __iomem *reg;
        u32 base, value;
        int i;
@@ -1638,10 +1646,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
                }
 
                value = readl(reg);
-               if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
-                   !(value & BYT_DIRECT_IRQ_EN)) {
+               if (value & BYT_DIRECT_IRQ_EN) {
+                       clear_bit(i, gc->irq_valid_mask);
+                       dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
+               } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
                        byt_gpio_clear_triggering(vg, i);
-                       dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
+                       dev_dbg(dev, "disabling GPIO %d\n", i);
                }
        }
 
@@ -1680,6 +1690,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
        gc->can_sleep   = false;
        gc->parent      = &vg->pdev->dev;
        gc->ngpio       = vg->soc_data->npins;
+       gc->irq_need_valid_mask = true;
 
 #ifdef CONFIG_PM_SLEEP
        vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
index 59cb7a6fc5bef316d042f93da72792edca2ea8d9..901b356b09d71679a2b4a03f7cd57b30a22fa6f4 100644 (file)
@@ -19,7 +19,7 @@
 
 #define BXT_PAD_OWN    0x020
 #define BXT_HOSTSW_OWN 0x080
-#define BXT_PADCFGLOCK 0x090
+#define BXT_PADCFGLOCK 0x060
 #define BXT_GPI_IE     0x110
 
 #define BXT_COMMUNITY(s, e)                            \
index 1e139672f1af9da0fa7ff4af1a919395e2ea6957..6df35dcb29aea68c0ddec6cbd29bb1c9a3abd56c 100644 (file)
@@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
        return 0;
 }
 
+static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
+{
+       u32 value;
+
+       value = readl(padcfg0);
+       if (input) {
+               value &= ~PADCFG0_GPIORXDIS;
+               value |= PADCFG0_GPIOTXDIS;
+       } else {
+               value &= ~PADCFG0_GPIOTXDIS;
+               value |= PADCFG0_GPIORXDIS;
+       }
+       writel(value, padcfg0);
+}
+
 static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
                                     struct pinctrl_gpio_range *range,
                                     unsigned pin)
@@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
        /* Disable SCI/SMI/NMI generation */
        value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
        value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
-       /* Disable TX buffer and enable RX (this will be input) */
-       value &= ~PADCFG0_GPIORXDIS;
-       value |= PADCFG0_GPIOTXDIS;
        writel(value, padcfg0);
 
+       /* Disable TX buffer and enable RX (this will be input) */
+       __intel_gpio_set_direction(padcfg0, true);
+
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
        return 0;
@@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
        struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
        void __iomem *padcfg0;
        unsigned long flags;
-       u32 value;
 
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
-
-       value = readl(padcfg0);
-       if (input)
-               value |= PADCFG0_GPIOTXDIS;
-       else
-               value &= ~PADCFG0_GPIOTXDIS;
-       writel(value, padcfg0);
+       __intel_gpio_set_direction(padcfg0, input);
 
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
index c3928aa3fefa9a1d24b0214e877bbac2bc15f67e..e0bca4df2a2f3188da0d559a29013893a5bea528 100644 (file)
@@ -253,9 +253,8 @@ static const unsigned int uart_tx_ao_a_pins[]       = { PIN(GPIOAO_0, 0) };
 static const unsigned int uart_rx_ao_a_pins[]  = { PIN(GPIOAO_1, 0) };
 static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_1, 0),
-                                                   PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_5, 0) };
 static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
 
@@ -498,7 +497,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
        GPIO_GROUP(GPIOAO_13, 0),
 
        /* bank AO */
-       GROUP(uart_tx_ao_b,     0,      26),
+       GROUP(uart_tx_ao_b,     0,      24),
        GROUP(uart_rx_ao_b,     0,      25),
        GROUP(uart_tx_ao_a,     0,      12),
        GROUP(uart_rx_ao_a,     0,      11),
index 25694f7094c714bbf35eee2ae7b51e2b4ce5b1e9..b69743b07a1d591ace36d410583231319234d4f0 100644 (file)
@@ -214,9 +214,8 @@ static const unsigned int uart_tx_ao_a_pins[]       = { PIN(GPIOAO_0, 0) };
 static const unsigned int uart_rx_ao_a_pins[]  = { PIN(GPIOAO_1, 0) };
 static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_1, 0),
-                                                   PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_5, 0) };
 static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
 
@@ -409,7 +408,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
        GPIO_GROUP(GPIOAO_9, 0),
 
        /* bank AO */
-       GROUP(uart_tx_ao_b,     0,      26),
+       GROUP(uart_tx_ao_b,     0,      24),
        GROUP(uart_rx_ao_b,     0,      25),
        GROUP(uart_tx_ao_a,     0,      12),
        GROUP(uart_rx_ao_a,     0,      11),
index c9a146948192dba19ca5da1587791c25b315d628..537b52055756645a8f225dd7e96b191d7d841e96 100644 (file)
@@ -202,6 +202,8 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
                        i = 128;
                        pin_num = AMD_GPIO_PINS_BANK2 + i;
                        break;
+               default:
+                       return;
                }
 
                for (; i < pin_num; i++) {
index aa8bd9794683b715013c82aa9220d11cfb0ea595..96686336e3a396254b9473f01f1776e0297301ce 100644 (file)
@@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                          0, 0, 0, 0};
 static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
                                           41, 42, 45};
-static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
 static const unsigned i2c0_pins[] = {63, 64};
 static const int i2c0_muxvals[] = {0, 0};
 static const unsigned i2c1_pins[] = {65, 66};
index 410741acb3c92dabe36417800f564a943c5d42ec..f46ece2ce3c4d48086c73b0e2d0c63ee1fe35893 100644 (file)
@@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
                        case 8:
                        case 7:
                        case 6:
+                       case 1:
                                ideapad_input_report(priv, vpc_bit);
                                break;
                        case 5:
index 1fc0de870ff826e8b90956ab557cc83008e1ce68..361770568ad03a6e7a3bc7e6d579ccacae1725b7 100644 (file)
@@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev)
 
        input_set_capability(input, EV_KEY, KEY_POWER);
 
-       error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+       error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
                                     DRIVER_NAME, input);
        if (error) {
                dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
index 97b4c3a219c0c79f3a3ed9359bc30bb13a439ce6..25f15df5c2d7b3c37b82e099f301831c403caa7d 100644 (file)
@@ -326,7 +326,7 @@ static int __init mlxplat_init(void)
        return 0;
 
 fail_platform_mux_register:
-       for (i--; i > 0 ; i--)
+       while (--i >= 0)
                platform_device_unregister(priv->pdev_mux[i]);
        platform_device_unregister(priv->pdev_i2c);
 fail_alloc:
index cbf4d83a727106ee0f7e42ca1b868616d994c0c3..25b176996cb793a789214a1a1237910b01cd3673 100644 (file)
@@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
 
 static int s3_wmi_check_platform_device(struct device *dev, void *data)
 {
-       struct acpi_device *adev, *ts_adev;
+       struct acpi_device *adev, *ts_adev = NULL;
        acpi_handle handle;
        acpi_status status;
 
@@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int s3_wmi_resume(struct device *dev)
+static int __maybe_unused s3_wmi_resume(struct device *dev)
 {
        s3_wmi_send_lid_state();
        return 0;
 }
-#endif
 static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
 
 static struct platform_driver s3_wmi_driver = {
index 639ed4e6afd19b46d1e8fb3ec5669b217da8d3c4..070c4da95f48c0e9b0dbb7b6bcf008f8d6e5972e 100644 (file)
@@ -145,6 +145,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
 #define CCW_CMD_WRITE_CONF 0x21
 #define CCW_CMD_WRITE_STATUS 0x31
 #define CCW_CMD_READ_VQ_CONF 0x32
+#define CCW_CMD_READ_STATUS 0x72
 #define CCW_CMD_SET_IND_ADAPTER 0x73
 #define CCW_CMD_SET_VIRTIO_REV 0x83
 
@@ -160,6 +161,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
 #define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
 #define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
 #define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
+#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
 #define VIRTIO_CCW_INTPARM_MASK 0xffff0000
 
 static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -452,7 +454,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
         * This may happen on device detach.
         */
        if (ret && (ret != -ENODEV))
-               dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
+               dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
                         ret, index);
 
        vring_del_virtqueue(vq);
@@ -892,6 +894,28 @@ out_free:
 static u8 virtio_ccw_get_status(struct virtio_device *vdev)
 {
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+       u8 old_status = *vcdev->status;
+       struct ccw1 *ccw;
+
+       if (vcdev->revision < 1)
+               return *vcdev->status;
+
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+               return old_status;
+
+       ccw->cmd_code = CCW_CMD_READ_STATUS;
+       ccw->flags = 0;
+       ccw->count = sizeof(*vcdev->status);
+       ccw->cda = (__u32)(unsigned long)vcdev->status;
+       ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
+/*
+ * If the channel program failed (should only happen if the device
+ * was hotunplugged, and then we clean up via the machine check
+ * handler anyway), vcdev->status was not overwritten and we just
+ * return the old status, which is fine.
+*/
+       kfree(ccw);
 
        return *vcdev->status;
 }
@@ -920,7 +944,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
        kfree(ccw);
 }
 
-static struct virtio_config_ops virtio_ccw_config_ops = {
+static const struct virtio_config_ops virtio_ccw_config_ops = {
        .get_features = virtio_ccw_get_features,
        .finalize_features = virtio_ccw_finalize_features,
        .get = virtio_ccw_get_config,
@@ -987,6 +1011,7 @@ static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
                case VIRTIO_CCW_DOING_READ_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_STATUS:
+               case VIRTIO_CCW_DOING_READ_STATUS:
                case VIRTIO_CCW_DOING_SET_VQ:
                case VIRTIO_CCW_DOING_SET_IND:
                case VIRTIO_CCW_DOING_SET_CONF_IND:
index a9a00169ad91960798c35114c69b4d17f13d1039..b2e8c0dfc79cb247a30e2ec826bc9a64da489e64 100644 (file)
@@ -3363,7 +3363,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job)
        struct bfad_fcxp    *drv_fcxp;
        struct bfa_fcs_lport_s *fcs_port;
        struct bfa_fcs_rport_s *fcs_rport;
-       struct fc_bsg_request *bsg_request = bsg_request;
+       struct fc_bsg_request *bsg_request = job->request;
        struct fc_bsg_reply *bsg_reply = job->reply;
        uint32_t command_type = bsg_request->msgcode;
        unsigned long flags;
index 8fb5c54c7dd3752084cf1063078422b34e65624c..99b747cedbebc517a78714db321743f0837834b6 100644 (file)
@@ -46,6 +46,7 @@
 
 #define        INITIAL_SRP_LIMIT       800
 #define        DEFAULT_MAX_SECTORS     256
+#define MAX_TXU                        1024 * 1024
 
 static uint max_vdma_size = MAX_H_COPY_RDMA;
 
@@ -1391,7 +1392,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
        }
 
        info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
-                                 GFP_KERNEL);
+                                 GFP_ATOMIC);
        if (!info) {
                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
                        iue->target);
@@ -1443,7 +1444,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
        info->mad_version = cpu_to_be32(MAD_VERSION_1);
        info->os_type = cpu_to_be32(LINUX);
        memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
-       info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
+       info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
 
        dma_wmb();
        rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
@@ -1509,7 +1510,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
        }
 
        cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
-                                GFP_KERNEL);
+                                GFP_ATOMIC);
        if (!cap) {
                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
                        iue->target);
index 236e4e51d1617243d279d09089d95bb50c0a09d6..7b6bd8ed0d0bd6fc8b056052dffbe60595015cbe 100644 (file)
@@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
                } else {
                        buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
                        lpfc_els_free_data(phba, buf_ptr1);
+                       elsiocb->context2 = NULL;
                }
        }
 
        if (elsiocb->context3) {
                buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
                lpfc_els_free_bpl(phba, buf_ptr);
+               elsiocb->context3 = NULL;
        }
        lpfc_sli_release_iocbq(phba, elsiocb);
        return 0;
index 4faa7672fc1d80add7e603e7bda066e5b98fd34b..a78a3df68f679659eb9d05b24133312a663ab812 100644 (file)
@@ -5954,18 +5954,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
 
  free_vfi_bmask:
        kfree(phba->sli4_hba.vfi_bmask);
+       phba->sli4_hba.vfi_bmask = NULL;
  free_xri_ids:
        kfree(phba->sli4_hba.xri_ids);
+       phba->sli4_hba.xri_ids = NULL;
  free_xri_bmask:
        kfree(phba->sli4_hba.xri_bmask);
+       phba->sli4_hba.xri_bmask = NULL;
  free_vpi_ids:
        kfree(phba->vpi_ids);
+       phba->vpi_ids = NULL;
  free_vpi_bmask:
        kfree(phba->vpi_bmask);
+       phba->vpi_bmask = NULL;
  free_rpi_ids:
        kfree(phba->sli4_hba.rpi_ids);
+       phba->sli4_hba.rpi_ids = NULL;
  free_rpi_bmask:
        kfree(phba->sli4_hba.rpi_bmask);
+       phba->sli4_hba.rpi_bmask = NULL;
  err_exit:
        return rc;
 }
index 394fe1338d0976a42f183e328dfaed02f540560f..dcb33f4fa68720624945f4bdc3f5c932e530c86f 100644 (file)
@@ -393,6 +393,7 @@ struct MPT3SAS_TARGET {
  * @eedp_enable: eedp support enable bit
  * @eedp_type: 0(type_1), 1(type_2), 2(type_3)
  * @eedp_block_length: block size
+ * @ata_command_pending: SATL passthrough outstanding for device
  */
 struct MPT3SAS_DEVICE {
        struct MPT3SAS_TARGET *sas_target;
@@ -404,6 +405,17 @@ struct MPT3SAS_DEVICE {
        u8      ignore_delay_remove;
        /* Iopriority Command Handling */
        u8      ncq_prio_enable;
+       /*
+        * Bug workaround for SATL handling: the mpt2/3sas firmware
+        * doesn't return BUSY or TASK_SET_FULL for subsequent
+        * commands while a SATL pass through is in operation as the
+        * spec requires, it simply does nothing with them until the
+        * pass through completes, causing them possibly to timeout if
+        * the passthrough is a long executing command (like format or
+        * secure erase).  This variable allows us to do the right
+        * thing while a SATL command is pending.
+        */
+       unsigned long ata_command_pending;
 
 };
 
index b5c966e319d315474b94703b93ab0343013dd973..75f3fce1c86773299704347fc0960fb5148ea53f 100644 (file)
@@ -3899,9 +3899,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
        }
 }
 
-static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
 {
-       return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+       struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
+
+       if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
+               return 0;
+
+       if (pending)
+               return test_and_set_bit(0, &priv->ata_command_pending);
+
+       clear_bit(0, &priv->ata_command_pending);
+       return 0;
 }
 
 /**
@@ -3925,9 +3934,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
                if (!scmd)
                        continue;
                count++;
-               if (ata_12_16_cmd(scmd))
-                       scsi_internal_device_unblock(scmd->device,
-                                                       SDEV_RUNNING);
+               _scsih_set_satl_pending(scmd, false);
                mpt3sas_base_free_smid(ioc, smid);
                scsi_dma_unmap(scmd);
                if (ioc->pci_error_recovery)
@@ -4063,13 +4070,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
        if (ioc->logging_level & MPT_DEBUG_SCSI)
                scsi_print_command(scmd);
 
-       /*
-        * Lock the device for any subsequent command until command is
-        * done.
-        */
-       if (ata_12_16_cmd(scmd))
-               scsi_internal_device_block(scmd->device);
-
        sas_device_priv_data = scmd->device->hostdata;
        if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
                scmd->result = DID_NO_CONNECT << 16;
@@ -4083,6 +4083,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
                return 0;
        }
 
+       /*
+        * Bug work around for firmware SATL handling.  The loop
+        * is based on atomic operations and ensures consistency
+        * since we're lockless at this point
+        */
+       do {
+               if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+                       scmd->result = SAM_STAT_BUSY;
+                       scmd->scsi_done(scmd);
+                       return 0;
+               }
+       } while (_scsih_set_satl_pending(scmd, true));
+
        sas_target_priv_data = sas_device_priv_data->sas_target;
 
        /* invalid device handle */
@@ -4650,8 +4663,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
        if (scmd == NULL)
                return 1;
 
-       if (ata_12_16_cmd(scmd))
-               scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+       _scsih_set_satl_pending(scmd, false);
 
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 
index 47eb4d545d13c5f9b80149f162b04756108cd654..f201f40996205c1f522cde8b53c6e0a4d02aaec9 100644 (file)
@@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
        struct qla_hw_data *ha = vha->hw;
        ssize_t rval = 0;
 
+       mutex_lock(&ha->optrom_mutex);
+
        if (ha->optrom_state != QLA_SREADING)
-               return 0;
+               goto out;
 
-       mutex_lock(&ha->optrom_mutex);
        rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
            ha->optrom_region_size);
+
+out:
        mutex_unlock(&ha->optrom_mutex);
 
        return rval;
@@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
            struct device, kobj)));
        struct qla_hw_data *ha = vha->hw;
 
-       if (ha->optrom_state != QLA_SWRITING)
+       mutex_lock(&ha->optrom_mutex);
+
+       if (ha->optrom_state != QLA_SWRITING) {
+               mutex_unlock(&ha->optrom_mutex);
                return -EINVAL;
-       if (off > ha->optrom_region_size)
+       }
+       if (off > ha->optrom_region_size) {
+               mutex_unlock(&ha->optrom_mutex);
                return -ERANGE;
+       }
        if (off + count > ha->optrom_region_size)
                count = ha->optrom_region_size - off;
 
-       mutex_lock(&ha->optrom_mutex);
        memcpy(&ha->optrom_buffer[off], buf, count);
        mutex_unlock(&ha->optrom_mutex);
 
@@ -753,7 +761,6 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
        struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
            struct device, kobj)));
        int type;
-       int rval = 0;
        port_id_t did;
 
        type = simple_strtol(buf, NULL, 10);
@@ -767,7 +774,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
 
        ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
 
-       rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
+       qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
        return count;
 }
 
index f7df01b76714e09dc919cbb9660b66bed603d6bc..5b1287a63c494b6edf8fcf7e8ec75a17530032db 100644 (file)
@@ -1556,7 +1556,8 @@ typedef struct {
 struct atio {
        uint8_t         entry_type;             /* Entry type. */
        uint8_t         entry_count;            /* Entry count. */
-       uint8_t         data[58];
+       __le16          attr_n_length;
+       uint8_t         data[56];
        uint32_t        signature;
 #define ATIO_PROCESSED 0xDEADDEAD              /* Signature */
 };
@@ -2732,7 +2733,7 @@ struct isp_operations {
 #define QLA_MSIX_FW_MODE(m)    (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
 #define QLA_MSIX_FW_MODE_1(m)  (QLA_MSIX_FW_MODE(m) == 1)
 
-#define QLA_MSIX_DEFAULT               0x00
+#define QLA_BASE_VECTORS       2 /* default + RSP */
 #define QLA_MSIX_RSP_Q                 0x01
 #define QLA_ATIO_VECTOR                0x02
 #define QLA_MSIX_QPAIR_MULTIQ_RSP_Q    0x03
@@ -2754,7 +2755,6 @@ struct qla_msix_entry {
        uint16_t entry;
        char name[30];
        void *handle;
-       struct irq_affinity_notify irq_notify;
        int cpuid;
 };
 
index 632d5f30386ab0ae529036c292f3c1c8e64162ca..7b6317c8c2e93bef3509c7e3d15fbae080922788 100644 (file)
@@ -1191,7 +1191,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
 
        /* Wait for soft-reset to complete. */
        RD_REG_DWORD(&reg->ctrl_status);
-       for (cnt = 0; cnt < 6000000; cnt++) {
+       for (cnt = 0; cnt < 60; cnt++) {
                barrier();
                if ((RD_REG_DWORD(&reg->ctrl_status) &
                    CSRX_ISP_SOFT_RESET) == 0)
@@ -1234,7 +1234,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
        RD_REG_DWORD(&reg->hccr);
 
        RD_REG_WORD(&reg->mailbox0);
-       for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+       for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
            rval == QLA_SUCCESS; cnt--) {
                barrier();
                if (cnt)
index 5093ca9b02ec52c8e70674f88205941cc0967d9f..dc88a09f9043c9359cba9c276e523571235c1b50 100644 (file)
@@ -19,10 +19,6 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
        sts_entry_t *);
-static void qla_irq_affinity_notify(struct irq_affinity_notify *,
-    const cpumask_t *);
-static void qla_irq_affinity_release(struct kref *);
-
 
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -2496,6 +2492,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
        if (pkt->entry_status & RF_BUSY)
                res = DID_BUS_BUSY << 16;
 
+       if (pkt->entry_type == NOTIFY_ACK_TYPE &&
+           pkt->handle == QLA_TGT_SKIP_HANDLE)
+               return;
+
        sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
        if (sp) {
                sp->done(ha, sp, res);
@@ -2572,14 +2572,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
        if (!vha->flags.online)
                return;
 
-       if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
-               /* if kernel does not notify qla of IRQ's CPU change,
-                * then set it here.
-                */
-               rsp->msix->cpuid = smp_processor_id();
-               ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
-       }
-
        while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
                pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
 
@@ -3018,13 +3010,20 @@ static struct qla_init_msix_entry qla82xx_msix_entries[] = {
 static int
 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
-#define MIN_MSIX_COUNT 2
        int i, ret;
        struct qla_msix_entry *qentry;
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+       struct irq_affinity desc = {
+               .pre_vectors = QLA_BASE_VECTORS,
+       };
+
+       if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha))
+               desc.pre_vectors++;
+
+       ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
+                       ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+                       &desc);
 
-       ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
-                                   PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
        if (ret < 0) {
                ql_log(ql_log_fatal, vha, 0x00c7,
                    "MSI-X: Failed to enable support, "
@@ -3069,13 +3068,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
                qentry->have_irq = 0;
                qentry->in_use = 0;
                qentry->handle = NULL;
-               qentry->irq_notify.notify  = qla_irq_affinity_notify;
-               qentry->irq_notify.release = qla_irq_affinity_release;
-               qentry->cpuid = -1;
        }
 
        /* Enable MSI-X vectors for the base queue */
-       for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
+       for (i = 0; i < QLA_BASE_VECTORS; i++) {
                qentry = &ha->msix_entries[i];
                qentry->handle = rsp;
                rsp->msix = qentry;
@@ -3093,18 +3089,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
                        goto msix_register_fail;
                qentry->have_irq = 1;
                qentry->in_use = 1;
-
-               /* Register for CPU affinity notification. */
-               irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
-
-               /* Schedule work (ie. trigger a notification) to read cpu
-                * mask for this specific irq.
-                * kref_get is required because
-               * irq_affinity_notify() will do
-               * kref_put().
-               */
-               kref_get(&qentry->irq_notify.kref);
-               schedule_work(&qentry->irq_notify.work);
        }
 
        /*
@@ -3301,49 +3285,3 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
        msix->handle = qpair;
        return ret;
 }
-
-
-/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
-static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
-       const cpumask_t *mask)
-{
-       struct qla_msix_entry *e =
-               container_of(notify, struct qla_msix_entry, irq_notify);
-       struct qla_hw_data *ha;
-       struct scsi_qla_host *base_vha;
-       struct rsp_que *rsp = e->handle;
-
-       /* user is recommended to set mask to just 1 cpu */
-       e->cpuid = cpumask_first(mask);
-
-       ha = rsp->hw;
-       base_vha = pci_get_drvdata(ha->pdev);
-
-       ql_dbg(ql_dbg_init, base_vha, 0xffff,
-           "%s: host %ld : vector %d cpu %d \n", __func__,
-           base_vha->host_no, e->vector, e->cpuid);
-
-       if (e->have_irq) {
-               if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
-                   (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
-                       ha->tgt.rspq_vector_cpuid = e->cpuid;
-                       ql_dbg(ql_dbg_init, base_vha, 0xffff,
-                           "%s: host%ld: rspq vector %d cpu %d  runtime change\n",
-                           __func__, base_vha->host_no, e->vector, e->cpuid);
-               }
-       }
-}
-
-static void qla_irq_affinity_release(struct kref *ref)
-{
-       struct irq_affinity_notify *notify =
-               container_of(ref, struct irq_affinity_notify, kref);
-       struct qla_msix_entry *e =
-               container_of(notify, struct qla_msix_entry, irq_notify);
-       struct rsp_que *rsp = e->handle;
-       struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
-
-       ql_dbg(ql_dbg_init, base_vha, 0xffff,
-               "%s: host%ld: vector %d cpu %d\n", __func__,
-           base_vha->host_no, e->vector, e->cpuid);
-}
index 2819ceb96041e5b97b234f115c9b35d4b4251ffe..67f64db390b0cd43e2ff6166d30903712ef80938 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/delay.h>
 #include <linux/gfp.h>
 
-struct rom_cmd {
+static struct rom_cmd {
        uint16_t cmd;
 } rom_cmds[] = {
        { MBC_LOAD_RAM },
@@ -101,12 +101,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                return QLA_FUNCTION_TIMEOUT;
        }
 
-        /* if PCI error, then avoid mbx processing.*/
-        if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
+       /* if PCI error, then avoid mbx processing.*/
+       if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
                ql_log(ql_log_warn, vha, 0x1191,
                    "PCI error, exiting.\n");
                return QLA_FUNCTION_TIMEOUT;
-        }
+       }
 
        reg = ha->iobase;
        io_lock_on = base_vha->flags.init_done;
@@ -323,20 +323,33 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                }
        } else {
 
-               uint16_t mb0;
-               uint32_t ictrl;
+               uint16_t mb[8];
+               uint32_t ictrl, host_status, hccr;
                uint16_t        w;
 
                if (IS_FWI2_CAPABLE(ha)) {
-                       mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
+                       mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
+                       mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
+                       mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
+                       mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
+                       mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
                        ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
+                       host_status = RD_REG_DWORD(&reg->isp24.host_status);
+                       hccr = RD_REG_DWORD(&reg->isp24.hccr);
+
+                       ql_log(ql_log_warn, vha, 0x1119,
+                           "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+                           "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
+                           command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
+                           mb[7], host_status, hccr);
+
                } else {
-                       mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
+                       mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
                        ictrl = RD_REG_WORD(&reg->isp.ictrl);
+                       ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
+                           "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+                           "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
                }
-               ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
-                   "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
-                   "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
                ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
 
                /* Capture FW dump only, if PCI device active */
@@ -684,7 +697,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
        mbx_cmd_t       mc;
        mbx_cmd_t       *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
-       int configured_count;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
            "Entered %s.\n", __func__);
@@ -707,7 +719,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
                /*EMPTY*/
                ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
        } else {
-               configured_count = mcp->mb[11];
                ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
                    "Done %s.\n", __func__);
        }
index 54380b434b304eddde918a94ee833e18be6a35f6..0a1723cc08cfc4cbc626f988e58601eff70db5ff 100644 (file)
@@ -42,6 +42,11 @@ static int qla82xx_crb_table_initialized;
        (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
        QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
 
+const int MD_MIU_TEST_AGT_RDDATA[] = {
+       0x410000A8, 0x410000AC,
+       0x410000B8, 0x410000BC
+};
+
 static void qla82xx_crb_addr_transform_setup(void)
 {
        qla82xx_crb_addr_transform(XDMA);
index 6201dce3553bf951b5b1f770842e289ced553e95..77624eac95a4741a4e475a29a012f1217a75a72f 100644 (file)
@@ -1176,8 +1176,7 @@ struct qla82xx_md_entry_queue {
 #define MD_MIU_TEST_AGT_ADDR_LO                0x41000094
 #define MD_MIU_TEST_AGT_ADDR_HI                0x41000098
 
-static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
-       0x410000B8, 0x410000BC };
+extern const int MD_MIU_TEST_AGT_RDDATA[4];
 
 #define CRB_NIU_XG_PAUSE_CTL_P0        0x1
 #define CRB_NIU_XG_PAUSE_CTL_P1        0x8
index 007192d7bad85fae9711198b15f31924da9f0e3e..dc1ec9b610273956c6d7848e23882961326ecadf 100644 (file)
 
 #define TIMEOUT_100_MS 100
 
+static const uint32_t qla8044_reg_tbl[] = {
+       QLA8044_PEG_HALT_STATUS1,
+       QLA8044_PEG_HALT_STATUS2,
+       QLA8044_PEG_ALIVE_COUNTER,
+       QLA8044_CRB_DRV_ACTIVE,
+       QLA8044_CRB_DEV_STATE,
+       QLA8044_CRB_DRV_STATE,
+       QLA8044_CRB_DRV_SCRATCH,
+       QLA8044_CRB_DEV_PART_INFO1,
+       QLA8044_CRB_IDC_VER_MAJOR,
+       QLA8044_FW_VER_MAJOR,
+       QLA8044_FW_VER_MINOR,
+       QLA8044_FW_VER_SUB,
+       QLA8044_CMDPEG_STATE,
+       QLA8044_ASIC_TEMP,
+};
+
 /* 8044 Flash Read/Write functions */
 uint32_t
 qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
index 02fe3c4cdf5577f275d3a898e7d07d6e3e89592c..83c1b7e17c80f1affa2a5857118f44a6549d832a 100644 (file)
@@ -535,23 +535,6 @@ enum qla_regs {
 #define CRB_CMDPEG_CHECK_RETRY_COUNT    60
 #define CRB_CMDPEG_CHECK_DELAY          500
 
-static const uint32_t qla8044_reg_tbl[] = {
-       QLA8044_PEG_HALT_STATUS1,
-       QLA8044_PEG_HALT_STATUS2,
-       QLA8044_PEG_ALIVE_COUNTER,
-       QLA8044_CRB_DRV_ACTIVE,
-       QLA8044_CRB_DEV_STATE,
-       QLA8044_CRB_DRV_STATE,
-       QLA8044_CRB_DRV_SCRATCH,
-       QLA8044_CRB_DEV_PART_INFO1,
-       QLA8044_CRB_IDC_VER_MAJOR,
-       QLA8044_FW_VER_MAJOR,
-       QLA8044_FW_VER_MINOR,
-       QLA8044_FW_VER_SUB,
-       QLA8044_CMDPEG_STATE,
-       QLA8044_ASIC_TEMP,
-};
-
 /* MiniDump Structures */
 
 /* Driver_code is for driver to write some info about the entry
index 8521cfe302e9e3e72c7aaf1a4753ca75f953b972..0a000ecf0881411d4c01c1a95245d1eb9d9da771 100644 (file)
@@ -466,7 +466,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
                        continue;
 
                rsp = ha->rsp_q_map[cnt];
-               clear_bit(cnt, ha->req_qid_map);
+               clear_bit(cnt, ha->rsp_qid_map);
                ha->rsp_q_map[cnt] =  NULL;
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                qla2x00_free_rsp_que(ha, rsp);
@@ -3662,7 +3662,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
                                sizeof(struct ct6_dsd), 0,
                                SLAB_HWCACHE_ALIGN, NULL);
                        if (!ctx_cachep)
-                               goto fail_free_gid_list;
+                               goto fail_free_srb_mempool;
                }
                ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
                        ctx_cachep);
@@ -3815,7 +3815,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
        ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
            GFP_KERNEL);
        if (!ha->loop_id_map)
-               goto fail_async_pd;
+               goto fail_loop_id_map;
        else {
                qla2x00_set_reserved_loop_ids(ha);
                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
@@ -3824,6 +3824,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 
        return 0;
 
+fail_loop_id_map:
+       dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
 fail_async_pd:
        dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
 fail_ex_init_cb:
@@ -3851,6 +3853,10 @@ fail_free_ms_iocb:
        dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
        ha->ms_iocb = NULL;
        ha->ms_iocb_dma = 0;
+
+       if (ha->sns_cmd)
+               dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
+                   ha->sns_cmd, ha->sns_cmd_dma);
 fail_dma_pool:
        if (IS_QLA82XX(ha) || ql2xenabledif) {
                dma_pool_destroy(ha->fcp_cmnd_dma_pool);
@@ -3868,10 +3874,12 @@ fail_free_nvram:
        kfree(ha->nvram);
        ha->nvram = NULL;
 fail_free_ctx_mempool:
-       mempool_destroy(ha->ctx_mempool);
+       if (ha->ctx_mempool)
+               mempool_destroy(ha->ctx_mempool);
        ha->ctx_mempool = NULL;
 fail_free_srb_mempool:
-       mempool_destroy(ha->srb_mempool);
+       if (ha->srb_mempool)
+               mempool_destroy(ha->srb_mempool);
        ha->srb_mempool = NULL;
 fail_free_gid_list:
        dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
index bff9689f5ca94f56be0a9739af6bf769885f73bf..e4fda84b959eca2d52aa009c353cbe7f5ffc94bd 100644 (file)
@@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
 {
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess = NULL;
-       uint32_t unpacked_lun, lun = 0;
        uint16_t loop_id;
        int res = 0;
        struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
-       struct atio_from_isp *a = (struct atio_from_isp *)iocb;
        unsigned long flags;
 
        loop_id = le16_to_cpu(n->u.isp24.nport_handle);
@@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
            "loop_id %d)\n", vha->host_no, sess, sess->port_name,
            mcmd, loop_id);
 
-       lun = a->u.isp24.fcp_cmnd.lun;
-       unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
-
-       return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
-           iocb, QLA24XX_MGMT_SEND_NACK);
+       return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
 }
 
 /* ha->tgt.sess_lock supposed to be held on entry */
@@ -3067,7 +3061,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 
        pkt->entry_type = NOTIFY_ACK_TYPE;
        pkt->entry_count = 1;
-       pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+       pkt->handle = QLA_TGT_SKIP_HANDLE;
 
        nack = (struct nack_to_isp *)pkt;
        nack->ox_id = ntfy->ox_id;
@@ -3110,6 +3104,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 #if 0  /* Todo  */
                if (rc == -ENOMEM)
                        qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#else
+               if (rc) {
+               }
 #endif
                goto done;
        }
@@ -6457,12 +6454,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
        if (!vha->flags.online)
                return;
 
-       while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+       while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
+           fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
                pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
                cnt = pkt->u.raw.entry_count;
 
-               qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
-                   ha_locked);
+               if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
+                       /*
+                        * This packet is corrupted. The header + payload
+                        * can not be trusted. There is no point in passing
+                        * it further up.
+                        */
+                       ql_log(ql_log_warn, vha, 0xffff,
+                           "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
+                           pkt->u.isp24.fcp_hdr.s_id,
+                           be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
+                           le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+
+                       adjust_corrupted_atio(pkt);
+                       qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
+               } else {
+                       qlt_24xx_atio_pkt_all_vps(vha,
+                           (struct atio_from_isp *)pkt, ha_locked);
+               }
 
                for (i = 0; i < cnt; i++) {
                        ha->tgt.atio_ring_index++;
@@ -6545,6 +6559,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
 
                /* Disable Full Login after LIP */
                nv->host_p &= cpu_to_le32(~BIT_10);
+
+               /*
+                * clear BIT 15 explicitly as we have seen at least
+                * a couple of instances where this was set and this
+                * was causing the firmware to not be initialized.
+                */
+               nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
                /* Enable target PRLI control */
                nv->firmware_options_2 |= cpu_to_le32(BIT_14);
        } else {
@@ -6560,9 +6581,6 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
                return;
        }
 
-       /* out-of-order frames reassembly */
-       nv->firmware_options_3 |= BIT_6|BIT_9;
-
        if (ha->tgt.enable_class_2) {
                if (vha->flags.init_done)
                        fc_host_supported_classes(vha->host) =
@@ -6629,11 +6647,17 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
                /* Disable ini mode, if requested */
                if (!qla_ini_mode_enabled(vha))
                        nv->firmware_options_1 |= cpu_to_le32(BIT_5);
-
                /* Disable Full Login after LIP */
                nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
                /* Enable initial LIP */
                nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
+               /*
+                * clear BIT 15 explicitly as we have seen at
+                * least a couple of instances where this was set
+                * and this was causing the firmware to not be
+                * initialized.
+                */
+               nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
                if (ql2xtgt_tape_enable)
                        /* Enable FC tape support */
                        nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6658,9 +6682,6 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
                return;
        }
 
-       /* out-of-order frames reassembly */
-       nv->firmware_options_3 |= BIT_6|BIT_9;
-
        if (ha->tgt.enable_class_2) {
                if (vha->flags.init_done)
                        fc_host_supported_classes(vha->host) =
index f26c5f60eedd27f6dcd36a835266f5924f32a6fd..0824a8164a2494361ef12892851f020ab40de6f4 100644 (file)
@@ -427,13 +427,33 @@ struct atio_from_isp {
                struct {
                        uint8_t  entry_type;    /* Entry type. */
                        uint8_t  entry_count;   /* Entry count. */
-                       uint8_t  data[58];
+                       __le16   attr_n_length;
+#define FCP_CMD_LENGTH_MASK 0x0fff
+#define FCP_CMD_LENGTH_MIN  0x38
+                       uint8_t  data[56];
                        uint32_t signature;
 #define ATIO_PROCESSED 0xDEADDEAD              /* Signature */
                } raw;
        } u;
 } __packed;
 
+static inline int fcpcmd_is_corrupted(struct atio *atio)
+{
+       if (atio->entry_type == ATIO_TYPE7 &&
+           ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+           FCP_CMD_LENGTH_MIN))
+               return 1;
+       else
+               return 0;
+}
+
+/* adjust corrupted atio so we won't trip over the same entry again. */
+static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
+{
+       atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN);
+       atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
+}
+
 #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
 
 /*
index 36935c9ed669513195a01ba26fd1e302773b593f..8a58ef3adab4425ba69a992dd2f51bd9357f44c9 100644 (file)
@@ -433,6 +433,18 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
                                count++;
                        }
                }
+       } else if (QLA_TGT_MODE_ENABLED() &&
+           ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
+               struct qla_hw_data *ha = vha->hw;
+               struct atio *atr = ha->tgt.atio_ring;
+
+               if (atr || !buf) {
+                       length = ha->tgt.atio_q_length;
+                       qla27xx_insert16(0, buf, len);
+                       qla27xx_insert16(length, buf, len);
+                       qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
+                       count++;
+               }
        } else {
                ql_dbg(ql_dbg_misc, vha, 0xd026,
                    "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
@@ -676,6 +688,18 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
                                count++;
                        }
                }
+       } else if (QLA_TGT_MODE_ENABLED() &&
+           ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
+               struct qla_hw_data *ha = vha->hw;
+               struct atio *atr = ha->tgt.atio_ring_ptr;
+
+               if (atr || !buf) {
+                       qla27xx_insert16(0, buf, len);
+                       qla27xx_insert16(1, buf, len);
+                       qla27xx_insert32(ha->tgt.atio_q_in ?
+                           readl(ha->tgt.atio_q_in) : 0, buf, len);
+                       count++;
+               }
        } else {
                ql_dbg(ql_dbg_misc, vha, 0xd02f,
                    "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
index 6643f6fc7795bcc09fc0c3ffd79a85b907ce6d94..d925910be761dfcdc61c5c3e97bc98c4372a6cb7 100644 (file)
@@ -1800,7 +1800,7 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
 {
        return sprintf(page,
            "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
-           UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+           UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
            utsname()->machine);
 }
 
@@ -1906,7 +1906,7 @@ static int tcm_qla2xxx_register_configfs(void)
        int ret;
 
        pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
-           UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+           UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
            utsname()->machine);
 
        ret = target_register_template(&tcm_qla2xxx_ops);
index 37e026a4823d6fdeb1048dd7f419f7ca253417c0..cf8430be183b6b529d33382386c54c2acbcd828a 100644 (file)
@@ -1,7 +1,6 @@
 #include <target/target_core_base.h>
 #include <linux/btree.h>
 
-#define TCM_QLA2XXX_VERSION    "v0.1"
 /* length of ASCII WWPNs including pad */
 #define TCM_QLA2XXX_NAMELEN    32
 /*
index 1fbb1ecf49f2ec8639e30457e2ec3aee68015bb4..1f5d92a25a49dd0f928c194c5d64372fb2b071f6 100644 (file)
@@ -836,6 +836,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
        struct bio *bio = rq->bio;
        sector_t sector = blk_rq_pos(rq);
        unsigned int nr_sectors = blk_rq_sectors(rq);
+       unsigned int nr_bytes = blk_rq_bytes(rq);
        int ret;
 
        if (sdkp->device->no_write_same)
@@ -868,7 +869,21 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 
        cmd->transfersize = sdp->sector_size;
        cmd->allowed = SD_MAX_RETRIES;
-       return scsi_init_io(cmd);
+
+       /*
+        * For WRITE SAME the data transferred via the DATA OUT buffer is
+        * different from the amount of data actually written to the target.
+        *
+        * We set up __data_len to the amount of data transferred via the
+        * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
+        * to transfer a single sector of data first, but then reset it to
+        * the amount of data to be written right after so that the I/O path
+        * knows how much to actually write.
+        */
+       rq->__data_len = sdp->sector_size;
+       ret = scsi_init_io(cmd);
+       rq->__data_len = nr_bytes;
+       return ret;
 }
 
 static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
@@ -2585,7 +2600,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
                if (sdp->broken_fua) {
                        sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
                        sdkp->DPOFUA = 0;
-               } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+               } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
+                          !sdkp->device->use_16_for_rw) {
                        sd_first_printk(KERN_NOTICE, sdkp,
                                  "Uses READ/WRITE(6), disabling FUA\n");
                        sdkp->DPOFUA = 0;
@@ -2768,13 +2784,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
        }
 
-       sdkp->zoned = (buffer[8] >> 4) & 3;
-       if (sdkp->zoned == 1)
-               q->limits.zoned = BLK_ZONED_HA;
-       else if (sdkp->device->type == TYPE_ZBC)
+       if (sdkp->device->type == TYPE_ZBC) {
+               /* Host-managed */
                q->limits.zoned = BLK_ZONED_HM;
-       else
-               q->limits.zoned = BLK_ZONED_NONE;
+       } else {
+               sdkp->zoned = (buffer[8] >> 4) & 3;
+               if (sdkp->zoned == 1)
+                       /* Host-aware */
+                       q->limits.zoned = BLK_ZONED_HA;
+               else
+                       /*
+                        * Treat drive-managed devices as
+                        * regular block devices.
+                        */
+                       q->limits.zoned = BLK_ZONED_NONE;
+       }
        if (blk_queue_is_zoned(q) && sdkp->first_scan)
                sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
                      q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
index 8c9a35c91705e42fcbc07e3721d0522f96d496dc..50adabbb5808902aea6abfd64a39c183678a6a6f 100644 (file)
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 
        ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
 
-       if (scsi_is_sas_rphy(&sdev->sdev_gendev))
+       if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
                efd.addr = sas_get_address(sdev);
 
        if (efd.addr) {
index 8823cc81ae45345bd0d632436eb4ce387d456e0b..5bb376009d98b78bd0dbf6da3e8f0853f9e6528e 100644 (file)
@@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
 
        if (IS_ERR(task)) {
                dev_err(dev, "can't create rproc_boot thread\n");
+               ret = PTR_ERR(task);
                goto err_put_rproc;
        }
 
index ec4aa252d6e8c1c761a47246851ad2645382516e..2922a9908302d84781f63d1b091ca6e4ddb2eba8 100644 (file)
@@ -378,6 +378,7 @@ config SPI_FSL_SPI
 config SPI_FSL_DSPI
        tristate "Freescale DSPI controller"
        select REGMAP_MMIO
+       depends on HAS_DMA
        depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
        help
          This enables support for the Freescale DSPI controller in master
index e89da0af45d2518ef26670f7bbd50b875692872b..0314c6b9e04415b0cb792d8e9a4048a6311fd97d 100644 (file)
@@ -800,7 +800,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
        struct spi_master *master;
        struct a3700_spi *spi;
        u32 num_cs = 0;
-       int ret = 0;
+       int irq, ret = 0;
 
        master = spi_alloc_master(dev, sizeof(*spi));
        if (!master) {
@@ -825,7 +825,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
        master->unprepare_message = a3700_spi_unprepare_message;
        master->set_cs = a3700_spi_set_cs;
        master->flags = SPI_MASTER_HALF_DUPLEX;
-       master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL |
+       master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
                              SPI_RX_QUAD | SPI_TX_QUAD);
 
        platform_set_drvdata(pdev, master);
@@ -846,12 +846,13 @@ static int a3700_spi_probe(struct platform_device *pdev)
                goto error;
        }
 
-       spi->irq = platform_get_irq(pdev, 0);
-       if (spi->irq < 0) {
-               dev_err(dev, "could not get irq: %d\n", spi->irq);
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(dev, "could not get irq: %d\n", irq);
                ret = -ENXIO;
                goto error;
        }
+       spi->irq = irq;
 
        init_completion(&spi->done);
 
index 319225d7e761b0066a062b017c6e6f2104447859..6ab4c770022882eacc338a31b345c0d80bc1b541 100644 (file)
@@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev)
                        SPI_ENGINE_VERSION_MAJOR(version),
                        SPI_ENGINE_VERSION_MINOR(version),
                        SPI_ENGINE_VERSION_PATCH(version));
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err_put_master;
        }
 
        spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
index d36c11b73a35ca656ab04e9c5ef0492f96950b32..02fb96797ac8b9ec52f41c8a13f93b290db0fc1c 100644 (file)
@@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
                        buf = t->rx_buf;
                t->rx_dma = dma_map_single(&spi->dev, buf,
                                t->len, DMA_FROM_DEVICE);
-               if (!t->rx_dma) {
+               if (dma_mapping_error(&spi->dev, t->rx_dma)) {
                        ret = -EFAULT;
                        goto err_rx_map;
                }
@@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
                        buf = (void *)t->tx_buf;
                t->tx_dma = dma_map_single(&spi->dev, buf,
                                t->len, DMA_TO_DEVICE);
-               if (!t->tx_dma) {
+               if (dma_mapping_error(&spi->dev, t->tx_dma)) {
                        ret = -EFAULT;
                        goto err_tx_map;
                }
index e31971f91475b1b3d9f1b2011e0b6e4e2ae4697b..837cb8d0bac6c9a1bd9f866192cd96950b13a68b 100644 (file)
@@ -274,11 +274,11 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
 static void mid_spi_dma_stop(struct dw_spi *dws)
 {
        if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
-               dmaengine_terminate_all(dws->txchan);
+               dmaengine_terminate_sync(dws->txchan);
                clear_bit(TX_BUSY, &dws->dma_chan_busy);
        }
        if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
-               dmaengine_terminate_all(dws->rxchan);
+               dmaengine_terminate_sync(dws->rxchan);
                clear_bit(RX_BUSY, &dws->dma_chan_busy);
        }
 }
index b715a26a91484fb695088459d9249b367b334fe1..054012f875671b995141f8c549021389f21f454f 100644 (file)
@@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = {
 
 static int dw_spi_debugfs_init(struct dw_spi *dws)
 {
-       dws->debugfs = debugfs_create_dir("dw_spi", NULL);
+       char name[128];
+
+       snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev));
+       dws->debugfs = debugfs_create_dir(name, NULL);
        if (!dws->debugfs)
                return -ENOMEM;
 
index dd7b5b47291d551890da8e8dfc324ca74895d66b..d6239fa718be9e251f577b9d9dd792a0e5c5ead5 100644 (file)
@@ -1690,6 +1690,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
                pxa2xx_spi_write(drv_data, SSCR1, tmp);
                tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
                pxa2xx_spi_write(drv_data, SSCR0, tmp);
+               break;
        default:
                tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
                      SSCR1_TxTresh(TX_THRESH_DFLT);
index 0012ad02e5696d35b547a3a698682f51a2d02819..1f00eeb0b5a3fb93ae838978dbf7815d94897378 100644 (file)
@@ -973,14 +973,16 @@ static const struct sh_msiof_chipdata r8a779x_data = {
 };
 
 static const struct of_device_id sh_msiof_match[] = {
-       { .compatible = "renesas,sh-msiof",        .data = &sh_data },
        { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
        { .compatible = "renesas,msiof-r8a7790",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7791",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7792",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7793",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7794",   .data = &r8a779x_data },
+       { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7796",   .data = &r8a779x_data },
+       { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data },
+       { .compatible = "renesas,sh-msiof",        .data = &sh_data }, /* Deprecated */
        {},
 };
 MODULE_DEVICE_TABLE(of, sh_msiof_match);
index b811b0fb61b1381fba45440f963030aba9ee509a..4c779651245351ea488cf20dec87199d755eb135 100644 (file)
@@ -118,12 +118,12 @@ struct rockchip_tsadc_chip {
        void (*control)(void __iomem *reg, bool on);
 
        /* Per-sensor methods */
-       int (*get_temp)(struct chip_tsadc_table table,
+       int (*get_temp)(const struct chip_tsadc_table *table,
                        int chn, void __iomem *reg, int *temp);
-       void (*set_alarm_temp)(struct chip_tsadc_table table,
-                              int chn, void __iomem *reg, int temp);
-       void (*set_tshut_temp)(struct chip_tsadc_table table,
-                              int chn, void __iomem *reg, int temp);
+       int (*set_alarm_temp)(const struct chip_tsadc_table *table,
+                             int chn, void __iomem *reg, int temp);
+       int (*set_tshut_temp)(const struct chip_tsadc_table *table,
+                             int chn, void __iomem *reg, int temp);
        void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
 
        /* Per-table methods */
@@ -317,6 +317,7 @@ static const struct tsadc_table rk3288_code_table[] = {
        {3452, 115000},
        {3437, 120000},
        {3421, 125000},
+       {0, 125000},
 };
 
 static const struct tsadc_table rk3368_code_table[] = {
@@ -397,59 +398,80 @@ static const struct tsadc_table rk3399_code_table[] = {
        {TSADCV3_DATA_MASK, 125000},
 };
 
-static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
+static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
                                   int temp)
 {
        int high, low, mid;
-       u32 error = 0;
+       unsigned long num;
+       unsigned int denom;
+       u32 error = table->data_mask;
 
        low = 0;
-       high = table.length - 1;
+       high = (table->length - 1) - 1; /* ignore the last check for table */
        mid = (high + low) / 2;
 
        /* Return mask code data when the temp is over table range */
-       if (temp < table.id[low].temp || temp > table.id[high].temp) {
-               error = table.data_mask;
+       if (temp < table->id[low].temp || temp > table->id[high].temp)
                goto exit;
-       }
 
        while (low <= high) {
-               if (temp == table.id[mid].temp)
-                       return table.id[mid].code;
-               else if (temp < table.id[mid].temp)
+               if (temp == table->id[mid].temp)
+                       return table->id[mid].code;
+               else if (temp < table->id[mid].temp)
                        high = mid - 1;
                else
                        low = mid + 1;
                mid = (low + high) / 2;
        }
 
+       /*
+        * The conversion code granularity provided by the table. Let's
+        * assume that the relationship between temperature and
+        * analog value between 2 table entries is linear and interpolate
+        * to produce less granular result.
+        */
+       num = abs(table->id[mid + 1].code - table->id[mid].code);
+       num *= temp - table->id[mid].temp;
+       denom = table->id[mid + 1].temp - table->id[mid].temp;
+
+       switch (table->mode) {
+       case ADC_DECREMENT:
+               return table->id[mid].code - (num / denom);
+       case ADC_INCREMENT:
+               return table->id[mid].code + (num / denom);
+       default:
+               pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
+               return error;
+       }
+
 exit:
-       pr_err("Invalid the conversion, error=%d\n", error);
+       pr_err("%s: invalid temperature, temp=%d error=%d\n",
+              __func__, temp, error);
        return error;
 }
 
-static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
-                                  int *temp)
+static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
+                                  u32 code, int *temp)
 {
        unsigned int low = 1;
-       unsigned int high = table.length - 1;
+       unsigned int high = table->length - 1;
        unsigned int mid = (low + high) / 2;
        unsigned int num;
        unsigned long denom;
 
-       WARN_ON(table.length < 2);
+       WARN_ON(table->length < 2);
 
-       switch (table.mode) {
+       switch (table->mode) {
        case ADC_DECREMENT:
-               code &= table.data_mask;
-               if (code < table.id[high].code)
+               code &= table->data_mask;
+               if (code <= table->id[high].code)
                        return -EAGAIN;         /* Incorrect reading */
 
                while (low <= high) {
-                       if (code >= table.id[mid].code &&
-                           code < table.id[mid - 1].code)
+                       if (code >= table->id[mid].code &&
+                           code < table->id[mid - 1].code)
                                break;
-                       else if (code < table.id[mid].code)
+                       else if (code < table->id[mid].code)
                                low = mid + 1;
                        else
                                high = mid - 1;
@@ -458,15 +480,15 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
                }
                break;
        case ADC_INCREMENT:
-               code &= table.data_mask;
-               if (code < table.id[low].code)
+               code &= table->data_mask;
+               if (code < table->id[low].code)
                        return -EAGAIN;         /* Incorrect reading */
 
                while (low <= high) {
-                       if (code <= table.id[mid].code &&
-                           code > table.id[mid - 1].code)
+                       if (code <= table->id[mid].code &&
+                           code > table->id[mid - 1].code)
                                break;
-                       else if (code > table.id[mid].code)
+                       else if (code > table->id[mid].code)
                                low = mid + 1;
                        else
                                high = mid - 1;
@@ -475,7 +497,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
                }
                break;
        default:
-               pr_err("Invalid the conversion table\n");
+               pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
+               return -EINVAL;
        }
 
        /*
@@ -484,10 +507,10 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
         * temperature between 2 table entries is linear and interpolate
         * to produce less granular result.
         */
-       num = table.id[mid].temp - table.id[mid - 1].temp;
-       num *= abs(table.id[mid - 1].code - code);
-       denom = abs(table.id[mid - 1].code - table.id[mid].code);
-       *temp = table.id[mid - 1].temp + (num / denom);
+       num = table->id[mid].temp - table->id[mid - 1].temp;
+       num *= abs(table->id[mid - 1].code - code);
+       denom = abs(table->id[mid - 1].code - table->id[mid].code);
+       *temp = table->id[mid - 1].temp + (num / denom);
 
        return 0;
 }
@@ -638,7 +661,7 @@ static void rk_tsadcv3_control(void __iomem *regs, bool enable)
        writel_relaxed(val, regs + TSADCV2_AUTO_CON);
 }
 
-static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
+static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table,
                               int chn, void __iomem *regs, int *temp)
 {
        u32 val;
@@ -648,39 +671,57 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
        return rk_tsadcv2_code_to_temp(table, val, temp);
 }
 
-static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table,
-                                 int chn, void __iomem *regs, int temp)
+static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table,
+                                int chn, void __iomem *regs, int temp)
 {
-       u32 alarm_value, int_en;
+       u32 alarm_value;
+       u32 int_en, int_clr;
+
+       /*
+        * In some cases, some sensors didn't need the trip points, the
+        * set_trips will pass {-INT_MAX, INT_MAX} to trigger tsadc alarm
+        * in the end, ignore this case and disable the high temperature
+        * interrupt.
+        */
+       if (temp == INT_MAX) {
+               int_clr = readl_relaxed(regs + TSADCV2_INT_EN);
+               int_clr &= ~TSADCV2_INT_SRC_EN(chn);
+               writel_relaxed(int_clr, regs + TSADCV2_INT_EN);
+               return 0;
+       }
 
        /* Make sure the value is valid */
        alarm_value = rk_tsadcv2_temp_to_code(table, temp);
-       if (alarm_value == table.data_mask)
-               return;
+       if (alarm_value == table->data_mask)
+               return -ERANGE;
 
-       writel_relaxed(alarm_value & table.data_mask,
+       writel_relaxed(alarm_value & table->data_mask,
                       regs + TSADCV2_COMP_INT(chn));
 
        int_en = readl_relaxed(regs + TSADCV2_INT_EN);
        int_en |= TSADCV2_INT_SRC_EN(chn);
        writel_relaxed(int_en, regs + TSADCV2_INT_EN);
+
+       return 0;
 }
 
-static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table,
-                                 int chn, void __iomem *regs, int temp)
+static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table,
+                                int chn, void __iomem *regs, int temp)
 {
        u32 tshut_value, val;
 
        /* Make sure the value is valid */
        tshut_value = rk_tsadcv2_temp_to_code(table, temp);
-       if (tshut_value == table.data_mask)
-               return;
+       if (tshut_value == table->data_mask)
+               return -ERANGE;
 
        writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
 
        /* TSHUT will be valid */
        val = readl_relaxed(regs + TSADCV2_AUTO_CON);
        writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON);
+
+       return 0;
 }
 
 static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
@@ -883,10 +924,8 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
        dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n",
                __func__, sensor->id, low, high);
 
-       tsadc->set_alarm_temp(tsadc->table,
-                             sensor->id, thermal->regs, high);
-
-       return 0;
+       return tsadc->set_alarm_temp(&tsadc->table,
+                                    sensor->id, thermal->regs, high);
 }
 
 static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
@@ -896,7 +935,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
        const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
        int retval;
 
-       retval = tsadc->get_temp(tsadc->table,
+       retval = tsadc->get_temp(&tsadc->table,
                                 sensor->id, thermal->regs, out_temp);
        dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
                sensor->id, *out_temp, retval);
@@ -982,8 +1021,12 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
        int error;
 
        tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
-       tsadc->set_tshut_temp(tsadc->table, id, thermal->regs,
+
+       error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs,
                              thermal->tshut_temp);
+       if (error)
+               dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
+                       __func__, thermal->tshut_temp, error);
 
        sensor->thermal = thermal;
        sensor->id = id;
@@ -1196,9 +1239,13 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
 
                thermal->chip->set_tshut_mode(id, thermal->regs,
                                              thermal->tshut_mode);
-               thermal->chip->set_tshut_temp(thermal->chip->table,
+
+               error = thermal->chip->set_tshut_temp(&thermal->chip->table,
                                              id, thermal->regs,
                                              thermal->tshut_temp);
+               if (error)
+                       dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
+                               __func__, thermal->tshut_temp, error);
        }
 
        thermal->chip->control(thermal->regs, true);
index 641faab6e24b50fef4d70d3334edfc49e0ab0ce3..655591316a881274a6d152801ffd19138a0dc34b 100644 (file)
@@ -799,6 +799,11 @@ static void thermal_release(struct device *dev)
        if (!strncmp(dev_name(dev), "thermal_zone",
                     sizeof("thermal_zone") - 1)) {
                tz = to_thermal_zone(dev);
+               kfree(tz->trip_type_attrs);
+               kfree(tz->trip_temp_attrs);
+               kfree(tz->trip_hyst_attrs);
+               kfree(tz->trips_attribute_group.attrs);
+               kfree(tz->device.groups);
                kfree(tz);
        } else if (!strncmp(dev_name(dev), "cooling_device",
                            sizeof("cooling_device") - 1)) {
@@ -1305,10 +1310,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 
        thermal_zone_device_set_polling(tz, 0);
 
-       kfree(tz->trip_type_attrs);
-       kfree(tz->trip_temp_attrs);
-       kfree(tz->trip_hyst_attrs);
-       kfree(tz->trips_attribute_group.attrs);
        thermal_set_governor(tz, NULL);
 
        thermal_remove_hwmon_sysfs(tz);
@@ -1316,7 +1317,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
        idr_destroy(&tz->idr);
        mutex_destroy(&tz->lock);
        device_unregister(&tz->device);
-       kfree(tz->device.groups);
 }
 EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
 
index 9548d3e03453db062042600668840d5b4676e649..302b8f5f7d27d26b264bbaeb0eb9a2756362c482 100644 (file)
@@ -513,8 +513,8 @@ struct dwc2_core_params {
        /* Gadget parameters */
        bool g_dma;
        bool g_dma_desc;
-       u16 g_rx_fifo_size;
-       u16 g_np_tx_fifo_size;
+       u32 g_rx_fifo_size;
+       u32 g_np_tx_fifo_size;
        u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
 };
 
index c55db4aa54d677c77fb2a0c9bcff7d38ac7d1b9e..77c5fcf3a5bf7f101c51225f3c21a90fc7e59dcf 100644 (file)
@@ -3169,7 +3169,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
        /* keep other bits untouched (so e.g. forced modes are not lost) */
        usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
        usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
-               GUSBCFG_HNPCAP);
+               GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
 
        if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
            (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
@@ -3749,8 +3749,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
                __func__, epctrl, epctrl_reg);
 
        /* Allocate DMA descriptor chain for non-ctrl endpoints */
-       if (using_desc_dma(hsotg)) {
-               hs_ep->desc_list = dma_alloc_coherent(hsotg->dev,
+       if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
+               hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
                        MAX_DMA_DESC_NUM_GENERIC *
                        sizeof(struct dwc2_dma_desc),
                        &hs_ep->desc_list_dma, GFP_ATOMIC);
@@ -3872,7 +3872,7 @@ error1:
 
 error2:
        if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
-               dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+               dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
                        sizeof(struct dwc2_dma_desc),
                        hs_ep->desc_list, hs_ep->desc_list_dma);
                hs_ep->desc_list = NULL;
@@ -3902,14 +3902,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
                return -EINVAL;
        }
 
-       /* Remove DMA memory allocated for non-control Endpoints */
-       if (using_desc_dma(hsotg)) {
-               dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
-                                 sizeof(struct dwc2_dma_desc),
-                                 hs_ep->desc_list, hs_ep->desc_list_dma);
-               hs_ep->desc_list = NULL;
-       }
-
        epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
 
        spin_lock_irqsave(&hsotg->lock, flags);
@@ -4131,7 +4123,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
        /* keep other bits untouched (so e.g. forced modes are not lost) */
        usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
        usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
-               GUSBCFG_HNPCAP);
+               GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
 
        /* set the PLL on, remove the HNP/SRP and set the PHY */
        trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
index 911c3b36ac067028acdaa5d53bd0c9e8702f8942..46d0ad5105e40e5818e590a957072a48066c5ae0 100644 (file)
@@ -4367,6 +4367,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
        if (!HCD_HW_ACCESSIBLE(hcd))
                goto unlock;
 
+       if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
+               goto unlock;
+
        if (!hsotg->params.hibernation)
                goto skip_power_saving;
 
@@ -4489,8 +4492,8 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
 {
 #ifdef VERBOSE_DEBUG
        struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
-       char *pipetype;
-       char *speed;
+       char *pipetype = NULL;
+       char *speed = NULL;
 
        dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
        dev_vdbg(hsotg->dev, "  Device address: %d\n",
index 11fe68a4627bd315c8e82dbd3398b2dfde9bde8c..bcd1e19b40768679a0cee673be38587c8fdd11b0 100644 (file)
@@ -385,16 +385,16 @@ static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param,
 }
 
 /**
- * dwc2_set_param_u16() - Set a u16 parameter
+ * dwc2_set_param_u32() - Set a u32 parameter
  *
  * See dwc2_set_param().
  */
-static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param,
+static void dwc2_set_param_u32(struct dwc2_hsotg *hsotg, u32 *param,
                               bool lookup, char *property, u16 legacy,
                               u16 def, u16 min, u16 max)
 {
        dwc2_set_param(hsotg, param, lookup, property,
-                      legacy, def, min, max, 2);
+                      legacy, def, min, max, 4);
 }
 
 /**
@@ -1178,12 +1178,12 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
                 * auto-detect if the hardware does not support the
                 * default.
                 */
-               dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size,
+               dwc2_set_param_u32(hsotg, &p->g_rx_fifo_size,
                                   true, "g-rx-fifo-size", 2048,
                                   hw->rx_fifo_size,
                                   16, hw->rx_fifo_size);
 
-               dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size,
+               dwc2_set_param_u32(hsotg, &p->g_np_tx_fifo_size,
                                   true, "g-np-tx-fifo-size", 1024,
                                   hw->dev_nperio_tx_fifo_size,
                                   16, hw->dev_nperio_tx_fifo_size);
index e27899bb57064b94811fae851587b06b743128ff..e956306d9b0f834e52632f09fe2af5f2ae913bc0 100644 (file)
@@ -138,7 +138,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
                exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
                if (IS_ERR(exynos->axius_clk)) {
                        dev_err(dev, "no AXI UpScaler clk specified\n");
-                       return -ENODEV;
+                       ret = -ENODEV;
+                       goto axius_clk_err;
                }
                clk_prepare_enable(exynos->axius_clk);
        } else {
@@ -196,6 +197,7 @@ err3:
        regulator_disable(exynos->vdd33);
 err2:
        clk_disable_unprepare(exynos->axius_clk);
+axius_clk_err:
        clk_disable_unprepare(exynos->susp_clk);
        clk_disable_unprepare(exynos->clk);
        return ret;
index 002822d98fda207505581ca75cb47373db81fb78..49d685ad0da90d1a1282dd9d25f31ad64db22087 100644 (file)
@@ -2147,7 +2147,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
        cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
        if (!cdev->os_desc_req->buf) {
                ret = -ENOMEM;
-               kfree(cdev->os_desc_req);
+               usb_ep_free_request(ep0, cdev->os_desc_req);
                goto end;
        }
        cdev->os_desc_req->context = cdev;
index 5e746adc8a2d5416b7e1bcbeb8c41559716599b4..5490fc51638ede3c565eff9036ff3beaf884d3a9 100644 (file)
@@ -1806,7 +1806,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
        unsigned long flags;
 
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
-       do {
+       while (count--) {
                /* pending requests get nuked */
                if (likely(ep->ep))
                        usb_ep_disable(ep->ep);
@@ -1817,7 +1817,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
                        __ffs_epfile_read_buffer_free(epfile);
                        ++epfile;
                }
-       } while (--count);
+       }
        spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
 }
 
@@ -1831,7 +1831,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
        int ret = 0;
 
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
-       do {
+       while(count--) {
                struct usb_endpoint_descriptor *ds;
                int desc_idx;
 
@@ -1867,7 +1867,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
 
                ++ep;
                ++epfile;
-       } while (--count);
+       }
        spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
 
        return ret;
@@ -3448,12 +3448,12 @@ static void ffs_func_unbind(struct usb_configuration *c,
 
        /* cleanup after autoconfig */
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
-       do {
+       while (count--) {
                if (ep->ep && ep->req)
                        usb_ep_free_request(ep->ep, ep->req);
                ep->req = NULL;
                ++ep;
-       } while (--count);
+       }
        spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
        kfree(func->eps);
        func->eps = NULL;
index f3212db9bc37bf1889c482e86c9c679720766b1f..12c7687216e62f3b88dc47546ec12d5d52efdff5 100644 (file)
@@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
                        dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
                        goto err;
                }
-               ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
+               sprintf(ep->name, "ep%d", ep->index);
+               ep->ep.name = ep->name;
 
                ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
                ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
index 3e1c9d589dfa34ade18fffa7e300a4c9dfcc1563..b03b2ebfc53a3cdc2e5e7c1731beb9221ea7ea4c 100644 (file)
@@ -280,6 +280,7 @@ struct usba_ep {
        void __iomem                            *ep_regs;
        void __iomem                            *dma_regs;
        void __iomem                            *fifo;
+       char                                    name[8];
        struct usb_ep                           ep;
        struct usba_udc                         *udc;
 
index ddfab301e36658adccde76b9a47141b39a3508c7..e5834dd9bcdedb246a51be1b35fa70c9ed920b61 100644 (file)
@@ -165,7 +165,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
                return -ENODEV;
 
        /* Try to set 64-bit DMA first */
-       if (WARN_ON(!pdev->dev.dma_mask))
+       if (!pdev->dev.dma_mask)
                /* Platform did not initialize dma_mask */
                ret = dma_coerce_mask_and_coherent(&pdev->dev,
                                                   DMA_BIT_MASK(64));
index c8823578a1b2afd3ae7a36c2f526fd071116876b..128d10282d1632693dc40819ff8b39485ba1e1de 100644 (file)
@@ -1270,6 +1270,10 @@ static int tce_iommu_attach_group(void *iommu_data,
        /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
                        iommu_group_id(iommu_group), iommu_group); */
        table_group = iommu_group_get_iommudata(iommu_group);
+       if (!table_group) {
+               ret = -ENODEV;
+               goto unlock_exit;
+       }
 
        if (tce_groups_attached(container) && (!table_group->ops ||
                        !table_group->ops->take_ownership ||
index 253310cdaacabc25d67aa997e45033bf7137b3b9..fd6c8b66f06fd97734bfdad5917c7f65de70265d 100644 (file)
@@ -843,7 +843,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
        struct iov_iter out_iter, in_iter, prot_iter, data_iter;
        u64 tag;
        u32 exp_data_len, data_direction;
-       unsigned out, in;
+       unsigned int out = 0, in = 0;
        int head, ret, prot_bytes;
        size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
        size_t out_size, in_size;
@@ -2087,7 +2087,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
        NULL,
 };
 
-static struct target_core_fabric_ops vhost_scsi_ops = {
+static const struct target_core_fabric_ops vhost_scsi_ops = {
        .module                         = THIS_MODULE,
        .name                           = "vhost",
        .get_fabric_name                = vhost_scsi_get_fabric_name,
index bbbf588540ed71d82ed63deb355b77736a2a9628..ce5e63d2c66aac7d019c422ec294cab025e94e5e 100644 (file)
@@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
 
 static int vhost_vsock_start(struct vhost_vsock *vsock)
 {
+       struct vhost_virtqueue *vq;
        size_t i;
        int ret;
 
@@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
                goto err;
 
        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-               struct vhost_virtqueue *vq = &vsock->vqs[i];
+               vq = &vsock->vqs[i];
 
                mutex_lock(&vq->mutex);
 
                if (!vhost_vq_access_ok(vq)) {
                        ret = -EFAULT;
-                       mutex_unlock(&vq->mutex);
                        goto err_vq;
                }
 
                if (!vq->private_data) {
                        vq->private_data = vsock;
-                       vhost_vq_init_access(vq);
+                       ret = vhost_vq_init_access(vq);
+                       if (ret)
+                               goto err_vq;
                }
 
                mutex_unlock(&vq->mutex);
@@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
        return 0;
 
 err_vq:
+       vq->private_data = NULL;
+       mutex_unlock(&vq->mutex);
+
        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-               struct vhost_virtqueue *vq = &vsock->vqs[i];
+               vq = &vsock->vqs[i];
 
                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
index f89245b8ba8e9a28483c4ff5edb03b80a1a9b2e3..68a113594808f220aa818424cd6e342897806a74 100644 (file)
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
 
 int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 {
-       int tooff = 0, fromoff = 0;
-       int size;
+       unsigned int tooff = 0, fromoff = 0;
+       size_t size;
 
        if (to->start > from->start)
                fromoff = to->start - from->start;
        else
                tooff = from->start - to->start;
-       size = to->len - tooff;
-       if (size > (int) (from->len - fromoff))
-               size = from->len - fromoff;
-       if (size <= 0)
+       if (fromoff >= from->len || tooff >= to->len)
+               return -EINVAL;
+
+       size = min_t(size_t, to->len - tooff, from->len - fromoff);
+       if (size == 0)
                return -EINVAL;
        size *= sizeof(u16);
 
@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 
 int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
 {
-       int tooff = 0, fromoff = 0;
-       int size;
+       unsigned int tooff = 0, fromoff = 0;
+       size_t size;
 
        if (to->start > from->start)
                fromoff = to->start - from->start;
        else
                tooff = from->start - to->start;
-       size = to->len - tooff;
-       if (size > (int) (from->len - fromoff))
-               size = from->len - fromoff;
-       if (size <= 0)
+       if (fromoff >= from->len || tooff >= to->len)
+               return -EINVAL;
+
+       size = min_t(size_t, to->len - tooff, from->len - fromoff);
+       if (size == 0)
                return -EINVAL;
        size *= sizeof(u16);
 
index d47a2fcef818f3cea1ce7f09160061b3c8d2f0a6..c71fde5fe835c48d1ce4611b29108f8cf7fb44f3 100644 (file)
@@ -59,6 +59,7 @@
 #define pr_fmt(fmt) "virtio-mmio: " fmt
 
 #include <linux/acpi.h>
+#include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
        struct virtio_mmio_device *vm_dev;
        struct resource *mem;
        unsigned long magic;
+       int rc;
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!mem)
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
        }
        vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
-       if (vm_dev->version == 1)
+       if (vm_dev->version == 1) {
                writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
 
+               rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+               /*
+                * In the legacy case, ensure our coherently-allocated virtio
+                * ring will be at an address expressable as a 32-bit PFN.
+                */
+               if (!rc)
+                       dma_set_coherent_mask(&pdev->dev,
+                                             DMA_BIT_MASK(32 + PAGE_SHIFT));
+       } else {
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       }
+       if (rc)
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (rc)
+               dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");
+
        platform_set_drvdata(pdev, vm_dev);
 
        return register_virtio_device(&vm_dev->vdev);
index 409aeaa49246a0edd7c6da07ca38b58c3f876109..7e38ed79c3fc0f2c095164d480f75b31630a6694 100644 (file)
@@ -159,6 +159,13 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
        if (xen_domain())
                return true;
 
+       /*
+        * On ARM-based machines, the DMA ops will do the right thing,
+        * so always use them with legacy devices.
+        */
+       if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
+               return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+
        return false;
 }
 
index 112ce422dc2268f25d7266e42797d6ddfa1a8fe4..2a165cc8a43cd6768529ffe48126c172e5b9f7df 100644 (file)
@@ -42,6 +42,7 @@
 static unsigned long platform_mmio;
 static unsigned long platform_mmio_alloc;
 static unsigned long platform_mmiolen;
+static uint64_t callback_via;
 
 static unsigned long alloc_xen_mmio(unsigned long len)
 {
@@ -54,6 +55,51 @@ static unsigned long alloc_xen_mmio(unsigned long len)
        return addr;
 }
 
+static uint64_t get_callback_via(struct pci_dev *pdev)
+{
+       u8 pin;
+       int irq;
+
+       irq = pdev->irq;
+       if (irq < 16)
+               return irq; /* ISA IRQ */
+
+       pin = pdev->pin;
+
+       /* We don't know the GSI. Specify the PCI INTx line instead. */
+       return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
+               ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+               ((uint64_t)pdev->bus->number << 16) |
+               ((uint64_t)(pdev->devfn & 0xff) << 8) |
+               ((uint64_t)(pin - 1) & 3);
+}
+
+static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
+{
+       xen_hvm_evtchn_do_upcall();
+       return IRQ_HANDLED;
+}
+
+static int xen_allocate_irq(struct pci_dev *pdev)
+{
+       return request_irq(pdev->irq, do_hvm_evtchn_intr,
+                       IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+                       "xen-platform-pci", pdev);
+}
+
+static int platform_pci_resume(struct pci_dev *pdev)
+{
+       int err;
+       if (!xen_pv_domain())
+               return 0;
+       err = xen_set_callback_via(callback_via);
+       if (err) {
+               dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+               return err;
+       }
+       return 0;
+}
+
 static int platform_pci_probe(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
 {
@@ -92,6 +138,28 @@ static int platform_pci_probe(struct pci_dev *pdev,
        platform_mmio = mmio_addr;
        platform_mmiolen = mmio_len;
 
+       /* 
+        * Xen HVM guests always use the vector callback mechanism.
+        * L1 Dom0 in a nested Xen environment is a PV guest inside in an
+        * HVM environment. It needs the platform-pci driver to get
+        * notifications from L0 Xen, but it cannot use the vector callback
+        * as it is not exported by L1 Xen.
+        */
+       if (xen_pv_domain()) {
+               ret = xen_allocate_irq(pdev);
+               if (ret) {
+                       dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
+                       goto out;
+               }
+               callback_via = get_callback_via(pdev);
+               ret = xen_set_callback_via(callback_via);
+               if (ret) {
+                       dev_warn(&pdev->dev, "Unable to set the evtchn callback "
+                                        "err=%d\n", ret);
+                       goto out;
+               }
+       }
+
        max_nr_gframes = gnttab_max_grant_frames();
        grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
        ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -123,6 +191,9 @@ static struct pci_driver platform_driver = {
        .name =           DRV_NAME,
        .probe =          platform_pci_probe,
        .id_table =       platform_pci_tbl,
+#ifdef CONFIG_PM
+       .resume_early =   platform_pci_resume,
+#endif
 };
 
 builtin_pci_driver(platform_driver);
index f905d6eeb0482ee481cb24d9714bc6081a852d1e..f8afc6dcc29f2769694308092a4b543e5e0bed49 100644 (file)
@@ -414,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
        if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;
 
+       dev_addr = xen_phys_to_bus(map);
        xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
                                        dev_addr, map & ~PAGE_MASK, size, dir, attrs);
-       dev_addr = xen_phys_to_bus(map);
 
        /*
         * Ensure that the address returned is DMA'ble
@@ -575,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                                sg_dma_len(sgl) = 0;
                                return 0;
                        }
+                       dev_addr = xen_phys_to_bus(map);
                        xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
                                                dev_addr,
                                                map & ~PAGE_MASK,
                                                sg->length,
                                                dir,
                                                attrs);
-                       sg->dma_address = xen_phys_to_bus(map);
+                       sg->dma_address = dev_addr;
                } else {
                        /* we are not interested in the dma_addr returned by
                         * xen_dma_map_page, only in the potential cache flushes executed
index c2a377cdda2b03d6efe8768e4ef7894a06ebe853..83eab52fb3f69a75aa06a9f2a31760a384508f41 100644 (file)
@@ -38,6 +38,7 @@ config FS_DAX
        bool "Direct Access (DAX) support"
        depends on MMU
        depends on !(ARM || MIPS || SPARC)
+       select FS_IOMAP
        help
          Direct Access (DAX) can be used on memory-backed block devices.
          If the block device supports DAX and the filesystem supports DAX,
index 5db5d1340d69eccf475f0feac7f85665bd6aceb5..3c47614a4b32c75c4d5cf75d436389305c145288 100644 (file)
@@ -331,7 +331,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
-       bool is_read = (iov_iter_rw(iter) == READ);
+       bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        loff_t pos = iocb->ki_pos;
        blk_qc_t qc = BLK_QC_T_NONE;
        int ret;
@@ -344,7 +344,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        bio_get(bio); /* extra ref for the completion handler */
 
        dio = container_of(bio, struct blkdev_dio, bio);
-       dio->is_sync = is_sync_kiocb(iocb);
+       dio->is_sync = is_sync = is_sync_kiocb(iocb);
        if (dio->is_sync)
                dio->waiter = current;
        else
@@ -398,7 +398,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        }
        blk_finish_plug(&plug);
 
-       if (!dio->is_sync)
+       if (!is_sync)
                return -EIOCBQUEUED;
 
        for (;;) {
index 4e024260ad713ffc583d8f2ffaeb7ba6a025014d..1e861a063721e7c173a770cd68d0fea484cb75b3 100644 (file)
@@ -3835,10 +3835,7 @@ cache_acl:
                break;
        case S_IFDIR:
                inode->i_fop = &btrfs_dir_file_operations;
-               if (root == fs_info->tree_root)
-                       inode->i_op = &btrfs_dir_ro_inode_operations;
-               else
-                       inode->i_op = &btrfs_dir_inode_operations;
+               inode->i_op = &btrfs_dir_inode_operations;
                break;
        case S_IFLNK:
                inode->i_op = &btrfs_symlink_inode_operations;
@@ -4505,8 +4502,19 @@ search_again:
                if (found_type > min_type) {
                        del_item = 1;
                } else {
-                       if (item_end < new_size)
+                       if (item_end < new_size) {
+                               /*
+                                * With NO_HOLES mode, for the following mapping
+                                *
+                                * [0-4k][hole][8k-12k]
+                                *
+                                * if truncating isize down to 6k, it ends up
+                                * isize being 8k.
+                                */
+                               if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+                                       last_size = new_size;
                                break;
+                       }
                        if (found_key.offset >= new_size)
                                del_item = 1;
                        else
@@ -5710,6 +5718,7 @@ static struct inode *new_simple_dir(struct super_block *s,
 
        inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
        inode->i_op = &btrfs_dir_ro_inode_operations;
+       inode->i_opflags &= ~IOP_XATTR;
        inode->i_fop = &simple_dir_operations;
        inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
        inode->i_mtime = current_time(inode);
@@ -7215,7 +7224,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
        struct extent_map *em = NULL;
        int ret;
 
-       down_read(&BTRFS_I(inode)->dio_sem);
        if (type != BTRFS_ORDERED_NOCOW) {
                em = create_pinned_em(inode, start, len, orig_start,
                                      block_start, block_len, orig_block_len,
@@ -7234,7 +7242,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
                em = ERR_PTR(ret);
        }
  out:
-       up_read(&BTRFS_I(inode)->dio_sem);
 
        return em;
 }
@@ -8692,6 +8699,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                dio_data.unsubmitted_oe_range_start = (u64)offset;
                dio_data.unsubmitted_oe_range_end = (u64)offset;
                current->journal_info = &dio_data;
+               down_read(&BTRFS_I(inode)->dio_sem);
        } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
                                     &BTRFS_I(inode)->runtime_flags)) {
                inode_dio_end(inode);
@@ -8704,6 +8712,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                                   iter, btrfs_get_blocks_direct, NULL,
                                   btrfs_submit_direct, flags);
        if (iov_iter_rw(iter) == WRITE) {
+               up_read(&BTRFS_I(inode)->dio_sem);
                current->journal_info = NULL;
                if (ret < 0 && ret != -EIOCBQUEUED) {
                        if (dio_data.reserve)
@@ -9212,6 +9221,7 @@ static int btrfs_truncate(struct inode *inode)
                        break;
                }
 
+               btrfs_block_rsv_release(fs_info, rsv, -1);
                ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
                                              rsv, min_size, 0);
                BUG_ON(ret);    /* shouldn't happen */
@@ -10579,8 +10589,6 @@ static const struct inode_operations btrfs_dir_inode_operations = {
 static const struct inode_operations btrfs_dir_ro_inode_operations = {
        .lookup         = btrfs_lookup,
        .permission     = btrfs_permission,
-       .get_acl        = btrfs_get_acl,
-       .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
 };
 
index baea866a6751facf4c1f18dda23e71582978dffe..94fd76d04683d88103b42ff71a02490201a9783f 100644 (file)
@@ -2591,8 +2591,13 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
                        add_wait_queue(&ci->i_cap_wq, &wait);
 
                        while (!try_get_cap_refs(ci, need, want, endoff,
-                                                true, &_got, &err))
+                                                true, &_got, &err)) {
+                               if (signal_pending(current)) {
+                                       ret = -ERESTARTSYS;
+                                       break;
+                               }
                                wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+                       }
 
                        remove_wait_queue(&ci->i_cap_wq, &wait);
 
index d7a93696663b66b9183a9ae1559c2119b3fc3f98..8ab1fdf0bd49b74f380a578aea92ce738393403d 100644 (file)
@@ -1230,7 +1230,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(dir->i_sb)->mdsc;
                struct ceph_mds_request *req;
-               int op, mask, err;
+               int op, err;
+               u32 mask;
 
                if (flags & LOOKUP_RCU)
                        return -ECHILD;
@@ -1245,7 +1246,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
                        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
                        if (ceph_security_xattr_wanted(dir))
                                mask |= CEPH_CAP_XATTR_SHARED;
-                       req->r_args.getattr.mask = mask;
+                       req->r_args.getattr.mask = cpu_to_le32(mask);
 
                        err = ceph_mdsc_do_request(mdsc, NULL, req);
                        switch (err) {
index 398e5328b30952410cc503e7e4d20918d3bdc671..5e659d054b40ae6faac23af26c5321c5af6ff69b 100644 (file)
@@ -305,7 +305,8 @@ static int frag_tree_split_cmp(const void *l, const void *r)
 {
        struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
        struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
-       return ceph_frag_compare(ls->frag, rs->frag);
+       return ceph_frag_compare(le32_to_cpu(ls->frag),
+                                le32_to_cpu(rs->frag));
 }
 
 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
index ec6b35e9f966bfae46e69dfcaffee2769d699bfd..c9d2e553a6c487f01bd11ed4c7a2c15ddfcd058d 100644 (file)
@@ -288,12 +288,13 @@ static int parse_reply_info_extra(void **p, void *end,
                                  struct ceph_mds_reply_info_parsed *info,
                                  u64 features)
 {
-       if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
+       u32 op = le32_to_cpu(info->head->op);
+
+       if (op == CEPH_MDS_OP_GETFILELOCK)
                return parse_reply_info_filelock(p, end, info, features);
-       else if (info->head->op == CEPH_MDS_OP_READDIR ||
-                info->head->op == CEPH_MDS_OP_LSSNAP)
+       else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
                return parse_reply_info_dir(p, end, info, features);
-       else if (info->head->op == CEPH_MDS_OP_CREATE)
+       else if (op == CEPH_MDS_OP_CREATE)
                return parse_reply_info_create(p, end, info, features);
        else
                return -EIO;
index ddcddfeaa03bd942e83738d34c4abaed06fa2709..3af2da5e64ce77fa8ae4b3f294c82882d350120f 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -990,7 +990,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
 
-#ifdef CONFIG_FS_IOMAP
 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 {
        return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
@@ -1428,4 +1427,3 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
 #endif /* CONFIG_FS_DAX_PMD */
-#endif /* CONFIG_FS_IOMAP */
index 36bea5adcabaa735056b20290ce23cdc7dfc0851..c634874e12d969fbd0b00ad8da745553168876e6 100644 (file)
@@ -1,6 +1,5 @@
 config EXT2_FS
        tristate "Second extended fs support"
-       select FS_IOMAP if FS_DAX
        help
          Ext2 is a standard Linux file system for hard disks.
 
index 7b90691e98c4f5fdd3b2d162285b00e71ac51b63..e38039fd96ff59ab59ce17407abcf26de4c5a950 100644 (file)
@@ -37,7 +37,6 @@ config EXT4_FS
        select CRC16
        select CRYPTO
        select CRYPTO_CRC32C
-       select FS_IOMAP if FS_DAX
        help
          This is the next generation of the ext3 filesystem.
 
index 70ea57c7b6bb2b48ecf47d8cc9bd211cfc430a61..4e06a27ed7f80d4d0472e3d6c9e9fe3f0f1d7da5 100644 (file)
@@ -2025,7 +2025,6 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
-               clear_bit(FR_PENDING, &req->flags);
                clear_bit(FR_SENT, &req->flags);
                list_del_init(&req->list);
                request_end(fc, req);
@@ -2103,6 +2102,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
                spin_lock(&fiq->waitq.lock);
                fiq->connected = 0;
                list_splice_init(&fiq->pending, &to_end2);
+               list_for_each_entry(req, &to_end2, list)
+                       clear_bit(FR_PENDING, &req->flags);
                while (forget_pending(fiq))
                        kfree(dequeue_forget(fiq, 1, NULL));
                wake_up_all_locked(&fiq->waitq);
index 1f7c732f32b07f1bab9e4961f16cb52ee9f09f70..811fd8929a18c1e330316202fd40ac58857ec3c7 100644 (file)
@@ -68,7 +68,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
        if (sec || nsec) {
                struct timespec64 ts = {
                        sec,
-                       max_t(u32, nsec, NSEC_PER_SEC - 1)
+                       min_t(u32, nsec, NSEC_PER_SEC - 1)
                };
 
                return get_jiffies_64() + timespec64_to_jiffies(&ts);
index 9ad48d9202a99a6c63ccb07b9b31476f5ee524af..023bb0b03352f4440d893b4e713d7d9ed937a771 100644 (file)
@@ -154,29 +154,38 @@ out_err:
 static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
                            struct dentry **ret)
 {
-       const char *s = d->name.name;
+       /* Counting down from the end, since the prefix can change */
+       size_t rem = d->name.len - 1;
        struct dentry *dentry = NULL;
        int err;
 
-       if (*s != '/')
+       if (d->name.name[0] != '/')
                return ovl_lookup_single(base, d, d->name.name, d->name.len,
                                         0, "", ret);
 
-       while (*s++ == '/' && !IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
+       while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
+               const char *s = d->name.name + d->name.len - rem;
                const char *next = strchrnul(s, '/');
-               size_t slen = strlen(s);
+               size_t thislen = next - s;
+               bool end = !next[0];
 
-               if (WARN_ON(slen > d->name.len) ||
-                   WARN_ON(strcmp(d->name.name + d->name.len - slen, s)))
+               /* Verify we did not go off the rails */
+               if (WARN_ON(s[-1] != '/'))
                        return -EIO;
 
-               err = ovl_lookup_single(base, d, s, next - s,
-                                       d->name.len - slen, next, &base);
+               err = ovl_lookup_single(base, d, s, thislen,
+                                       d->name.len - rem, next, &base);
                dput(dentry);
                if (err)
                        return err;
                dentry = base;
-               s = next;
+               if (end)
+                       break;
+
+               rem -= thislen + 1;
+
+               if (WARN_ON(rem >= d->name.len))
+                       return -EIO;
        }
        *ret = dentry;
        return 0;
index 8e7e61b28f31c037961c081d09a7be5f818013ef..87c9a9aacda3601e2686e239243f447728137943 100644 (file)
@@ -3179,6 +3179,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
             iter.tgid += 1, iter = next_tgid(ns, iter)) {
                char name[PROC_NUMBUF];
                int len;
+
+               cond_resched();
                if (!has_pid_permissions(ns, iter.task, 2))
                        continue;
 
index d0f8a38dfafacd8f3d524d1ff69ae8f621eea278..0186fe6d39f3b4d2e77497d4d34a7691204ae9fa 100644 (file)
@@ -74,6 +74,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
+#include <linux/major.h>
 #include "internal.h"
 
 static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode)
 static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct super_block *sb = dentry->d_sb;
-       u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+       u64 id = 0;
+
+       /* When calling huge_encode_dev(),
+        * use sb->s_bdev->bd_dev when,
+        *   - CONFIG_ROMFS_ON_BLOCK defined
+        * use sb->s_dev when,
+        *   - CONFIG_ROMFS_ON_BLOCK undefined and
+        *   - CONFIG_ROMFS_ON_MTD defined
+        * leave id as 0 when,
+        *   - CONFIG_ROMFS_ON_BLOCK undefined and
+        *   - CONFIG_ROMFS_ON_MTD undefined
+        */
+       if (sb->s_bdev)
+               id = huge_encode_dev(sb->s_bdev->bd_dev);
+       else if (sb->s_dev)
+               id = huge_encode_dev(sb->s_dev);
 
        buf->f_type = ROMFS_MAGIC;
        buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_flags |= MS_RDONLY | MS_NOATIME;
        sb->s_op = &romfs_super_ops;
 
+#ifdef CONFIG_ROMFS_ON_MTD
+       /* Use same dev ID from the underlying mtdblock device */
+       if (sb->s_mtd)
+               sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
        /* read the image superblock and check it */
        rsb = kmalloc(512, GFP_KERNEL);
        if (!rsb)
index 0a908ae7af1382d46efa5c8073300db4680ce7e2..b0d0623c83ed88eae3afa31f079dfd2486767d07 100644 (file)
@@ -53,7 +53,7 @@ config UBIFS_ATIME_SUPPORT
 
 config UBIFS_FS_ENCRYPTION
        bool "UBIFS Encryption"
-       depends on UBIFS_FS
+       depends on UBIFS_FS && BLOCK
        select FS_ENCRYPTION
        default n
        help
index 1c5331ac9614040016019aca78c1338133ea4e11..528369f3e472087fe39e0b9e716cb4a70016cee7 100644 (file)
@@ -390,16 +390,6 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
        dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
                dentry, mode, dir->i_ino);
 
-       if (ubifs_crypt_is_encrypted(dir)) {
-               err = fscrypt_get_encryption_info(dir);
-               if (err)
-                       return err;
-
-               if (!fscrypt_has_encryption_key(dir)) {
-                       return -EPERM;
-               }
-       }
-
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
        if (err)
                return err;
@@ -741,17 +731,9 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
        ubifs_assert(inode_is_locked(dir));
        ubifs_assert(inode_is_locked(inode));
 
-       if (ubifs_crypt_is_encrypted(dir)) {
-               if (!fscrypt_has_permitted_context(dir, inode))
-                       return -EPERM;
-
-               err = fscrypt_get_encryption_info(inode);
-               if (err)
-                       return err;
-
-               if (!fscrypt_has_encryption_key(inode))
-                       return -EPERM;
-       }
+       if (ubifs_crypt_is_encrypted(dir) &&
+           !fscrypt_has_permitted_context(dir, inode))
+               return -EPERM;
 
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
        if (err)
@@ -1000,17 +982,6 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        if (err)
                return err;
 
-       if (ubifs_crypt_is_encrypted(dir)) {
-               err = fscrypt_get_encryption_info(dir);
-               if (err)
-                       goto out_budg;
-
-               if (!fscrypt_has_encryption_key(dir)) {
-                       err = -EPERM;
-                       goto out_budg;
-               }
-       }
-
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
        if (err)
                goto out_budg;
@@ -1096,17 +1067,6 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
                return err;
        }
 
-       if (ubifs_crypt_is_encrypted(dir)) {
-               err = fscrypt_get_encryption_info(dir);
-               if (err)
-                       goto out_budg;
-
-               if (!fscrypt_has_encryption_key(dir)) {
-                       err = -EPERM;
-                       goto out_budg;
-               }
-       }
-
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
        if (err)
                goto out_budg;
@@ -1231,18 +1191,6 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
                        goto out_inode;
                }
 
-               err = fscrypt_get_encryption_info(inode);
-               if (err) {
-                       kfree(sd);
-                       goto out_inode;
-               }
-
-               if (!fscrypt_has_encryption_key(inode)) {
-                       kfree(sd);
-                       err = -EPERM;
-                       goto out_inode;
-               }
-
                ostr.name = sd->encrypted_path;
                ostr.len = disk_link.len;
 
index 78d713644df3c00cf5f3be6ae51b742a47345417..da519ba205f614fb7a5ea9d3eb72b4c58b4504e9 100644 (file)
@@ -217,6 +217,9 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case FS_IOC32_SETFLAGS:
                cmd = FS_IOC_SETFLAGS;
                break;
+       case FS_IOC_SET_ENCRYPTION_POLICY:
+       case FS_IOC_GET_ENCRYPTION_POLICY:
+               break;
        default:
                return -ENOIOCTLCMD;
        }
index a459211a1c21059ff8739566d873b2093285b495..294519b98874058ef7ac7e089361733adb9f26de 100644 (file)
@@ -744,6 +744,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
 
        } else {
                data->compr_size = 0;
+               out_len = compr_len;
        }
 
        dlen = UBIFS_DATA_NODE_SZ + out_len;
@@ -1319,6 +1320,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
        dn->compr_type = cpu_to_le16(compr_type);
        dn->size = cpu_to_le32(*new_len);
        *new_len = UBIFS_DATA_NODE_SZ + out_len;
+       err = 0;
 out:
        kfree(buf);
        return err;
index 74ae2de949df68b5918a3656840eb5ab22c8cda1..709aa098dd46e48e34a9627cd0af85d136737fab 100644 (file)
 #include <linux/slab.h>
 #include "ubifs.h"
 
+static int try_read_node(const struct ubifs_info *c, void *buf, int type,
+                        int len, int lnum, int offs);
+static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
+                             struct ubifs_zbranch *zbr, void *node);
+
 /*
  * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
  * @NAME_LESS: name corresponding to the first argument is less than second
@@ -402,7 +407,19 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                return 0;
        }
 
-       err = ubifs_tnc_read_node(c, zbr, node);
+       if (c->replaying) {
+               err = fallible_read_node(c, &zbr->key, zbr, node);
+               /*
+                * When the node was not found, return -ENOENT, 0 otherwise.
+                * Negative return codes stay as-is.
+                */
+               if (err == 0)
+                       err = -ENOENT;
+               else if (err == 1)
+                       err = 0;
+       } else {
+               err = ubifs_tnc_read_node(c, zbr, node);
+       }
        if (err)
                return err;
 
@@ -2857,7 +2874,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
        if (fname_len(nm) > 0) {
                if (err) {
                        /* Handle collisions */
-                       err = resolve_collision(c, key, &znode, &n, nm);
+                       if (c->replaying)
+                               err = fallible_resolve_collision(c, key, &znode, &n,
+                                                        nm, 0);
+                       else
+                               err = resolve_collision(c, key, &znode, &n, nm);
                        dbg_tnc("rc returned %d, znode %p, n %d",
                                err, znode, n);
                        if (unlikely(err < 0))
index d96e2f30084bcfab552ffe3005af090abfe319c9..43953e03c35682723c6658dfe9b8cceed9de22ef 100644 (file)
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue {
        struct uffd_msg msg;
        wait_queue_t wq;
        struct userfaultfd_ctx *ctx;
+       bool waken;
 };
 
 struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
        if (len && (start > uwq->msg.arg.pagefault.address ||
                    start + len <= uwq->msg.arg.pagefault.address))
                goto out;
+       WRITE_ONCE(uwq->waken, true);
+       /*
+        * The implicit smp_mb__before_spinlock in try_to_wake_up()
+        * renders uwq->waken visible to other CPUs before the task is
+        * waken.
+        */
        ret = wake_up_state(wq->private, mode);
        if (ret)
                /*
@@ -264,6 +271,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
        struct userfaultfd_wait_queue uwq;
        int ret;
        bool must_wait, return_to_userland;
+       long blocking_state;
 
        BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
@@ -334,10 +342,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
        uwq.wq.private = current;
        uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
        uwq.ctx = ctx;
+       uwq.waken = false;
 
        return_to_userland =
                (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
                (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+       blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+                        TASK_KILLABLE;
 
        spin_lock(&ctx->fault_pending_wqh.lock);
        /*
@@ -350,8 +361,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
         * following the spin_unlock to happen before the list_add in
         * __add_wait_queue.
         */
-       set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
-                         TASK_KILLABLE);
+       set_current_state(blocking_state);
        spin_unlock(&ctx->fault_pending_wqh.lock);
 
        must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
@@ -364,6 +374,29 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
                wake_up_poll(&ctx->fd_wqh, POLLIN);
                schedule();
                ret |= VM_FAULT_MAJOR;
+
+               /*
+                * False wakeups can orginate even from rwsem before
+                * up_read() however userfaults will wait either for a
+                * targeted wakeup on the specific uwq waitqueue from
+                * wake_userfault() or for signals or for uffd
+                * release.
+                */
+               while (!READ_ONCE(uwq.waken)) {
+                       /*
+                        * This needs the full smp_store_mb()
+                        * guarantee as the state write must be
+                        * visible to other CPUs before reading
+                        * uwq.waken from other CPUs.
+                        */
+                       set_current_state(blocking_state);
+                       if (READ_ONCE(uwq.waken) ||
+                           READ_ONCE(ctx->released) ||
+                           (return_to_userland ? signal_pending(current) :
+                            fatal_signal_pending(current)))
+                               break;
+                       schedule();
+               }
        }
 
        __set_current_state(TASK_RUNNING);
index d346d42c54d1590250040f0b36c05367287e7bf5..33db69be4832c7bb702f34589d715956d7a871fe 100644 (file)
@@ -39,6 +39,7 @@
 #include "xfs_rmap_btree.h"
 #include "xfs_btree.h"
 #include "xfs_refcount_btree.h"
+#include "xfs_ialloc_btree.h"
 
 /*
  * Per-AG Block Reservations
@@ -200,22 +201,30 @@ __xfs_ag_resv_init(
        struct xfs_mount                *mp = pag->pag_mount;
        struct xfs_ag_resv              *resv;
        int                             error;
+       xfs_extlen_t                    reserved;
 
-       resv = xfs_perag_resv(pag, type);
        if (used > ask)
                ask = used;
-       resv->ar_asked = ask;
-       resv->ar_reserved = resv->ar_orig_reserved = ask - used;
-       mp->m_ag_max_usable -= ask;
+       reserved = ask - used;
 
-       trace_xfs_ag_resv_init(pag, type, ask);
-
-       error = xfs_mod_fdblocks(mp, -(int64_t)resv->ar_reserved, true);
-       if (error)
+       error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+       if (error) {
                trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
                                error, _RET_IP_);
+               xfs_warn(mp,
+"Per-AG reservation for AG %u failed.  Filesystem may run out of space.",
+                               pag->pag_agno);
+               return error;
+       }
 
-       return error;
+       mp->m_ag_max_usable -= ask;
+
+       resv = xfs_perag_resv(pag, type);
+       resv->ar_asked = ask;
+       resv->ar_reserved = resv->ar_orig_reserved = reserved;
+
+       trace_xfs_ag_resv_init(pag, type, ask);
+       return 0;
 }
 
 /* Create a per-AG block reservation. */
@@ -223,6 +232,8 @@ int
 xfs_ag_resv_init(
        struct xfs_perag                *pag)
 {
+       struct xfs_mount                *mp = pag->pag_mount;
+       xfs_agnumber_t                  agno = pag->pag_agno;
        xfs_extlen_t                    ask;
        xfs_extlen_t                    used;
        int                             error = 0;
@@ -231,23 +242,45 @@ xfs_ag_resv_init(
        if (pag->pag_meta_resv.ar_asked == 0) {
                ask = used = 0;
 
-               error = xfs_refcountbt_calc_reserves(pag->pag_mount,
-                               pag->pag_agno, &ask, &used);
+               error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
                if (error)
                        goto out;
 
-               error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
-                               ask, used);
+               error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
                if (error)
                        goto out;
+
+               error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+                               ask, used);
+               if (error) {
+                       /*
+                        * Because we didn't have per-AG reservations when the
+                        * finobt feature was added we might not be able to
+                        * reserve all needed blocks.  Warn and fall back to the
+                        * old and potentially buggy code in that case, but
+                        * ensure we do have the reservation for the refcountbt.
+                        */
+                       ask = used = 0;
+
+                       mp->m_inotbt_nores = true;
+
+                       error = xfs_refcountbt_calc_reserves(mp, agno, &ask,
+                                       &used);
+                       if (error)
+                               goto out;
+
+                       error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+                                       ask, used);
+                       if (error)
+                               goto out;
+               }
        }
 
        /* Create the AGFL metadata reservation */
        if (pag->pag_agfl_resv.ar_asked == 0) {
                ask = used = 0;
 
-               error = xfs_rmapbt_calc_reserves(pag->pag_mount, pag->pag_agno,
-                               &ask, &used);
+               error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used);
                if (error)
                        goto out;
 
@@ -256,9 +289,16 @@ xfs_ag_resv_init(
                        goto out;
        }
 
+#ifdef DEBUG
+       /* need to read in the AGF for the ASSERT below to work */
+       error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0);
+       if (error)
+               return error;
+
        ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
               xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <=
               pag->pagf_freeblks + pag->pagf_flcount);
+#endif
 out:
        return error;
 }
index af1ecb19121e9e8569c0ee907652405575d882c8..6622d46ddec3890c0356dab26fbff448436584d7 100644 (file)
@@ -131,9 +131,6 @@ xfs_attr_get(
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;
 
-       if (!xfs_inode_hasattr(ip))
-               return -ENOATTR;
-
        error = xfs_attr_args_init(&args, ip, name, flags);
        if (error)
                return error;
@@ -392,9 +389,6 @@ xfs_attr_remove(
        if (XFS_FORCED_SHUTDOWN(dp->i_mount))
                return -EIO;
 
-       if (!xfs_inode_hasattr(dp))
-               return -ENOATTR;
-
        error = xfs_attr_args_init(&args, dp, name, flags);
        if (error)
                return error;
index 44773c9eb957dd8f6c5904ef9d41c3de880d14be..bfc00de5c6f17a75c7addae26e709ee6cbfde3a9 100644 (file)
@@ -3629,7 +3629,7 @@ xfs_bmap_btalloc(
                align = xfs_get_cowextsz_hint(ap->ip);
        else if (xfs_alloc_is_userdata(ap->datatype))
                align = xfs_get_extsz_hint(ap->ip);
-       if (unlikely(align)) {
+       if (align) {
                error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
                                                align, 0, ap->eof, 0, ap->conv,
                                                &ap->offset, &ap->length);
@@ -3701,7 +3701,7 @@ xfs_bmap_btalloc(
                args.minlen = ap->minlen;
        }
        /* apply extent size hints if obtained earlier */
-       if (unlikely(align)) {
+       if (align) {
                args.prod = align;
                if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
                        args.mod = (xfs_extlen_t)(args.prod - args.mod);
@@ -4514,8 +4514,6 @@ xfs_bmapi_write(
        int                     n;              /* current extent index */
        xfs_fileoff_t           obno;           /* old block number (offset) */
        int                     whichfork;      /* data or attr fork */
-       char                    inhole;         /* current location is hole in file */
-       char                    wasdelay;       /* old extent was delayed */
 
 #ifdef DEBUG
        xfs_fileoff_t           orig_bno;       /* original block number value */
@@ -4603,22 +4601,44 @@ xfs_bmapi_write(
        bma.firstblock = firstblock;
 
        while (bno < end && n < *nmap) {
-               inhole = eof || bma.got.br_startoff > bno;
-               wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
+               bool                    need_alloc = false, wasdelay = false;
 
-               /*
-                * Make sure we only reflink into a hole.
-                */
-               if (flags & XFS_BMAPI_REMAP)
-                       ASSERT(inhole);
-               if (flags & XFS_BMAPI_COWFORK)
-                       ASSERT(!inhole);
+               /* in hole or beyoned EOF? */
+               if (eof || bma.got.br_startoff > bno) {
+                       if (flags & XFS_BMAPI_DELALLOC) {
+                               /*
+                                * For the COW fork we can reasonably get a
+                                * request for converting an extent that races
+                                * with other threads already having converted
+                                * part of it, as there converting COW to
+                                * regular blocks is not protected using the
+                                * IOLOCK.
+                                */
+                               ASSERT(flags & XFS_BMAPI_COWFORK);
+                               if (!(flags & XFS_BMAPI_COWFORK)) {
+                                       error = -EIO;
+                                       goto error0;
+                               }
+
+                               if (eof || bno >= end)
+                                       break;
+                       } else {
+                               need_alloc = true;
+                       }
+               } else {
+                       /*
+                        * Make sure we only reflink into a hole.
+                        */
+                       ASSERT(!(flags & XFS_BMAPI_REMAP));
+                       if (isnullstartblock(bma.got.br_startblock))
+                               wasdelay = true;
+               }
 
                /*
                 * First, deal with the hole before the allocated space
                 * that we found, if any.
                 */
-               if (inhole || wasdelay) {
+               if (need_alloc || wasdelay) {
                        bma.eof = eof;
                        bma.conv = !!(flags & XFS_BMAPI_CONVERT);
                        bma.wasdel = wasdelay;
index cecd094404cc53d821567be873bafdbba98880fa..cdef87db5262bdc2f43b17f5a1e76fb45a107233 100644 (file)
@@ -110,6 +110,9 @@ struct xfs_extent_free_item
 /* Map something in the CoW fork. */
 #define XFS_BMAPI_COWFORK      0x200
 
+/* Only convert delalloc space, don't allocate entirely new extents */
+#define XFS_BMAPI_DELALLOC     0x400
+
 #define XFS_BMAPI_FLAGS \
        { XFS_BMAPI_ENTIRE,     "ENTIRE" }, \
        { XFS_BMAPI_METADATA,   "METADATA" }, \
@@ -120,7 +123,8 @@ struct xfs_extent_free_item
        { XFS_BMAPI_CONVERT,    "CONVERT" }, \
        { XFS_BMAPI_ZERO,       "ZERO" }, \
        { XFS_BMAPI_REMAP,      "REMAP" }, \
-       { XFS_BMAPI_COWFORK,    "COWFORK" }
+       { XFS_BMAPI_COWFORK,    "COWFORK" }, \
+       { XFS_BMAPI_DELALLOC,   "DELALLOC" }
 
 
 static inline int xfs_bmapi_aflag(int w)
index c58d72c220f58593cd05b90b3227f1ecb4f2d06f..2f389d366e93324c3f481d8c7b0773475b423d83 100644 (file)
 struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
 
 /*
- * @mode, if set, indicates that the type field needs to be set up.
- * This uses the transformation from file mode to DT_* as defined in linux/fs.h
- * for file type specification. This will be propagated into the directory
- * structure if appropriate for the given operation and filesystem config.
+ * Convert inode mode to directory entry filetype
  */
-const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = {
-       [0]                     = XFS_DIR3_FT_UNKNOWN,
-       [S_IFREG >> S_SHIFT]    = XFS_DIR3_FT_REG_FILE,
-       [S_IFDIR >> S_SHIFT]    = XFS_DIR3_FT_DIR,
-       [S_IFCHR >> S_SHIFT]    = XFS_DIR3_FT_CHRDEV,
-       [S_IFBLK >> S_SHIFT]    = XFS_DIR3_FT_BLKDEV,
-       [S_IFIFO >> S_SHIFT]    = XFS_DIR3_FT_FIFO,
-       [S_IFSOCK >> S_SHIFT]   = XFS_DIR3_FT_SOCK,
-       [S_IFLNK >> S_SHIFT]    = XFS_DIR3_FT_SYMLINK,
-};
+unsigned char xfs_mode_to_ftype(int mode)
+{
+       switch (mode & S_IFMT) {
+       case S_IFREG:
+               return XFS_DIR3_FT_REG_FILE;
+       case S_IFDIR:
+               return XFS_DIR3_FT_DIR;
+       case S_IFCHR:
+               return XFS_DIR3_FT_CHRDEV;
+       case S_IFBLK:
+               return XFS_DIR3_FT_BLKDEV;
+       case S_IFIFO:
+               return XFS_DIR3_FT_FIFO;
+       case S_IFSOCK:
+               return XFS_DIR3_FT_SOCK;
+       case S_IFLNK:
+               return XFS_DIR3_FT_SYMLINK;
+       default:
+               return XFS_DIR3_FT_UNKNOWN;
+       }
+}
 
 /*
  * ASCII case-insensitive (ie. A-Z) support for directories that was
@@ -631,7 +639,8 @@ xfs_dir2_isblock(
        if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
                return rval;
        rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
-       ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize);
+       if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
+               return -EFSCORRUPTED;
        *vp = rval;
        return 0;
 }
index 0197590fa7d7c0a3d97d68dccdcfb3bd0709964b..d6e6d9d16f6c30d90e88f742c077782bc678b86f 100644 (file)
@@ -18,6 +18,9 @@
 #ifndef __XFS_DIR2_H__
 #define __XFS_DIR2_H__
 
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+
 struct xfs_defer_ops;
 struct xfs_da_args;
 struct xfs_inode;
@@ -32,10 +35,9 @@ struct xfs_dir2_data_unused;
 extern struct xfs_name xfs_name_dotdot;
 
 /*
- * directory filetype conversion tables.
+ * Convert inode mode to directory entry filetype
  */
-#define S_SHIFT 12
-extern const unsigned char xfs_mode_to_ftype[];
+extern unsigned char xfs_mode_to_ftype(int mode);
 
 /*
  * directory operations vector for encode/decode routines
index 0fd086d03d4156cde3fc2851533d910cee046c1c..7c471881c9a67482bd6bf88df59bd6f2e63a57a7 100644 (file)
@@ -82,11 +82,12 @@ xfs_finobt_set_root(
 }
 
 STATIC int
-xfs_inobt_alloc_block(
+__xfs_inobt_alloc_block(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *start,
        union xfs_btree_ptr     *new,
-       int                     *stat)
+       int                     *stat,
+       enum xfs_ag_resv_type   resv)
 {
        xfs_alloc_arg_t         args;           /* block allocation args */
        int                     error;          /* error return value */
@@ -103,6 +104,7 @@ xfs_inobt_alloc_block(
        args.maxlen = 1;
        args.prod = 1;
        args.type = XFS_ALLOCTYPE_NEAR_BNO;
+       args.resv = resv;
 
        error = xfs_alloc_vextent(&args);
        if (error) {
@@ -122,6 +124,27 @@ xfs_inobt_alloc_block(
        return 0;
 }
 
+STATIC int
+xfs_inobt_alloc_block(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *start,
+       union xfs_btree_ptr     *new,
+       int                     *stat)
+{
+       return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
+}
+
+STATIC int
+xfs_finobt_alloc_block(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *start,
+       union xfs_btree_ptr     *new,
+       int                     *stat)
+{
+       return __xfs_inobt_alloc_block(cur, start, new, stat,
+                       XFS_AG_RESV_METADATA);
+}
+
 STATIC int
 xfs_inobt_free_block(
        struct xfs_btree_cur    *cur,
@@ -328,7 +351,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
 
        .dup_cursor             = xfs_inobt_dup_cursor,
        .set_root               = xfs_finobt_set_root,
-       .alloc_block            = xfs_inobt_alloc_block,
+       .alloc_block            = xfs_finobt_alloc_block,
        .free_block             = xfs_inobt_free_block,
        .get_minrecs            = xfs_inobt_get_minrecs,
        .get_maxrecs            = xfs_inobt_get_maxrecs,
@@ -480,3 +503,64 @@ xfs_inobt_rec_check_count(
        return 0;
 }
 #endif /* DEBUG */
+
+static xfs_extlen_t
+xfs_inobt_max_size(
+       struct xfs_mount        *mp)
+{
+       /* Bail out if we're uninitialized, which can happen in mkfs. */
+       if (mp->m_inobt_mxr[0] == 0)
+               return 0;
+
+       return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
+               (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
+                               XFS_INODES_PER_CHUNK);
+}
+
+static int
+xfs_inobt_count_blocks(
+       struct xfs_mount        *mp,
+       xfs_agnumber_t          agno,
+       xfs_btnum_t             btnum,
+       xfs_extlen_t            *tree_blocks)
+{
+       struct xfs_buf          *agbp;
+       struct xfs_btree_cur    *cur;
+       int                     error;
+
+       error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
+       if (error)
+               return error;
+
+       cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
+       error = xfs_btree_count_blocks(cur, tree_blocks);
+       xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+       xfs_buf_relse(agbp);
+
+       return error;
+}
+
+/*
+ * Figure out how many blocks to reserve and how many are used by this btree.
+ */
+int
+xfs_finobt_calc_reserves(
+       struct xfs_mount        *mp,
+       xfs_agnumber_t          agno,
+       xfs_extlen_t            *ask,
+       xfs_extlen_t            *used)
+{
+       xfs_extlen_t            tree_len = 0;
+       int                     error;
+
+       if (!xfs_sb_version_hasfinobt(&mp->m_sb))
+               return 0;
+
+       error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
+       if (error)
+               return error;
+
+       *ask += xfs_inobt_max_size(mp);
+       *used += tree_len;
+       return 0;
+}
index bd88453217ceca0466fbd07e409707ab7e9354b3..aa81e2e63f3f95da8434798e8b2bf6501491eac3 100644 (file)
@@ -72,4 +72,7 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
 #define xfs_inobt_rec_check_count(mp, rec)     0
 #endif /* DEBUG */
 
+int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno,
+               xfs_extlen_t *ask, xfs_extlen_t *used);
+
 #endif /* __XFS_IALLOC_BTREE_H__ */
index dd483e2767f7a38a4dfef5c063148430364b4aaf..d93f9d918cfc11ada2ded50d21e0eb9226381f93 100644 (file)
@@ -29,6 +29,7 @@
 #include "xfs_icache.h"
 #include "xfs_trans.h"
 #include "xfs_ialloc.h"
+#include "xfs_dir2.h"
 
 /*
  * Check that none of the inode's in the buffer have a next
@@ -386,6 +387,7 @@ xfs_dinode_verify(
        xfs_ino_t               ino,
        struct xfs_dinode       *dip)
 {
+       uint16_t                mode;
        uint16_t                flags;
        uint64_t                flags2;
 
@@ -396,8 +398,12 @@ xfs_dinode_verify(
        if (be64_to_cpu(dip->di_size) & (1ULL << 63))
                return false;
 
-       /* No zero-length symlinks. */
-       if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0)
+       mode = be16_to_cpu(dip->di_mode);
+       if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
+               return false;
+
+       /* No zero-length symlinks/dirs. */
+       if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
                return false;
 
        /* only version 3 or greater inodes are extensively verified here */
index 2580262e4ea00c3dc728b041dca125f4f7078373..584ec896a53374f81da05906d517fba717686504 100644 (file)
@@ -242,7 +242,7 @@ xfs_mount_validate_sb(
            sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG                    ||
            sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG                    ||
            sbp->sb_blocksize != (1 << sbp->sb_blocklog)                ||
-           sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG                   ||
+           sbp->sb_dirblklog + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
            sbp->sb_inodesize < XFS_DINODE_MIN_SIZE                     ||
            sbp->sb_inodesize > XFS_DINODE_MAX_SIZE                     ||
            sbp->sb_inodelog < XFS_DINODE_MIN_LOG                       ||
index b9abce524c33b1af8581e8d52d032685fb767747..c1417919ab0a67eb61e97b77746d71c90c392776 100644 (file)
@@ -528,7 +528,6 @@ xfs_getbmap(
        xfs_bmbt_irec_t         *map;           /* buffer for user's data */
        xfs_mount_t             *mp;            /* file system mount point */
        int                     nex;            /* # of user extents can do */
-       int                     nexleft;        /* # of user extents left */
        int                     subnex;         /* # of bmapi's can do */
        int                     nmap;           /* number of map entries */
        struct getbmapx         *out;           /* output structure */
@@ -686,10 +685,8 @@ xfs_getbmap(
                goto out_free_map;
        }
 
-       nexleft = nex;
-
        do {
-               nmap = (nexleft > subnex) ? subnex : nexleft;
+               nmap = (nex> subnex) ? subnex : nex;
                error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
                                       XFS_BB_TO_FSB(mp, bmv->bmv_length),
                                       map, &nmap, bmapi_flags);
@@ -697,8 +694,8 @@ xfs_getbmap(
                        goto out_free_map;
                ASSERT(nmap <= subnex);
 
-               for (i = 0; i < nmap && nexleft && bmv->bmv_length &&
-                               cur_ext < bmv->bmv_count; i++) {
+               for (i = 0; i < nmap && bmv->bmv_length &&
+                               cur_ext < bmv->bmv_count - 1; i++) {
                        out[cur_ext].bmv_oflags = 0;
                        if (map[i].br_state == XFS_EXT_UNWRITTEN)
                                out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
@@ -760,16 +757,27 @@ xfs_getbmap(
                                continue;
                        }
 
+                       /*
+                        * In order to report shared extents accurately,
+                        * we report each distinct shared/unshared part
+                        * of a single bmbt record using multiple bmap
+                        * extents.  To make that happen, we iterate the
+                        * same map array item multiple times, each
+                        * time trimming out the subextent that we just
+                        * reported.
+                        *
+                        * Because of this, we must check the out array
+                        * index (cur_ext) directly against bmv_count-1
+                        * to avoid overflows.
+                        */
                        if (inject_map.br_startblock != NULLFSBLOCK) {
                                map[i] = inject_map;
                                i--;
-                       } else
-                               nexleft--;
+                       }
                        bmv->bmv_entries++;
                        cur_ext++;
                }
-       } while (nmap && nexleft && bmv->bmv_length &&
-                cur_ext < bmv->bmv_count);
+       } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
 
  out_free_map:
        kmem_free(map);
index 7f0a01f7b592d20932649d1f8a705a836d86ca02..ac3b4db519df8ee5c03fc759295028d6316f474e 100644 (file)
@@ -422,6 +422,7 @@ retry:
 out_free_pages:
        for (i = 0; i < bp->b_page_count; i++)
                __free_page(bp->b_pages[i]);
+       bp->b_flags &= ~_XBF_PAGES;
        return error;
 }
 
index 7a30b8f11db7a26f8a82ded531e8a5170ea03ad5..9d06cc30e875e147a5560bad24e5a55aedb65cf0 100644 (file)
@@ -710,6 +710,10 @@ xfs_dq_get_next_id(
        /* Simple advance */
        next_id = *id + 1;
 
+       /* If we'd wrap past the max ID, stop */
+       if (next_id < *id)
+               return -ENOENT;
+
        /* If new ID is within the current chunk, advancing it sufficed */
        if (next_id % mp->m_quotainfo->qi_dqperchunk) {
                *id = next_id;
index b9557795eb74d4249b34ffd97db6faf0354f8be0..de32f0fe47c8e00d3163b808ebec9c42edefb70b 100644 (file)
@@ -1792,22 +1792,23 @@ xfs_inactive_ifree(
        int                     error;
 
        /*
-        * The ifree transaction might need to allocate blocks for record
-        * insertion to the finobt. We don't want to fail here at ENOSPC, so
-        * allow ifree to dip into the reserved block pool if necessary.
-        *
-        * Freeing large sets of inodes generally means freeing inode chunks,
-        * directory and file data blocks, so this should be relatively safe.
-        * Only under severe circumstances should it be possible to free enough
-        * inodes to exhaust the reserve block pool via finobt expansion while
-        * at the same time not creating free space in the filesystem.
+        * We try to use a per-AG reservation for any block needed by the finobt
+        * tree, but as the finobt feature predates the per-AG reservation
+        * support a degraded file system might not have enough space for the
+        * reservation at mount time.  In that case try to dip into the reserved
+        * pool and pray.
         *
         * Send a warning if the reservation does happen to fail, as the inode
         * now remains allocated and sits on the unlinked list until the fs is
         * repaired.
         */
-       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
-                       XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
+       if (unlikely(mp->m_inotbt_nores)) {
+               error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
+                               XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
+                               &tp);
+       } else {
+               error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
+       }
        if (error) {
                if (error == -ENOSPC) {
                        xfs_warn_ratelimited(mp,
index 0d147428971e0c21c88aa90e2628001cf582971c..1aa3abd67b36670f60b9daf7672c243307c77438 100644 (file)
@@ -681,7 +681,7 @@ xfs_iomap_write_allocate(
        xfs_trans_t     *tp;
        int             nimaps;
        int             error = 0;
-       int             flags = 0;
+       int             flags = XFS_BMAPI_DELALLOC;
        int             nres;
 
        if (whichfork == XFS_COW_FORK)
index 308bebb6dfd266f85ae225ef0c235128bb7b36ba..22c16155f1b42a1380f85935fa43706cd60e2b40 100644 (file)
@@ -97,13 +97,28 @@ xfs_init_security(
 
 static void
 xfs_dentry_to_name(
+       struct xfs_name *namep,
+       struct dentry   *dentry)
+{
+       namep->name = dentry->d_name.name;
+       namep->len = dentry->d_name.len;
+       namep->type = XFS_DIR3_FT_UNKNOWN;
+}
+
+static int
+xfs_dentry_mode_to_name(
        struct xfs_name *namep,
        struct dentry   *dentry,
        int             mode)
 {
        namep->name = dentry->d_name.name;
        namep->len = dentry->d_name.len;
-       namep->type = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT];
+       namep->type = xfs_mode_to_ftype(mode);
+
+       if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
+               return -EFSCORRUPTED;
+
+       return 0;
 }
 
 STATIC void
@@ -119,7 +134,7 @@ xfs_cleanup_inode(
         * xfs_init_security we must back out.
         * ENOSPC can hit here, among other things.
         */
-       xfs_dentry_to_name(&teardown, dentry, 0);
+       xfs_dentry_to_name(&teardown, dentry);
 
        xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
 }
@@ -154,8 +169,12 @@ xfs_generic_create(
        if (error)
                return error;
 
+       /* Verify mode is valid also for tmpfile case */
+       error = xfs_dentry_mode_to_name(&name, dentry, mode);
+       if (unlikely(error))
+               goto out_free_acl;
+
        if (!tmpfile) {
-               xfs_dentry_to_name(&name, dentry, mode);
                error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
        } else {
                error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
@@ -248,7 +267,7 @@ xfs_vn_lookup(
        if (dentry->d_name.len >= MAXNAMELEN)
                return ERR_PTR(-ENAMETOOLONG);
 
-       xfs_dentry_to_name(&name, dentry, 0);
+       xfs_dentry_to_name(&name, dentry);
        error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
        if (unlikely(error)) {
                if (unlikely(error != -ENOENT))
@@ -275,7 +294,7 @@ xfs_vn_ci_lookup(
        if (dentry->d_name.len >= MAXNAMELEN)
                return ERR_PTR(-ENAMETOOLONG);
 
-       xfs_dentry_to_name(&xname, dentry, 0);
+       xfs_dentry_to_name(&xname, dentry);
        error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
        if (unlikely(error)) {
                if (unlikely(error != -ENOENT))
@@ -310,7 +329,9 @@ xfs_vn_link(
        struct xfs_name name;
        int             error;
 
-       xfs_dentry_to_name(&name, dentry, inode->i_mode);
+       error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
+       if (unlikely(error))
+               return error;
 
        error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
        if (unlikely(error))
@@ -329,7 +350,7 @@ xfs_vn_unlink(
        struct xfs_name name;
        int             error;
 
-       xfs_dentry_to_name(&name, dentry, 0);
+       xfs_dentry_to_name(&name, dentry);
 
        error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
        if (error)
@@ -359,7 +380,9 @@ xfs_vn_symlink(
 
        mode = S_IFLNK |
                (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
-       xfs_dentry_to_name(&name, dentry, mode);
+       error = xfs_dentry_mode_to_name(&name, dentry, mode);
+       if (unlikely(error))
+               goto out;
 
        error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
        if (unlikely(error))
@@ -395,6 +418,7 @@ xfs_vn_rename(
 {
        struct inode    *new_inode = d_inode(ndentry);
        int             omode = 0;
+       int             error;
        struct xfs_name oname;
        struct xfs_name nname;
 
@@ -405,8 +429,14 @@ xfs_vn_rename(
        if (flags & RENAME_EXCHANGE)
                omode = d_inode(ndentry)->i_mode;
 
-       xfs_dentry_to_name(&oname, odentry, omode);
-       xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode);
+       error = xfs_dentry_mode_to_name(&oname, odentry, omode);
+       if (omode && unlikely(error))
+               return error;
+
+       error = xfs_dentry_mode_to_name(&nname, ndentry,
+                                       d_inode(odentry)->i_mode);
+       if (unlikely(error))
+               return error;
 
        return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
                          XFS_I(ndir), &nname,
index e467218c0098323d41e55caf4a660862d39463a8..7a989de224f4b77477e88e74e23d1be4272682be 100644 (file)
@@ -331,11 +331,11 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
 }
 
 #define ASSERT_ALWAYS(expr)    \
-       (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+       (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
 #ifdef DEBUG
 #define ASSERT(expr)   \
-       (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+       (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
 #ifndef STATIC
 # define STATIC noinline
@@ -346,7 +346,7 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
 #ifdef XFS_WARN
 
 #define ASSERT(expr)   \
-       (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
+       (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
 
 #ifndef STATIC
 # define STATIC static noinline
index 84f785218907434276d12e6926d8e85974a14477..7f351f706b7a2938292680801c9fa0bf51458c1c 100644 (file)
@@ -140,6 +140,7 @@ typedef struct xfs_mount {
        int                     m_fixedfsid[2]; /* unchanged for life of FS */
        uint                    m_dmevmask;     /* DMI events for this FS */
        __uint64_t              m_flags;        /* global mount flags */
+       bool                    m_inotbt_nores; /* no per-AG finobt resv. */
        int                     m_ialloc_inos;  /* inodes in inode allocation */
        int                     m_ialloc_blks;  /* blocks in inode allocation */
        int                     m_ialloc_min_blks;/* min blocks in sparse inode
index 45e50ea90769f15d80e3da57e9a6d1721d12a21a..b669b123287bb115e561a6ea1c0be8ae4db359db 100644 (file)
@@ -1177,7 +1177,8 @@ xfs_qm_dqusage_adjust(
         * the case in all other instances. It's OK that we do this because
         * quotacheck is done only at mount time.
         */
-       error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
+       error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
+                        &ip);
        if (error) {
                *res = BULKSTAT_RV_NOTHING;
                return error;
index d6d241f63b9f8e4c3a9310008f17b9439f7fa2ed..56814e8ae7ea91a4c9aeed59a2c6f9d2263fb2e4 100644 (file)
@@ -144,7 +144,7 @@ struct __drm_crtcs_state {
        struct drm_crtc *ptr;
        struct drm_crtc_state *state;
        struct drm_crtc_commit *commit;
-       s64 __user *out_fence_ptr;
+       s32 __user *out_fence_ptr;
 };
 
 struct __drm_connnectors_state {
index bf9991b20611a666d05445152d587446c5a834f5..137432386310aa8a9449d28f655a60b78bdd654a 100644 (file)
@@ -488,7 +488,7 @@ struct drm_mode_config {
        /**
         * @prop_out_fence_ptr: Sync File fd pointer representing the
         * outgoing fences for a CRTC. Userspace should provide a pointer to a
-        * value of type s64, and then cast that pointer to u64.
+        * value of type s32, and then cast that pointer to u64.
         */
        struct drm_property *prop_out_fence_ptr;
        /**
index b717ed9d2b755255cfafa76553b3823449f594c2..5c970ce6794977a73e8c69181b5fe715191927b5 100644 (file)
@@ -76,4 +76,5 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
 
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
 
+void kvm_timer_init_vhe(void);
 #endif
index 5d417eacc5198801a34201f3ed1363909135b719..57d60dc5b60098d0f82ead2ad65b5bfa9df9019a 100644 (file)
@@ -248,6 +248,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
+void *bpf_map_area_alloc(size_t size);
+void bpf_map_area_free(void *base);
 
 extern int sysctl_unprivileged_bpf_disabled;
 
index 20bfefbe75941627c25c30621d4d9b09606c720c..d936a0021839cca651e19ec43e71b8f21cb69cf0 100644 (file)
@@ -74,6 +74,8 @@ enum cpuhp_state {
        CPUHP_ZCOMP_PREPARE,
        CPUHP_TIMERS_DEAD,
        CPUHP_MIPS_SOC_PREPARE,
+       CPUHP_BP_PREPARE_DYN,
+       CPUHP_BP_PREPARE_DYN_END                = CPUHP_BP_PREPARE_DYN + 20,
        CPUHP_BRINGUP_CPU,
        CPUHP_AP_IDLE_DEAD,
        CPUHP_AP_OFFLINE,
index c2748accea71aa006268afebe65898fcc9f6d033..e973faba69dc5c90586aa97511d860178f31ff2e 100644 (file)
@@ -274,37 +274,67 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
                struct irq_chip *irqchip,
                int parent_irq);
 
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+                            struct irq_chip *irqchip,
+                            unsigned int first_irq,
+                            irq_flow_handler_t handler,
+                            unsigned int type,
+                            bool nested,
+                            struct lock_class_key *lock_key);
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Lockdep requires that each irqchip instance be created with a
+ * unique key so as to avoid unnecessary warnings. This upfront
+ * boilerplate static inlines provides such a key for each
+ * unique instance.
+ */
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+                                      struct irq_chip *irqchip,
+                                      unsigned int first_irq,
+                                      irq_flow_handler_t handler,
+                                      unsigned int type)
+{
+       static struct lock_class_key key;
+
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, false, &key);
+}
+
+static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
                          struct irq_chip *irqchip,
                          unsigned int first_irq,
                          irq_flow_handler_t handler,
-                         unsigned int type,
-                         bool nested,
-                         struct lock_class_key *lock_key);
+                         unsigned int type)
+{
+
+       static struct lock_class_key key;
+
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, true, &key);
+}
+#else
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+                                      struct irq_chip *irqchip,
+                                      unsigned int first_irq,
+                                      irq_flow_handler_t handler,
+                                      unsigned int type)
+{
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, false, NULL);
+}
 
-/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
                          struct irq_chip *irqchip,
                          unsigned int first_irq,
                          irq_flow_handler_t handler,
                          unsigned int type)
 {
-       return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq,
-                                    handler, type, true, NULL);
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, true, NULL);
 }
-
-#ifdef CONFIG_LOCKDEP
-#define gpiochip_irqchip_add(...)                              \
-(                                                              \
-       ({                                                      \
-               static struct lock_class_key _key;              \
-               _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
-       })                                                      \
-)
-#else
-#define gpiochip_irqchip_add(...)                              \
-       _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
-#endif
+#endif /* CONFIG_LOCKDEP */
 
 #endif /* CONFIG_GPIOLIB_IRQCHIP */
 
index 56aec84237ad5b7b55c3a43eb04a882f9eb24d5e..cb09238f6d32be355a9b7e2347e48746de7eae77 100644 (file)
@@ -514,8 +514,8 @@ extern enum system_states {
 #define TAINT_FLAGS_COUNT              16
 
 struct taint_flag {
-       char true;      /* character printed when tainted */
-       char false;     /* character printed when not tainted */
+       char c_true;    /* character printed when tainted */
+       char c_false;   /* character printed when not tainted */
        bool module;    /* also show as a per-module taint flag */
 };
 
index 01033fadea4766d5e6efddc78ca595d68c021464..c1784c0b4f3585e0d20ca8253813c31d47f11c04 100644 (file)
@@ -284,7 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
                                          unsigned long pnum);
-extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-                         enum zone_type target);
+extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+                         enum zone_type target, int *zone_shift);
 
 #endif /* __LINUX_MEMORY_HOTPLUG_H */
index 257173e0095ebdb22e1684fbd56ce05a8971ee1d..f541da68d1e7c50f1bb543c05e57a73001c86d28 100644 (file)
@@ -35,6 +35,8 @@
 #define PHY_ID_KSZ886X         0x00221430
 #define PHY_ID_KSZ8863         0x00221435
 
+#define PHY_ID_KSZ8795         0x00221550
+
 /* struct phy_device dev_flags definitions */
 #define MICREL_PHY_50MHZ_CLK   0x00000001
 #define MICREL_PHY_FXEN                0x00000002
index 36d9896fbc1eb0d12e60682f15e96648c13ebf98..f4aac87adcc3555014f6215d6599b604c70388e6 100644 (file)
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
  * @zonelist - The zonelist to search for a suitable zone
  * @highest_zoneidx - The zone index of the highest zone to return
  * @nodes - An optional nodemask to filter the zonelist with
- * @zone - The first suitable zone found is returned via this parameter
+ * @return - Zoneref pointer for the first suitable zone found (see below)
  *
  * This function returns the first zone at or below a given zone index that is
  * within the allowed nodemask. The zoneref returned is a cursor that can be
  * used to iterate the zonelist with next_zones_zonelist by advancing it by
  * one before calling.
+ *
+ * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
+ * never NULL). This may happen either genuinely, or due to concurrent nodemask
+ * update due to cpuset modification.
  */
 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
                                        enum zone_type highest_zoneidx,
index aacca824a6aef4fcc4d2480aa9eeefd2fe82d6f9..0a3fadc32693a9cf869693f4c406eee5d168e36b 100644 (file)
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 extern unsigned long watchdog_enabled;
 extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
 #ifdef CONFIG_SMP
 extern int sysctl_softlockup_all_cpu_backtrace;
 extern int sysctl_hardlockup_all_cpu_backtrace;
index 5c9d2529685fe21540e542e1233abf938692d79d..43474f39ef6523c5b59eee8d4adbe93ed3d62451 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/mod_devicetable.h>
-#include <linux/phy_led_triggers.h>
 
 #include <linux/atomic.h>
 
index a2daea0a37d2ae14ed4c9d965bff2400e5528fd2..b37b05bfd1a6dd8af03b6295febf4e14915e6941 100644 (file)
@@ -18,11 +18,11 @@ struct phy_device;
 #ifdef CONFIG_LED_TRIGGER_PHY
 
 #include <linux/leds.h>
+#include <linux/phy.h>
 
 #define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE      10
-#define PHY_MII_BUS_ID_SIZE    (20 - 3)
 
-#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \
+#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
                                       FIELD_SIZEOF(struct mdio_device, addr)+\
                                       PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
 
index 321f9ed552a995f396696b71343f66cbd2e01d9e..01f71e1d2e941e359fc5fdd07f0645813ef8f845 100644 (file)
@@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
 #error "Unknown RCU implementation specified to kernel configuration"
 #endif
 
+#define RCU_SCHEDULER_INACTIVE 0
+#define RCU_SCHEDULER_INIT     1
+#define RCU_SCHEDULER_RUNNING  2
+
 /*
  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
  * initialization and destruction of rcu_head on the stack. rcu_head structures
index 0c729c3c85499a1e51f79ac7252d60d548787fb4..d9718378a8bee0b327d08c2e80a6fd3b5490b967 100644 (file)
@@ -194,8 +194,6 @@ struct platform_freeze_ops {
 };
 
 #ifdef CONFIG_SUSPEND
-extern suspend_state_t mem_sleep_default;
-
 /**
  * suspend_set_ops - set platform dependent suspend operations
  * @ops: The new suspend operations to set.
index 56436472ccc774e43a372d3e92fb2da6a4957792..5209b5ed2a6476dfa5fe6b779f433a795f084ed7 100644 (file)
@@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 
 static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
                                          struct virtio_net_hdr *hdr,
-                                         bool little_endian)
+                                         bool little_endian,
+                                         bool has_data_valid)
 {
        memset(hdr, 0, sizeof(*hdr));   /* no info leak */
 
@@ -91,6 +92,9 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
                                skb_checksum_start_offset(skb));
                hdr->csum_offset = __cpu_to_virtio16(little_endian,
                                skb->csum_offset);
+       } else if (has_data_valid &&
+                  skb->ip_summed == CHECKSUM_UNNECESSARY) {
+               hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
        } /* else everything is zero */
 
        return 0;
index 487e5739166415625465fa13e0c748dcf1b894ed..7afe991e900e25838c3e66f2ff185a5226f790ff 100644 (file)
@@ -871,7 +871,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
  *     upper-layer output functions
  */
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-            struct ipv6_txoptions *opt, int tclass);
+            __u32 mark, struct ipv6_txoptions *opt, int tclass);
 
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
 
index d4c1c75b886244f76f9539c0709bb72be7974578..73dd8764746069d48cd9ed6863f54c53864bb47a 100644 (file)
@@ -44,6 +44,8 @@ struct lwtunnel_encap_ops {
        int (*get_encap_size)(struct lwtunnel_state *lwtstate);
        int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
        int (*xmit)(struct sk_buff *skb);
+
+       struct module *owner;
 };
 
 #ifdef CONFIG_LWTUNNEL
@@ -105,6 +107,8 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
                           unsigned int num);
 int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
                           unsigned int num);
+int lwtunnel_valid_encap_type(u16 encap_type);
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len);
 int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
                         struct nlattr *encap,
                         unsigned int family, const void *cfg,
@@ -168,6 +172,15 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
        return -EOPNOTSUPP;
 }
 
+static inline int lwtunnel_valid_encap_type(u16 encap_type)
+{
+       return -EOPNOTSUPP;
+}
+static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
                                       struct nlattr *encap,
                                       unsigned int family, const void *cfg,
index 924325c46aab2fafc10b3b92bccafd3e00c60c71..7dfdb517f0be826018cf649bb2e410e44d8f9c36 100644 (file)
@@ -207,9 +207,9 @@ struct nft_set_iter {
        unsigned int    skip;
        int             err;
        int             (*fn)(const struct nft_ctx *ctx,
-                             const struct nft_set *set,
+                             struct nft_set *set,
                              const struct nft_set_iter *iter,
-                             const struct nft_set_elem *elem);
+                             struct nft_set_elem *elem);
 };
 
 /**
@@ -301,7 +301,7 @@ struct nft_set_ops {
        void                            (*remove)(const struct nft_set *set,
                                                  const struct nft_set_elem *elem);
        void                            (*walk)(const struct nft_ctx *ctx,
-                                               const struct nft_set *set,
+                                               struct nft_set *set,
                                                struct nft_set_iter *iter);
 
        unsigned int                    (*privsize)(const struct nlattr * const nla[]);
index cbedda077db2ca4bf4aef6e9939b53ee96079920..5ceb2205e4e3ed93461ed4a3956b227f99ac9494 100644 (file)
@@ -9,6 +9,12 @@ struct nft_fib {
 
 extern const struct nla_policy nft_fib_policy[];
 
+static inline bool
+nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
+{
+       return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
+}
+
 int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr);
 int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                 const struct nlattr * const tb[]);
index 958a24d8fae794547c486b5b025f3815c96f82e7..b567e4452a4733de98b720e2c0d9060f21cc92e2 100644 (file)
@@ -352,6 +352,20 @@ static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
        }
 }
 
+static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
+{
+       if (mtu >= 4096)
+               return IB_MTU_4096;
+       else if (mtu >= 2048)
+               return IB_MTU_2048;
+       else if (mtu >= 1024)
+               return IB_MTU_1024;
+       else if (mtu >= 512)
+               return IB_MTU_512;
+       else
+               return IB_MTU_256;
+}
+
 enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
index 96dd0b3f70d75ba5362eccecdd8c8371627f22a7..da5033dd8cbcab5bff1557452b7a55e888d46c30 100644 (file)
@@ -809,11 +809,11 @@ static inline void fc_set_wwnn(struct fc_lport *lport, u64 wwnn)
 /**
  * fc_set_wwpn() - Set the World Wide Port Name of a local port
  * @lport: The local port whose WWPN is to be set
- * @wwnn:  The new WWPN
+ * @wwpn:  The new WWPN
  */
-static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwnn)
+static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwpn)
 {
-       lport->wwpn = wwnn;
+       lport->wwpn = wwpn;
 }
 
 /**
index 3cbc327801d6dc625f3f7786730fb07d065ac760..c451eec42a83101a6eea0219788165600d128e49 100644 (file)
@@ -1665,14 +1665,15 @@ static inline void cec_msg_report_current_latency(struct cec_msg *msg,
                                                  __u8 audio_out_compensated,
                                                  __u8 audio_out_delay)
 {
-       msg->len = 7;
+       msg->len = 6;
        msg->msg[0] |= 0xf; /* broadcast */
        msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
        msg->msg[2] = phys_addr >> 8;
        msg->msg[3] = phys_addr & 0xff;
        msg->msg[4] = video_latency;
        msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
-       msg->msg[6] = audio_out_delay;
+       if (audio_out_compensated == 3)
+               msg->msg[msg->len++] = audio_out_delay;
 }
 
 static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
@@ -1686,7 +1687,10 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
        *video_latency = msg->msg[4];
        *low_latency_mode = (msg->msg[5] >> 2) & 1;
        *audio_out_compensated = msg->msg[5] & 3;
-       *audio_out_delay = msg->msg[6];
+       if (*audio_out_compensated == 3 && msg->len >= 7)
+               *audio_out_delay = msg->msg[6];
+       else
+               *audio_out_delay = 0;
 }
 
 static inline void cec_msg_request_current_latency(struct cec_msg *msg,
index 8be21e02387db67010fb26b50abb59201b9c0e5c..d0b5fa91ff5493fd4af4dbed049102a8c65debf3 100644 (file)
@@ -9,4 +9,6 @@
 #define NF_LOG_MACDECODE       0x20    /* Decode MAC header */
 #define NF_LOG_MASK            0x2f
 
+#define NF_LOG_PREFIXLEN       128
+
 #endif /* _NETFILTER_NF_LOG_H */
index 881d49e94569648d2f735e81b34fa567627f3019..e3f27e09eb2be460ebfea6a3b8f2d1ffb38d3010 100644 (file)
@@ -235,7 +235,7 @@ enum nft_rule_compat_flags {
 /**
  * enum nft_rule_compat_attributes - nf_tables rule compat attributes
  *
- * @NFTA_RULE_COMPAT_PROTO: numerice value of handled protocol (NLA_U32)
+ * @NFTA_RULE_COMPAT_PROTO: numeric value of handled protocol (NLA_U32)
  * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32)
  */
 enum nft_rule_compat_attributes {
@@ -499,7 +499,7 @@ enum nft_bitwise_attributes {
  * enum nft_byteorder_ops - nf_tables byteorder operators
  *
  * @NFT_BYTEORDER_NTOH: network to host operator
- * @NFT_BYTEORDER_HTON: host to network opertaor
+ * @NFT_BYTEORDER_HTON: host to network operator
  */
 enum nft_byteorder_ops {
        NFT_BYTEORDER_NTOH,
index 82bdf5626859989085f831ee3a3a70c1b26ff066..bb68cb1b04ed3893faccac5eed815f50521f3813 100644 (file)
@@ -16,3 +16,4 @@ header-y += nes-abi.h
 header-y += ocrdma-abi.h
 header-y += hns-abi.h
 header-y += vmw_pvrdma-abi.h
+header-y += qedr-abi.h
index 48a19bda071b8db5396991dcd97cd9e0f3872ecf..d24eee12128fc5cb7d1f5612e85a7fb9b194520f 100644 (file)
@@ -30,7 +30,7 @@
  * SOFTWARE.
  */
 #ifndef CXGB3_ABI_USER_H
-#define CXBG3_ABI_USER_H
+#define CXGB3_ABI_USER_H
 
 #include <linux/types.h>
 
index 229a5d5df9770fc66774bf5defea359873946d01..3d55d95dcf49e600fe8f99c92eb57e7cde043208 100644 (file)
@@ -11,7 +11,6 @@
  */
 #include <linux/bpf.h>
 #include <linux/err.h>
-#include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/filter.h>
@@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
        if (array_size >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-ENOMEM);
 
-
        /* allocate all map elements and zero-initialize them */
-       array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
-       if (!array) {
-               array = vzalloc(array_size);
-               if (!array)
-                       return ERR_PTR(-ENOMEM);
-       }
+       array = bpf_map_area_alloc(array_size);
+       if (!array)
+               return ERR_PTR(-ENOMEM);
 
        /* copy mandatory map attributes */
        array->map.map_type = attr->map_type;
@@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
        if (array_size >= U32_MAX - PAGE_SIZE ||
            elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
-               kvfree(array);
+               bpf_map_area_free(array);
                return ERR_PTR(-ENOMEM);
        }
 out:
@@ -262,7 +257,7 @@ static void array_map_free(struct bpf_map *map)
        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                bpf_array_free_percpu(array);
 
-       kvfree(array);
+       bpf_map_area_free(array);
 }
 
 static const struct bpf_map_ops array_ops = {
@@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf_map *map)
        /* make sure it's empty */
        for (i = 0; i < array->map.max_entries; i++)
                BUG_ON(array->ptrs[i] != NULL);
-       kvfree(array);
+
+       bpf_map_area_free(array);
 }
 
 static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
index 3f2bb58952d8dfa4a2e082f9e6f9d277e19f0577..a753bbe7df0a1747658ca75325e28880173613f8 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
-#include <linux/vmalloc.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
 
@@ -103,7 +102,7 @@ static void htab_free_elems(struct bpf_htab *htab)
                free_percpu(pptr);
        }
 free_elems:
-       vfree(htab->elems);
+       bpf_map_area_free(htab->elems);
 }
 
 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
@@ -125,7 +124,8 @@ static int prealloc_init(struct bpf_htab *htab)
 {
        int err = -ENOMEM, i;
 
-       htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
+       htab->elems = bpf_map_area_alloc(htab->elem_size *
+                                        htab->map.max_entries);
        if (!htab->elems)
                return -ENOMEM;
 
@@ -320,14 +320,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                goto free_htab;
 
        err = -ENOMEM;
-       htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
-                                     GFP_USER | __GFP_NOWARN);
-
-       if (!htab->buckets) {
-               htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
-               if (!htab->buckets)
-                       goto free_htab;
-       }
+       htab->buckets = bpf_map_area_alloc(htab->n_buckets *
+                                          sizeof(struct bucket));
+       if (!htab->buckets)
+               goto free_htab;
 
        for (i = 0; i < htab->n_buckets; i++) {
                INIT_HLIST_HEAD(&htab->buckets[i].head);
@@ -354,7 +350,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 free_extra_elems:
        free_percpu(htab->extra_elems);
 free_buckets:
-       kvfree(htab->buckets);
+       bpf_map_area_free(htab->buckets);
 free_htab:
        kfree(htab);
        return ERR_PTR(err);
@@ -1014,7 +1010,7 @@ static void htab_map_free(struct bpf_map *map)
                prealloc_destroy(htab);
 
        free_percpu(htab->extra_elems);
-       kvfree(htab->buckets);
+       bpf_map_area_free(htab->buckets);
        kfree(htab);
 }
 
index 732ae16d12b720e6be3c16b8922b7138e14d08dd..be8519148c255efb92704b5e3b0de102ac4c209c 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/bpf.h>
 #include <linux/jhash.h>
 #include <linux/filter.h>
-#include <linux/vmalloc.h>
 #include <linux/stacktrace.h>
 #include <linux/perf_event.h>
 #include "percpu_freelist.h"
@@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
        u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
        int err;
 
-       smap->elems = vzalloc(elem_size * smap->map.max_entries);
+       smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
        if (!smap->elems)
                return -ENOMEM;
 
@@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
        return 0;
 
 free_elems:
-       vfree(smap->elems);
+       bpf_map_area_free(smap->elems);
        return err;
 }
 
@@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
        if (cost >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-E2BIG);
 
-       smap = kzalloc(cost, GFP_USER | __GFP_NOWARN);
-       if (!smap) {
-               smap = vzalloc(cost);
-               if (!smap)
-                       return ERR_PTR(-ENOMEM);
-       }
+       smap = bpf_map_area_alloc(cost);
+       if (!smap)
+               return ERR_PTR(-ENOMEM);
 
        err = -E2BIG;
        cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
@@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 put_buffers:
        put_callchain_buffers();
 free_smap:
-       kvfree(smap);
+       bpf_map_area_free(smap);
        return ERR_PTR(err);
 }
 
@@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_map *map)
        /* wait for bpf programs to complete before freeing stack map */
        synchronize_rcu();
 
-       vfree(smap->elems);
+       bpf_map_area_free(smap->elems);
        pcpu_freelist_destroy(&smap->freelist);
-       kvfree(smap);
+       bpf_map_area_free(smap);
        put_callchain_buffers();
 }
 
index 05ad086ab71dc97d7de833cdf651cb0c28e94618..08a4d287226bc468935e6f828351f47bf26d55c3 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/bpf_trace.h>
 #include <linux/syscalls.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mmzone.h>
 #include <linux/anon_inodes.h>
 #include <linux/file.h>
 #include <linux/license.h>
@@ -50,6 +52,30 @@ void bpf_register_map_type(struct bpf_map_type_list *tl)
        list_add(&tl->list_node, &bpf_map_types);
 }
 
+void *bpf_map_area_alloc(size_t size)
+{
+       /* We definitely need __GFP_NORETRY, so OOM killer doesn't
+        * trigger under memory pressure as we really just want to
+        * fail instead.
+        */
+       const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
+       void *area;
+
+       if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+               area = kmalloc(size, GFP_USER | flags);
+               if (area != NULL)
+                       return area;
+       }
+
+       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
+                        PAGE_KERNEL);
+}
+
+void bpf_map_area_free(void *area)
+{
+       kvfree(area);
+}
+
 int bpf_map_precharge_memlock(u32 pages)
 {
        struct user_struct *user = get_current_user();
index f75c4d031eeb2152c75182466dbf5ec97e93c008..0a5f630f5c5430c231b2ba8ccb7d671bca09014e 100644 (file)
@@ -764,7 +764,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int prev_state, ret = 0;
-       bool hasdied = false;
 
        if (num_online_cpus() == 1)
                return -EBUSY;
@@ -809,7 +808,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
                cpuhp_kick_ap_work(cpu);
        }
 
-       hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
 out:
        cpu_hotplug_done();
        return ret;
@@ -1302,10 +1300,24 @@ static int cpuhp_cb_check(enum cpuhp_state state)
  */
 static int cpuhp_reserve_state(enum cpuhp_state state)
 {
-       enum cpuhp_state i;
+       enum cpuhp_state i, end;
+       struct cpuhp_step *step;
 
-       for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
-               if (!cpuhp_ap_states[i].name)
+       switch (state) {
+       case CPUHP_AP_ONLINE_DYN:
+               step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
+               end = CPUHP_AP_ONLINE_DYN_END;
+               break;
+       case CPUHP_BP_PREPARE_DYN:
+               step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
+               end = CPUHP_BP_PREPARE_DYN_END;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       for (i = state; i <= end; i++, step++) {
+               if (!step->name)
                        return i;
        }
        WARN(1, "No more dynamic states available for CPU hotplug\n");
@@ -1323,7 +1335,7 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
 
        mutex_lock(&cpuhp_state_mutex);
 
-       if (state == CPUHP_AP_ONLINE_DYN) {
+       if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
                ret = cpuhp_reserve_state(state);
                if (ret < 0)
                        goto out;
index 5088784c0cf9e97c166b7dc06afed9d1c7709d2e..38d4270925d4d13619d725052aa3f9844f23bc96 100644 (file)
@@ -1145,7 +1145,7 @@ static size_t module_flags_taint(struct module *mod, char *buf)
 
        for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
                if (taint_flags[i].module && test_bit(i, &mod->taints))
-                       buf[l++] = taint_flags[i].true;
+                       buf[l++] = taint_flags[i].c_true;
        }
 
        return l;
index c51edaa04fce389bfcf5a62e59fe7a76bf853c6f..08aa88dde7de806d4cb2b14fd93e87be8dd94501 100644 (file)
@@ -249,7 +249,7 @@ void panic(const char *fmt, ...)
                 * Delay timeout seconds before rebooting the machine.
                 * We can't use the "normal" timers since we just panicked.
                 */
-               pr_emerg("Rebooting in %d seconds..", panic_timeout);
+               pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
 
                for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
                        touch_nmi_watchdog();
@@ -355,7 +355,7 @@ const char *print_tainted(void)
                for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
                        const struct taint_flag *t = &taint_flags[i];
                        *s++ = test_bit(i, &tainted_mask) ?
-                                       t->true : t->false;
+                                       t->c_true : t->c_false;
                }
                *s = 0;
        } else
index f67ceb7768b82ac4e183b22042f1f0784011262b..15e6baef5c73f90b6817c0b1c4e871ea40e30318 100644 (file)
@@ -46,7 +46,7 @@ static const char * const mem_sleep_labels[] = {
 const char *mem_sleep_states[PM_SUSPEND_MAX];
 
 suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE;
-suspend_state_t mem_sleep_default = PM_SUSPEND_MAX;
+static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM;
 
 unsigned int pm_suspend_global_flags;
 EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
@@ -168,7 +168,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
        }
        if (valid_state(PM_SUSPEND_MEM)) {
                mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
-               if (mem_sleep_default >= PM_SUSPEND_MEM)
+               if (mem_sleep_default == PM_SUSPEND_MEM)
                        mem_sleep_current = PM_SUSPEND_MEM;
        }
 
index 80adef7d4c3d01d9ef9ed95c483956d2a858854f..0d6ff3e471be6c1597e0e78fb90d07eb0ce9c546 100644 (file)
@@ -136,6 +136,7 @@ int rcu_jiffies_till_stall_check(void);
 #define TPS(x)  tracepoint_string(x)
 
 void rcu_early_boot_tests(void);
+void rcu_test_sync_prims(void);
 
 /*
  * This function really isn't for public consumption, but RCU is special in
index 1898559e6b60ddc52884f6977fca21e57c6f1f90..b23a4d076f3d2c64862172c83c18f21605e87159 100644 (file)
@@ -185,9 +185,6 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
  * benefits of doing might_sleep() to reduce latency.)
  *
  * Cool, huh?  (Due to Josh Triplett.)
- *
- * But we want to make this a static inline later.  The cond_resched()
- * currently makes this problematic.
  */
 void synchronize_sched(void)
 {
@@ -195,7 +192,6 @@ void synchronize_sched(void)
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_sched() in RCU read-side critical section");
-       cond_resched();
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
index 196f0302e2f4320ebd25dedc31d0ea8a4ab026c1..c64b827ecbca19656395e873ca06da0c92a6298e 100644 (file)
@@ -60,12 +60,17 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
 /*
  * During boot, we forgive RCU lockdep issues.  After this function is
- * invoked, we start taking RCU lockdep issues seriously.
+ * invoked, we start taking RCU lockdep issues seriously.  Note that unlike
+ * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE
+ * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
+ * The reason for this is that Tiny RCU does not need kthreads, so does
+ * not have to care about the fact that the scheduler is half-initialized
+ * at a certain phase of the boot process.
  */
 void __init rcu_scheduler_starting(void)
 {
        WARN_ON(nr_context_switches() > 0);
-       rcu_scheduler_active = 1;
+       rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
 }
 
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
index 96c52e43f7cac0e5d6b41004c0c72d269c351f4a..cb4e2056ccf3cf799bb7c045aca346fedb2ed698 100644 (file)
@@ -127,13 +127,16 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
 int sysctl_panic_on_rcu_stall __read_mostly;
 
 /*
- * The rcu_scheduler_active variable transitions from zero to one just
- * before the first task is spawned.  So when this variable is zero, RCU
- * can assume that there is but one task, allowing RCU to (for example)
+ * The rcu_scheduler_active variable is initialized to the value
+ * RCU_SCHEDULER_INACTIVE and transitions RCU_SCHEDULER_INIT just before the
+ * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
+ * RCU can assume that there is but one task, allowing RCU to (for example)
  * optimize synchronize_rcu() to a simple barrier().  When this variable
- * is one, RCU must actually do all the hard work required to detect real
- * grace periods.  This variable is also used to suppress boot-time false
- * positives from lockdep-RCU error checking.
+ * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
+ * to detect real grace periods.  This variable is also used to suppress
+ * boot-time false positives from lockdep-RCU error checking.  Finally, it
+ * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
+ * is fully initialized, including all of its kthreads having been spawned.
  */
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
@@ -3980,18 +3983,22 @@ static int __init rcu_spawn_gp_kthread(void)
 early_initcall(rcu_spawn_gp_kthread);
 
 /*
- * This function is invoked towards the end of the scheduler's initialization
- * process.  Before this is called, the idle task might contain
- * RCU read-side critical sections (during which time, this idle
- * task is booting the system).  After this function is called, the
- * idle tasks are prohibited from containing RCU read-side critical
- * sections.  This function also enables RCU lockdep checking.
+ * This function is invoked towards the end of the scheduler's
+ * initialization process.  Before this is called, the idle task might
+ * contain synchronous grace-period primitives (during which time, this idle
+ * task is booting the system, and such primitives are no-ops).  After this
+ * function is called, any synchronous grace-period primitives are run as
+ * expedited, with the requesting task driving the grace period forward.
+ * A later core_initcall() rcu_exp_runtime_mode() will switch to full
+ * runtime RCU functionality.
  */
 void rcu_scheduler_starting(void)
 {
        WARN_ON(num_online_cpus() != 1);
        WARN_ON(nr_context_switches() > 0);
-       rcu_scheduler_active = 1;
+       rcu_test_sync_prims();
+       rcu_scheduler_active = RCU_SCHEDULER_INIT;
+       rcu_test_sync_prims();
 }
 
 /*
index d3053e99fdb67deb01a35a9af998a66658d0ee22..e59e1849b89aca14797999deb3e9e91bdd9b78c2 100644 (file)
@@ -531,6 +531,20 @@ struct rcu_exp_work {
        struct work_struct rew_work;
 };
 
+/*
+ * Common code to drive an expedited grace period forward, used by
+ * workqueues and mid-boot-time tasks.
+ */
+static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
+                                 smp_call_func_t func, unsigned long s)
+{
+       /* Initialize the rcu_node tree in preparation for the wait. */
+       sync_rcu_exp_select_cpus(rsp, func);
+
+       /* Wait and clean up, including waking everyone. */
+       rcu_exp_wait_wake(rsp, s);
+}
+
 /*
  * Work-queue handler to drive an expedited grace period forward.
  */
@@ -538,12 +552,8 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
 {
        struct rcu_exp_work *rewp;
 
-       /* Initialize the rcu_node tree in preparation for the wait. */
        rewp = container_of(wp, struct rcu_exp_work, rew_work);
-       sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func);
-
-       /* Wait and clean up, including waking everyone. */
-       rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s);
+       rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
 }
 
 /*
@@ -569,12 +579,18 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
        if (exp_funnel_lock(rsp, s))
                return;  /* Someone else did our work for us. */
 
-       /* Marshall arguments and schedule the expedited grace period. */
-       rew.rew_func = func;
-       rew.rew_rsp = rsp;
-       rew.rew_s = s;
-       INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
-       schedule_work(&rew.rew_work);
+       /* Ensure that load happens before action based on it. */
+       if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
+               /* Direct call during scheduler init and early_initcalls(). */
+               rcu_exp_sel_wait_wake(rsp, func, s);
+       } else {
+               /* Marshall arguments & schedule the expedited grace period. */
+               rew.rew_func = func;
+               rew.rew_rsp = rsp;
+               rew.rew_s = s;
+               INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
+               schedule_work(&rew.rew_work);
+       }
 
        /* Wait for expedited grace period to complete. */
        rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
@@ -676,6 +692,8 @@ void synchronize_rcu_expedited(void)
 {
        struct rcu_state *rsp = rcu_state_p;
 
+       if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
+               return;
        _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
@@ -693,3 +711,15 @@ void synchronize_rcu_expedited(void)
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+/*
+ * Switch to run-time mode once Tree RCU has fully initialized.
+ */
+static int __init rcu_exp_runtime_mode(void)
+{
+       rcu_test_sync_prims();
+       rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
+       rcu_test_sync_prims();
+       return 0;
+}
+core_initcall(rcu_exp_runtime_mode);
index 85c5a883c6e31047194a8c74603ce71ab8381f67..56583e764ebf398a7b14f442f63ce6f707f046e4 100644 (file)
@@ -670,7 +670,7 @@ void synchronize_rcu(void)
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu() in RCU read-side critical section");
-       if (!rcu_scheduler_active)
+       if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
                return;
        if (rcu_gp_is_expedited())
                synchronize_rcu_expedited();
index f19271dce0a9784709d7bc7860a2de972e67a070..4f6db7e6a1179ee00c99f62d854a39b00c959d2b 100644 (file)
@@ -121,11 +121,14 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
  * Should expedited grace-period primitives always fall back to their
  * non-expedited counterparts?  Intended for use within RCU.  Note
  * that if the user specifies both rcu_expedited and rcu_normal, then
- * rcu_normal wins.
+ * rcu_normal wins.  (Except during the time period during boot from
+ * when the first task is spawned until the rcu_exp_runtime_mode()
+ * core_initcall() is invoked, at which point everything is expedited.)
  */
 bool rcu_gp_is_normal(void)
 {
-       return READ_ONCE(rcu_normal);
+       return READ_ONCE(rcu_normal) &&
+              rcu_scheduler_active != RCU_SCHEDULER_INIT;
 }
 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
 
@@ -135,13 +138,14 @@ static atomic_t rcu_expedited_nesting =
 /*
  * Should normal grace-period primitives be expedited?  Intended for
  * use within RCU.  Note that this function takes the rcu_expedited
- * sysfs/boot variable into account as well as the rcu_expedite_gp()
- * nesting.  So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
- * returns false is a -really- bad idea.
+ * sysfs/boot variable and rcu_scheduler_active into account as well
+ * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
+ * until rcu_gp_is_expedited() returns false is a -really- bad idea.
  */
 bool rcu_gp_is_expedited(void)
 {
-       return rcu_expedited || atomic_read(&rcu_expedited_nesting);
+       return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
+              rcu_scheduler_active == RCU_SCHEDULER_INIT;
 }
 EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
 
@@ -257,7 +261,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map);
 
 int notrace debug_lockdep_rcu_enabled(void)
 {
-       return rcu_scheduler_active && debug_locks &&
+       return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
               current->lockdep_recursion == 0;
 }
 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
@@ -591,7 +595,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
 void synchronize_rcu_tasks(void)
 {
        /* Complain if the scheduler has not started.  */
-       RCU_LOCKDEP_WARN(!rcu_scheduler_active,
+       RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
                         "synchronize_rcu_tasks called too soon");
 
        /* Wait for the grace period. */
@@ -813,6 +817,23 @@ static void rcu_spawn_tasks_kthread(void)
 
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
+/*
+ * Test each non-SRCU synchronous grace-period wait API.  This is
+ * useful just after a change in mode for these primitives, and
+ * during early boot.
+ */
+void rcu_test_sync_prims(void)
+{
+       if (!IS_ENABLED(CONFIG_PROVE_RCU))
+               return;
+       synchronize_rcu();
+       synchronize_rcu_bh();
+       synchronize_sched();
+       synchronize_rcu_expedited();
+       synchronize_rcu_bh_expedited();
+       synchronize_sched_expedited();
+}
+
 #ifdef CONFIG_PROVE_RCU
 
 /*
@@ -865,6 +886,7 @@ void rcu_early_boot_tests(void)
                early_boot_test_call_rcu_bh();
        if (rcu_self_test_sched)
                early_boot_test_call_rcu_sched();
+       rcu_test_sync_prims();
 }
 
 static int rcu_verify_early_boot_tests(void)
index 8dbaec0e4f7f079b87f50ea67c82341304387783..1aea594a54dbdac604ca950fdaf93508e5b6e6a7 100644 (file)
@@ -2475,6 +2475,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
                                break;
                        if (neg)
                                continue;
+                       val = convmul * val / convdiv;
                        if ((min && val < *min) || (max && val > *max))
                                continue;
                        *i = val;
index 9d20d5dd298af25d0cd95635e217180601703959..4bbd38ec37886d3d104e3d37dc80d101ab3767ac 100644 (file)
@@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
        struct hlist_head *hashent = ucounts_hashentry(ns, uid);
        struct ucounts *ucounts, *new;
 
-       spin_lock(&ucounts_lock);
+       spin_lock_irq(&ucounts_lock);
        ucounts = find_ucounts(ns, uid, hashent);
        if (!ucounts) {
-               spin_unlock(&ucounts_lock);
+               spin_unlock_irq(&ucounts_lock);
 
                new = kzalloc(sizeof(*new), GFP_KERNEL);
                if (!new)
@@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
                new->uid = uid;
                atomic_set(&new->count, 0);
 
-               spin_lock(&ucounts_lock);
+               spin_lock_irq(&ucounts_lock);
                ucounts = find_ucounts(ns, uid, hashent);
                if (ucounts) {
                        kfree(new);
@@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
        }
        if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
                ucounts = NULL;
-       spin_unlock(&ucounts_lock);
+       spin_unlock_irq(&ucounts_lock);
        return ucounts;
 }
 
 static void put_ucounts(struct ucounts *ucounts)
 {
+       unsigned long flags;
+
        if (atomic_dec_and_test(&ucounts->count)) {
-               spin_lock(&ucounts_lock);
+               spin_lock_irqsave(&ucounts_lock, flags);
                hlist_del_init(&ucounts->node);
-               spin_unlock(&ucounts_lock);
+               spin_unlock_irqrestore(&ucounts_lock, flags);
 
                kfree(ucounts);
        }
index d4b0fa01cae39cd720661d7a62f50a7926f9db69..63177be0159e9493f6d6ade90efae743aaf117b7 100644 (file)
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 #define for_each_watchdog_cpu(cpu) \
        for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
 /*
  * The 'watchdog_running' variable is set to 1 when the watchdog threads
  * are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
+       if (atomic_read(&watchdog_park_in_progress) != 0)
+               return HRTIMER_NORESTART;
+
        /* kick the hardlockup detector */
        watchdog_interrupt_count();
 
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
 {
        int cpu, ret = 0;
 
+       atomic_set(&watchdog_park_in_progress, 1);
+
        for_each_watchdog_cpu(cpu) {
                ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
                if (ret)
                        break;
        }
 
+       atomic_set(&watchdog_park_in_progress, 0);
+
        return ret;
 }
 
index 84016c8aee6b5d2769495a8c6eee0b4ac559b1a6..12b8dd64078655dd9004d03caa8167da16b57cf5 100644 (file)
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;
 
+       if (atomic_read(&watchdog_park_in_progress) != 0)
+               return;
+
        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
index 86c8911b0e3a6fff02b9e52faa11816cfe508362..a3e14ce92a5684a662c2c8f80f97e6fef95943b7 100644 (file)
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
 
        return err;
 }
-EXPORT_SYMBOL_GPL(ioremap_page_range);
index 0b92d605fb69cc805a96c8333dab36174f755e22..84812a9fb16fbbd1409315ea3752fb9a1e3e39ef 100644 (file)
@@ -769,7 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
                        struct radix_tree_node *old = child;
                        offset = child->offset + 1;
                        child = child->parent;
-                       WARN_ON_ONCE(!list_empty(&node->private_list));
+                       WARN_ON_ONCE(!list_empty(&old->private_list));
                        radix_tree_node_free(old);
                        if (old == entry_to_node(node))
                                return;
index 9a6bd6c8d55a6691047e516a46c2cf6b931b912d..5f3ad65c85de01fa6e4c8a07ef9494410bf2b133 100644 (file)
@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
+       /*
+        * When we COW a devmap PMD entry, we split it into PTEs, so we should
+        * not be in this function with `flags & FOLL_COW` set.
+        */
+       WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
+
        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                return NULL;
 
@@ -1128,6 +1134,16 @@ out_unlock:
        return ret;
 }
 
+/*
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+{
+       return pmd_write(pmd) ||
+              ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+}
+
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   pmd_t *pmd,
@@ -1138,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
-       if (flags & FOLL_WRITE && !pmd_write(*pmd))
+       if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
                goto out;
 
        /* Avoid dumping huge zero page */
index a63a8f8326647b92bdc63810c8a93be96047f748..b822e158b319e8f2f02ecbfe76c31b6466be51f1 100644 (file)
@@ -4353,9 +4353,9 @@ static int mem_cgroup_do_precharge(unsigned long count)
                return ret;
        }
 
-       /* Try charges one by one with reclaim */
+       /* Try charges one by one with reclaim, but do not retry */
        while (count--) {
-               ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
+               ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
                if (ret)
                        return ret;
                mc.precharge++;
index e43142c15631fefdf5a605ced247a6429825252f..ca2723d4733849eab01b323a50e6b1bc609e308c 100644 (file)
@@ -1033,36 +1033,39 @@ static void node_states_set_node(int node, struct memory_notify *arg)
        node_set_state(node, N_MEMORY);
 }
 
-int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-                  enum zone_type target)
+bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+                  enum zone_type target, int *zone_shift)
 {
        struct zone *zone = page_zone(pfn_to_page(pfn));
        enum zone_type idx = zone_idx(zone);
        int i;
 
+       *zone_shift = 0;
+
        if (idx < target) {
                /* pages must be at end of current zone */
                if (pfn + nr_pages != zone_end_pfn(zone))
-                       return 0;
+                       return false;
 
                /* no zones in use between current zone and target */
                for (i = idx + 1; i < target; i++)
                        if (zone_is_initialized(zone - idx + i))
-                               return 0;
+                               return false;
        }
 
        if (target < idx) {
                /* pages must be at beginning of current zone */
                if (pfn != zone->zone_start_pfn)
-                       return 0;
+                       return false;
 
                /* no zones in use between current zone and target */
                for (i = target + 1; i < idx; i++)
                        if (zone_is_initialized(zone - idx + i))
-                               return 0;
+                               return false;
        }
 
-       return target - idx;
+       *zone_shift = target - idx;
+       return true;
 }
 
 /* Must be protected by mem_hotplug_begin() */
@@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
            !can_online_high_movable(zone))
                return -EINVAL;
 
-       if (online_type == MMOP_ONLINE_KERNEL)
-               zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL);
-       else if (online_type == MMOP_ONLINE_MOVABLE)
-               zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE);
+       if (online_type == MMOP_ONLINE_KERNEL) {
+               if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
+                       return -EINVAL;
+       } else if (online_type == MMOP_ONLINE_MOVABLE) {
+               if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
+                       return -EINVAL;
+       }
 
        zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
        if (!zone)
index 2e346645eb80d6bb8f97761c30aed6a512017e59..1e7873e40c9a16e922d4800e6dc41486eee23540 100644 (file)
@@ -2017,8 +2017,8 @@ retry_cpuset:
 
        nmask = policy_nodemask(gfp, pol);
        zl = policy_zonelist(gfp, pol, node);
-       mpol_cond_put(pol);
        page = __alloc_pages_nodemask(gfp, order, zl, nmask);
+       mpol_cond_put(pol);
 out:
        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
index d604d2596b7bed41b9748ee3242571b771db4d5e..f3e0c69a97b76997d9fa65cda0b7e1b1fb8fa29a 100644 (file)
@@ -3523,12 +3523,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        struct page *page = NULL;
        unsigned int alloc_flags;
        unsigned long did_some_progress;
-       enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
+       enum compact_priority compact_priority;
        enum compact_result compact_result;
-       int compaction_retries = 0;
-       int no_progress_loops = 0;
+       int compaction_retries;
+       int no_progress_loops;
        unsigned long alloc_start = jiffies;
        unsigned int stall_timeout = 10 * HZ;
+       unsigned int cpuset_mems_cookie;
 
        /*
         * In the slowpath, we sanity check order to avoid ever trying to
@@ -3549,6 +3550,23 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
                gfp_mask &= ~__GFP_ATOMIC;
 
+retry_cpuset:
+       compaction_retries = 0;
+       no_progress_loops = 0;
+       compact_priority = DEF_COMPACT_PRIORITY;
+       cpuset_mems_cookie = read_mems_allowed_begin();
+       /*
+        * We need to recalculate the starting point for the zonelist iterator
+        * because we might have used different nodemask in the fast path, or
+        * there was a cpuset modification and we are retrying - otherwise we
+        * could end up iterating over non-eligible zones endlessly.
+        */
+       ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+                                       ac->high_zoneidx, ac->nodemask);
+       if (!ac->preferred_zoneref->zone)
+               goto nopage;
+
+
        /*
         * The fast path uses conservative alloc_flags to succeed only until
         * kswapd needs to be woken up, and to avoid the cost of setting up
@@ -3708,6 +3726,13 @@ retry:
                                &compaction_retries))
                goto retry;
 
+       /*
+        * It's possible we raced with cpuset update so the OOM would be
+        * premature (see below the nopage: label for full explanation).
+        */
+       if (read_mems_allowed_retry(cpuset_mems_cookie))
+               goto retry_cpuset;
+
        /* Reclaim has failed us, start killing things */
        page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
        if (page)
@@ -3720,6 +3745,16 @@ retry:
        }
 
 nopage:
+       /*
+        * When updating a task's mems_allowed or mempolicy nodemask, it is
+        * possible to race with parallel threads in such a way that our
+        * allocation can fail while the mask is being updated. If we are about
+        * to fail, check if the cpuset changed during allocation and if so,
+        * retry.
+        */
+       if (read_mems_allowed_retry(cpuset_mems_cookie))
+               goto retry_cpuset;
+
        warn_alloc(gfp_mask,
                        "page allocation failure: order:%u", order);
 got_pg:
@@ -3734,7 +3769,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                        struct zonelist *zonelist, nodemask_t *nodemask)
 {
        struct page *page;
-       unsigned int cpuset_mems_cookie;
        unsigned int alloc_flags = ALLOC_WMARK_LOW;
        gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
        struct alloc_context ac = {
@@ -3771,9 +3805,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
                alloc_flags |= ALLOC_CMA;
 
-retry_cpuset:
-       cpuset_mems_cookie = read_mems_allowed_begin();
-
        /* Dirty zone balancing only done in the fast path */
        ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
@@ -3784,8 +3815,13 @@ retry_cpuset:
         */
        ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
                                        ac.high_zoneidx, ac.nodemask);
-       if (!ac.preferred_zoneref) {
+       if (!ac.preferred_zoneref->zone) {
                page = NULL;
+               /*
+                * This might be due to race with cpuset_current_mems_allowed
+                * update, so make sure we retry with original nodemask in the
+                * slow path.
+                */
                goto no_zone;
        }
 
@@ -3794,6 +3830,7 @@ retry_cpuset:
        if (likely(page))
                goto out;
 
+no_zone:
        /*
         * Runtime PM, block IO and its error handling path can deadlock
         * because I/O on the device might not complete.
@@ -3805,21 +3842,10 @@ retry_cpuset:
         * Restore the original nodemask if it was potentially replaced with
         * &cpuset_current_mems_allowed to optimize the fast-path attempt.
         */
-       if (cpusets_enabled())
+       if (unlikely(ac.nodemask != nodemask))
                ac.nodemask = nodemask;
-       page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
-no_zone:
-       /*
-        * When updating a task's mems_allowed, it is possible to race with
-        * parallel threads in such a way that an allocation can fail while
-        * the mask is being updated. If a page allocation is about to fail,
-        * check if the cpuset changed during allocation and if so, retry.
-        */
-       if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
-               alloc_mask = gfp_mask;
-               goto retry_cpuset;
-       }
+       page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
 out:
        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
@@ -7248,6 +7274,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
                .zone = page_zone(pfn_to_page(start)),
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,
+               .gfp_mask = GFP_KERNEL,
        };
        INIT_LIST_HEAD(&cc.migratepages);
 
index 067598a008493fabb68d48120a904943fff4e08c..7aa6f433f4de554d308e774d9e9b40507c6ab48a 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
        return 1;
 }
 
-static void print_section(char *text, u8 *addr, unsigned int length)
+static void print_section(char *level, char *text, u8 *addr,
+                         unsigned int length)
 {
        metadata_access_enable();
-       print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+       print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
                        length, 1);
        metadata_access_disable();
 }
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
               p, p - addr, get_freepointer(s, p));
 
        if (s->flags & SLAB_RED_ZONE)
-               print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+               print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+                             s->red_left_pad);
        else if (p > addr + 16)
-               print_section("Bytes b4 ", p - 16, 16);
+               print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 
-       print_section("Object ", p, min_t(unsigned long, s->object_size,
-                               PAGE_SIZE));
+       print_section(KERN_ERR, "Object ", p,
+                     min_t(unsigned long, s->object_size, PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
-               print_section("Redzone ", p + s->object_size,
+               print_section(KERN_ERR, "Redzone ", p + s->object_size,
                        s->inuse - s->object_size);
 
        if (s->offset)
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 
        if (off != size_from_object(s))
                /* Beginning of the filler is the free pointer */
-               print_section("Padding ", p + off, size_from_object(s) - off);
+               print_section(KERN_ERR, "Padding ", p + off,
+                             size_from_object(s) - off);
 
        dump_stack();
 }
@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
                end--;
 
        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
-       print_section("Padding ", end - remainder, remainder);
+       print_section(KERN_ERR, "Padding ", end - remainder, remainder);
 
        restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
        return 0;
@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
                        page->freelist);
 
                if (!alloc)
-                       print_section("Object ", (void *)object,
+                       print_section(KERN_INFO, "Object ", (void *)object,
                                        s->object_size);
 
                dump_stack();
index 42bfbd801a1bee88b46a48352b2bdef8c28f2834..ead18ca836de7ba134502f3ca2ea3201e6257f97 100644 (file)
@@ -474,7 +474,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if) {
                ret = -EINVAL;
-               goto put_primary_if;
+               goto free_skb;
        }
 
        /* Create one header to be copied to all fragments */
@@ -502,7 +502,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
                if (!skb_fragment) {
                        ret = -ENOMEM;
-                       goto free_skb;
+                       goto put_primary_if;
                }
 
                batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
@@ -511,7 +511,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
                if (ret != NET_XMIT_SUCCESS) {
                        ret = NET_XMIT_DROP;
-                       goto free_skb;
+                       goto put_primary_if;
                }
 
                frag_header.no++;
@@ -519,7 +519,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
                /* The initial check in this function should cover this case */
                if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
                        ret = -EINVAL;
-                       goto free_skb;
+                       goto put_primary_if;
                }
        }
 
@@ -527,7 +527,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
        if (batadv_skb_head_push(skb, header_size) < 0 ||
            pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
                ret = -ENOMEM;
-               goto free_skb;
+               goto put_primary_if;
        }
 
        memcpy(skb->data, &frag_header, header_size);
index 6c087cd049b91af1d99ccf5a72d8cc5e58409c30..1ca25498fe4d7e5f59248ee6b993d28a79ebb947 100644 (file)
@@ -786,20 +786,6 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
        return 0;
 }
 
-static int br_dev_newlink(struct net *src_net, struct net_device *dev,
-                         struct nlattr *tb[], struct nlattr *data[])
-{
-       struct net_bridge *br = netdev_priv(dev);
-
-       if (tb[IFLA_ADDRESS]) {
-               spin_lock_bh(&br->lock);
-               br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
-               spin_unlock_bh(&br->lock);
-       }
-
-       return register_netdevice(dev);
-}
-
 static int br_port_slave_changelink(struct net_device *brdev,
                                    struct net_device *dev,
                                    struct nlattr *tb[],
@@ -1120,6 +1106,25 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
        return 0;
 }
 
+static int br_dev_newlink(struct net *src_net, struct net_device *dev,
+                         struct nlattr *tb[], struct nlattr *data[])
+{
+       struct net_bridge *br = netdev_priv(dev);
+       int err;
+
+       if (tb[IFLA_ADDRESS]) {
+               spin_lock_bh(&br->lock);
+               br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
+               spin_unlock_bh(&br->lock);
+       }
+
+       err = br_changelink(dev, tb, data);
+       if (err)
+               return err;
+
+       return register_netdevice(dev);
+}
+
 static size_t br_get_size(const struct net_device *brdev)
 {
        return nla_total_size(sizeof(u32)) +    /* IFLA_BR_FORWARD_DELAY  */
index 3949ce70be07bc90b1ee7e67ca95f09c1d5258f3..292e33bd916e650c0317ab630a0c60a400d21c7d 100644 (file)
@@ -214,7 +214,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
        SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
        struct sg_table sgt;
        struct scatterlist prealloc_sg;
-       char iv[AES_BLOCK_SIZE];
+       char iv[AES_BLOCK_SIZE] __aligned(8);
        int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
        int crypt_len = encrypt ? in_len + pad_byte : in_len;
        int ret;
index c8f1f67ff16c97a9c785ab7338410c5b9ad8ef3a..be11abac89b3e6ce3013a6b905a268eaf4685609 100644 (file)
@@ -2773,9 +2773,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, type)) {
                features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
-       } else if (illegal_highdma(skb->dev, skb)) {
-               features &= ~NETIF_F_SG;
        }
+       if (illegal_highdma(skb->dev, skb))
+               features &= ~NETIF_F_SG;
 
        return features;
 }
index e23766c7e3ba19414494d242af86c1029e8eee61..236a21e3c878e73fbd97d74de3694caaeab5e762 100644 (file)
@@ -1712,7 +1712,7 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
 static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
                                                   void __user *useraddr)
 {
-       struct ethtool_channels channels, max;
+       struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
        u32 max_rx_in_use = 0;
 
        if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
index 40ef8ae8d93d8655c6d338139840371cd3328f29..03600459bcfdce654d3f2cce02886e0df7d6e1e8 100644 (file)
@@ -386,6 +386,7 @@ static const struct lwtunnel_encap_ops bpf_encap_ops = {
        .fill_encap     = bpf_fill_encap_info,
        .get_encap_size = bpf_encap_nlsize,
        .cmp_encap      = bpf_encap_cmp,
+       .owner          = THIS_MODULE,
 };
 
 static int __init bpf_lwt_init(void)
index a5d4e866ce88b4d055798d9ea55fc905b351fb3d..c23465005f2f4ced93d7bcb2754fb267c2cf00d0 100644 (file)
@@ -26,6 +26,7 @@
 #include <net/lwtunnel.h>
 #include <net/rtnetlink.h>
 #include <net/ip6_fib.h>
+#include <net/nexthop.h>
 
 #ifdef CONFIG_MODULES
 
@@ -114,25 +115,77 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
        ret = -EOPNOTSUPP;
        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[encap_type]);
+       if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
+               ret = ops->build_state(dev, encap, family, cfg, lws);
+               if (ret)
+                       module_put(ops->owner);
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL(lwtunnel_build_state);
+
+int lwtunnel_valid_encap_type(u16 encap_type)
+{
+       const struct lwtunnel_encap_ops *ops;
+       int ret = -EINVAL;
+
+       if (encap_type == LWTUNNEL_ENCAP_NONE ||
+           encap_type > LWTUNNEL_ENCAP_MAX)
+               return ret;
+
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[encap_type]);
+       rcu_read_unlock();
 #ifdef CONFIG_MODULES
        if (!ops) {
                const char *encap_type_str = lwtunnel_encap_str(encap_type);
 
                if (encap_type_str) {
-                       rcu_read_unlock();
+                       __rtnl_unlock();
                        request_module("rtnl-lwt-%s", encap_type_str);
+                       rtnl_lock();
+
                        rcu_read_lock();
                        ops = rcu_dereference(lwtun_encaps[encap_type]);
+                       rcu_read_unlock();
                }
        }
 #endif
-       if (likely(ops && ops->build_state))
-               ret = ops->build_state(dev, encap, family, cfg, lws);
-       rcu_read_unlock();
+       return ops ? 0 : -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(lwtunnel_valid_encap_type);
 
-       return ret;
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
+{
+       struct rtnexthop *rtnh = (struct rtnexthop *)attr;
+       struct nlattr *nla_entype;
+       struct nlattr *attrs;
+       struct nlattr *nla;
+       u16 encap_type;
+       int attrlen;
+
+       while (rtnh_ok(rtnh, remaining)) {
+               attrlen = rtnh_attrlen(rtnh);
+               if (attrlen > 0) {
+                       attrs = rtnh_attrs(rtnh);
+                       nla = nla_find(attrs, attrlen, RTA_ENCAP);
+                       nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+
+                       if (nla_entype) {
+                               encap_type = nla_get_u16(nla_entype);
+
+                               if (lwtunnel_valid_encap_type(encap_type) != 0)
+                                       return -EOPNOTSUPP;
+                       }
+               }
+               rtnh = rtnh_next(rtnh, &remaining);
+       }
+
+       return 0;
 }
-EXPORT_SYMBOL(lwtunnel_build_state);
+EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);
 
 void lwtstate_free(struct lwtunnel_state *lws)
 {
@@ -144,6 +197,7 @@ void lwtstate_free(struct lwtunnel_state *lws)
        } else {
                kfree(lws);
        }
+       module_put(ops->owner);
 }
 EXPORT_SYMBOL(lwtstate_free);
 
index 08bcdc3d171721243319cd29d444589c90e750c3..cef60a4a28030d2066ba84e0ef195855d0014fbb 100644 (file)
@@ -227,7 +227,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
                opt = ireq->ipv6_opt;
                if (!opt)
                        opt = rcu_dereference(np->opt);
-               err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+               err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }
@@ -281,7 +281,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
        dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
        if (!IS_ERR(dst)) {
                skb_dst_set(skb, dst);
-               ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
+               ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
                DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
                DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
                return;
index b8e58689a9a193a26957dd42d19bcc4d5425df13..9750dd6f8c178a12c8b4b680d14a0b18444e150a 100644 (file)
@@ -1116,10 +1116,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
        /* Use already configured phy mode */
        if (p->phy_interface == PHY_INTERFACE_MODE_NA)
                p->phy_interface = p->phy->interface;
-       phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
-                          p->phy_interface);
-
-       return 0;
+       return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+                                 p->phy_interface);
 }
 
 static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
@@ -1214,6 +1212,8 @@ int dsa_slave_suspend(struct net_device *slave_dev)
 {
        struct dsa_slave_priv *p = netdev_priv(slave_dev);
 
+       netif_device_detach(slave_dev);
+
        if (p->phy) {
                phy_stop(p->phy);
                p->old_pause = -1;
index eae0332b0e8c1f861ce629ed9ce3ddc45802a6b8..7db2ad2e82d3193ff1748bf393f536ba3a5a3eb9 100644 (file)
@@ -46,6 +46,7 @@
 #include <net/rtnetlink.h>
 #include <net/xfrm.h>
 #include <net/l3mdev.h>
+#include <net/lwtunnel.h>
 #include <trace/events/fib.h>
 
 #ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -677,6 +678,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
                        cfg->fc_mx_len = nla_len(attr);
                        break;
                case RTA_MULTIPATH:
+                       err = lwtunnel_valid_encap_type_attr(nla_data(attr),
+                                                            nla_len(attr));
+                       if (err < 0)
+                               goto errout;
                        cfg->fc_mp = nla_data(attr);
                        cfg->fc_mp_len = nla_len(attr);
                        break;
@@ -691,6 +696,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
                        break;
                case RTA_ENCAP_TYPE:
                        cfg->fc_encap_type = nla_get_u16(attr);
+                       err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
+                       if (err < 0)
+                               goto errout;
                        break;
                }
        }
index fac275c4810865a5b9b9ca1ac9fc826b8482aa9f..b67719f459537d49d958de9874414ea868c4a8e1 100644 (file)
@@ -1629,6 +1629,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
        sk->sk_sndbuf = sysctl_wmem_default;
+       sk->sk_mark = fl4.flowi4_mark;
        err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
                             len, 0, &ipc, &rt, MSG_DONTWAIT);
        if (unlikely(err)) {
index 5476110598f77ee7064a6b79e57c22e1f274bdf5..9d6c10096d44b8196af83c1a016c3913588c49e3 100644 (file)
@@ -311,6 +311,7 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
        .fill_encap = ip_tun_fill_encap_info,
        .get_encap_size = ip_tun_encap_nlsize,
        .cmp_encap = ip_tun_cmp_encap,
+       .owner = THIS_MODULE,
 };
 
 static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
@@ -401,6 +402,7 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
        .fill_encap = ip6_tun_fill_encap_info,
        .get_encap_size = ip6_tun_encap_nlsize,
        .cmp_encap = ip_tun_cmp_encap,
+       .owner = THIS_MODULE,
 };
 
 void __init ip_tunnel_core_init(void)
index a6b8c1a4102ba7ab07efbcf504fa7ca4025c6f19..0a783cd73faf25d9ec4d7605759038e4e0aef345 100644 (file)
@@ -144,7 +144,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
        rcu_read_lock_bh();
        c = __clusterip_config_find(net, clusterip);
        if (c) {
-               if (!c->pde || unlikely(!atomic_inc_not_zero(&c->refcount)))
+#ifdef CONFIG_PROC_FS
+               if (!c->pde)
+                       c = NULL;
+               else
+#endif
+               if (unlikely(!atomic_inc_not_zero(&c->refcount)))
                        c = NULL;
                else if (entry)
                        atomic_inc(&c->entries);
index f273098e48fd5bbe0ffb95c0daeb315c3a6f06f1..37fb9552e85898d0ee9b311f22af31563c621de7 100644 (file)
@@ -63,10 +63,10 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4,
        return dev_match || flags & XT_RPFILTER_LOOSE;
 }
 
-static bool rpfilter_is_local(const struct sk_buff *skb)
+static bool
+rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
 {
-       const struct rtable *rt = skb_rtable(skb);
-       return rt && (rt->rt_flags & RTCF_LOCAL);
+       return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
 }
 
 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -79,7 +79,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
        info = par->matchinfo;
        invert = info->flags & XT_RPFILTER_INVERT;
 
-       if (rpfilter_is_local(skb))
+       if (rpfilter_is_loopback(skb, xt_in(par)))
                return true ^ invert;
 
        iph = ip_hdr(skb);
index fd8220213afc36375f83d0af902443cf2f5a4a8f..146d86105183e1a456a0f17ed6bb5371aa1e8f76 100644 (file)
@@ -126,6 +126,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
        /* ip_route_me_harder expects skb->dst to be set */
        skb_dst_set_noref(nskb, skb_dst(oldskb));
 
+       nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
+
        skb_reserve(nskb, LL_MAX_HEADER);
        niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
                                   ip4_dst_hoplimit(skb_dst(nskb)));
index 965b1a161369323e37dd5f103c34cbc3e4e901b5..2981291910dd2cac2d508fcde89083afc22affd4 100644 (file)
@@ -26,13 +26,6 @@ static __be32 get_saddr(__be32 addr)
        return addr;
 }
 
-static bool fib4_is_local(const struct sk_buff *skb)
-{
-       const struct rtable *rt = skb_rtable(skb);
-
-       return rt && (rt->rt_flags & RTCF_LOCAL);
-}
-
 #define DSCP_BITS     0xfc
 
 void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
@@ -95,8 +88,10 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
        else
                oif = NULL;
 
-       if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib4_is_local(pkt->skb)) {
-               nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX);
+       if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+           nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+               nft_fib_store_result(dest, priv->result, pkt,
+                                    nft_in(pkt)->ifindex);
                return;
        }
 
@@ -131,7 +126,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
        switch (res.type) {
        case RTN_UNICAST:
                break;
-       case RTN_LOCAL: /* should not appear here, see fib4_is_local() above */
+       case RTN_LOCAL: /* Should not see RTN_LOCAL here */
                return;
        default:
                break;
index 9674bec4a0f8109f22c1db7eb931b4d99fe9cd20..8ea4e9787f82ba65cd07b4c2b663df76fe4eb143 100644 (file)
@@ -205,6 +205,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
         * scaled. So correct it appropriately.
         */
        tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+       tp->max_window = tp->snd_wnd;
 
        /* Activate the retrans timer so that SYNACK can be retransmitted.
         * The request socket is not added to the ehash
index 3de6eba378ade2c0d4a8400ecb5582a7d126b884..27c95acbb52fcb3ad17c9aeb8a4a9fcdc7e3457e 100644 (file)
@@ -5029,7 +5029,7 @@ static void tcp_check_space(struct sock *sk)
        if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
                sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
                /* pairs with tcp_poll() */
-               smp_mb__after_atomic();
+               smp_mb();
                if (sk->sk_socket &&
                    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
                        tcp_new_space(sk);
index 4c47656b9f09be2974fa39cbc3dc7a6acb2f6cb9..156ed578d3c09547d031963b268bb284d593824e 100644 (file)
@@ -5568,8 +5568,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
        struct net_device *dev;
        struct inet6_dev *idev;
 
-       rcu_read_lock();
-       for_each_netdev_rcu(net, dev) {
+       for_each_netdev(net, dev) {
                idev = __in6_dev_get(dev);
                if (idev) {
                        int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -5578,7 +5577,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
                                dev_disable_change(idev);
                }
        }
-       rcu_read_unlock();
 }
 
 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
index a7bc54ab46e2d10ab18abdc3fcef511f06574343..13b5e85fe0d56471ab792b1e75801def3800ee9c 100644 (file)
@@ -238,6 +238,7 @@ static const struct lwtunnel_encap_ops ila_encap_ops = {
        .fill_encap = ila_fill_encap_info,
        .get_encap_size = ila_encap_nlsize,
        .cmp_encap = ila_encap_cmp,
+       .owner = THIS_MODULE,
 };
 
 int ila_lwt_init(void)
index 97074c459fe68a13a6df1ec715b5510affbc6f44..9a31d13bf180d1e58a4a4b0a65750f377f963c52 100644 (file)
@@ -136,7 +136,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
        /* Restore final destination back after routing done */
        fl6.daddr = sk->sk_v6_daddr;
 
-       res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+       res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
                       np->tclass);
        rcu_read_unlock();
        return res;
index 1ba7567b4d8fc8706d6e7ff69b2b182b637fb2ee..51b9835b3176ac2d05639fd318af89788655dd69 100644 (file)
@@ -577,6 +577,9 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
                return -1;
 
        offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+       /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+       ipv6h = ipv6_hdr(skb);
+
        if (offset > 0) {
                struct ipv6_tlv_tnl_enc_lim *tel;
                tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
index 38122d04fadc646c27a5ccdf0eef5eb6d7923a27..2c0df09e90365ad38b5362f77c6e33a24fc062f0 100644 (file)
@@ -172,7 +172,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
  * which are using proper atomic operations or spinlocks.
  */
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-            struct ipv6_txoptions *opt, int tclass)
+            __u32 mark, struct ipv6_txoptions *opt, int tclass)
 {
        struct net *net = sock_net(sk);
        const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -240,7 +240,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 
        skb->protocol = htons(ETH_P_IPV6);
        skb->priority = sk->sk_priority;
-       skb->mark = sk->sk_mark;
+       skb->mark = mark;
 
        mtu = dst_mtu(dst);
        if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
index 753d6d0860fb14c100ab8b20799782ab81602635..ff8ee06491c335d209e86bb15f2526ab1915df3b 100644 (file)
@@ -400,18 +400,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
 
 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
 {
-       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
-       __u8 nexthdr = ipv6h->nexthdr;
-       __u16 off = sizeof(*ipv6h);
+       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
+       unsigned int nhoff = raw - skb->data;
+       unsigned int off = nhoff + sizeof(*ipv6h);
+       u8 next, nexthdr = ipv6h->nexthdr;
 
        while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
-               __u16 optlen = 0;
                struct ipv6_opt_hdr *hdr;
-               if (raw + off + sizeof(*hdr) > skb->data &&
-                   !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
+               u16 optlen;
+
+               if (!pskb_may_pull(skb, off + sizeof(*hdr)))
                        break;
 
-               hdr = (struct ipv6_opt_hdr *) (raw + off);
+               hdr = (struct ipv6_opt_hdr *)(skb->data + off);
                if (nexthdr == NEXTHDR_FRAGMENT) {
                        struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
                        if (frag_hdr->frag_off)
@@ -422,20 +423,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
                } else {
                        optlen = ipv6_optlen(hdr);
                }
+               /* cache hdr->nexthdr, since pskb_may_pull() might
+                * invalidate hdr
+                */
+               next = hdr->nexthdr;
                if (nexthdr == NEXTHDR_DEST) {
-                       __u16 i = off + 2;
+                       u16 i = 2;
+
+                       /* Remember : hdr is no longer valid at this point. */
+                       if (!pskb_may_pull(skb, off + optlen))
+                               break;
+
                        while (1) {
                                struct ipv6_tlv_tnl_enc_lim *tel;
 
                                /* No more room for encapsulation limit */
-                               if (i + sizeof (*tel) > off + optlen)
+                               if (i + sizeof(*tel) > optlen)
                                        break;
 
-                               tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
+                               tel = (struct ipv6_tlv_tnl_enc_lim *) skb->data + off + i;
                                /* return index of option if found and valid */
                                if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
                                    tel->length == 1)
-                                       return i;
+                                       return i + off - nhoff;
                                /* else jump to next option */
                                if (tel->type)
                                        i += tel->length + 2;
@@ -443,7 +453,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
                                        i++;
                        }
                }
-               nexthdr = hdr->nexthdr;
+               nexthdr = next;
                off += optlen;
        }
        return 0;
@@ -1303,6 +1313,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                fl6.flowlabel = key->label;
        } else {
                offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+               /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+               ipv6h = ipv6_hdr(skb);
                if (offset > 0) {
                        struct ipv6_tlv_tnl_enc_lim *tel;
 
index d5263dc364a97a2a5530e69defd46df225b004b1..b12e61b7b16ce9f3f98a0906558c98803a48a9a3 100644 (file)
@@ -72,10 +72,10 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
        return ret;
 }
 
-static bool rpfilter_is_local(const struct sk_buff *skb)
+static bool
+rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
 {
-       const struct rt6_info *rt = (const void *) skb_dst(skb);
-       return rt && (rt->rt6i_flags & RTF_LOCAL);
+       return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
 }
 
 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -85,7 +85,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
        struct ipv6hdr *iph;
        bool invert = info->flags & XT_RPFILTER_INVERT;
 
-       if (rpfilter_is_local(skb))
+       if (rpfilter_is_loopback(skb, xt_in(par)))
                return true ^ invert;
 
        iph = ipv6_hdr(skb);
index 10090400c72f19b7dd21d6543a2c1d740a9bd595..eedee5d108d98422eab5753d7c619b269e07685e 100644 (file)
@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
        fl6.fl6_sport = otcph->dest;
        fl6.fl6_dport = otcph->source;
        fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
+       fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
        security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
        dst = ip6_route_output(net, NULL, &fl6);
        if (dst->error) {
@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
 
        skb_dst_set(nskb, dst);
 
+       nskb->mark = fl6.flowi6_mark;
+
        skb_reserve(nskb, hh_len + dst->header_len);
        ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
                                    ip6_dst_hoplimit(dst));
index c947aad8bcc620e87cacd942d146cdfb7ad2a73e..765facf03d45c47b9913b1adcdaf59b6fe09383c 100644 (file)
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
 
-static bool fib6_is_local(const struct sk_buff *skb)
-{
-       const struct rt6_info *rt = (const void *)skb_dst(skb);
-
-       return rt && (rt->rt6i_flags & RTF_LOCAL);
-}
-
 static int get_ifindex(const struct net_device *dev)
 {
        return dev ? dev->ifindex : 0;
@@ -164,8 +157,10 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
 
        lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif);
 
-       if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib6_is_local(pkt->skb)) {
-               nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX);
+       if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+           nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+               nft_fib_store_result(dest, priv->result, pkt,
+                                    nft_in(pkt)->ifindex);
                return;
        }
 
index 5046d2b240047ee36a1282f6a0b20acf8ceb3fc6..61d7006324ed1b62db29f406f79f4c0f2c2631fd 100644 (file)
@@ -2899,6 +2899,11 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (tb[RTA_MULTIPATH]) {
                cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
                cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
+
+               err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
+                                                    cfg->fc_mp_len);
+               if (err < 0)
+                       goto errout;
        }
 
        if (tb[RTA_PREF]) {
@@ -2912,9 +2917,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (tb[RTA_ENCAP])
                cfg->fc_encap = tb[RTA_ENCAP];
 
-       if (tb[RTA_ENCAP_TYPE])
+       if (tb[RTA_ENCAP_TYPE]) {
                cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
 
+               err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
+               if (err < 0)
+                       goto errout;
+       }
+
        if (tb[RTA_EXPIRES]) {
                unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
 
index b172d85c650a376f541ea05d72046c76b8404303..a855eb325b030a666fe92c56a2d432c77d9dfe7a 100644 (file)
@@ -176,6 +176,8 @@ static int seg6_genl_set_tunsrc(struct sk_buff *skb, struct genl_info *info)
 
        val = nla_data(info->attrs[SEG6_ATTR_DST]);
        t_new = kmemdup(val, sizeof(*val), GFP_KERNEL);
+       if (!t_new)
+               return -ENOMEM;
 
        mutex_lock(&sdata->lock);
 
index 1d60cb132835c9f9089510f035a1ca95e5b1e1a7..c46f8cbf5ab5aa4031d4080d70079e99859d4eb4 100644 (file)
@@ -422,6 +422,7 @@ static const struct lwtunnel_encap_ops seg6_iptun_ops = {
        .fill_encap = seg6_fill_encap_info,
        .get_encap_size = seg6_encap_nlsize,
        .cmp_encap = seg6_encap_cmp,
+       .owner = THIS_MODULE,
 };
 
 int __init seg6_iptunnel_init(void)
index 95c05e5293b1889bebe97e896c8b61a1e9a3812c..64834ec5ab730a8d2b1828028432ddf9ac77714c 100644 (file)
@@ -474,7 +474,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
                opt = ireq->ipv6_opt;
                if (!opt)
                        opt = rcu_dereference(np->opt);
-               err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
+               err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }
@@ -845,7 +845,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
        dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
        if (!IS_ERR(dst)) {
                skb_dst_set(buff, dst);
-               ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
+               ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
                TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
                if (rst)
                        TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
index 9e2641d4558753b7bf746388a971ac337cef9349..206698bc93f406939bb5d883b6ab2f04bc1a3bed 100644 (file)
@@ -40,8 +40,6 @@ void rate_control_rate_init(struct sta_info *sta)
 
        ieee80211_sta_set_rx_nss(sta);
 
-       ieee80211_recalc_min_chandef(sta->sdata);
-
        if (!ref)
                return;
 
index 4dc81963af8f7c0d131e448230b4e8ded3f4ab54..64d3bf269a26896b55517517091c12bfd3a0411f 100644 (file)
@@ -119,18 +119,19 @@ void mpls_stats_inc_outucastpkts(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts);
 
-static u32 mpls_multipath_hash(struct mpls_route *rt,
-                              struct sk_buff *skb, bool bos)
+static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
 {
        struct mpls_entry_decoded dec;
+       unsigned int mpls_hdr_len = 0;
        struct mpls_shim_hdr *hdr;
        bool eli_seen = false;
        int label_index;
        u32 hash = 0;
 
-       for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
+       for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
             label_index++) {
-               if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
+               mpls_hdr_len += sizeof(*hdr);
+               if (!pskb_may_pull(skb, mpls_hdr_len))
                        break;
 
                /* Read and decode the current label */
@@ -155,37 +156,38 @@ static u32 mpls_multipath_hash(struct mpls_route *rt,
                        eli_seen = true;
                }
 
-               bos = dec.bos;
-               if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index +
-                                        sizeof(struct iphdr))) {
+               if (!dec.bos)
+                       continue;
+
+               /* found bottom label; does skb have room for a header? */
+               if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
                        const struct iphdr *v4hdr;
 
-                       v4hdr = (const struct iphdr *)(mpls_hdr(skb) +
-                                                      label_index);
+                       v4hdr = (const struct iphdr *)(hdr + 1);
                        if (v4hdr->version == 4) {
                                hash = jhash_3words(ntohl(v4hdr->saddr),
                                                    ntohl(v4hdr->daddr),
                                                    v4hdr->protocol, hash);
                        } else if (v4hdr->version == 6 &&
-                               pskb_may_pull(skb, sizeof(*hdr) * label_index +
-                                             sizeof(struct ipv6hdr))) {
+                                  pskb_may_pull(skb, mpls_hdr_len +
+                                                sizeof(struct ipv6hdr))) {
                                const struct ipv6hdr *v6hdr;
 
-                               v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) +
-                                                               label_index);
-
+                               v6hdr = (const struct ipv6hdr *)(hdr + 1);
                                hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
                                hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
                                hash = jhash_1word(v6hdr->nexthdr, hash);
                        }
                }
+
+               break;
        }
 
        return hash;
 }
 
 static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
-                                            struct sk_buff *skb, bool bos)
+                                            struct sk_buff *skb)
 {
        int alive = ACCESS_ONCE(rt->rt_nhn_alive);
        u32 hash = 0;
@@ -201,7 +203,7 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
        if (alive <= 0)
                return NULL;
 
-       hash = mpls_multipath_hash(rt, skb, bos);
+       hash = mpls_multipath_hash(rt, skb);
        nh_index = hash % alive;
        if (alive == rt->rt_nhn)
                goto out;
@@ -308,22 +310,22 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
        hdr = mpls_hdr(skb);
        dec = mpls_entry_decode(hdr);
 
-       /* Pop the label */
-       skb_pull(skb, sizeof(*hdr));
-       skb_reset_network_header(skb);
-
-       skb_orphan(skb);
-
        rt = mpls_route_input_rcu(net, dec.label);
        if (!rt) {
                MPLS_INC_STATS(mdev, rx_noroute);
                goto drop;
        }
 
-       nh = mpls_select_multipath(rt, skb, dec.bos);
+       nh = mpls_select_multipath(rt, skb);
        if (!nh)
                goto err;
 
+       /* Pop the label */
+       skb_pull(skb, sizeof(*hdr));
+       skb_reset_network_header(skb);
+
+       skb_orphan(skb);
+
        if (skb_warn_if_lro(skb))
                goto err;
 
index 02531284bc499616e9780d41c780d5bada119d8b..67b7a955de65abac3604cf93c7f8e672820b4539 100644 (file)
@@ -222,6 +222,7 @@ static const struct lwtunnel_encap_ops mpls_iptun_ops = {
        .fill_encap = mpls_fill_encap_info,
        .get_encap_size = mpls_encap_nlsize,
        .cmp_encap = mpls_encap_cmp,
+       .owner = THIS_MODULE,
 };
 
 static int __init mpls_iptunnel_init(void)
index 63729b489c2c17608575e4f58adb0c264e182d0b..bbc45f8a7b2de6801eab367fa7f3611e23b92b9e 100644 (file)
@@ -494,7 +494,7 @@ config NFT_CT
        depends on NF_CONNTRACK
        tristate "Netfilter nf_tables conntrack module"
        help
-         This option adds the "meta" expression that you can use to match
+         This option adds the "ct" expression that you can use to match
          connection tracking information such as the flow state.
 
 config NFT_SET_RBTREE
index 3a073cd9fcf49ed9cfd228a420cc2de928ca4459..4e8083c5e01d1ec631258af169c18aceed101e3a 100644 (file)
@@ -85,11 +85,11 @@ static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;
 
 /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
-#define GC_MAX_BUCKETS_DIV     64u
-/* upper bound of scan intervals */
-#define GC_INTERVAL_MAX                (2 * HZ)
-/* maximum conntracks to evict per gc run */
-#define GC_MAX_EVICTS          256u
+#define GC_MAX_BUCKETS_DIV     128u
+/* upper bound of full table scan */
+#define GC_MAX_SCAN_JIFFIES    (16u * HZ)
+/* desired ratio of entries found to be expired */
+#define GC_EVICT_RATIO 50u
 
 static struct conntrack_gc_work conntrack_gc_work;
 
@@ -938,6 +938,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
 
 static void gc_worker(struct work_struct *work)
 {
+       unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
        unsigned int i, goal, buckets = 0, expired_count = 0;
        struct conntrack_gc_work *gc_work;
        unsigned int ratio, scanned = 0;
@@ -979,8 +980,7 @@ static void gc_worker(struct work_struct *work)
                 */
                rcu_read_unlock();
                cond_resched_rcu_qs();
-       } while (++buckets < goal &&
-                expired_count < GC_MAX_EVICTS);
+       } while (++buckets < goal);
 
        if (gc_work->exiting)
                return;
@@ -997,27 +997,25 @@ static void gc_worker(struct work_struct *work)
         * 1. Minimize time until we notice a stale entry
         * 2. Maximize scan intervals to not waste cycles
         *
-        * Normally, expired_count will be 0, this increases the next_run time
-        * to priorize 2) above.
+        * Normally, expire ratio will be close to 0.
         *
-        * As soon as a timed-out entry is found, move towards 1) and increase
-        * the scan frequency.
-        * In case we have lots of evictions next scan is done immediately.
+        * As soon as a sizeable fraction of the entries have expired
+        * increase scan frequency.
         */
        ratio = scanned ? expired_count * 100 / scanned : 0;
-       if (ratio >= 90 || expired_count == GC_MAX_EVICTS) {
-               gc_work->next_gc_run = 0;
-               next_run = 0;
-       } else if (expired_count) {
-               gc_work->next_gc_run /= 2U;
-               next_run = msecs_to_jiffies(1);
+       if (ratio > GC_EVICT_RATIO) {
+               gc_work->next_gc_run = min_interval;
        } else {
-               if (gc_work->next_gc_run < GC_INTERVAL_MAX)
-                       gc_work->next_gc_run += msecs_to_jiffies(1);
+               unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
 
-               next_run = gc_work->next_gc_run;
+               BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
+
+               gc_work->next_gc_run += min_interval;
+               if (gc_work->next_gc_run > max)
+                       gc_work->next_gc_run = max;
        }
 
+       next_run = gc_work->next_gc_run;
        gc_work->last_bucket = i;
        queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
 }
@@ -1025,7 +1023,7 @@ static void gc_worker(struct work_struct *work)
 static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
 {
        INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
-       gc_work->next_gc_run = GC_INTERVAL_MAX;
+       gc_work->next_gc_run = HZ;
        gc_work->exiting = false;
 }
 
@@ -1917,7 +1915,7 @@ int nf_conntrack_init_start(void)
        nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
 
        conntrack_gc_work_init(&conntrack_gc_work);
-       queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX);
+       queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
 
        return 0;
 
index 3dca90dc24ad392a6be6076e4ddbc345a1959ac0..ffb9e8ada899b770293744ed0da5bebba4b2166e 100644 (file)
@@ -13,7 +13,6 @@
 /* Internal logging interface, which relies on the real
    LOG target modules */
 
-#define NF_LOG_PREFIXLEN               128
 #define NFLOGGER_NAME_LEN              64
 
 static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
index 0db5f9782265ebb033f10d07da815495e8a7d278..1b913760f205be79e1809c983cb3140c284a00cc 100644 (file)
@@ -928,7 +928,8 @@ static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
 }
 
 static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
-       [NFTA_CHAIN_TABLE]      = { .type = NLA_STRING },
+       [NFTA_CHAIN_TABLE]      = { .type = NLA_STRING,
+                                   .len = NFT_TABLE_MAXNAMELEN - 1 },
        [NFTA_CHAIN_HANDLE]     = { .type = NLA_U64 },
        [NFTA_CHAIN_NAME]       = { .type = NLA_STRING,
                                    .len = NFT_CHAIN_MAXNAMELEN - 1 },
@@ -1854,7 +1855,8 @@ static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
 }
 
 static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
-       [NFTA_RULE_TABLE]       = { .type = NLA_STRING },
+       [NFTA_RULE_TABLE]       = { .type = NLA_STRING,
+                                   .len = NFT_TABLE_MAXNAMELEN - 1 },
        [NFTA_RULE_CHAIN]       = { .type = NLA_STRING,
                                    .len = NFT_CHAIN_MAXNAMELEN - 1 },
        [NFTA_RULE_HANDLE]      = { .type = NLA_U64 },
@@ -2443,7 +2445,8 @@ nft_select_set_ops(const struct nlattr * const nla[],
 }
 
 static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
-       [NFTA_SET_TABLE]                = { .type = NLA_STRING },
+       [NFTA_SET_TABLE]                = { .type = NLA_STRING,
+                                           .len = NFT_TABLE_MAXNAMELEN - 1 },
        [NFTA_SET_NAME]                 = { .type = NLA_STRING,
                                            .len = NFT_SET_MAXNAMELEN - 1 },
        [NFTA_SET_FLAGS]                = { .type = NLA_U32 },
@@ -3084,9 +3087,9 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
 }
 
 static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
-                                       const struct nft_set *set,
+                                       struct nft_set *set,
                                        const struct nft_set_iter *iter,
-                                       const struct nft_set_elem *elem)
+                                       struct nft_set_elem *elem)
 {
        const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
        enum nft_registers dreg;
@@ -3192,8 +3195,10 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
 };
 
 static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
-       [NFTA_SET_ELEM_LIST_TABLE]      = { .type = NLA_STRING },
-       [NFTA_SET_ELEM_LIST_SET]        = { .type = NLA_STRING },
+       [NFTA_SET_ELEM_LIST_TABLE]      = { .type = NLA_STRING,
+                                           .len = NFT_TABLE_MAXNAMELEN - 1 },
+       [NFTA_SET_ELEM_LIST_SET]        = { .type = NLA_STRING,
+                                           .len = NFT_SET_MAXNAMELEN - 1 },
        [NFTA_SET_ELEM_LIST_ELEMENTS]   = { .type = NLA_NESTED },
        [NFTA_SET_ELEM_LIST_SET_ID]     = { .type = NLA_U32 },
 };
@@ -3303,9 +3308,9 @@ struct nft_set_dump_args {
 };
 
 static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
-                                 const struct nft_set *set,
+                                 struct nft_set *set,
                                  const struct nft_set_iter *iter,
-                                 const struct nft_set_elem *elem)
+                                 struct nft_set_elem *elem)
 {
        struct nft_set_dump_args *args;
 
@@ -3317,7 +3322,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct net *net = sock_net(skb->sk);
        u8 genmask = nft_genmask_cur(net);
-       const struct nft_set *set;
+       struct nft_set *set;
        struct nft_set_dump_args args;
        struct nft_ctx ctx;
        struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
@@ -3740,10 +3745,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                goto err5;
        }
 
+       if (set->size &&
+           !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
+               err = -ENFILE;
+               goto err6;
+       }
+
        nft_trans_elem(trans) = elem;
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
        return 0;
 
+err6:
+       set->ops->remove(set, &elem);
 err5:
        kfree(trans);
 err4:
@@ -3790,15 +3803,9 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
                return -EBUSY;
 
        nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
-               if (set->size &&
-                   !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
-                       return -ENFILE;
-
                err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
-               if (err < 0) {
-                       atomic_dec(&set->nelems);
+               if (err < 0)
                        break;
-               }
        }
        return err;
 }
@@ -3883,9 +3890,9 @@ err1:
 }
 
 static int nft_flush_set(const struct nft_ctx *ctx,
-                        const struct nft_set *set,
+                        struct nft_set *set,
                         const struct nft_set_iter *iter,
-                        const struct nft_set_elem *elem)
+                        struct nft_set_elem *elem)
 {
        struct nft_trans *trans;
        int err;
@@ -3899,9 +3906,10 @@ static int nft_flush_set(const struct nft_ctx *ctx,
                err = -ENOENT;
                goto err1;
        }
+       set->ndeact++;
 
-       nft_trans_elem_set(trans) = (struct nft_set *)set;
-       nft_trans_elem(trans) = *((struct nft_set_elem *)elem);
+       nft_trans_elem_set(trans) = set;
+       nft_trans_elem(trans) = *elem;
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 
        return 0;
@@ -4032,8 +4040,10 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
 EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
 
 static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
-       [NFTA_OBJ_TABLE]        = { .type = NLA_STRING },
-       [NFTA_OBJ_NAME]         = { .type = NLA_STRING },
+       [NFTA_OBJ_TABLE]        = { .type = NLA_STRING,
+                                   .len = NFT_TABLE_MAXNAMELEN - 1 },
+       [NFTA_OBJ_NAME]         = { .type = NLA_STRING,
+                                   .len = NFT_OBJ_MAXNAMELEN - 1 },
        [NFTA_OBJ_TYPE]         = { .type = NLA_U32 },
        [NFTA_OBJ_DATA]         = { .type = NLA_NESTED },
 };
@@ -4262,10 +4272,11 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
                                if (idx > s_idx)
                                        memset(&cb->args[1], 0,
                                               sizeof(cb->args) - sizeof(cb->args[0]));
-                               if (filter->table[0] &&
+                               if (filter && filter->table[0] &&
                                    strcmp(filter->table, table->name))
                                        goto cont;
-                               if (filter->type != NFT_OBJECT_UNSPEC &&
+                               if (filter &&
+                                   filter->type != NFT_OBJECT_UNSPEC &&
                                    obj->type->type != filter->type)
                                        goto cont;
 
@@ -5009,9 +5020,9 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
                                 const struct nft_chain *chain);
 
 static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
-                                       const struct nft_set *set,
+                                       struct nft_set *set,
                                        const struct nft_set_iter *iter,
-                                       const struct nft_set_elem *elem)
+                                       struct nft_set_elem *elem)
 {
        const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
        const struct nft_data *data;
@@ -5035,7 +5046,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
 {
        const struct nft_rule *rule;
        const struct nft_expr *expr, *last;
-       const struct nft_set *set;
+       struct nft_set *set;
        struct nft_set_binding *binding;
        struct nft_set_iter iter;
 
index 7de2f46734a428d0938fef91aa914865c62d680e..049ad2d9ee66959367a051903563dca6ba654edb 100644 (file)
@@ -98,7 +98,8 @@ out:
 }
 
 static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
-       [NFTA_DYNSET_SET_NAME]  = { .type = NLA_STRING },
+       [NFTA_DYNSET_SET_NAME]  = { .type = NLA_STRING,
+                                   .len = NFT_SET_MAXNAMELEN - 1 },
        [NFTA_DYNSET_SET_ID]    = { .type = NLA_U32 },
        [NFTA_DYNSET_OP]        = { .type = NLA_U32 },
        [NFTA_DYNSET_SREG_KEY]  = { .type = NLA_U32 },
index 6271e40a3dd6d00b0a19f31a5ef5509185120505..6f6e64423643a8c2991f2dedab65aff9e8c92720 100644 (file)
@@ -39,7 +39,8 @@ static void nft_log_eval(const struct nft_expr *expr,
 
 static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
        [NFTA_LOG_GROUP]        = { .type = NLA_U16 },
-       [NFTA_LOG_PREFIX]       = { .type = NLA_STRING },
+       [NFTA_LOG_PREFIX]       = { .type = NLA_STRING,
+                                   .len = NF_LOG_PREFIXLEN - 1 },
        [NFTA_LOG_SNAPLEN]      = { .type = NLA_U32 },
        [NFTA_LOG_QTHRESHOLD]   = { .type = NLA_U16 },
        [NFTA_LOG_LEVEL]        = { .type = NLA_U32 },
index d4f97fa7e21d0036690e229768ab097fc5220cfc..e21aea7e5ec8f141ea3155d1da3c491484c00a73 100644 (file)
@@ -49,7 +49,8 @@ static void nft_lookup_eval(const struct nft_expr *expr,
 }
 
 static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
-       [NFTA_LOOKUP_SET]       = { .type = NLA_STRING },
+       [NFTA_LOOKUP_SET]       = { .type = NLA_STRING,
+                                   .len = NFT_SET_MAXNAMELEN - 1 },
        [NFTA_LOOKUP_SET_ID]    = { .type = NLA_U32 },
        [NFTA_LOOKUP_SREG]      = { .type = NLA_U32 },
        [NFTA_LOOKUP_DREG]      = { .type = NLA_U32 },
index 415a65ba2b85eb93a6866fe42909aa6fcfd74cb3..1ae8c49ca4a1fac06f69c41f68a36b7e85593adb 100644 (file)
@@ -193,10 +193,12 @@ nft_objref_select_ops(const struct nft_ctx *ctx,
 }
 
 static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = {
-       [NFTA_OBJREF_IMM_NAME]  = { .type = NLA_STRING },
+       [NFTA_OBJREF_IMM_NAME]  = { .type = NLA_STRING,
+                                   .len = NFT_OBJ_MAXNAMELEN - 1 },
        [NFTA_OBJREF_IMM_TYPE]  = { .type = NLA_U32 },
        [NFTA_OBJREF_SET_SREG]  = { .type = NLA_U32 },
-       [NFTA_OBJREF_SET_NAME]  = { .type = NLA_STRING },
+       [NFTA_OBJREF_SET_NAME]  = { .type = NLA_STRING,
+                                   .len = NFT_SET_MAXNAMELEN - 1 },
        [NFTA_OBJREF_SET_ID]    = { .type = NLA_U32 },
 };
 
index 1e20e2bbb6d924b5cdb331acf8610c8719763c7c..e36069fb76aebd4140098f38a3758135e78b8d43 100644 (file)
@@ -212,7 +212,7 @@ static void nft_hash_remove(const struct nft_set *set,
        rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
 }
 
-static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
+static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
                          struct nft_set_iter *iter)
 {
        struct nft_hash *priv = nft_set_priv(set);
index 08376e50f6cdc26d7864e846b5a07f2ca662207c..f06f55ee516de44d3b0367cc52afb32173a2d0e8 100644 (file)
@@ -221,7 +221,7 @@ static void *nft_rbtree_deactivate(const struct net *net,
 }
 
 static void nft_rbtree_walk(const struct nft_ctx *ctx,
-                           const struct nft_set *set,
+                           struct nft_set *set,
                            struct nft_set_iter *iter)
 {
        const struct nft_rbtree *priv = nft_set_priv(set);
index ddbda255b6ae47c816522244ddcdbbd1aac2db00..9854baad66abba0ffee8445999211695ccfc7502 100644 (file)
@@ -1984,7 +1984,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
                return -EINVAL;
        *len -= sizeof(vnet_hdr);
 
-       if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le()))
+       if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
                return -EINVAL;
 
        return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
@@ -2245,7 +2245,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        if (po->has_vnet_hdr) {
                if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
                                            sizeof(struct virtio_net_hdr),
-                                           vio_le())) {
+                                           vio_le(), true)) {
                        spin_lock(&sk->sk_receive_queue.lock);
                        goto drop_n_account;
                }
index 6619367bb6ca30f1421c91248263e5ae6e5edc86..063baac5b9fe4048e9d7b41e848a33f0f73c61d4 100644 (file)
@@ -222,7 +222,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
        SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
 
        rcu_read_lock();
-       res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
+       res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
+                      np->tclass);
        rcu_read_unlock();
        return res;
 }
index 7e869d0cca69826ee3e892e389bacdc9a58a1637..4f5a2b580aa52deb75e00c92d8b60992cf5bdaa6 100644 (file)
@@ -68,7 +68,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
                goto out;
        }
 
-       segs = skb_segment(skb, features | NETIF_F_HW_CSUM);
+       segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);
        if (IS_ERR(segs))
                goto out;
 
index d699d2cbf27563fcefd3dfe6d5bac87ab62a08f5..5fc7122c76deb86c798059e62c72be33cf1e455d 100644 (file)
@@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
                                              sctp_assoc_t id)
 {
        struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
-       struct sctp_transport *transport;
+       struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
        union sctp_addr *laddr = (union sctp_addr *)addr;
+       struct sctp_transport *transport;
+
+       if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
+               return NULL;
 
        addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
                                               laddr,
index f96dacf173abe6479e2e78a8454b25d12b11413f..e9295fa3a554c860120f8e22a96f16c9f7e74745 100644 (file)
@@ -263,6 +263,11 @@ static void tipc_node_write_lock(struct tipc_node *n)
        write_lock_bh(&n->lock);
 }
 
+static void tipc_node_write_unlock_fast(struct tipc_node *n)
+{
+       write_unlock_bh(&n->lock);
+}
+
 static void tipc_node_write_unlock(struct tipc_node *n)
 {
        struct net *net = n->net;
@@ -417,7 +422,7 @@ void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
        }
        tipc_node_write_lock(n);
        list_add_tail(subscr, &n->publ_list);
-       tipc_node_write_unlock(n);
+       tipc_node_write_unlock_fast(n);
        tipc_node_put(n);
 }
 
@@ -435,7 +440,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
        }
        tipc_node_write_lock(n);
        list_del_init(subscr);
-       tipc_node_write_unlock(n);
+       tipc_node_write_unlock_fast(n);
        tipc_node_put(n);
 }
 
index 215849ce453dfbd70bccceacc4d42b1a2a6d37d8..3cd6402e812cb05c5fc0872941a8a6c683e367f3 100644 (file)
@@ -86,12 +86,12 @@ struct outqueue_entry {
 static void tipc_recv_work(struct work_struct *work);
 static void tipc_send_work(struct work_struct *work);
 static void tipc_clean_outqueues(struct tipc_conn *con);
-static void tipc_sock_release(struct tipc_conn *con);
 
 static void tipc_conn_kref_release(struct kref *kref)
 {
        struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
-       struct sockaddr_tipc *saddr = con->server->saddr;
+       struct tipc_server *s = con->server;
+       struct sockaddr_tipc *saddr = s->saddr;
        struct socket *sock = con->sock;
        struct sock *sk;
 
@@ -103,9 +103,13 @@ static void tipc_conn_kref_release(struct kref *kref)
                }
                saddr->scope = -TIPC_NODE_SCOPE;
                kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
-               tipc_sock_release(con);
                sock_release(sock);
                con->sock = NULL;
+
+               spin_lock_bh(&s->idr_lock);
+               idr_remove(&s->conn_idr, con->conid);
+               s->idr_in_use--;
+               spin_unlock_bh(&s->idr_lock);
        }
 
        tipc_clean_outqueues(con);
@@ -128,8 +132,10 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
 
        spin_lock_bh(&s->idr_lock);
        con = idr_find(&s->conn_idr, conid);
-       if (con)
+       if (con && test_bit(CF_CONNECTED, &con->flags))
                conn_get(con);
+       else
+               con = NULL;
        spin_unlock_bh(&s->idr_lock);
        return con;
 }
@@ -186,26 +192,15 @@ static void tipc_unregister_callbacks(struct tipc_conn *con)
        write_unlock_bh(&sk->sk_callback_lock);
 }
 
-static void tipc_sock_release(struct tipc_conn *con)
-{
-       struct tipc_server *s = con->server;
-
-       if (con->conid)
-               s->tipc_conn_release(con->conid, con->usr_data);
-
-       tipc_unregister_callbacks(con);
-}
-
 static void tipc_close_conn(struct tipc_conn *con)
 {
        struct tipc_server *s = con->server;
 
        if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
+               tipc_unregister_callbacks(con);
 
-               spin_lock_bh(&s->idr_lock);
-               idr_remove(&s->conn_idr, con->conid);
-               s->idr_in_use--;
-               spin_unlock_bh(&s->idr_lock);
+               if (con->conid)
+                       s->tipc_conn_release(con->conid, con->usr_data);
 
                /* We shouldn't flush pending works as we may be in the
                 * thread. In fact the races with pending rx/tx work structs
@@ -458,6 +453,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
        if (!con)
                return -EINVAL;
 
+       if (!test_bit(CF_CONNECTED, &con->flags)) {
+               conn_put(con);
+               return 0;
+       }
+
        e = tipc_alloc_entry(data, len);
        if (!e) {
                conn_put(con);
@@ -471,12 +471,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
        list_add_tail(&e->list, &con->outqueue);
        spin_unlock_bh(&con->outqueue_lock);
 
-       if (test_bit(CF_CONNECTED, &con->flags)) {
-               if (!queue_work(s->send_wq, &con->swork))
-                       conn_put(con);
-       } else {
+       if (!queue_work(s->send_wq, &con->swork))
                conn_put(con);
-       }
        return 0;
 }
 
@@ -500,7 +496,7 @@ static void tipc_send_to_sock(struct tipc_conn *con)
        int ret;
 
        spin_lock_bh(&con->outqueue_lock);
-       while (1) {
+       while (test_bit(CF_CONNECTED, &con->flags)) {
                e = list_entry(con->outqueue.next, struct outqueue_entry,
                               list);
                if ((struct list_head *) e == &con->outqueue)
@@ -623,14 +619,12 @@ int tipc_server_start(struct tipc_server *s)
 void tipc_server_stop(struct tipc_server *s)
 {
        struct tipc_conn *con;
-       int total = 0;
        int id;
 
        spin_lock_bh(&s->idr_lock);
-       for (id = 0; total < s->idr_in_use; id++) {
+       for (id = 0; s->idr_in_use; id++) {
                con = idr_find(&s->conn_idr, id);
                if (con) {
-                       total++;
                        spin_unlock_bh(&s->idr_lock);
                        tipc_close_conn(con);
                        spin_lock_bh(&s->idr_lock);
index 0dd02244e21d72b8e53e371d51eeae53e4b15a41..9d94e65d0894183b4af94ed24e84b94c0478b551 100644 (file)
@@ -54,6 +54,8 @@ struct tipc_subscriber {
 
 static void tipc_subscrp_delete(struct tipc_subscription *sub);
 static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
+static void tipc_subscrp_put(struct tipc_subscription *subscription);
+static void tipc_subscrp_get(struct tipc_subscription *subscription);
 
 /**
  * htohl - convert value to endianness used by destination
@@ -123,6 +125,7 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 {
        struct tipc_name_seq seq;
 
+       tipc_subscrp_get(sub);
        tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
        if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
                return;
@@ -132,30 +135,23 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
 
        tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
                                node);
+       tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrp_timeout(unsigned long data)
 {
        struct tipc_subscription *sub = (struct tipc_subscription *)data;
-       struct tipc_subscriber *subscriber = sub->subscriber;
 
        /* Notify subscriber of timeout */
        tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
                                TIPC_SUBSCR_TIMEOUT, 0, 0);
 
-       spin_lock_bh(&subscriber->lock);
-       tipc_subscrp_delete(sub);
-       spin_unlock_bh(&subscriber->lock);
-
-       tipc_subscrb_put(subscriber);
+       tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrb_kref_release(struct kref *kref)
 {
-       struct tipc_subscriber *subcriber = container_of(kref,
-                                           struct tipc_subscriber, kref);
-
-       kfree(subcriber);
+       kfree(container_of(kref,struct tipc_subscriber, kref));
 }
 
 static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
@@ -168,6 +164,59 @@ static void tipc_subscrb_get(struct tipc_subscriber *subscriber)
        kref_get(&subscriber->kref);
 }
 
+static void tipc_subscrp_kref_release(struct kref *kref)
+{
+       struct tipc_subscription *sub = container_of(kref,
+                                                    struct tipc_subscription,
+                                                    kref);
+       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+       struct tipc_subscriber *subscriber = sub->subscriber;
+
+       spin_lock_bh(&subscriber->lock);
+       tipc_nametbl_unsubscribe(sub);
+       list_del(&sub->subscrp_list);
+       atomic_dec(&tn->subscription_count);
+       spin_unlock_bh(&subscriber->lock);
+       kfree(sub);
+       tipc_subscrb_put(subscriber);
+}
+
+static void tipc_subscrp_put(struct tipc_subscription *subscription)
+{
+       kref_put(&subscription->kref, tipc_subscrp_kref_release);
+}
+
+static void tipc_subscrp_get(struct tipc_subscription *subscription)
+{
+       kref_get(&subscription->kref);
+}
+
+/* tipc_subscrb_subscrp_delete - delete a specific subscription or all
+ * subscriptions for a given subscriber.
+ */
+static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
+                                       struct tipc_subscr *s)
+{
+       struct list_head *subscription_list = &subscriber->subscrp_list;
+       struct tipc_subscription *sub, *temp;
+
+       spin_lock_bh(&subscriber->lock);
+       list_for_each_entry_safe(sub, temp, subscription_list,  subscrp_list) {
+               if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
+                       continue;
+
+               tipc_subscrp_get(sub);
+               spin_unlock_bh(&subscriber->lock);
+               tipc_subscrp_delete(sub);
+               tipc_subscrp_put(sub);
+               spin_lock_bh(&subscriber->lock);
+
+               if (s)
+                       break;
+       }
+       spin_unlock_bh(&subscriber->lock);
+}
+
 static struct tipc_subscriber *tipc_subscrb_create(int conid)
 {
        struct tipc_subscriber *subscriber;
@@ -177,8 +226,8 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
                pr_warn("Subscriber rejected, no memory\n");
                return NULL;
        }
-       kref_init(&subscriber->kref);
        INIT_LIST_HEAD(&subscriber->subscrp_list);
+       kref_init(&subscriber->kref);
        subscriber->conid = conid;
        spin_lock_init(&subscriber->lock);
 
@@ -187,55 +236,22 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
 
 static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
 {
-       struct tipc_subscription *sub, *temp;
-       u32 timeout;
-
-       spin_lock_bh(&subscriber->lock);
-       /* Destroy any existing subscriptions for subscriber */
-       list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
-                                subscrp_list) {
-               timeout = htohl(sub->evt.s.timeout, sub->swap);
-               if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
-                       tipc_subscrp_delete(sub);
-                       tipc_subscrb_put(subscriber);
-               }
-       }
-       spin_unlock_bh(&subscriber->lock);
-
+       tipc_subscrb_subscrp_delete(subscriber, NULL);
        tipc_subscrb_put(subscriber);
 }
 
 static void tipc_subscrp_delete(struct tipc_subscription *sub)
 {
-       struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+       u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
 
-       tipc_nametbl_unsubscribe(sub);
-       list_del(&sub->subscrp_list);
-       kfree(sub);
-       atomic_dec(&tn->subscription_count);
+       if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
+               tipc_subscrp_put(sub);
 }
 
 static void tipc_subscrp_cancel(struct tipc_subscr *s,
                                struct tipc_subscriber *subscriber)
 {
-       struct tipc_subscription *sub, *temp;
-       u32 timeout;
-
-       spin_lock_bh(&subscriber->lock);
-       /* Find first matching subscription, exit if not found */
-       list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
-                                subscrp_list) {
-               if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
-                       timeout = htohl(sub->evt.s.timeout, sub->swap);
-                       if ((timeout == TIPC_WAIT_FOREVER) ||
-                           del_timer(&sub->timer)) {
-                               tipc_subscrp_delete(sub);
-                               tipc_subscrb_put(subscriber);
-                       }
-                       break;
-               }
-       }
-       spin_unlock_bh(&subscriber->lock);
+       tipc_subscrb_subscrp_delete(subscriber, s);
 }
 
 static struct tipc_subscription *tipc_subscrp_create(struct net *net,
@@ -272,6 +288,7 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
        sub->swap = swap;
        memcpy(&sub->evt.s, s, sizeof(*s));
        atomic_inc(&tn->subscription_count);
+       kref_init(&sub->kref);
        return sub;
 }
 
@@ -288,17 +305,16 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
 
        spin_lock_bh(&subscriber->lock);
        list_add(&sub->subscrp_list, &subscriber->subscrp_list);
-       tipc_subscrb_get(subscriber);
        sub->subscriber = subscriber;
        tipc_nametbl_subscribe(sub);
+       tipc_subscrb_get(subscriber);
        spin_unlock_bh(&subscriber->lock);
 
+       setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
        timeout = htohl(sub->evt.s.timeout, swap);
-       if (timeout == TIPC_WAIT_FOREVER)
-               return;
 
-       setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
-       mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+       if (timeout != TIPC_WAIT_FOREVER)
+               mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
 }
 
 /* Handle one termination request for the subscriber */
index be60103082c923c0fd768f52c081af38eb42491b..ffdc214c117a924f34b416fde415fcd18201ebc0 100644 (file)
@@ -57,6 +57,7 @@ struct tipc_subscriber;
  * @evt: template for events generated by subscription
  */
 struct tipc_subscription {
+       struct kref kref;
        struct tipc_subscriber *subscriber;
        struct net *net;
        struct timer_list timer;
index 127656ebe7be47af8ebb8ea288340177fd068049..cef79873b09d2051663fedf37dc52874b7f7c415 100644 (file)
@@ -995,6 +995,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        unsigned int hash;
        struct unix_address *addr;
        struct hlist_head *list;
+       struct path path = { NULL, NULL };
 
        err = -EINVAL;
        if (sunaddr->sun_family != AF_UNIX)
@@ -1010,9 +1011,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
        addr_len = err;
 
+       if (sun_path[0]) {
+               umode_t mode = S_IFSOCK |
+                      (SOCK_INODE(sock)->i_mode & ~current_umask());
+               err = unix_mknod(sun_path, mode, &path);
+               if (err) {
+                       if (err == -EEXIST)
+                               err = -EADDRINUSE;
+                       goto out;
+               }
+       }
+
        err = mutex_lock_interruptible(&u->bindlock);
        if (err)
-               goto out;
+               goto out_put;
 
        err = -EINVAL;
        if (u->addr)
@@ -1029,16 +1041,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        atomic_set(&addr->refcnt, 1);
 
        if (sun_path[0]) {
-               struct path path;
-               umode_t mode = S_IFSOCK |
-                      (SOCK_INODE(sock)->i_mode & ~current_umask());
-               err = unix_mknod(sun_path, mode, &path);
-               if (err) {
-                       if (err == -EEXIST)
-                               err = -EADDRINUSE;
-                       unix_release_addr(addr);
-                       goto out_up;
-               }
                addr->hash = UNIX_HASH_SIZE;
                hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
                spin_lock(&unix_table_lock);
@@ -1065,6 +1067,9 @@ out_unlock:
        spin_unlock(&unix_table_lock);
 out_up:
        mutex_unlock(&u->bindlock);
+out_put:
+       if (err)
+               path_put(&path);
 out:
        return err;
 }
index 92a44729dbe43ec05f5556939d37ab0a9fc2fa2f..7ef2a12b25b244b2700ab7feee961fb91b435970 100644 (file)
@@ -4,6 +4,7 @@
  * modify it under the terms of version 2 of the GNU General Public
  * License as published by the Free Software Foundation.
  */
+#define KBUILD_MODNAME "foo"
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/if_ether.h>
 #include <uapi/linux/if_packet.h>
index 85c38ecd3a2dcf5f65f99011a73be87d78c287ff..0f4f6e8c8611e3dea0e758215a65cc185ecdae02 100644 (file)
@@ -8,6 +8,7 @@
  * encapsulating the incoming packet in an IPv4/v6 header
  * and then XDP_TX it out.
  */
+#define KBUILD_MODNAME "foo"
 #include <uapi/linux/bpf.h>
 #include <linux/in.h>
 #include <linux/if_ether.h>
index 4a57c8a60bd91990fad493b9a16e265831cb88a5..6a6f44dd594bc4c6275694335ebaa02b22118982 100644 (file)
@@ -610,6 +610,33 @@ error:
        return ret ? : -ENOENT;
 }
 
+/* Adjust symbol name and address */
+static int post_process_probe_trace_point(struct probe_trace_point *tp,
+                                          struct map *map, unsigned long offs)
+{
+       struct symbol *sym;
+       u64 addr = tp->address + tp->offset - offs;
+
+       sym = map__find_symbol(map, addr);
+       if (!sym)
+               return -ENOENT;
+
+       if (strcmp(sym->name, tp->symbol)) {
+               /* If we have no realname, use symbol for it */
+               if (!tp->realname)
+                       tp->realname = tp->symbol;
+               else
+                       free(tp->symbol);
+               tp->symbol = strdup(sym->name);
+               if (!tp->symbol)
+                       return -ENOMEM;
+       }
+       tp->offset = addr - sym->start;
+       tp->address -= offs;
+
+       return 0;
+}
+
 /*
  * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes functions
  * and generate new symbols with suffixes such as .constprop.N or .isra.N
@@ -622,11 +649,9 @@ static int
 post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
                                        int ntevs, const char *pathname)
 {
-       struct symbol *sym;
        struct map *map;
        unsigned long stext = 0;
-       u64 addr;
-       int i;
+       int i, ret = 0;
 
        /* Prepare a map for offline binary */
        map = dso__new_map(pathname);
@@ -636,23 +661,14 @@ post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
        }
 
        for (i = 0; i < ntevs; i++) {
-               addr = tevs[i].point.address + tevs[i].point.offset - stext;
-               sym = map__find_symbol(map, addr);
-               if (!sym)
-                       continue;
-               if (!strcmp(sym->name, tevs[i].point.symbol))
-                       continue;
-               /* If we have no realname, use symbol for it */
-               if (!tevs[i].point.realname)
-                       tevs[i].point.realname = tevs[i].point.symbol;
-               else
-                       free(tevs[i].point.symbol);
-               tevs[i].point.symbol = strdup(sym->name);
-               tevs[i].point.offset = addr - sym->start;
+               ret = post_process_probe_trace_point(&tevs[i].point,
+                                                    map, stext);
+               if (ret < 0)
+                       break;
        }
        map__put(map);
 
-       return 0;
+       return ret;
 }
 
 static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
@@ -682,18 +698,31 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
        return ret;
 }
 
-static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
-                                           int ntevs, const char *module)
+static int
+post_process_module_probe_trace_events(struct probe_trace_event *tevs,
+                                      int ntevs, const char *module,
+                                      struct debuginfo *dinfo)
 {
+       Dwarf_Addr text_offs = 0;
        int i, ret = 0;
        char *mod_name = NULL;
+       struct map *map;
 
        if (!module)
                return 0;
 
-       mod_name = find_module_name(module);
+       map = get_target_map(module, false);
+       if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) {
+               pr_warning("Failed to get ELF symbols for %s\n", module);
+               return -EINVAL;
+       }
 
+       mod_name = find_module_name(module);
        for (i = 0; i < ntevs; i++) {
+               ret = post_process_probe_trace_point(&tevs[i].point,
+                                               map, (unsigned long)text_offs);
+               if (ret < 0)
+                       break;
                tevs[i].point.module =
                        strdup(mod_name ? mod_name : module);
                if (!tevs[i].point.module) {
@@ -703,6 +732,8 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
        }
 
        free(mod_name);
+       map__put(map);
+
        return ret;
 }
 
@@ -760,7 +791,7 @@ arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unuse
 static int post_process_probe_trace_events(struct perf_probe_event *pev,
                                           struct probe_trace_event *tevs,
                                           int ntevs, const char *module,
-                                          bool uprobe)
+                                          bool uprobe, struct debuginfo *dinfo)
 {
        int ret;
 
@@ -768,7 +799,8 @@ static int post_process_probe_trace_events(struct perf_probe_event *pev,
                ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
        else if (module)
                /* Currently ref_reloc_sym based probe is not for drivers */
-               ret = add_module_to_probe_trace_events(tevs, ntevs, module);
+               ret = post_process_module_probe_trace_events(tevs, ntevs,
+                                                            module, dinfo);
        else
                ret = post_process_kernel_probe_trace_events(tevs, ntevs);
 
@@ -812,30 +844,27 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
                }
        }
 
-       debuginfo__delete(dinfo);
-
        if (ntevs > 0) {        /* Succeeded to find trace events */
                pr_debug("Found %d probe_trace_events.\n", ntevs);
                ret = post_process_probe_trace_events(pev, *tevs, ntevs,
-                                               pev->target, pev->uprobes);
+                                       pev->target, pev->uprobes, dinfo);
                if (ret < 0 || ret == ntevs) {
+                       pr_debug("Post processing failed or all events are skipped. (%d)\n", ret);
                        clear_probe_trace_events(*tevs, ntevs);
                        zfree(tevs);
+                       ntevs = 0;
                }
-               if (ret != ntevs)
-                       return ret < 0 ? ret : ntevs;
-               ntevs = 0;
-               /* Fall through */
        }
 
+       debuginfo__delete(dinfo);
+
        if (ntevs == 0) {       /* No error but failed to find probe point. */
                pr_warning("Probe point '%s' not found.\n",
                           synthesize_perf_probe_point(&pev->point));
                return -ENOENT;
-       }
-       /* Error path : ntevs < 0 */
-       pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
-       if (ntevs < 0) {
+       } else if (ntevs < 0) {
+               /* Error path : ntevs < 0 */
+               pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
                if (ntevs == -EBADF)
                        pr_warning("Warning: No dwarf info found in the vmlinux - "
                                "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
index df4debe564daabce2e739289b14360d49185bc6f..0d9d6e0803b88b6fe3909c0b8b83f24fa47580f6 100644 (file)
@@ -1501,7 +1501,8 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
 }
 
 /* For the kernel module, we need a special code to get a DIE */
-static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+                               bool adjust_offset)
 {
        int n, i;
        Elf32_Word shndx;
@@ -1530,6 +1531,8 @@ static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
                        if (!shdr)
                                return -ENOENT;
                        *offs = shdr->sh_addr;
+                       if (adjust_offset)
+                               *offs -= shdr->sh_offset;
                }
        }
        return 0;
@@ -1543,16 +1546,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
        Dwarf_Addr _addr = 0, baseaddr = 0;
        const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
        int baseline = 0, lineno = 0, ret = 0;
-       bool reloc = false;
 
-retry:
+       /* We always need to relocate the address for aranges */
+       if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0)
+               addr += baseaddr;
        /* Find cu die */
        if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
-               if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
-                       addr += baseaddr;
-                       reloc = true;
-                       goto retry;
-               }
                pr_warning("Failed to find debug information for address %lx\n",
                           addr);
                ret = -EINVAL;
index f1d8558f498e96771c13b3f42046a757e888bdf2..2956c51986529ee7481f922d488a449c0a7619a0 100644 (file)
@@ -46,6 +46,9 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
 int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
                                struct perf_probe_point *ppt);
 
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+                              bool adjust_offset);
+
 /* Find a line range */
 int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr);
 
index b13fed534d761742700c21491887667f22a403c9..9f7bd1915c217bedc9b2ae51bb347b9a39bd1192 100644 (file)
@@ -67,21 +67,23 @@ static int map_equal(int lru_map, int expected)
        return map_subset(lru_map, expected) && map_subset(expected, lru_map);
 }
 
-static int sched_next_online(int pid, int next_to_try)
+static int sched_next_online(int pid, int *next_to_try)
 {
        cpu_set_t cpuset;
+       int next = *next_to_try;
+       int ret = -1;
 
-       if (next_to_try == nr_cpus)
-               return -1;
-
-       while (next_to_try < nr_cpus) {
+       while (next < nr_cpus) {
                CPU_ZERO(&cpuset);
-               CPU_SET(next_to_try++, &cpuset);
-               if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset))
+               CPU_SET(next++, &cpuset);
+               if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
+                       ret = 0;
                        break;
+               }
        }
 
-       return next_to_try;
+       *next_to_try = next;
+       return ret;
 }
 
 /* Size of the LRU amp is 2
@@ -96,11 +98,12 @@ static void test_lru_sanity0(int map_type, int map_flags)
 {
        unsigned long long key, value[nr_cpus];
        int lru_map_fd, expected_map_fd;
+       int next_cpu = 0;
 
        printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
               map_flags);
 
-       assert(sched_next_online(0, 0) != -1);
+       assert(sched_next_online(0, &next_cpu) != -1);
 
        if (map_flags & BPF_F_NO_COMMON_LRU)
                lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
@@ -183,6 +186,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
        int lru_map_fd, expected_map_fd;
        unsigned int batch_size;
        unsigned int map_size;
+       int next_cpu = 0;
 
        if (map_flags & BPF_F_NO_COMMON_LRU)
                /* Ther percpu lru list (i.e each cpu has its own LRU
@@ -196,7 +200,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
        printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
               map_flags);
 
-       assert(sched_next_online(0, 0) != -1);
+       assert(sched_next_online(0, &next_cpu) != -1);
 
        batch_size = tgt_free / 2;
        assert(batch_size * 2 == tgt_free);
@@ -262,6 +266,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
        int lru_map_fd, expected_map_fd;
        unsigned int batch_size;
        unsigned int map_size;
+       int next_cpu = 0;
 
        if (map_flags & BPF_F_NO_COMMON_LRU)
                /* Ther percpu lru list (i.e each cpu has its own LRU
@@ -275,7 +280,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
        printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
               map_flags);
 
-       assert(sched_next_online(0, 0) != -1);
+       assert(sched_next_online(0, &next_cpu) != -1);
 
        batch_size = tgt_free / 2;
        assert(batch_size * 2 == tgt_free);
@@ -370,11 +375,12 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
        int lru_map_fd, expected_map_fd;
        unsigned int batch_size;
        unsigned int map_size;
+       int next_cpu = 0;
 
        printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
               map_flags);
 
-       assert(sched_next_online(0, 0) != -1);
+       assert(sched_next_online(0, &next_cpu) != -1);
 
        batch_size = tgt_free / 2;
        assert(batch_size * 2 == tgt_free);
@@ -430,11 +436,12 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
        int lru_map_fd, expected_map_fd;
        unsigned long long key, value[nr_cpus];
        unsigned long long end_key;
+       int next_cpu = 0;
 
        printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
               map_flags);
 
-       assert(sched_next_online(0, 0) != -1);
+       assert(sched_next_online(0, &next_cpu) != -1);
 
        if (map_flags & BPF_F_NO_COMMON_LRU)
                lru_map_fd = create_map(map_type, map_flags,
@@ -502,9 +509,8 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
 static void test_lru_sanity5(int map_type, int map_flags)
 {
        unsigned long long key, value[nr_cpus];
-       int next_sched_cpu = 0;
+       int next_cpu = 0;
        int map_fd;
-       int i;
 
        if (map_flags & BPF_F_NO_COMMON_LRU)
                return;
@@ -519,27 +525,20 @@ static void test_lru_sanity5(int map_type, int map_flags)
        key = 0;
        assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
 
-       for (i = 0; i < nr_cpus; i++) {
+       while (sched_next_online(0, &next_cpu) != -1) {
                pid_t pid;
 
                pid = fork();
                if (pid == 0) {
-                       next_sched_cpu = sched_next_online(0, next_sched_cpu);
-                       if (next_sched_cpu != -1)
-                               do_test_lru_sanity5(key, map_fd);
+                       do_test_lru_sanity5(key, map_fd);
                        exit(0);
                } else if (pid == -1) {
-                       printf("couldn't spawn #%d process\n", i);
+                       printf("couldn't spawn process to test key:%llu\n",
+                              key);
                        exit(1);
                } else {
                        int status;
 
-                       /* It is mostly redundant and just allow the parent
-                        * process to update next_shced_cpu for the next child
-                        * process
-                        */
-                       next_sched_cpu = sched_next_online(pid, next_sched_cpu);
-
                        assert(waitpid(pid, &status, 0) == pid);
                        assert(status == 0);
                        key++;
@@ -547,6 +546,8 @@ static void test_lru_sanity5(int map_type, int map_flags)
        }
 
        close(map_fd);
+       /* At least one key should be tested */
+       assert(key > 0);
 
        printf("Pass\n");
 }
index c22860ab973378f76417d2bc85f1daf2c828e0c7..30e1ac62e8cb4249350c89aa64163e3d4ee3bedd 100644 (file)
@@ -66,7 +66,7 @@ int pmc56_overflow(void)
 
        FAIL_IF(ebb_event_enable(&event));
 
-       mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
+       mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
        mtspr(SPRN_PMC5, 0);
        mtspr(SPRN_PMC6, 0);
 
index 34e63cc4c572bfcafe6fecb4784fc4ba5079bf8a..14142faf040b7e81a1c38a983aa76d9ae50ee4e1 100644 (file)
@@ -26,6 +26,16 @@ static inline void wait_cycles(unsigned long long cycles)
 #define VMEXIT_CYCLES 500
 #define VMENTRY_CYCLES 500
 
+#elif defined(__s390x__)
+static inline void wait_cycles(unsigned long long cycles)
+{
+       asm volatile("0: brctg %0,0b" : : "d" (cycles));
+}
+
+/* tweak me */
+#define VMEXIT_CYCLES 200
+#define VMENTRY_CYCLES 200
+
 #else
 static inline void wait_cycles(unsigned long long cycles)
 {
@@ -81,6 +91,8 @@ extern unsigned ring_size;
 /* Is there a portable way to do this? */
 #if defined(__x86_64__) || defined(__i386__)
 #define cpu_relax() asm ("rep; nop" ::: "memory")
+#elif defined(__s390x__)
+#define cpu_relax() barrier()
 #else
 #define cpu_relax() assert(0)
 #endif
index 2e69ca812b4cf4b39d653cd774286587fcc87bc7..29b0d3920bfc412a049b1478a7074869cbd113df 100755 (executable)
@@ -1,12 +1,13 @@
 #!/bin/sh
 
+CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#')
 #use last CPU for host. Why not the first?
 #many devices tend to use cpu0 by default so
 #it tends to be busier
-HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
+HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1)
 
 #run command on all cpus
-for cpu in $(seq 0 $HOST_AFFINITY)
+for cpu in $CPUS_ONLINE
 do
        #Don't run guest and host on same CPU
        #It actually works ok if using signalling
index a2dbbccbb6a3fe96751fafde8cd01658aa1301a5..6a084cd57b883e1c4bb8420bee37ed0a74321d98 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <clocksource/arm_arch_timer.h>
 #include <asm/arch_timer.h>
+#include <asm/kvm_hyp.h>
 
 #include <kvm/arm_vgic.h>
 #include <kvm/arm_arch_timer.h>
@@ -89,9 +90,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
        struct kvm_vcpu *vcpu;
 
        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
-       vcpu->arch.timer_cpu.armed = false;
-
-       WARN_ON(!kvm_timer_should_fire(vcpu));
 
        /*
         * If the vcpu is blocked we want to wake it up so that it will see
@@ -512,3 +510,25 @@ void kvm_timer_init(struct kvm *kvm)
 {
        kvm->arch.timer.cntvoff = kvm_phys_timer_read();
 }
+
+/*
+ * On VHE system, we only need to configure trap on physical timer and counter
+ * accesses in EL0 and EL1 once, not for every world switch.
+ * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
+ * and this makes those bits have no effect for the host kernel execution.
+ */
+void kvm_timer_init_vhe(void)
+{
+       /* When HCR_EL2.E2H ==1, EL1PCEN and EL1PCTEN are shifted by 10 */
+       u32 cnthctl_shift = 10;
+       u64 val;
+
+       /*
+        * Disallow physical timer access for the guest.
+        * Physical counter access is allowed.
+        */
+       val = read_sysreg(cnthctl_el2);
+       val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
+       val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
+       write_sysreg(val, cnthctl_el2);
+}
index 798866a8d8756b07dd815d7a8bc8dec8f6019e13..63e28dd18bb09755b035f6e30bb4aaa6eca082fc 100644 (file)
@@ -35,10 +35,16 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
        /* Disable the virtual timer */
        write_sysreg_el0(0, cntv_ctl);
 
-       /* Allow physical timer/counter access for the host */
-       val = read_sysreg(cnthctl_el2);
-       val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-       write_sysreg(val, cnthctl_el2);
+       /*
+        * We don't need to do this for VHE since the host kernel runs in EL2
+        * with HCR_EL2.TGE ==1, which makes those bits have no impact.
+        */
+       if (!has_vhe()) {
+               /* Allow physical timer/counter access for the host */
+               val = read_sysreg(cnthctl_el2);
+               val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+               write_sysreg(val, cnthctl_el2);
+       }
 
        /* Clear cntvoff for the host */
        write_sysreg(0, cntvoff_el2);
@@ -50,14 +56,17 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        u64 val;
 
-       /*
-        * Disallow physical timer access for the guest
-        * Physical counter access is allowed
-        */
-       val = read_sysreg(cnthctl_el2);
-       val &= ~CNTHCTL_EL1PCEN;
-       val |= CNTHCTL_EL1PCTEN;
-       write_sysreg(val, cnthctl_el2);
+       /* Those bits are already configured at boot on VHE-system */
+       if (!has_vhe()) {
+               /*
+                * Disallow physical timer access for the guest
+                * Physical counter access is allowed
+                */
+               val = read_sysreg(cnthctl_el2);
+               val &= ~CNTHCTL_EL1PCEN;
+               val |= CNTHCTL_EL1PCTEN;
+               write_sysreg(val, cnthctl_el2);
+       }
 
        if (timer->enabled) {
                write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
index 5114391b7e5af52ee5f815baead6b4561243a31a..c737ea0a310a732cc6f878c57877aa3086e67280 100644 (file)
@@ -268,15 +268,11 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
 
-       mutex_lock(&kvm->lock);
-
        dist->ready = false;
        dist->initialized = false;
 
        kfree(dist->spis);
        dist->nr_spis = 0;
-
-       mutex_unlock(&kvm->lock);
 }
 
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -286,7 +282,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
        INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
 }
 
-void kvm_vgic_destroy(struct kvm *kvm)
+/* To be called with kvm->lock held */
+static void __kvm_vgic_destroy(struct kvm *kvm)
 {
        struct kvm_vcpu *vcpu;
        int i;
@@ -297,6 +294,13 @@ void kvm_vgic_destroy(struct kvm *kvm)
                kvm_vgic_vcpu_destroy(vcpu);
 }
 
+void kvm_vgic_destroy(struct kvm *kvm)
+{
+       mutex_lock(&kvm->lock);
+       __kvm_vgic_destroy(kvm);
+       mutex_unlock(&kvm->lock);
+}
+
 /**
  * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
  * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
@@ -348,6 +352,10 @@ int kvm_vgic_map_resources(struct kvm *kvm)
                ret = vgic_v2_map_resources(kvm);
        else
                ret = vgic_v3_map_resources(kvm);
+
+       if (ret)
+               __kvm_vgic_destroy(kvm);
+
 out:
        mutex_unlock(&kvm->lock);
        return ret;
index 9bab86757fa4f3613c372fbc0250c146284306ff..834137e7b83ff0c37515a1c36300c24aeadb9925 100644 (file)
@@ -293,8 +293,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
        dist->ready = true;
 
 out:
-       if (ret)
-               kvm_vgic_destroy(kvm);
        return ret;
 }
 
index 5c9f9745e6cab8284161397c3d810df65304fae8..e6b03fd8c374ca7a4dcb1e272141504f66c697d6 100644 (file)
@@ -302,8 +302,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
        dist->ready = true;
 
 out:
-       if (ret)
-               kvm_vgic_destroy(kvm);
        return ret;
 }