git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge tag 'trace-v4.11-rc5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 18 Apr 2017 16:31:51 +0000 (09:31 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 18 Apr 2017 16:31:51 +0000 (09:31 -0700)
Pull ftrace fix from Steven Rostedt:
 "Namhyung Kim discovered a use after free bug. It has to do with adding
  a pid filter to function tracing in an instance, and then freeing the
  instance"

* tag 'trace-v4.11-rc5-3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ftrace: Fix function pid filter on instances
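
For context, a minimal user-space sketch of the sequence described above: create a tracing instance, install a function pid filter in it, then free the instance. This is illustrative only; the instance name and the tracefs mount point are assumptions, not part of the commit:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            const char *inst = "/sys/kernel/tracing/instances/foo";
            char buf[32];
            int fd;

            mkdir(inst, 0755);              /* create the instance */

            fd = open("/sys/kernel/tracing/instances/foo/set_ftrace_pid",
                      O_WRONLY);
            if (fd >= 0) {
                    snprintf(buf, sizeof(buf), "%d\n", getpid());
                    write(fd, buf, strlen(buf));    /* set the pid filter */
                    close(fd);
            }

            rmdir(inst);    /* free the instance; before the fix, the pid
                               filter hooks still referenced the freed
                               trace_array */
            return 0;
    }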

452 files changed:
.mailmap
Documentation/filesystems/Locking
Documentation/filesystems/porting
Documentation/filesystems/vfs.txt
Documentation/pinctrl.txt
Documentation/process/stable-kernel-rules.rst
Documentation/virtual/kvm/devices/arm-vgic.txt
MAINTAINERS
Makefile
arch/alpha/kernel/osf_sys.c
arch/arm/boot/dts/am335x-baltos.dtsi
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
arch/arm/boot/dts/sun8i-a33.dtsi
arch/arm/kvm/arm.c
arch/arm/kvm/mmu.c
arch/arm/mach-omap2/common.h
arch/arm/mach-omap2/omap-hotplug.c
arch/arm/mach-omap2/omap-mpuss-lowpower.c
arch/arm/mach-omap2/omap-smc.S
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-omap2/omap_device.c
arch/arm/mach-orion5x/Kconfig
arch/arm/mm/dma-mapping.c
arch/arm/mm/nommu.c
arch/arm/plat-orion/common.c
arch/arm/probes/kprobes/core.c
arch/arm/probes/kprobes/test-core.c
arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
arch/arm64/mm/fault.c
arch/arm64/mm/hugetlbpage.c
arch/ia64/include/asm/asm-prototypes.h [new file with mode: 0644]
arch/ia64/lib/Makefile
arch/metag/include/asm/uaccess.h
arch/metag/lib/usercopy.c
arch/mips/Kconfig
arch/mips/include/asm/fpu.h
arch/mips/include/asm/irq.h
arch/mips/include/asm/spinlock.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/cps-vec.S
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/genex.S
arch/mips/kernel/process.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/traps.c
arch/mips/lantiq/xway/sysctrl.c
arch/mips/mm/c-r4k.c
arch/mips/mm/tlbex.c
arch/mips/ralink/rt3883.c
arch/nios2/kernel/prom.c
arch/nios2/kernel/setup.c
arch/parisc/include/asm/uaccess.h
arch/parisc/lib/lusercopy.S
arch/powerpc/crypto/crc32c-vpmsum_glue.c
arch/powerpc/kernel/align.c
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/setup_64.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/mm/hash_native_64.c
arch/s390/boot/compressed/misc.c
arch/s390/include/asm/uaccess.h
arch/s390/kernel/smp.c
arch/s390/kvm/gaccess.c
arch/sparc/include/asm/page_64.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/processor_32.h
arch/sparc/include/asm/processor_64.h
arch/sparc/kernel/head_64.S
arch/sparc/kernel/misctrap.S
arch/sparc/kernel/rtrap_64.S
arch/sparc/kernel/spiterrs.S
arch/sparc/kernel/sun4v_tlb_miss.S
arch/sparc/kernel/urtt_fill.S
arch/sparc/kernel/winfixup.S
arch/sparc/lib/NG2memcpy.S
arch/sparc/lib/NG4memcpy.S
arch/sparc/lib/NG4memset.S
arch/sparc/lib/NGmemcpy.S
arch/sparc/mm/hugetlbpage.c
arch/sparc/mm/init_64.c
arch/sparc/mm/srmmu.c
arch/sparc/mm/tlb.c
arch/sparc/mm/tsb.c
arch/x86/entry/vdso/vdso32-setup.c
arch/x86/events/intel/lbr.c
arch/x86/include/asm/elf.h
arch/x86/include/asm/pmem.h
arch/x86/kernel/cpu/intel_rdt_schemata.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/signal.c
arch/x86/kernel/signal_compat.c
arch/x86/kernel/traps.c
arch/x86/kvm/vmx.c
arch/x86/mm/init.c
arch/x86/platform/efi/quirks.c
arch/xtensa/include/asm/page.h
arch/xtensa/include/uapi/asm/unistd.h
arch/xtensa/kernel/traps.c
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.c
block/blk-mq.h
block/blk-sysfs.c
block/elevator.c
crypto/ahash.c
crypto/algif_aead.c
crypto/lrw.c
crypto/xts.c
drivers/acpi/acpica/utresrc.c
drivers/acpi/glue.c
drivers/acpi/nfit/core.c
drivers/acpi/scan.c
drivers/ata/pata_atiixp.c
drivers/ata/sata_via.c
drivers/block/zram/zram_drv.c
drivers/char/mem.c
drivers/char/virtio_console.c
drivers/cpufreq/cpufreq.c
drivers/crypto/caam/caampkc.c
drivers/crypto/caam/ctrl.c
drivers/crypto/caam/intern.h
drivers/dax/Kconfig
drivers/dax/dax.c
drivers/firmware/efi/libstub/gop.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/firmware.c
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/hdmi/hdmi_audio.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
drivers/gpu/drm/ttm/ttm_object.c
drivers/gpu/drm/udl/udl_transfer.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-uclogic.c
drivers/iio/accel/hid-sensor-accel-3d.c
drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
drivers/iio/common/hid-sensors/hid-sensor-attributes.c
drivers/iio/gyro/bmg160_core.c
drivers/iio/industrialio-core.c
drivers/iio/pressure/st_pressure_core.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/input/joystick/xpad.c
drivers/irqchip/irq-imx-gpcv2.c
drivers/isdn/capi/kcapi.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-raid.c
drivers/md/dm-rq.c
drivers/md/dm-verity-fec.c
drivers/md/dm-verity-fec.h
drivers/net/can/ifi_canfd/ifi_canfd.c
drivers/net/can/rcar/rcar_can.c
drivers/net/ethernet/aquantia/atlantic/aq_main.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/moxa/moxart_ether.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/rocker/rocker_ofdpa.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/irda/vlsi_ir.c
drivers/net/phy/mdio-boardinfo.c
drivers/net/phy/phy.c
drivers/net/team/team.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/usbnet.c
drivers/net/virtio_net.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/realtek/rtlwifi/base.c
drivers/nvdimm/bus.c
drivers/nvdimm/claim.c
drivers/nvdimm/dimm_devs.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/rdma.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/io-cmd.c
drivers/nvme/target/loop.c
drivers/pci/dwc/Kconfig
drivers/pci/dwc/pcie-artpec6.c
drivers/pci/dwc/pcie-designware-plat.c
drivers/pci/host/pci-thunder-pem.c
drivers/pinctrl/core.c
drivers/pinctrl/freescale/pinctrl-imx.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/samsung/pinctrl-exynos.c
drivers/pinctrl/samsung/pinctrl-exynos.h
drivers/pinctrl/sh-pfc/pinctrl.c
drivers/pinctrl/ti/pinctrl-ti-iodelay.c
drivers/pwm/pwm-lpss-pci.c
drivers/pwm/pwm-lpss-platform.c
drivers/pwm/pwm-lpss.c
drivers/pwm/pwm-lpss.h
drivers/pwm/pwm-rockchip.c
drivers/reset/core.c
drivers/s390/crypto/pkey_api.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/commsup.c
drivers/scsi/ipr.c
drivers/scsi/qedf/qedf_fip.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sr.c
drivers/staging/android/ashmem.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/target_core_alua.c
drivers/target/target_core_configfs.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/tty/tty_ldisc.c
drivers/usb/gadget/function/f_tcm.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/omap/omapfb_main.c
drivers/video/fbdev/ssd1307fb.c
drivers/video/fbdev/xen-fbfront.c
drivers/virtio/virtio.c
drivers/virtio/virtio_pci_common.c
drivers/virtio/virtio_pci_common.h
drivers/virtio/virtio_pci_legacy.c
drivers/virtio/virtio_pci_modern.c
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/btrfs/inode.c
fs/btrfs/super.c
fs/btrfs/volumes.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/ioctl.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2proto.h
fs/cifs/smb2transport.c
fs/cifs/transport.c
fs/dax.c
fs/ext4/ext4.h
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/symlink.c
fs/hugetlbfs/inode.c
fs/namei.c
fs/orangefs/devorangefs-req.c
fs/orangefs/orangefs-kernel.h
fs/orangefs/super.c
fs/proc/proc_sysctl.c
fs/proc/task_mmu.c
fs/stat.c
fs/sysfs/file.c
fs/userfaultfd.c
fs/xfs/libxfs/xfs_dir2_priv.h
fs/xfs/libxfs/xfs_dir2_sf.c
fs/xfs/libxfs/xfs_inode_fork.c
fs/xfs/libxfs/xfs_inode_fork.h
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_itable.c
include/asm-generic/vmlinux.lds.h
include/crypto/internal/hash.h
include/drm/ttm/ttm_object.h
include/kvm/arm_vgic.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/cgroup.h
include/linux/elevator.h
include/linux/irqchip/arm-gic.h
include/linux/mfd/cros_ec.h
include/linux/mmu_notifier.h
include/linux/nvme.h
include/linux/pinctrl/pinctrl.h
include/linux/reset.h
include/linux/sched.h
include/linux/stat.h
include/linux/uio.h
include/linux/virtio.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/target/target_core_base.h
include/uapi/linux/Kbuild
include/uapi/linux/stat.h
include/uapi/linux/virtio_pci.h
kernel/audit.c
kernel/audit.h
kernel/auditsc.c
kernel/bpf/core.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/irq/affinity.c
kernel/kthread.c
kernel/ptrace.c
kernel/sysctl.c
lib/iov_iter.c
mm/huge_memory.c
mm/internal.h
mm/mempolicy.c
mm/page_alloc.c
mm/page_vma_mapped.c
mm/swap.c
mm/swap_cgroup.c
mm/vmstat.c
mm/z3fold.c
mm/zsmalloc.c
net/bridge/br_device.c
net/bridge/br_if.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/core/datagram.c
net/core/dev.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/secure_seq.c
net/core/sysctl_net_core.c
net/ipv4/ipconfig.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/nf_nat_snmp_basic.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_recovery.c
net/ipv6/addrconf.c
net/kcm/kcmsock.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_debugfs.c
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_netlink.c
net/l2tp/l2tp_ppp.c
net/mac80211/iface.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_extend.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_redirect.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_hash.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TPROXY.c
net/openvswitch/conntrack.c
net/openvswitch/flow.c
net/packet/af_packet.c
net/sched/sch_generic.c
net/sctp/associola.c
net/sctp/input.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/proc.c
net/sctp/sm_make_chunk.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/stream.c
net/sctp/transport.c
net/wireless/sysfs.c
samples/statx/test-statx.c
scripts/Makefile.lib
scripts/kconfig/gconf.c
tools/include/linux/filter.h
tools/perf/util/annotate.c
tools/power/cpupower/utils/helpers/cpuid.c
tools/power/x86/turbostat/turbostat.8
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/powerpc/Makefile
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-mmio-v2.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic.h

index 67dc22ffc9a80cb4fd6abeeba3d2ec7f7ae2ba19..1d6f4e7280dc67f630b79456b3f1e5a4c0a41343 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -99,6 +99,8 @@ Linas Vepstas <linas@austin.ibm.com>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
 Mark Brown <broonie@sirena.org.uk>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
+Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com>
 Matthieu CASTET <castet.matthieu@free.fr>
 Mauro Carvalho Chehab <mchehab@kernel.org> <mchehab@brturbo.com.br>
 Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com>
@@ -171,6 +173,7 @@ Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Takashi YOSHII <takashi.yoshii.zj@renesas.com>
+Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
index fdcfdd79682a00102b1c18c8b56e19e00a418d26..fe25787ff6d49748c3e1ecc494f17e401a60916a 100644 (file)
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -58,8 +58,7 @@ prototypes:
        int (*permission) (struct inode *, int, unsigned int);
        int (*get_acl)(struct inode *, int);
        int (*setattr) (struct dentry *, struct iattr *);
-       int (*getattr) (const struct path *, struct dentry *, struct kstat *,
-                       u32, unsigned int);
+       int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
        ssize_t (*listxattr) (struct dentry *, char *, size_t);
        int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
        void (*update_time)(struct inode *, struct timespec *, int);
index 95280079c0b3af0c217270e6838b723eb818cbcd..5fb17f49f7a21907a2c446267cc952c749e42043 100644 (file)
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -600,3 +600,9 @@ in your dentry operations instead.
 [recommended]
        ->readlink is optional for symlinks.  Don't set, unless filesystem needs
        to fake something for readlink(2).
+--
+[mandatory]
+       ->getattr() is now passed a struct path rather than a vfsmount and
+       dentry separately, and it now has request_mask and query_flags arguments
+       to specify the fields and sync type requested by statx.  Filesystems not
+       supporting any statx-specific features may ignore the new arguments.
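
A minimal sketch of the new signature for a hypothetical "foo" filesystem
(not part of this merge); a filesystem with no statx-specific features can
simply ignore the new arguments:

	static int foo_getattr(const struct path *path, struct kstat *stat,
			       u32 request_mask, unsigned int query_flags)
	{
		/* No statx-specific features: ignore request_mask and
		 * query_flags and fill the common fields as before. */
		generic_fillattr(d_inode(path->dentry), stat);
		return 0;
	}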
index 569211703721fe81e0cbbbd1cb979a10c276cf9e..94dd27ef4a766ca5414146fab1b76ea5aacb67de 100644 (file)
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -382,8 +382,7 @@ struct inode_operations {
        int (*permission) (struct inode *, int);
        int (*get_acl)(struct inode *, int);
        int (*setattr) (struct dentry *, struct iattr *);
-       int (*getattr) (const struct path *, struct dentry *, struct kstat *,
-                       u32, unsigned int);
+       int (*getattr) (const struct path *, struct kstat *, u32, unsigned int);
        ssize_t (*listxattr) (struct dentry *, char *, size_t);
        void (*update_time)(struct inode *, struct timespec *, int);
        int (*atomic_open)(struct inode *, struct dentry *, struct file *,
index 54bd5faa8782258f72b134f1aaca684c15a00eb6..f2af35f6d6b2bc8bbfa2ff5727f24b3fd1a9b2f6 100644 (file)
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -77,9 +77,15 @@ static struct pinctrl_desc foo_desc = {
 
 int __init foo_probe(void)
 {
+       int error;
+
        struct pinctrl_dev *pctl;
 
-       return pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
+       error = pinctrl_register_and_init(&foo_desc, <PARENT>, NULL, &pctl);
+       if (error)
+               return error;
+
+       return pinctrl_enable(pctl);
 }
 
 To enable the pinctrl subsystem and the subgroups for PINMUX and PINCONF and
index 11ec2d93a5e08dc02db1f39291fe8bbec13f8748..61e9c78bd6d1d999033d11170ea6062337984a7e 100644 (file)
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -124,7 +124,7 @@ specified in the following format in the sign-off area:
 
 .. code-block:: none
 
-     Cc: <stable@vger.kernel.org> # 3.3.x-
+     Cc: <stable@vger.kernel.org> # 3.3.x
 
 The tag has the meaning of:
 
index 76e61c883347415f22242060b46ec13ac68cc09c..b2f60ca8b60cad64d9ffba81f61919bc4b8edacb 100644 (file)
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -83,6 +83,12 @@ Groups:
 
     Bits for undefined preemption levels are RAZ/WI.
 
+    For historical reasons and to provide ABI compatibility with userspace we
+    export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask
+    field in the lower 5 bits of a word, meaning that userspace must always
+    use the lower 5 bits to communicate with the KVM device and must shift the
+    value left by 3 places to obtain the actual priority mask level.
+
   Limitations:
     - Priorities are not implemented, and registers are RAZ/WI
     - Currently only implemented for KVM_DEV_TYPE_ARM_VGIC_V2.
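
A sketch of the conversion described above, as userspace would apply it
(the helper name is illustrative, not a KVM API):

	#include <stdint.h>

	static inline uint32_t vgic_pmr_from_vmcr(uint32_t val)
	{
		/* 5-bit GICH_VMCR.VMPriMask field -> 8-bit priority mask */
		return (val & 0x1f) << 3;
	}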
index 1b0a87ffffab6701691808e1ce3053bd040c5dfb..676c139bc883ef7906e854174b8177ef282e0c51 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4117,14 +4117,13 @@ F:      drivers/block/drbd/
 F:     lib/lru_cache.c
 F:     Documentation/blockdev/drbd/
 
-DRIVER CORE, KOBJECTS, DEBUGFS, KERNFS AND SYSFS
+DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
 S:     Supported
 F:     Documentation/kobject.txt
 F:     drivers/base/
 F:     fs/debugfs/
-F:     fs/kernfs/
 F:     fs/sysfs/
 F:     include/linux/debugfs.h
 F:     include/linux/kobj*
@@ -4928,6 +4927,7 @@ F:        include/linux/netfilter_bridge/
 F:     net/bridge/
 
 ETHERNET PHY LIBRARY
+M:     Andrew Lunn <andrew@lunn.ch>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -7089,9 +7089,9 @@ S:        Maintained
 F:     fs/autofs4/
 
 KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
+M:     Masahiro Yamada <yamada.masahiro@socionext.com>
 M:     Michal Marek <mmarek@suse.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git for-next
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild.git rc-fixes
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild.git
 L:     linux-kbuild@vger.kernel.org
 S:     Maintained
 F:     Documentation/kbuild/
@@ -7208,6 +7208,14 @@ F:       arch/mips/include/uapi/asm/kvm*
 F:     arch/mips/include/asm/kvm*
 F:     arch/mips/kvm/
 
+KERNFS
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M:     Tejun Heo <tj@kernel.org>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
+S:     Supported
+F:     include/linux/kernfs.h
+F:     fs/kernfs/
+
 KEXEC
 M:     Eric Biederman <ebiederm@xmission.com>
 W:     http://kernel.org/pub/linux/utils/kernel/kexec/
@@ -10814,6 +10822,7 @@ F:      drivers/s390/block/dasd*
 F:     block/partitions/ibm.c
 
 S390 NETWORK DRIVERS
+M:     Julian Wiedmann <jwi@linux.vnet.ibm.com>
 M:     Ursula Braun <ubraun@linux.vnet.ibm.com>
 L:     linux-s390@vger.kernel.org
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -10844,6 +10853,7 @@ S:      Supported
 F:     drivers/s390/scsi/zfcp_*
 
 S390 IUCV NETWORK LAYER
+M:     Julian Wiedmann <jwi@linux.vnet.ibm.com>
 M:     Ursula Braun <ubraun@linux.vnet.ibm.com>
 L:     linux-s390@vger.kernel.org
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -13295,7 +13305,7 @@ F:      drivers/virtio/
 F:     tools/virtio/
 F:     drivers/net/virtio_net.c
 F:     drivers/block/virtio_blk.c
-F:     include/linux/virtio_*.h
+F:     include/linux/virtio*.h
 F:     include/uapi/linux/virtio_*.h
 F:     drivers/crypto/virtio/
 
index e11989d36c8752bfb8c0a6a4c477c009eef28b57..5039b9148d15de5762a2a33147535569d5be5746 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -372,7 +372,7 @@ LDFLAGS_MODULE  =
 CFLAGS_KERNEL  =
 AFLAGS_KERNEL  =
 LDFLAGS_vmlinux =
-CFLAGS_GCOV    = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
+CFLAGS_GCOV    := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
 CFLAGS_KCOV    := $(call cc-option,-fsanitize-coverage=trace-pc,)
 
 
@@ -653,6 +653,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \
 # Tell gcc to never replace conditional load with a non-conditional one
 KBUILD_CFLAGS  += $(call cc-option,--param=allow-store-data-races=0)
 
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+       KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+       KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
 include scripts/Makefile.gcc-plugins
 
 ifdef CONFIG_READABLE_ASM
@@ -798,12 +804,6 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=incompatible-pointer-types)
 # use the deterministic mode of AR if available
 KBUILD_ARFLAGS := $(call ar-option,D)
 
-# check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
-       KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
-       KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
-endif
-
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn
 include scripts/Makefile.ubsan
index 0b961093ca5cac69fe88598ed7ebc9c023a11a78..6d76e528ab8f2606ddde5e3b739598b86a7cb5b4 100644 (file)
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1290,7 +1290,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
        /* copy relevant bits of struct timex. */
        if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
            copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) - 
-                          offsetof(struct timex32, time)))
+                          offsetof(struct timex32, tick)))
          return -EFAULT;
 
        ret = do_adjtimex(&txc);        
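
The one-character fix above restores the rule that a two-chunk copy must
size its second chunk from the field it resumes at; schematically:

	/* first chunk: everything before 'time' */
	copy_from_user(&txc, txc_p, offsetof(struct timex32, time));
	/* second chunk: from 'tick' to the end, so the size is measured
	 * from 'tick'; using offsetof(..., time) here overran both buffers */
	copy_from_user(&txc.tick, &txc_p->tick,
		       sizeof(struct timex32) - offsetof(struct timex32, tick));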
index efb5eae290a8b7437b95206fa9f69ae4ccecab44..d42b98f15e8b97aaa5956047b51bd1d4a8d610ed 100644 (file)
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
 
        phy1: ethernet-phy@1 {
                reg = <7>;
+               eee-broken-100tx;
+               eee-broken-1000t;
        };
 };
 
index 9e43c443738a8acedd2c0e160a5b6ddc9e0f6c39..9ba4b18c0cb21711dcd8ef60648a0916914819e7 100644 (file)
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
        ti,non-removable;
        bus-width = <4>;
        cap-power-off-card;
+       keep-power-in-suspend;
        pinctrl-names = "default";
        pinctrl-0 = <&mmc2_pins>;
 
index 2c9e56f4aac53aa74c206b2cbb3622965d7ab792..bbfb9d5a70a98116d303844a91c6a65dd42cfb39 100644 (file)
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
                                device_type = "pci";
                                ranges = <0x81000000 0 0          0x03000 0 0x00010000
                                          0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+                               bus-range = <0x00 0xff>;
                                #interrupt-cells = <1>;
                                num-lanes = <1>;
                                linux,pci-domain = <0>;
                                device_type = "pci";
                                ranges = <0x81000000 0 0          0x03000 0 0x00010000
                                          0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+                               bus-range = <0x00 0xff>;
                                #interrupt-cells = <1>;
                                num-lanes = <1>;
                                linux,pci-domain = <1>;
index 8f9a69ca818cecb759e71c1b6b97e4073c3e22e4..efe53998c961244fc0cd1ff32a5e53c885b6322f 100644 (file)
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
 &i2c3 {
        clock-frequency = <400000>;
        at24@50 {
-               compatible = "at24,24c02";
+               compatible = "atmel,24c64";
                readonly;
                reg = <0x50>;
        };
index 0467fb365bfca714b5ce9021974470002139039f..306af6cadf26033c6102b61c6a6d7693faba30fe 100644 (file)
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
                        opp-microvolt = <1200000>;
                        clock-latency-ns = <244144>; /* 8 32k periods */
                };
-
-               opp@1200000000 {
-                       opp-hz = /bits/ 64 <1200000000>;
-                       opp-microvolt = <1320000>;
-                       clock-latency-ns = <244144>; /* 8 32k periods */
-               };
        };
 
        cpus {
                        operating-points-v2 = <&cpu0_opp_table>;
                };
 
+               cpu@1 {
+                       operating-points-v2 = <&cpu0_opp_table>;
+               };
+
                cpu@2 {
                        compatible = "arm,cortex-a7";
                        device_type = "cpu";
                        reg = <2>;
+                       operating-points-v2 = <&cpu0_opp_table>;
                };
 
                cpu@3 {
                        compatible = "arm,cortex-a7";
                        device_type = "cpu";
                        reg = <3>;
+                       operating-points-v2 = <&cpu0_opp_table>;
                };
        };
 
index 96dba7cd8be7b4b6f29d9896e2d4515c477ca963..314eb6abe1ff9879272fdae542c2e0043f3759eb 100644 (file)
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1124,6 +1124,9 @@ static void cpu_hyp_reinit(void)
                if (__hyp_get_vectors() == hyp_default_vectors)
                        cpu_init_hyp_mode(NULL);
        }
+
+       if (vgic_present)
+               kvm_vgic_init_cpu_hardware();
 }
 
 static void cpu_hyp_reset(void)
index 962616fd4ddd633289e98b99d99dea9fb758b820..582a972371cf886b8581de1abcfe609a53540245 100644 (file)
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -292,11 +292,18 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
        phys_addr_t addr = start, end = start + size;
        phys_addr_t next;
 
+       assert_spin_locked(&kvm->mmu_lock);
        pgd = kvm->arch.pgd + stage2_pgd_index(addr);
        do {
                next = stage2_pgd_addr_end(addr, end);
                if (!stage2_pgd_none(*pgd))
                        unmap_stage2_puds(kvm, pgd, addr, next);
+               /*
+                * If the range is too large, release the kvm->mmu_lock
+                * to prevent starvation and lockup detector warnings.
+                */
+               if (next != end)
+                       cond_resched_lock(&kvm->mmu_lock);
        } while (pgd++, addr = next, addr != end);
 }
 
@@ -803,6 +810,7 @@ void stage2_unmap_vm(struct kvm *kvm)
        int idx;
 
        idx = srcu_read_lock(&kvm->srcu);
+       down_read(&current->mm->mmap_sem);
        spin_lock(&kvm->mmu_lock);
 
        slots = kvm_memslots(kvm);
@@ -810,6 +818,7 @@ void stage2_unmap_vm(struct kvm *kvm)
                stage2_unmap_memslot(kvm, memslot);
 
        spin_unlock(&kvm->mmu_lock);
+       up_read(&current->mm->mmap_sem);
        srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -829,7 +838,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
        if (kvm->arch.pgd == NULL)
                return;
 
+       spin_lock(&kvm->mmu_lock);
        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+       spin_unlock(&kvm->mmu_lock);
+
        /* Free the HW pgd, one page at a time */
        free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
        kvm->arch.pgd = NULL;
@@ -1801,6 +1813,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
            (KVM_PHYS_SIZE >> PAGE_SHIFT))
                return -EFAULT;
 
+       down_read(&current->mm->mmap_sem);
        /*
         * A memory region could potentially cover multiple VMAs, and any holes
         * between them, so iterate over all of them to find out if we can map
@@ -1844,8 +1857,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                        pa += vm_start - vma->vm_start;
 
                        /* IO region dirty page logging not allowed */
-                       if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
-                               return -EINVAL;
+                       if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+                               ret = -EINVAL;
+                               goto out;
+                       }
 
                        ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
                                                    vm_end - vm_start,
@@ -1857,7 +1872,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
        } while (hva < reg_end);
 
        if (change == KVM_MR_FLAGS_ONLY)
-               return ret;
+               goto out;
 
        spin_lock(&kvm->mmu_lock);
        if (ret)
@@ -1865,6 +1880,8 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
        else
                stage2_flush_memslot(kvm, memslot);
        spin_unlock(&kvm->mmu_lock);
+out:
+       up_read(&current->mm->mmap_sem);
        return ret;
 }
 
index c4f2ace91ea22d1a6a344388498ec8da30906e63..3089d3bfa19b4a5c595e6872824c81f103d4c5e2 100644 (file)
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -270,6 +270,7 @@ extern const struct smp_operations omap4_smp_ops;
 extern int omap4_mpuss_init(void);
 extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
 extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
+extern u32 omap4_get_cpu1_ns_pa_addr(void);
 #else
 static inline int omap4_enter_lowpower(unsigned int cpu,
                                        unsigned int power_state)
index d3fb5661bb5d4bc098b05e36f12278301b4c5597..433db6d0b07396288f3b1e38c57d0a64b2ad1e66 100644 (file)
--- a/arch/arm/mach-omap2/omap-hotplug.c
+++ b/arch/arm/mach-omap2/omap-hotplug.c
@@ -50,7 +50,7 @@ void omap4_cpu_die(unsigned int cpu)
                omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
 
                if (omap_secure_apis_support())
-                       boot_cpu = omap_read_auxcoreboot0();
+                       boot_cpu = omap_read_auxcoreboot0() >> 9;
                else
                        boot_cpu =
                                readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5;
index 113ab2dd2ee91ccf9c238813bd6c4d7561ff7d97..03ec6d307c8235fc907a599322991b1efd6c27bf 100644 (file)
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -64,6 +64,7 @@
 #include "prm-regbits-44xx.h"
 
 static void __iomem *sar_base;
+static u32 old_cpu1_ns_pa_addr;
 
 #if defined(CONFIG_PM) && defined(CONFIG_SMP)
 
@@ -212,6 +213,11 @@ static void __init save_l2x0_context(void)
 {}
 #endif
 
+u32 omap4_get_cpu1_ns_pa_addr(void)
+{
+       return old_cpu1_ns_pa_addr;
+}
+
 /**
  * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
  * The purpose of this function is to manage low power programming
@@ -460,22 +466,30 @@ int __init omap4_mpuss_init(void)
 void __init omap4_mpuss_early_init(void)
 {
        unsigned long startup_pa;
+       void __iomem *ns_pa_addr;
 
-       if (!(cpu_is_omap44xx() || soc_is_omap54xx()))
+       if (!(soc_is_omap44xx() || soc_is_omap54xx()))
                return;
 
        sar_base = omap4_get_sar_ram_base();
 
-       if (cpu_is_omap443x())
+       /* Save old NS_PA_ADDR for validity checks later on */
+       if (soc_is_omap44xx())
+               ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+       else
+               ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+       old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
+
+       if (soc_is_omap443x())
                startup_pa = __pa_symbol(omap4_secondary_startup);
-       else if (cpu_is_omap446x())
+       else if (soc_is_omap446x())
                startup_pa = __pa_symbol(omap4460_secondary_startup);
        else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
                startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
        else
                startup_pa = __pa_symbol(omap5_secondary_startup);
 
-       if (cpu_is_omap44xx())
+       if (soc_is_omap44xx())
                writel_relaxed(startup_pa, sar_base +
                               CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
        else
index fd90125bffc70ad6719bfc2b9f3e22d21b595367..72506e6cf9e7423f946d83e61b5aca66d2fee0c0 100644 (file)
--- a/arch/arm/mach-omap2/omap-smc.S
+++ b/arch/arm/mach-omap2/omap-smc.S
@@ -94,6 +94,5 @@ ENTRY(omap_read_auxcoreboot0)
        ldr     r12, =0x103
        dsb
        smc     #0
-       mov     r0, r0, lsr #9
        ldmfd   sp!, {r2-r12, pc}
 ENDPROC(omap_read_auxcoreboot0)
index 003353b0b7944d9363fb6446e683314823cd82f9..3faf454ba4871c8f60d5e12e912a1ff9dd9af271 100644 (file)
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -21,6 +21,7 @@
 #include <linux/io.h>
 #include <linux/irqchip/arm-gic.h>
 
+#include <asm/sections.h>
 #include <asm/smp_scu.h>
 #include <asm/virt.h>
 
 
 #define OMAP5_CORE_COUNT       0x2
 
+#define AUX_CORE_BOOT0_GP_RELEASE      0x020
+#define AUX_CORE_BOOT0_HS_RELEASE      0x200
+
 struct omap_smp_config {
        unsigned long cpu1_rstctrl_pa;
        void __iomem *cpu1_rstctrl_va;
        void __iomem *scu_base;
+       void __iomem *wakeupgen_base;
        void *startup_addr;
 };
 
@@ -140,7 +145,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
        static struct clockdomain *cpu1_clkdm;
        static bool booted;
        static struct powerdomain *cpu1_pwrdm;
-       void __iomem *base = omap_get_wakeupgen_base();
 
        /*
         * Set synchronisation state between this boot processor
@@ -155,9 +159,11 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
         * A barrier is added to ensure that write buffer is drained
         */
        if (omap_secure_apis_support())
-               omap_modify_auxcoreboot0(0x200, 0xfffffdff);
+               omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
+                                        0xfffffdff);
        else
-               writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0);
+               writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
+                              cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
 
        if (!cpu1_clkdm && !cpu1_pwrdm) {
                cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
@@ -261,9 +267,72 @@ static void __init omap4_smp_init_cpus(void)
                set_cpu_possible(i, true);
 }
 
+/*
+ * For now, just make sure the start-up address is not within the booting
+ * kernel space as that means we just overwrote whatever secondary_startup()
+ * code there was.
+ */
+static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
+{
+       if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
+               return false;
+
+       return true;
+}
+
+/*
+ * We may need to reset CPU1 before configuring, otherwise kexec boot can end
+ * up trying to use old kernel startup address or suspend-resume will
+ * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
+ * idle states.
+ */
+static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
+{
+       unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
+       bool needs_reset = false;
+       u32 released;
+
+       if (omap_secure_apis_support())
+               released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
+       else
+               released = readl_relaxed(cfg.wakeupgen_base +
+                                        OMAP_AUX_CORE_BOOT_0) &
+                                               AUX_CORE_BOOT0_GP_RELEASE;
+       if (released) {
+               pr_warn("smp: CPU1 not parked?\n");
+
+               return;
+       }
+
+       cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
+                                       OMAP_AUX_CORE_BOOT_1);
+       cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
+
+       /* Did the configured secondary_startup() get overwritten? */
+       if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
+               needs_reset = true;
+
+       /*
+        * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
+        * deeper idle state in WFI and will wake to an invalid address.
+        */
+       if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
+           !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
+               needs_reset = true;
+
+       if (!needs_reset || !c->cpu1_rstctrl_va)
+               return;
+
+       pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
+               cpu1_startup_pa, cpu1_ns_pa_addr);
+
+       writel_relaxed(1, c->cpu1_rstctrl_va);
+       readl_relaxed(c->cpu1_rstctrl_va);
+       writel_relaxed(0, c->cpu1_rstctrl_va);
+}
+
 static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 {
-       void __iomem *base = omap_get_wakeupgen_base();
        const struct omap_smp_config *c = NULL;
 
        if (soc_is_omap443x())
@@ -281,6 +350,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
        /* Must preserve cfg.scu_base set earlier */
        cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
        cfg.startup_addr = c->startup_addr;
+       cfg.wakeupgen_base = omap_get_wakeupgen_base();
 
        if (soc_is_dra74x() || soc_is_omap54xx()) {
                if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
@@ -299,15 +369,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
        if (cfg.scu_base)
                scu_enable(cfg.scu_base);
 
-       /*
-        * Reset CPU1 before configuring, otherwise kexec will
-        * end up trying to use old kernel startup address.
-        */
-       if (cfg.cpu1_rstctrl_va) {
-               writel_relaxed(1, cfg.cpu1_rstctrl_va);
-               readl_relaxed(cfg.cpu1_rstctrl_va);
-               writel_relaxed(0, cfg.cpu1_rstctrl_va);
-       }
+       omap4_smp_maybe_reset_cpu1(&cfg);
 
        /*
         * Write the address of secondary startup routine into the
@@ -319,7 +381,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
                omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
        else
                writel_relaxed(__pa_symbol(cfg.startup_addr),
-                              base + OMAP_AUX_CORE_BOOT_1);
+                              cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
 }
 
 const struct smp_operations omap4_smp_ops __initconst = {
index e920dd83e443753ccced325ce19c48c6bca398c6..f989145480c8fcd0c947beaadeefe6955896a434 100644 (file)
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
                                dev_err(dev, "failed to idle\n");
                }
                break;
+       case BUS_NOTIFY_BIND_DRIVER:
+               od = to_omap_device(pdev);
+               if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+                   pm_runtime_status_suspended(dev)) {
+                       od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+                       pm_runtime_set_active(dev);
+               }
+               break;
        case BUS_NOTIFY_ADD_DEVICE:
                if (pdev->dev.of_node)
                        omap_device_build_from_dt(pdev);
index 633442ad4e4c16d4c80564410167b53977e7575f..2a7bb6ccdcb7eb219f515c6e0f1ba2bfe573a349 100644 (file)
--- a/arch/arm/mach-orion5x/Kconfig
+++ b/arch/arm/mach-orion5x/Kconfig
@@ -6,6 +6,7 @@ menuconfig ARCH_ORION5X
        select GPIOLIB
        select MVEBU_MBUS
        select PCI
+       select PHYLIB if NETDEVICES
        select PLAT_ORION_LEGACY
        help
          Support for the following Marvell Orion 5x series SoCs:
index 63eabb06f9f17551695e89efc0ed59e0ce6ba186..475811f5383afc40d5e00ab15e03fca6d88d8866 100644 (file)
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -935,13 +935,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
        __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
 }
 
+/*
+ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
+ * that the intention is to allow exporting memory allocated via the
+ * coherent DMA APIs through the dma_buf API, which only accepts a
+ * scattertable.  This presents a couple of problems:
+ * 1. Not all memory allocated via the coherent DMA APIs is backed by
+ *    a struct page
+ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
+ *    as we will try to flush the memory through a different alias to that
+ *    actually being used (and the flushes are redundant.)
+ */
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                 void *cpu_addr, dma_addr_t handle, size_t size,
                 unsigned long attrs)
 {
-       struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+       unsigned long pfn = dma_to_pfn(dev, handle);
+       struct page *page;
        int ret;
 
+       /* If the PFN is not valid, we do not have a struct page */
+       if (!pfn_valid(pfn))
+               return -ENXIO;
+
+       page = pfn_to_page(pfn);
+
        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (unlikely(ret))
                return ret;
index 3b5c7aaf9c76c522f8c6cbd7890c5105b3a3e1d4..33a45bd9686012c2547b218133f59a0169fea33d 100644 (file)
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -303,7 +303,10 @@ static inline void set_vbar(unsigned long val)
  */
 static inline bool security_extensions_enabled(void)
 {
-       return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+       /* Check CPUID Identification Scheme before ID_PFR1 read */
+       if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
+               return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
+       return 0;
 }
 
 static unsigned long __init setup_vectors_base(void)
index 9255b6d67ba5e3a3b3586639cadc86342e8b4b04..aff6994950ba6db7eb6579a90cc94e5b2bfc7329 100644 (file)
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -468,6 +468,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
                    eth_data, &orion_ge11);
 }
 
+#ifdef CONFIG_ARCH_ORION5X
 /*****************************************************************************
  * Ethernet switch
  ****************************************************************************/
@@ -480,6 +481,9 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
        struct mdio_board_info *bd;
        unsigned int i;
 
+       if (!IS_BUILTIN(CONFIG_PHYLIB))
+               return;
+
        for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
                if (!strcmp(d->port_names[i], "cpu"))
                        break;
@@ -493,6 +497,7 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
 
        mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
 }
+#endif
 
 /*****************************************************************************
  * I2C
index b6dc9d838a9a39b9ff2e22e9ed371a522ead0fa7..ad1f4e6a9e339374219d8a74ccd6bfbc01b41f17 100644 (file)
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -266,11 +266,20 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 #endif
 
        if (p) {
-               if (cur) {
+               if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+                       /*
+                        * Probe hit but conditional execution check failed,
+                        * so just skip the instruction and continue as if
+                        * nothing had happened.
+                        * In this case, we can skip recursing check too.
+                        */
+                       singlestep_skip(p, regs);
+               } else if (cur) {
                        /* Kprobe is pending, so we're recursing. */
                        switch (kcb->kprobe_status) {
                        case KPROBE_HIT_ACTIVE:
                        case KPROBE_HIT_SSDONE:
+                       case KPROBE_HIT_SS:
                                /* A pre- or post-handler probe got us here. */
                                kprobes_inc_nmissed_count(p);
                                save_previous_kprobe(kcb);
@@ -279,11 +288,16 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
                                singlestep(p, regs, kcb);
                                restore_previous_kprobe(kcb);
                                break;
+                       case KPROBE_REENTER:
+                               /* A nested probe was hit in FIQ, it is a BUG */
+                               pr_warn("Unrecoverable kprobe detected at %p.\n",
+                                       p->addr);
+                               /* fall through */
                        default:
                                /* impossible cases */
                                BUG();
                        }
-               } else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+               } else {
                        /* Probe hit and conditional execution check ok. */
                        set_current_kprobe(p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -304,13 +318,6 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
                                }
                                reset_current_kprobe();
                        }
-               } else {
-                       /*
-                        * Probe hit but conditional execution check failed,
-                        * so just skip the instruction and continue as if
-                        * nothing had happened.
-                        */
-                       singlestep_skip(p, regs);
                }
        } else if (cur) {
                /* We probably hit a jprobe.  Call its break handler. */
@@ -434,6 +441,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+       kprobe_opcode_t *correct_ret_addr = NULL;
 
        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);
@@ -456,14 +464,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
                        /* another task is sharing our hash bucket */
                        continue;
 
+               orig_ret_address = (unsigned long)ri->ret_addr;
+
+               if (orig_ret_address != trampoline_address)
+                       /*
+                        * This is the real return address. Any other
+                        * instances associated with this task are for
+                        * other calls deeper on the call stack
+                        */
+                       break;
+       }
+
+       kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+       correct_ret_addr = ri->ret_addr;
+       hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+               if (ri->task != current)
+                       /* another task is sharing our hash bucket */
+                       continue;
+
+               orig_ret_address = (unsigned long)ri->ret_addr;
                if (ri->rp && ri->rp->handler) {
                        __this_cpu_write(current_kprobe, &ri->rp->kp);
                        get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+                       ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
                        __this_cpu_write(current_kprobe, NULL);
                }
 
-               orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);
 
                if (orig_ret_address != trampoline_address)
@@ -475,7 +503,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
                        break;
        }
 
-       kretprobe_assert(ri, orig_ret_address, trampoline_address);
        kretprobe_hash_unlock(current, &flags);
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
index c893726aa52d8da16e56d9df56ad1e6ca8685b29..1c98a87786ca768adb622f1720df442a272bfb75 100644 (file)
--- a/arch/arm/probes/kprobes/test-core.c
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -977,7 +977,10 @@ static void coverage_end(void)
 void __naked __kprobes_test_case_start(void)
 {
        __asm__ __volatile__ (
-               "stmdb  sp!, {r4-r11}                           \n\t"
+               "mov    r2, sp                                  \n\t"
+               "bic    r3, r2, #7                              \n\t"
+               "mov    sp, r3                                  \n\t"
+               "stmdb  sp!, {r2-r11}                           \n\t"
                "sub    sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
                "bic    r0, lr, #1  @ r0 = inline data          \n\t"
                "mov    r1, sp                                  \n\t"
@@ -997,7 +1000,8 @@ void __naked __kprobes_test_case_end_32(void)
                "movne  pc, r0                                  \n\t"
                "mov    r0, r4                                  \n\t"
                "add    sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-               "ldmia  sp!, {r4-r11}                           \n\t"
+               "ldmia  sp!, {r2-r11}                           \n\t"
+               "mov    sp, r2                                  \n\t"
                "mov    pc, r0                                  \n\t"
        );
 }
@@ -1013,7 +1017,8 @@ void __naked __kprobes_test_case_end_16(void)
                "bxne   r0                                      \n\t"
                "mov    r0, r4                                  \n\t"
                "add    sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
-               "ldmia  sp!, {r4-r11}                           \n\t"
+               "ldmia  sp!, {r2-r11}                           \n\t"
+               "mov    sp, r2                                  \n\t"
                "bx     r0                                      \n\t"
        );
 }
index 1c64ea2d23f96a4a990f9f6e8cd4b22fd3fd3dd3..0565779e66fafd9755048c2a1c7f8ebc15533eff 100644 (file)
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
                usbphy: phy@01c19400 {
                        compatible = "allwinner,sun50i-a64-usb-phy";
                        reg = <0x01c19400 0x14>,
+                             <0x01c1a800 0x4>,
                              <0x01c1b800 0x4>;
                        reg-names = "phy_ctrl",
+                                   "pmu0",
                                    "pmu1";
                        clocks = <&ccu CLK_USB_PHY0>,
                                 <&ccu CLK_USB_PHY1>;
index 4bf899fb451baf652cafe6e3393be914af9674ba..1b35b8bddbfb07e778ab5c2bc511a86e04e2a255 100644 (file)
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-static const char *fault_name(unsigned int esr);
+struct fault_info {
+       int     (*fn)(unsigned long addr, unsigned int esr,
+                     struct pt_regs *regs);
+       int     sig;
+       int     code;
+       const char *name;
+};
+
+static const struct fault_info fault_info[];
+
+static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
+{
+       return fault_info + (esr & 63);
+}
 
 #ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
@@ -197,10 +210,12 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
                            struct pt_regs *regs)
 {
        struct siginfo si;
+       const struct fault_info *inf;
 
        if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
+               inf = esr_to_fault_info(esr);
                pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
-                       tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
+                       tsk->comm, task_pid_nr(tsk), inf->name, sig,
                        addr, esr);
                show_pte(tsk->mm, addr);
                show_regs(regs);
@@ -219,14 +234,16 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 {
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->active_mm;
+       const struct fault_info *inf;
 
        /*
         * If we are in kernel mode at this point, we have no context to
         * handle this fault with.
         */
-       if (user_mode(regs))
-               __do_user_fault(tsk, addr, esr, SIGSEGV, SEGV_MAPERR, regs);
-       else
+       if (user_mode(regs)) {
+               inf = esr_to_fault_info(esr);
+               __do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
+       } else
                __do_kernel_fault(mm, addr, esr, regs);
 }
 
@@ -488,12 +505,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
        return 1;
 }
 
-static const struct fault_info {
-       int     (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
-       int     sig;
-       int     code;
-       const char *name;
-} fault_info[] = {
+static const struct fault_info fault_info[] = {
        { do_bad,               SIGBUS,  0,             "ttbr address size fault"       },
        { do_bad,               SIGBUS,  0,             "level 1 address size fault"    },
        { do_bad,               SIGBUS,  0,             "level 2 address size fault"    },
@@ -560,19 +572,13 @@ static const struct fault_info {
        { do_bad,               SIGBUS,  0,             "unknown 63"                    },
 };
 
-static const char *fault_name(unsigned int esr)
-{
-       const struct fault_info *inf = fault_info + (esr & 63);
-       return inf->name;
-}
-
 /*
  * Dispatch a data abort to the relevant handler.
  */
 asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
                                         struct pt_regs *regs)
 {
-       const struct fault_info *inf = fault_info + (esr & 63);
+       const struct fault_info *inf = esr_to_fault_info(esr);
        struct siginfo info;
 
        if (!inf->fn(addr, esr, regs))
index e25584d723960e73fb8eec8d1a5f48fa57197582..7514a000e361f45e21d51601b1d351cb40c9306e 100644 (file)
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -294,10 +294,6 @@ static __init int setup_hugepagesz(char *opt)
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
-       } else if (ps == (PAGE_SIZE * CONT_PTES)) {
-               hugetlb_add_hstate(CONT_PTE_SHIFT);
-       } else if (ps == (PMD_SIZE * CONT_PMDS)) {
-               hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT);
        } else {
                hugetlb_bad_size();
                pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10);
@@ -306,13 +302,3 @@ static __init int setup_hugepagesz(char *opt)
        return 1;
 }
 __setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
-       if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
-               hugetlb_add_hstate(CONT_PTE_SHIFT);
-       return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif
diff --git a/arch/ia64/include/asm/asm-prototypes.h b/arch/ia64/include/asm/asm-prototypes.h
new file mode 100644 (file)
index 0000000..a2c1398
--- /dev/null
+++ b/arch/ia64/include/asm/asm-prototypes.h
@@ -0,0 +1,29 @@
+#ifndef _ASM_IA64_ASM_PROTOTYPES_H
+#define _ASM_IA64_ASM_PROTOTYPES_H
+
+#include <asm/cacheflush.h>
+#include <asm/checksum.h>
+#include <asm/esi.h>
+#include <asm/ftrace.h>
+#include <asm/page.h>
+#include <asm/pal.h>
+#include <asm/string.h>
+#include <asm/uaccess.h>
+#include <asm/unwind.h>
+#include <asm/xor.h>
+
+extern const char ia64_ivt[];
+
+signed int __divsi3(signed int, unsigned int);
+signed int __modsi3(signed int, unsigned int);
+
+signed long long __divdi3(signed long long, unsigned long long);
+signed long long __moddi3(signed long long, unsigned long long);
+
+unsigned int __udivsi3(unsigned int, unsigned int);
+unsigned int __umodsi3(unsigned int, unsigned int);
+
+unsigned long long __udivdi3(unsigned long long, unsigned long long);
+unsigned long long __umoddi3(unsigned long long, unsigned long long);
+
+#endif /* _ASM_IA64_ASM_PROTOTYPES_H */
index 1f3d3877618fdc934ab20f07695476206fe35e00..0a40b14407b1692c7a684bb2ea689d130df36805 100644 (file)
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -24,25 +24,25 @@ AFLAGS___modsi3.o   =            -DMODULO
 AFLAGS___umodsi3.o     = -DUNSIGNED -DMODULO
 
 $(obj)/__divdi3.o: $(src)/idiv64.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__udivdi3.o: $(src)/idiv64.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__moddi3.o: $(src)/idiv64.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__umoddi3.o: $(src)/idiv64.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__divsi3.o: $(src)/idiv32.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__udivsi3.o: $(src)/idiv32.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__modsi3.o: $(src)/idiv32.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
 
 $(obj)/__umodsi3.o: $(src)/idiv32.S FORCE
-       $(call if_changed_dep,as_o_S)
+       $(call if_changed_rule,as_o_S)
index 273e61225c277ae67ba28dfae4ef9123e3ef34a9..07238b39638cd2c933219bb957bff1d80212d8bc 100644
@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
 
 #define strlen_user(str) strnlen_user(str, 32767)
 
-extern unsigned long __must_check __copy_user_zeroing(void *to,
-                                                     const void __user *from,
-                                                     unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+                                       unsigned long n);
 
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+       unsigned long res = n;
        if (likely(access_ok(VERIFY_READ, from, n)))
-               return __copy_user_zeroing(to, from, n);
-       memset(to, 0, n);
-       return n;
+               res = raw_copy_from_user(to, from, n);
+       if (unlikely(res))
+               memset(to + (n - res), 0, res);
+       return res;
 }
 
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
 #define __copy_from_user_inatomic __copy_from_user
 
 extern unsigned long __must_check __copy_user(void __user *to,
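The rewritten copy_from_user() above zeroes only the destination tail that raw_copy_from_user() failed to fill, rather than clearing the whole buffer when access_ok() fails. A self-contained model of that contract (plain C; model_copy_from_user and its 'copied' parameter are illustrative, not kernel API):

    #include <string.h>

    /*
     * Toy model: 'copied' is how many leading bytes the raw copy
     * managed before faulting. The uncopied tail of the destination
     * is zeroed and its length returned, which is what callers of
     * copy_from_user() expect.
     */
    static unsigned long model_copy_from_user(char *to, const char *from,
                                              unsigned long n,
                                              unsigned long copied)
    {
            unsigned long res = n - copied;

            memcpy(to, from, copied);
            if (res)
                    memset(to + copied, 0, res);
            return res;
    }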
index b3ebfe9c8e886a5c0f8f309b1a390ac286208a86..2792fc621088bcd1c3d7bfe58b8560ec32e6f880 100644
@@ -29,7 +29,6 @@
                COPY                                             \
                "1:\n"                                           \
                "       .section .fixup,\"ax\"\n"                \
-               "       MOV D1Ar1,#0\n"                          \
                FIXUP                                            \
                "       MOVT    D1Ar1,#HI(1b)\n"                 \
                "       JUMP    D1Ar1,#LO(1b)\n"                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "22:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
-               "SUB    %3, %3, #32\n"                                  \
                "23:\n"                                                 \
-               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "SUB    %3, %3, #32\n"                                  \
                "24:\n"                                                 \
+               "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "25:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "26:\n"                                                 \
                "SUB    %3, %3, #32\n"                                  \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "BR     $Lloop"id"\n"                                   \
                                                                        \
                "MOV    RAPF, %1\n"                                     \
-               "25:\n"                                                 \
+               "27:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
-               "26:\n"                                                 \
+               "28:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "29:\n"                                                 \
                "SUB    %3, %3, #32\n"                                  \
-               "27:\n"                                                 \
+               "30:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
-               "28:\n"                                                 \
+               "31:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "32:\n"                                                 \
                "SUB    %0, %0, #8\n"                                   \
-               "29:\n"                                                 \
+               "33:\n"                                                 \
                "SETL   [%0++], D0.7, D1.7\n"                           \
                "SUB    %3, %3, #32\n"                                  \
                "1:"                                                    \
                "       .long 26b,3b\n"                                 \
                "       .long 27b,3b\n"                                 \
                "       .long 28b,3b\n"                                 \
-               "       .long 29b,4b\n"                                 \
+               "       .long 29b,3b\n"                                 \
+               "       .long 30b,3b\n"                                 \
+               "       .long 31b,3b\n"                                 \
+               "       .long 32b,3b\n"                                 \
+               "       .long 33b,4b\n"                                 \
                "       .previous\n"                                    \
                : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
                : "0" (to), "1" (from), "2" (ret), "3" (n)              \
-               : "D1Ar1", "D0Ar2", "memory")
+               : "D1Ar1", "D0Ar2", "cc", "memory")
 
 /*     rewind 'to' and 'from'  pointers when a fault occurs
  *
 #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
        __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
                "LSR    D0Ar2, D0Ar2, #8\n"                             \
-               "AND    D0Ar2, D0Ar2, #0x7\n"                           \
+               "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
                "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
                "SUB    D0Ar2, D0Ar2, #1\n"                             \
                "MOV    D1Ar1, #4\n"                                    \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "22:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
-               "SUB    %3, %3, #16\n"                                  \
                "23:\n"                                                 \
-               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
-               "24:\n"                                                 \
-               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
-               "25:\n"                                                 \
+               "24:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
-               "26:\n"                                                 \
+               "25:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "26:\n"                                                 \
                "SUB    %3, %3, #16\n"                                  \
                "27:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "28:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "29:\n"                                                 \
+               "SUB    %3, %3, #16\n"                                  \
+               "30:\n"                                                 \
+               "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
+               "31:\n"                                                 \
+               "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "32:\n"                                                 \
                "SUB    %3, %3, #16\n"                                  \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "BR     $Lloop"id"\n"                                   \
                                                                        \
                "MOV    RAPF, %1\n"                                     \
-               "29:\n"                                                 \
+               "33:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
-               "30:\n"                                                 \
+               "34:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "35:\n"                                                 \
                "SUB    %3, %3, #16\n"                                  \
-               "31:\n"                                                 \
+               "36:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
-               "32:\n"                                                 \
+               "37:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "38:\n"                                                 \
                "SUB    %3, %3, #16\n"                                  \
-               "33:\n"                                                 \
+               "39:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
-               "34:\n"                                                 \
+               "40:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "41:\n"                                                 \
                "SUB    %3, %3, #16\n"                                  \
-               "35:\n"                                                 \
+               "42:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
-               "36:\n"                                                 \
+               "43:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
+               "44:\n"                                                 \
                "SUB    %0, %0, #4\n"                                   \
-               "37:\n"                                                 \
+               "45:\n"                                                 \
                "SETD   [%0++], D0.7\n"                                 \
                "SUB    %3, %3, #16\n"                                  \
                "1:"                                                    \
                "       .long 34b,3b\n"                                 \
                "       .long 35b,3b\n"                                 \
                "       .long 36b,3b\n"                                 \
-               "       .long 37b,4b\n"                                 \
+               "       .long 37b,3b\n"                                 \
+               "       .long 38b,3b\n"                                 \
+               "       .long 39b,3b\n"                                 \
+               "       .long 40b,3b\n"                                 \
+               "       .long 41b,3b\n"                                 \
+               "       .long 42b,3b\n"                                 \
+               "       .long 43b,3b\n"                                 \
+               "       .long 44b,3b\n"                                 \
+               "       .long 45b,4b\n"                                 \
                "       .previous\n"                                    \
                : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
                : "0" (to), "1" (from), "2" (ret), "3" (n)              \
-               : "D1Ar1", "D0Ar2", "memory")
+               : "D1Ar1", "D0Ar2", "cc", "memory")
 
 /*     rewind 'to' and 'from'  pointers when a fault occurs
  *
 #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
        __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
                "LSR    D0Ar2, D0Ar2, #8\n"                             \
-               "AND    D0Ar2, D0Ar2, #0x7\n"                           \
+               "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
                "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
                "SUB    D0Ar2, D0Ar2, #1\n"                             \
                "MOV    D1Ar1, #4\n"                                    \
@@ -538,23 +561,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
        if ((unsigned long) src & 1) {
                __asm_copy_to_user_1(dst, src, retn);
                n--;
+               if (retn)
+                       return retn + n;
        }
        if ((unsigned long) dst & 1) {
                /* Worst case - byte copy */
                while (n > 0) {
                        __asm_copy_to_user_1(dst, src, retn);
                        n--;
+                       if (retn)
+                               return retn + n;
                }
        }
        if (((unsigned long) src & 2) && n >= 2) {
                __asm_copy_to_user_2(dst, src, retn);
                n -= 2;
+               if (retn)
+                       return retn + n;
        }
        if ((unsigned long) dst & 2) {
                /* Second worst case - word copy */
                while (n >= 2) {
                        __asm_copy_to_user_2(dst, src, retn);
                        n -= 2;
+                       if (retn)
+                               return retn + n;
                }
        }
 
@@ -569,6 +600,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
                while (n >= 8) {
                        __asm_copy_to_user_8x64(dst, src, retn);
                        n -= 8;
+                       if (retn)
+                               return retn + n;
                }
        }
        if (n >= RAPF_MIN_BUF_SIZE) {
@@ -581,6 +614,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
                while (n >= 8) {
                        __asm_copy_to_user_8x64(dst, src, retn);
                        n -= 8;
+                       if (retn)
+                               return retn + n;
                }
        }
 #endif
@@ -588,11 +623,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
        while (n >= 16) {
                __asm_copy_to_user_16(dst, src, retn);
                n -= 16;
+               if (retn)
+                       return retn + n;
        }
 
        while (n >= 4) {
                __asm_copy_to_user_4(dst, src, retn);
                n -= 4;
+               if (retn)
+                       return retn + n;
        }
 
        switch (n) {
@@ -609,6 +648,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc,
                break;
        }
 
+       /*
+        * If we get here, retn correctly reflects the number of failing
+        * bytes.
+        */
        return retn;
 }
 EXPORT_SYMBOL(__copy_user);
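Every copy step in __copy_user() above now tests retn as soon as n has been decremented: retn counts bytes the exception fixups recorded as failed, n counts bytes never attempted, so their sum is the total left uncopied. A tiny model of that accounting (illustrative name only):

    /*
     * Once a step has faulted (retn != 0), everything not yet
     * attempted (n) is also uncopied.
     */
    static unsigned long bytes_not_copied(unsigned long retn, unsigned long n)
    {
            return retn + n;
    }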
@@ -617,16 +660,14 @@ EXPORT_SYMBOL(__copy_user);
        __asm_copy_user_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"    \
                "2:     SETB [%0++],D1Ar1\n",   \
-               "3:     ADD  %2,%2,#1\n"        \
-               "       SETB [%0++],D1Ar1\n",   \
+               "3:     ADD  %2,%2,#1\n",       \
                "       .long 2b,3b\n")
 
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETW D1Ar1,[%1++]\n"            \
                "2:     SETW [%0++],D1Ar1\n" COPY,      \
-               "3:     ADD  %2,%2,#2\n"                \
-               "       SETW [%0++],D1Ar1\n" FIXUP,     \
+               "3:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_2(to, from, ret) \
@@ -636,145 +677,26 @@ EXPORT_SYMBOL(__copy_user);
        __asm_copy_from_user_2x_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"            \
                "4:     SETB [%0++],D1Ar1\n",           \
-               "5:     ADD  %2,%2,#1\n"                \
-               "       SETB [%0++],D1Ar1\n",           \
+               "5:     ADD  %2,%2,#1\n",               \
                "       .long 4b,5b\n")
 
 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETD D1Ar1,[%1++]\n"            \
                "2:     SETD [%0++],D1Ar1\n" COPY,      \
-               "3:     ADD  %2,%2,#4\n"                \
-               "       SETD [%0++],D1Ar1\n" FIXUP,     \
+               "3:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_4(to, from, ret) \
        __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
 
-#define __asm_copy_from_user_5(to, from, ret) \
-       __asm_copy_from_user_4x_cont(to, from, ret,     \
-               "       GETB D1Ar1,[%1++]\n"            \
-               "4:     SETB [%0++],D1Ar1\n",           \
-               "5:     ADD  %2,%2,#1\n"                \
-               "       SETB [%0++],D1Ar1\n",           \
-               "       .long 4b,5b\n")
-
-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-       __asm_copy_from_user_4x_cont(to, from, ret,     \
-               "       GETW D1Ar1,[%1++]\n"            \
-               "4:     SETW [%0++],D1Ar1\n" COPY,      \
-               "5:     ADD  %2,%2,#2\n"                \
-               "       SETW [%0++],D1Ar1\n" FIXUP,     \
-               "       .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_6(to, from, ret) \
-       __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_7(to, from, ret) \
-       __asm_copy_from_user_6x_cont(to, from, ret,     \
-               "       GETB D1Ar1,[%1++]\n"            \
-               "6:     SETB [%0++],D1Ar1\n",           \
-               "7:     ADD  %2,%2,#1\n"                \
-               "       SETB [%0++],D1Ar1\n",           \
-               "       .long 6b,7b\n")
-
-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-       __asm_copy_from_user_4x_cont(to, from, ret,     \
-               "       GETD D1Ar1,[%1++]\n"            \
-               "4:     SETD [%0++],D1Ar1\n" COPY,      \
-               "5:     ADD  %2,%2,#4\n"                        \
-               "       SETD [%0++],D1Ar1\n" FIXUP,             \
-               "       .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_8(to, from, ret) \
-       __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_9(to, from, ret) \
-       __asm_copy_from_user_8x_cont(to, from, ret,     \
-               "       GETB D1Ar1,[%1++]\n"            \
-               "6:     SETB [%0++],D1Ar1\n",           \
-               "7:     ADD  %2,%2,#1\n"                \
-               "       SETB [%0++],D1Ar1\n",           \
-               "       .long 6b,7b\n")
-
-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-       __asm_copy_from_user_8x_cont(to, from, ret,     \
-               "       GETW D1Ar1,[%1++]\n"            \
-               "6:     SETW [%0++],D1Ar1\n" COPY,      \
-               "7:     ADD  %2,%2,#2\n"                \
-               "       SETW [%0++],D1Ar1\n" FIXUP,     \
-               "       .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_10(to, from, ret) \
-       __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_11(to, from, ret)         \
-       __asm_copy_from_user_10x_cont(to, from, ret,    \
-               "       GETB D1Ar1,[%1++]\n"            \
-               "8:     SETB [%0++],D1Ar1\n",           \
-               "9:     ADD  %2,%2,#1\n"                \
-               "       SETB [%0++],D1Ar1\n",           \
-               "       .long 8b,9b\n")
-
-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-       __asm_copy_from_user_8x_cont(to, from, ret,     \
-               "       GETD D1Ar1,[%1++]\n"            \
-               "6:     SETD [%0++],D1Ar1\n" COPY,      \
-               "7:     ADD  %2,%2,#4\n"                \
-               "       SETD [%0++],D1Ar1\n" FIXUP,     \
-               "       .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_12(to, from, ret) \
-       __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_13(to, from, ret) \
-       __asm_copy_from_user_12x_cont(to, from, ret,    \
-               "       GETB D1Ar1,[%1++]\n"            \
-               "8:     SETB [%0++],D1Ar1\n",           \
-               "9:     ADD  %2,%2,#1\n"                \
-               "       SETB [%0++],D1Ar1\n",           \
-               "       .long 8b,9b\n")
-
-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-       __asm_copy_from_user_12x_cont(to, from, ret,    \
-               "       GETW D1Ar1,[%1++]\n"            \
-               "8:     SETW [%0++],D1Ar1\n" COPY,      \
-               "9:     ADD  %2,%2,#2\n"                \
-               "       SETW [%0++],D1Ar1\n" FIXUP,     \
-               "       .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_14(to, from, ret) \
-       __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_15(to, from, ret) \
-       __asm_copy_from_user_14x_cont(to, from, ret,    \
-               "       GETB D1Ar1,[%1++]\n"            \
-               "10:    SETB [%0++],D1Ar1\n",           \
-               "11:    ADD  %2,%2,#1\n"                \
-               "       SETB [%0++],D1Ar1\n",           \
-               "       .long 10b,11b\n")
-
-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
-       __asm_copy_from_user_12x_cont(to, from, ret,    \
-               "       GETD D1Ar1,[%1++]\n"            \
-               "8:     SETD [%0++],D1Ar1\n" COPY,      \
-               "9:     ADD  %2,%2,#4\n"                \
-               "       SETD [%0++],D1Ar1\n" FIXUP,     \
-               "       .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_16(to, from, ret) \
-       __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
-
 #define __asm_copy_from_user_8x64(to, from, ret) \
        asm volatile (                          \
                "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
-               "       MOV D1Ar1,#0\n"                 \
-               "       MOV D0Ar2,#0\n"                 \
                "3:     ADD  %2,%2,#8\n"                \
-               "       SETL [%0++],D0Ar2,D1Ar1\n"      \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
@@ -789,36 +711,57 @@ EXPORT_SYMBOL(__copy_user);
  *
  *     Rationale:
  *             A fault occurs while reading from user buffer, which is the
- *             source. Since the fault is at a single address, we only
- *             need to rewind by 8 bytes.
+ *             source.
  *             Since we don't write to kernel buffer until we read first,
  *             the kernel buffer is at the right state and needn't be
- *             corrected.
+ *             corrected, but the source must be rewound to the beginning of
+ *             the block, which is LSM_STEP*8 bytes.
+ *             LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ *             and stored in D0Ar2
+ *
+ *             NOTE: If a fault occurs at the last operation in M{G,S}ETL
+ *                     LSM_STEP will be 0, i.e. we do 4 writes in our case; if
+ *                     a fault happens at the 4th write, LSM_STEP will be 0
+ *                     instead of 4. The code copes with that.
  */
 #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)     \
        __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
-               "SUB    %1, %1, #8\n")
+               "LSR    D0Ar2, D0Ar2, #5\n"                             \
+               "ANDS   D0Ar2, D0Ar2, #0x38\n"                          \
+               "ADDZ   D0Ar2, D0Ar2, #32\n"                            \
+               "SUB    %1, %1, D0Ar2\n")
 
 /*     rewind 'from' pointer when a fault occurs
  *
  *     Rationale:
  *             A fault occurs while reading from user buffer, which is the
- *             source. Since the fault is at a single address, we only
- *             need to rewind by 4 bytes.
+ *             source.
  *             Since we don't write to kernel buffer until we read first,
  *             the kernel buffer is at the right state and needn't be
- *             corrected.
+ *             corrected, but the source must be rewound to the beginning of
+ *             the block, which is LSM_STEP*4 bytes.
+ *             LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ *             and stored in D0Ar2
+ *
+ *             NOTE: If a fault occurs at the last operation in M{G,S}ETD
+ *                     LSM_STEP will be 0, i.e. we do 4 writes in our case; if
+ *                     a fault happens at the 4th write, LSM_STEP will be 0
+ *                     instead of 4. The code copes with that.
  */
 #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)     \
        __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
-               "SUB    %1, %1, #4\n")
+               "LSR    D0Ar2, D0Ar2, #6\n"                             \
+               "ANDS   D0Ar2, D0Ar2, #0x1c\n"                          \
+               "ADDZ   D0Ar2, D0Ar2, #16\n"                            \
+               "SUB    %1, %1, D0Ar2\n")
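Both rewind fixups above decode LSM_STEP from TXSTATUS bits 10:8 and scale it by the transfer width: 8 bytes per step for the 64-bit MGETL loop (LSR #5 / ANDS #0x38 / ADDZ #32) and 4 bytes per step for the 32-bit MGETD loop (LSR #6 / ANDS #0x1c / ADDZ #16), with ADDZ substituting 4 steps when LSM_STEP reads 0, per the NOTE above. A hedged C model of that arithmetic (names are illustrative):

    /*
     * bytes_per_step is 8 for the 64-bit (MGETL) loop and 4 for the
     * 32-bit (MGETD) loop; LSM_STEP == 0 means the fault hit the last
     * of the four transfers, so it stands for 4.
     */
    static unsigned long rapf_rewind(unsigned long txstatus,
                                     unsigned long bytes_per_step)
    {
            unsigned long step = (txstatus >> 8) & 0x7;

            if (step == 0)
                    step = 4;
            return step * bytes_per_step;
    }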
 
 
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
-   userland.  The return-value is the number of bytes that were
-   inaccessible.  */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
-                                 unsigned long n)
+/*
+ * Copy from user to kernel. The return-value is the number of bytes that were
+ * inaccessible.
+ */
+unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
+                                unsigned long n)
 {
        register char *dst asm ("A0.2") = pdst;
        register const char __user *src asm ("A1.2") = psrc;
@@ -830,6 +773,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
        if ((unsigned long) src & 1) {
                __asm_copy_from_user_1(dst, src, retn);
                n--;
+               if (retn)
+                       return retn + n;
        }
        if ((unsigned long) dst & 1) {
                /* Worst case - byte copy */
@@ -837,12 +782,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                        __asm_copy_from_user_1(dst, src, retn);
                        n--;
                        if (retn)
-                               goto copy_exception_bytes;
+                               return retn + n;
                }
        }
        if (((unsigned long) src & 2) && n >= 2) {
                __asm_copy_from_user_2(dst, src, retn);
                n -= 2;
+               if (retn)
+                       return retn + n;
        }
        if ((unsigned long) dst & 2) {
                /* Second worst case - word copy */
@@ -850,16 +797,10 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                        __asm_copy_from_user_2(dst, src, retn);
                        n -= 2;
                        if (retn)
-                               goto copy_exception_bytes;
+                               return retn + n;
                }
        }
 
-       /* We only need one check after the unalignment-adjustments,
-          because if both adjustments were done, either both or
-          neither reference had an exception.  */
-       if (retn != 0)
-               goto copy_exception_bytes;
-
 #ifdef USE_RAPF
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@@ -872,7 +813,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                        __asm_copy_from_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
-                               goto copy_exception_bytes;
+                               return retn + n;
                }
        }
 
@@ -888,7 +829,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                        __asm_copy_from_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
-                               goto copy_exception_bytes;
+                               return retn + n;
                }
        }
 #endif
@@ -898,7 +839,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                n -= 4;
 
                if (retn)
-                       goto copy_exception_bytes;
+                       return retn + n;
        }
 
        /* If we get here, there were no memory read faults.  */
@@ -924,21 +865,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
        /* If we get here, retn correctly reflects the number of failing
           bytes.  */
        return retn;
-
- copy_exception_bytes:
-       /* We already have "retn" bytes cleared, and need to clear the
-          remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
-          memset is preferred here, since this isn't speed-critical code and
-          we'd rather have this a leaf-function than calling memset.  */
-       {
-               char *endp;
-               for (endp = dst + n; dst < endp; dst++)
-                       *dst = 0;
-       }
-
-       return retn + n;
 }
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(raw_copy_from_user);
 
 #define __asm_clear_8x64(to, ret) \
        asm volatile (                                  \
index a008a9f03072deb900409ad2ef93a6bd65cdb48e..e0bb576410bbdf3bd85cea7a2bb6aae219656c56 100644
@@ -1531,7 +1531,7 @@ config CPU_MIPS64_R6
        select CPU_SUPPORTS_HIGHMEM
        select CPU_SUPPORTS_MSA
        select GENERIC_CSUM
-       select MIPS_O32_FP64_SUPPORT if MIPS32_O32
+       select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
        select HAVE_KVM
        help
          Choose this option to build a kernel for release 6 or later of the
index f94455f964ec00b1e1b17120349b1543ed9dec97..a2813fe381cf5442ca5a4fb2a3f22d619d7ef557 100644
@@ -21,6 +21,7 @@
 #include <asm/cpu-features.h>
 #include <asm/fpu_emulator.h>
 #include <asm/hazards.h>
+#include <asm/ptrace.h>
 #include <asm/processor.h>
 #include <asm/current.h>
 #include <asm/msa.h>
index 956db6e201d1877ca6f6b3f8d48fc9ecd6799396..ddd1c918103bcc347c6a350d28d213ba314a90d9 100644
 #include <irq.h>
 
 #define IRQ_STACK_SIZE                 THREAD_SIZE
+#define IRQ_STACK_START                        (IRQ_STACK_SIZE - sizeof(unsigned long))
 
 extern void *irq_stack[NR_CPUS];
 
+/*
+ * The highest address on the IRQ stack contains a dummy frame put down in
+ * genex.S (handle_int & except_vec_vi_handler) which is structured as follows:
+ *
+ *   top ------------
+ *       | task sp  | <- irq_stack[cpu] + IRQ_STACK_START
+ *       ------------
+ *       |          | <- First frame of IRQ context
+ *       ------------
+ *
+ * task sp holds a copy of the task stack pointer where the struct pt_regs
+ * from exception entry can be found.
+ */
+
 static inline bool on_irq_stack(int cpu, unsigned long sp)
 {
        unsigned long low = (unsigned long)irq_stack[cpu];
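The layout comment above fixes the saved task stack pointer at irq_stack[cpu] + IRQ_STACK_START, i.e. the last word of the IRQ stack. A sketch of the address computation it implies (illustrative helper, not from the patch):

    /* Where the dummy frame's saved task sp lives for a given CPU. */
    static inline unsigned long *irq_stack_task_sp(int cpu)
    {
            return (unsigned long *)((unsigned long)irq_stack[cpu] +
                                     IRQ_STACK_START);
    }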
index f485afe51514765710eaf990b674e92378effe61..a8df44d60607baf6df350fdaf7ba6eaeefd2ff56 100644
@@ -127,7 +127,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
                "       andi    %[ticket], %[ticket], 0xffff            \n"
                "       bne     %[ticket], %[my_ticket], 4f             \n"
                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
-               "2:                                                     \n"
+               "2:     .insn                                           \n"
                "       .subsection 2                                   \n"
                "4:     andi    %[ticket], %[ticket], 0xffff            \n"
                "       sll     %[ticket], 5                            \n"
@@ -202,7 +202,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
                "       sc      %[ticket], %[ticket_ptr]                \n"
                "       beqz    %[ticket], 1b                           \n"
                "        li     %[ticket], 1                            \n"
-               "2:                                                     \n"
+               "2:     .insn                                           \n"
                "       .subsection 2                                   \n"
                "3:     b       2b                                      \n"
                "        li     %[ticket], 0                            \n"
@@ -382,7 +382,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
                "       .set    reorder                                 \n"
                __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
-               "2:                                                     \n"
+               "2:     .insn                                           \n"
                : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
                : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
@@ -422,7 +422,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
                        "       lui     %1, 0x8000                      \n"
                        "       sc      %1, %0                          \n"
                        "       li      %2, 1                           \n"
-                       "2:                                             \n"
+                       "2:     .insn                                   \n"
                        : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
                          "=&r" (ret)
                        : GCC_OFF_SMALL_ASM() (rw->lock)
index 3e940dbe02629ad6916f1870d2bf5a57086e89b8..78faf4292e907c175e0ff70e961a1c768f6df7c0 100644
 #define __NR_pkey_mprotect             (__NR_Linux + 363)
 #define __NR_pkey_alloc                        (__NR_Linux + 364)
 #define __NR_pkey_free                 (__NR_Linux + 365)
+#define __NR_statx                     (__NR_Linux + 366)
 
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            365
+#define __NR_Linux_syscalls            366
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                365
+#define __NR_O32_Linux_syscalls                366
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_pkey_mprotect             (__NR_Linux + 323)
 #define __NR_pkey_alloc                        (__NR_Linux + 324)
 #define __NR_pkey_free                 (__NR_Linux + 325)
+#define __NR_statx                     (__NR_Linux + 326)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            325
+#define __NR_Linux_syscalls            326
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         325
+#define __NR_64_Linux_syscalls         326
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_pkey_mprotect             (__NR_Linux + 327)
 #define __NR_pkey_alloc                        (__NR_Linux + 328)
 #define __NR_pkey_free                 (__NR_Linux + 329)
+#define __NR_statx                     (__NR_Linux + 330)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            329
+#define __NR_Linux_syscalls            330
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                329
+#define __NR_N32_Linux_syscalls                330
 
 #endif /* _UAPI_ASM_UNISTD_H */
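Each MIPS ABI numbers its syscalls from a different base, so the one new statx entry gets three absolute numbers: 4000 + 366 for o32, 5000 + 326 for n64, and 6000 + 330 for n32. A quick userspace check of that arithmetic (plain C, not part of the patch):

    #include <stdio.h>

    /* Bases and offsets mirror the definitions added above. */
    int main(void)
    {
            printf("o32 __NR_statx = %d\n", 4000 + 366);    /* 4366 */
            printf("n64 __NR_statx = %d\n", 5000 + 326);    /* 5326 */
            printf("n32 __NR_statx = %d\n", 6000 + 330);    /* 6330 */
            return 0;
    }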
index bb5c5d34ba8152459eb4f9bfeace3e04315b7528..a670c0c11875d28e6b18d163066934aa27259139 100644
@@ -102,6 +102,7 @@ void output_thread_info_defines(void)
        DEFINE(_THREAD_SIZE, THREAD_SIZE);
        DEFINE(_THREAD_MASK, THREAD_MASK);
        DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+       DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
        BLANK();
 }
 
index 59476a607adda07c5be321b8f21653668c1555b8..a00e87b0256d3d2a031ac1300239804edfcc8604 100644
@@ -361,7 +361,7 @@ LEAF(mips_cps_get_bootcfg)
        END(mips_cps_get_bootcfg)
 
 LEAF(mips_cps_boot_vpes)
-       PTR_L   ta2, COREBOOTCFG_VPEMASK(a0)
+       lw      ta2, COREBOOTCFG_VPEMASK(a0)
        PTR_L   ta3, COREBOOTCFG_VPECONFIG(a0)
 
 #if defined(CONFIG_CPU_MIPSR6)
index 07718bb5fc9d8612d98cc78d46a7ca10851f8245..12422fd4af2335dab2f90f1b0b47684a5b08e377 100644
@@ -1824,7 +1824,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
                }
 
                decode_configs(c);
-               c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+               c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
                c->writecombine = _CACHE_UNCACHED_ACCELERATED;
                break;
        default:
index 7ec9612cb0078a4c6ca9825f2e5f2a80e05d3508..ae810da4d499e6282638d8edadb722078b5f04cb 100644
@@ -215,9 +215,11 @@ NESTED(handle_int, PT_SIZE, sp)
        beq     t0, t1, 2f
 
        /* Switch to IRQ stack */
-       li      t1, _IRQ_STACK_SIZE
+       li      t1, _IRQ_STACK_START
        PTR_ADD sp, t0, t1
 
+       /* Save task's sp on IRQ stack so that unwinding can follow it */
+       LONG_S  s1, 0(sp)
 2:
        jal     plat_irq_dispatch
 
@@ -325,9 +327,11 @@ NESTED(except_vec_vi_handler, 0, sp)
        beq     t0, t1, 2f
 
        /* Switch to IRQ stack */
-       li      t1, _IRQ_STACK_SIZE
+       li      t1, _IRQ_STACK_START
        PTR_ADD sp, t0, t1
 
+       /* Save task's sp on IRQ stack so that unwinding can follow it */
+       LONG_S  s1, 0(sp)
 2:
        jalr    v0
 
@@ -519,7 +523,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
        BUILD_HANDLER reserved reserved sti verbose     /* others */
 
        .align  5
-       LEAF(handle_ri_rdhwr_vivt)
+       LEAF(handle_ri_rdhwr_tlbp)
        .set    push
        .set    noat
        .set    noreorder
@@ -538,7 +542,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
        .set    pop
        bltz    k1, handle_ri   /* slow path */
        /* fall thru */
-       END(handle_ri_rdhwr_vivt)
+       END(handle_ri_rdhwr_tlbp)
 
        LEAF(handle_ri_rdhwr)
        .set    push
index fb6b6b650719adf6943c9e8ed6524d7ccfbd5983..b68e10fc453d113dbdc67f11f2a7e59cb582d5a1 100644
@@ -488,31 +488,52 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
                                              unsigned long pc,
                                              unsigned long *ra)
 {
+       unsigned long low, high, irq_stack_high;
        struct mips_frame_info info;
        unsigned long size, ofs;
+       struct pt_regs *regs;
        int leaf;
-       extern void ret_from_irq(void);
-       extern void ret_from_exception(void);
 
        if (!stack_page)
                return 0;
 
        /*
-        * If we reached the bottom of interrupt context,
-        * return saved pc in pt_regs.
+        * IRQ stacks start at IRQ_STACK_START
+        * task stacks at THREAD_SIZE - 32
         */
-       if (pc == (unsigned long)ret_from_irq ||
-           pc == (unsigned long)ret_from_exception) {
-               struct pt_regs *regs;
-               if (*sp >= stack_page &&
-                   *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
-                       regs = (struct pt_regs *)*sp;
-                       pc = regs->cp0_epc;
-                       if (!user_mode(regs) && __kernel_text_address(pc)) {
-                               *sp = regs->regs[29];
-                               *ra = regs->regs[31];
-                               return pc;
-                       }
+       low = stack_page;
+       if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
+               high = stack_page + IRQ_STACK_START;
+               irq_stack_high = high;
+       } else {
+               high = stack_page + THREAD_SIZE - 32;
+               irq_stack_high = 0;
+       }
+
+       /*
+        * If we reached the top of the interrupt stack, start unwinding
+        * the interrupted task stack.
+        */
+       if (unlikely(*sp == irq_stack_high)) {
+               unsigned long task_sp = *(unsigned long *)*sp;
+
+               /*
+                * Check that the pointer saved in the IRQ stack head points to
+                * something within the stack of the current task
+                */
+               if (!object_is_on_stack((void *)task_sp))
+                       return 0;
+
+               /*
+                * Follow the pointer to the task's kernel stack frame where
+                * the interrupted state was saved.
+                */
+               regs = (struct pt_regs *)task_sp;
+               pc = regs->cp0_epc;
+               if (!user_mode(regs) && __kernel_text_address(pc)) {
+                       *sp = regs->regs[29];
+                       *ra = regs->regs[31];
+                       return pc;
                }
                return 0;
        }
@@ -533,8 +554,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
        if (leaf < 0)
                return 0;
 
-       if (*sp < stack_page ||
-           *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
+       if (*sp < low || *sp + info.frame_size > high)
                return 0;
 
        if (leaf)
index c29d397eee86cf48a05d7945efe3cdf4a38a7273..80ed68b2c95e4161ffe6f8f6e6fd0fd5855efb73 100644
@@ -600,3 +600,4 @@ EXPORT(sys_call_table)
        PTR     sys_pkey_mprotect
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 4365 */
+       PTR     sys_statx
index 0687f96ee912698285a92abde87a7376897f076b..49765b44aa9b3bfaf923710d1c95ea0390d6b0f5 100644
@@ -438,4 +438,5 @@ EXPORT(sys_call_table)
        PTR     sys_pkey_mprotect
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 5325 */
+       PTR     sys_statx
        .size   sys_call_table,.-sys_call_table
index 0331ba39a065b8530818093d7b707921242a3672..90bad2d1b2d3e2f62afa505f14836d510a5c077b 100644
@@ -433,4 +433,5 @@ EXPORT(sysn32_call_table)
        PTR     sys_pkey_mprotect
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free
+       PTR     sys_statx                       /* 6330 */
        .size   sysn32_call_table,.-sysn32_call_table
index 5a47042dd25f7ae7f93cee8a596f311bf17a9382..2dd70bd104e1a0ff816f87a6d1ded9f852a95de6 100644
@@ -588,4 +588,5 @@ EXPORT(sys32_call_table)
        PTR     sys_pkey_mprotect
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 4365 */
+       PTR     sys_statx
        .size   sys32_call_table,.-sys32_call_table
index c7d17cfb32f67877cfd62d10c2e88181dcb74014..b49e7bf9f95023fe5837b7d7ed317693ef8ac0a2 100644
@@ -83,7 +83,7 @@ extern asmlinkage void handle_dbe(void);
 extern asmlinkage void handle_sys(void);
 extern asmlinkage void handle_bp(void);
 extern asmlinkage void handle_ri(void);
-extern asmlinkage void handle_ri_rdhwr_vivt(void);
+extern asmlinkage void handle_ri_rdhwr_tlbp(void);
 extern asmlinkage void handle_ri_rdhwr(void);
 extern asmlinkage void handle_cpu(void);
 extern asmlinkage void handle_ov(void);
@@ -2408,9 +2408,18 @@ void __init trap_init(void)
 
        set_except_vector(EXCCODE_SYS, handle_sys);
        set_except_vector(EXCCODE_BP, handle_bp);
-       set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
-                         (cpu_has_vtag_icache ?
-                          handle_ri_rdhwr_vivt : handle_ri_rdhwr));
+
+       if (rdhwr_noopt)
+               set_except_vector(EXCCODE_RI, handle_ri);
+       else {
+               if (cpu_has_vtag_icache)
+                       set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
+               else if (current_cpu_type() == CPU_LOONGSON3)
+                       set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
+               else
+                       set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
+       }
+
        set_except_vector(EXCCODE_CPU, handle_cpu);
        set_except_vector(EXCCODE_OV, handle_ov);
        set_except_vector(EXCCODE_TR, handle_tr);
index 3c3aa05891dd78dd336ca80524870e21df6380ff..95bec460b651fd1cdad1b1e262463408742c7795 100644
@@ -467,7 +467,7 @@ void __init ltq_soc_init(void)
 
                if (!np_xbar)
                        panic("Failed to load xbar nodes from devicetree");
-               if (of_address_to_resource(np_pmu, 0, &res_xbar))
+               if (of_address_to_resource(np_xbar, 0, &res_xbar))
                        panic("Failed to get xbar resources");
                if (!request_mem_region(res_xbar.start, resource_size(&res_xbar),
                        res_xbar.name))
index e7f798d55fbcca06eaa0fe07a6822c3aedd18cc8..3fe99cb271a9cad44c55dfbf841b9de0e9093974 100644
@@ -1562,6 +1562,7 @@ static void probe_vcache(void)
        vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
 
        c->vcache.waybit = 0;
+       c->vcache.waysize = vcache_size / c->vcache.ways;
 
        pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
                vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
@@ -1664,6 +1665,7 @@ static void __init loongson3_sc_init(void)
        /* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
        scache_size *= 4;
        c->scache.waybit = 0;
+       c->scache.waysize = scache_size / c->scache.ways;
        pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
        if (scache_size)
index 9bfee8988eaf11a8618ca4f6ce101a8b70ad2823..4f642e07c2b198b3c9e46e3bc9f64bbe2bcb6606 100644
@@ -760,7 +760,8 @@ static void build_huge_update_entries(u32 **p, unsigned int pte,
 static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
                                    struct uasm_label **l,
                                    unsigned int pte,
-                                   unsigned int ptr)
+                                   unsigned int ptr,
+                                   unsigned int flush)
 {
 #ifdef CONFIG_SMP
        UASM_i_SC(p, pte, 0, ptr);
@@ -769,6 +770,22 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
 #else
        UASM_i_SW(p, pte, 0, ptr);
 #endif
+       if (cpu_has_ftlb && flush) {
+               BUG_ON(!cpu_has_tlbinv);
+
+               UASM_i_MFC0(p, ptr, C0_ENTRYHI);
+               uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+               UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+               build_tlb_write_entry(p, l, r, tlb_indexed);
+
+               uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+               UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+               build_huge_update_entries(p, pte, ptr);
+               build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
+
+               return;
+       }
+
        build_huge_update_entries(p, pte, ptr);
        build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
 }
@@ -2199,7 +2216,7 @@ static void build_r4000_tlb_load_handler(void)
                uasm_l_tlbl_goaround2(&l, p);
        }
        uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
-       build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+       build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
 #endif
 
        uasm_l_nopage_tlbl(&l, p);
@@ -2254,7 +2271,7 @@ static void build_r4000_tlb_store_handler(void)
        build_tlb_probe_entry(&p);
        uasm_i_ori(&p, wr.r1, wr.r1,
                   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
-       build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+       build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
 #endif
 
        uasm_l_nopage_tlbs(&l, p);
@@ -2310,7 +2327,7 @@ static void build_r4000_tlb_modify_handler(void)
        build_tlb_probe_entry(&p);
        uasm_i_ori(&p, wr.r1, wr.r1,
                   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
-       build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+       build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
 #endif
 
        uasm_l_nopage_tlbm(&l, p);
index c4ffd43d3996ac26af1afa42c9c8a876f7fc564c..48ce701557a451826fba7c7116420b731b7874e6 100644
@@ -35,7 +35,7 @@ static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
 static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
 static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
 static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
+static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
 static struct rt2880_pmx_func pci_func[] = {
        FUNC("pci-dev", 0, 40, 32),
        FUNC("pci-host2", 1, 40, 32),
@@ -43,7 +43,7 @@ static struct rt2880_pmx_func pci_func[] = {
        FUNC("pci-fnc", 3, 40, 32)
 };
 static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
+static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
 
 static struct rt2880_pmx_group rt3883_pinmux_data[] = {
        GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
index 367c5426157ba14dfe8799664c3f11dd6eb9c8a2..3901b80d442021e17e96e29f33cea2b1c28ec345 100644
@@ -48,6 +48,13 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
        return alloc_bootmem_align(size, align);
 }
 
+int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
+                                            bool nomap)
+{
+       reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+       return 0;
+}
+
 void __init early_init_devtree(void *params)
 {
        __be32 *dtb = (u32 *)__dtb_start;
index 6e57ffa5db2769babe8c285f1e88e16fe13ed998..6044d9be28b4493323d362162e7e5ec6c56e1f33 100644
@@ -201,6 +201,9 @@ void __init setup_arch(char **cmdline_p)
        }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
+       early_init_fdt_reserve_self();
+       early_init_fdt_scan_reserved_mem();
+
        unflatten_and_copy_device_tree();
 
        setup_cpuinfo();
index 8442727f28d2732eecb583d46bdcc88258a590dd..cbd4f4af8108bc8fa9ec36504589eac7107e6b1c 100644
 #define get_user __get_user
 
 #if !defined(CONFIG_64BIT)
-#define LDD_USER(ptr)          __get_user_asm64(ptr)
+#define LDD_USER(val, ptr)     __get_user_asm64(val, ptr)
 #define STD_USER(x, ptr)       __put_user_asm64(x, ptr)
 #else
-#define LDD_USER(ptr)          __get_user_asm("ldd", ptr)
+#define LDD_USER(val, ptr)     __get_user_asm(val, "ldd", ptr)
 #define STD_USER(x, ptr)       __put_user_asm("std", x, ptr)
 #endif
 
@@ -97,63 +97,87 @@ struct exception_data {
                " mtsp %0,%%sr2\n\t"            \
                : : "r"(get_fs()) : )
 
-#define __get_user(x, ptr)                               \
-({                                                       \
-       register long __gu_err __asm__ ("r8") = 0;       \
-       register long __gu_val;                          \
-                                                        \
-       load_sr2();                                      \
-       switch (sizeof(*(ptr))) {                        \
-           case 1: __get_user_asm("ldb", ptr); break;   \
-           case 2: __get_user_asm("ldh", ptr); break;   \
-           case 4: __get_user_asm("ldw", ptr); break;   \
-           case 8: LDD_USER(ptr);  break;               \
-           default: BUILD_BUG(); break;                 \
-       }                                                \
-                                                        \
-       (x) = (__force __typeof__(*(ptr))) __gu_val;     \
-       __gu_err;                                        \
+#define __get_user_internal(val, ptr)                  \
+({                                                     \
+       register long __gu_err __asm__ ("r8") = 0;      \
+                                                       \
+       switch (sizeof(*(ptr))) {                       \
+       case 1: __get_user_asm(val, "ldb", ptr); break; \
+       case 2: __get_user_asm(val, "ldh", ptr); break; \
+       case 4: __get_user_asm(val, "ldw", ptr); break; \
+       case 8: LDD_USER(val, ptr); break;              \
+       default: BUILD_BUG();                           \
+       }                                               \
+                                                       \
+       __gu_err;                                       \
 })
 
-#define __get_user_asm(ldx, ptr)                        \
+#define __get_user(val, ptr)                           \
+({                                                     \
+       load_sr2();                                     \
+       __get_user_internal(val, ptr);                  \
+})
+
+#define __get_user_asm(val, ldx, ptr)                  \
+{                                                      \
+       register long __gu_val;                         \
+                                                       \
        __asm__("1: " ldx " 0(%%sr2,%2),%0\n"           \
                "9:\n"                                  \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
                : "=r"(__gu_val), "=r"(__gu_err)        \
-               : "r"(ptr), "1"(__gu_err));
+               : "r"(ptr), "1"(__gu_err));             \
+                                                       \
+       (val) = (__force __typeof__(*(ptr))) __gu_val;  \
+}
 
 #if !defined(CONFIG_64BIT)
 
-#define __get_user_asm64(ptr)                          \
+#define __get_user_asm64(val, ptr)                     \
+{                                                      \
+       union {                                         \
+               unsigned long long      l;              \
+               __typeof__(*(ptr))      t;              \
+       } __gu_tmp;                                     \
+                                                       \
        __asm__("   copy %%r0,%R0\n"                    \
                "1: ldw 0(%%sr2,%2),%0\n"               \
                "2: ldw 4(%%sr2,%2),%R0\n"              \
                "9:\n"                                  \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
                ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
-               : "=r"(__gu_val), "=r"(__gu_err)        \
-               : "r"(ptr), "1"(__gu_err));
+               : "=&r"(__gu_tmp.l), "=r"(__gu_err)     \
+               : "r"(ptr), "1"(__gu_err));             \
+                                                       \
+       (val) = __gu_tmp.t;                             \
+}
 
 #endif /* !defined(CONFIG_64BIT) */
 
 
-#define __put_user(x, ptr)                                      \
+#define __put_user_internal(x, ptr)                            \
 ({                                                             \
        register long __pu_err __asm__ ("r8") = 0;              \
         __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);      \
                                                                \
-       load_sr2();                                             \
        switch (sizeof(*(ptr))) {                               \
-           case 1: __put_user_asm("stb", __x, ptr); break;     \
-           case 2: __put_user_asm("sth", __x, ptr); break;     \
-           case 4: __put_user_asm("stw", __x, ptr); break;     \
-           case 8: STD_USER(__x, ptr); break;                  \
-           default: BUILD_BUG(); break;                        \
-       }                                                       \
+       case 1: __put_user_asm("stb", __x, ptr); break;         \
+       case 2: __put_user_asm("sth", __x, ptr); break;         \
+       case 4: __put_user_asm("stw", __x, ptr); break;         \
+       case 8: STD_USER(__x, ptr); break;                      \
+       default: BUILD_BUG();                                   \
+       }                                                       \
                                                                \
        __pu_err;                                               \
 })
 
+#define __put_user(x, ptr)                                     \
+({                                                             \
+       load_sr2();                                             \
+       __put_user_internal(x, ptr);                            \
+})
+
+
 /*
  * The "__put_user/kernel_asm()" macros tell gcc they read from memory
  * instead of writing. This is because they do not write to any memory
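
As an aside, the reworked __get_user_asm64() above hands the loaded 64-bit value back through a union of unsigned long long and the destination's own type instead of forcing it through a cast. A rough, self-contained userspace sketch of that union trick (the GET64 macro is hypothetical, not a kernel API):

#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the patched __get_user_asm64(): carry eight
 * raw bytes out through a union member of the destination's type. */
#define GET64(val, ptr)                                 \
do {                                                    \
        union {                                         \
                unsigned long long      l;              \
                __typeof__(*(ptr))      t;              \
        } gu_tmp;                                       \
        memcpy(&gu_tmp.l, (ptr), sizeof(gu_tmp.l));     \
        (val) = gu_tmp.t;                               \
} while (0)

int main(void)
{
        double src = 2.5, dst = 0.0;

        GET64(dst, &src);
        printf("%f\n", dst);    /* prints 2.500000 */
        return 0;
}
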
index f01188c044ee83e41ba52162544464781056f262..85c28bb80fb7433dfcfac2fb10f6cc121448119f 100644 (file)
@@ -201,7 +201,7 @@ ENTRY_CFI(pa_memcpy)
        add     dst,len,end
 
        /* short copy with less than 16 bytes? */
-       cmpib,>>=,n 15,len,.Lbyte_loop
+       cmpib,COND(>>=),n 15,len,.Lbyte_loop
 
        /* same alignment? */
        xor     src,dst,t0
@@ -216,7 +216,7 @@ ENTRY_CFI(pa_memcpy)
        /* loop until we are 64-bit aligned */
 .Lalign_loop64:
        extru   dst,31,3,t1
-       cmpib,=,n       0,t1,.Lcopy_loop_16
+       cmpib,=,n       0,t1,.Lcopy_loop_16_start
 20:    ldb,ma  1(srcspc,src),t1
 21:    stb,ma  t1,1(dstspc,dst)
        b       .Lalign_loop64
@@ -225,6 +225,7 @@ ENTRY_CFI(pa_memcpy)
        ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
 
+.Lcopy_loop_16_start:
        ldi     31,t0
 .Lcopy_loop_16:
        cmpb,COND(>>=),n t0,len,.Lword_loop
@@ -267,7 +268,7 @@ ENTRY_CFI(pa_memcpy)
        /* loop until we are 32-bit aligned */
 .Lalign_loop32:
        extru   dst,31,2,t1
-       cmpib,=,n       0,t1,.Lcopy_loop_4
+       cmpib,=,n       0,t1,.Lcopy_loop_8
 20:    ldb,ma  1(srcspc,src),t1
 21:    stb,ma  t1,1(dstspc,dst)
        b       .Lalign_loop32
@@ -277,7 +278,7 @@ ENTRY_CFI(pa_memcpy)
        ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
 
 
-.Lcopy_loop_4:
+.Lcopy_loop_8:
        cmpib,COND(>>=),n 15,len,.Lbyte_loop
 
 10:    ldw     0(srcspc,src),t1
@@ -299,7 +300,7 @@ ENTRY_CFI(pa_memcpy)
        ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
 
-       b       .Lcopy_loop_4
+       b       .Lcopy_loop_8
        ldo     -16(len),len
 
 .Lbyte_loop:
@@ -324,7 +325,7 @@ ENTRY_CFI(pa_memcpy)
 .Lunaligned_copy:
        /* align until dst is 32bit-word-aligned */
        extru   dst,31,2,t1
-       cmpib,COND(=),n 0,t1,.Lcopy_dstaligned
+       cmpib,=,n       0,t1,.Lcopy_dstaligned
 20:    ldb     0(srcspc,src),t1
        ldo     1(src),src
 21:    stb,ma  t1,1(dstspc,dst)
@@ -362,7 +363,7 @@ ENTRY_CFI(pa_memcpy)
        cmpiclr,<> 1,t0,%r0
        b,n .Lcase1
 .Lcase0:
-       cmpb,= %r0,len,.Lcda_finish
+       cmpb,COND(=) %r0,len,.Lcda_finish
        nop
 
 1:     ldw,ma 4(srcspc,src), a3
@@ -376,7 +377,7 @@ ENTRY_CFI(pa_memcpy)
 1:     ldw,ma 4(srcspc,src), a3
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
        ldo -1(len),len
-       cmpb,=,n %r0,len,.Ldo0
+       cmpb,COND(=),n %r0,len,.Ldo0
 .Ldo4:
 1:     ldw,ma 4(srcspc,src), a0
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
@@ -402,7 +403,7 @@ ENTRY_CFI(pa_memcpy)
 1:     stw,ma t0, 4(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
        ldo -4(len),len
-       cmpb,<> %r0,len,.Ldo4
+       cmpb,COND(<>) %r0,len,.Ldo4
        nop
 .Ldo0:
        shrpw a2, a3, %sar, t0
@@ -436,14 +437,14 @@ ENTRY_CFI(pa_memcpy)
        /* fault exception fixup handlers: */
 #ifdef CONFIG_64BIT
 .Lcopy16_fault:
-10:    b       .Lcopy_done
-       std,ma  t1,8(dstspc,dst)
+       b       .Lcopy_done
+10:    std,ma  t1,8(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
 #endif
 
 .Lcopy8_fault:
-10:    b       .Lcopy_done
-       stw,ma  t1,4(dstspc,dst)
+       b       .Lcopy_done
+10:    stw,ma  t1,4(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
 
        .exit
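
The label moves above turn on a PA-RISC subtlety: the instruction in a branch's delay slot executes along with the branch, so the exception-table entry must name the store that can actually fault, not the branch beside it. The lookup machinery itself is just a table pairing a faulting location with its fixup; a toy, purely hypothetical C model of that pairing:

#include <stdio.h>
#include <stddef.h>

struct extable_entry {
        int insn;       /* stand-in for the address of the faulting op */
        int fixup;      /* stand-in for the recovery address           */
};

/* Only the store (op 10) is registered; a branch never faults. */
static const struct extable_entry table[] = {
        { 10, 99 },
};

static int search_extable(int insn)
{
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (table[i].insn == insn)
                        return table[i].fixup;
        return -1;      /* no entry: the fault would be fatal */
}

int main(void)
{
        printf("fixup for op 10: %d\n", search_extable(10));    /* 99 */
        printf("fixup for op 11: %d\n", search_extable(11));    /* -1 */
        return 0;
}

Mislabeling the branch instead of the store is the moral equivalent of registering op 11 above: the search comes back empty at fault time.
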
index 411994551afc138b0b733fdda2b6398140f7127c..f058e0c3e4d4b4d1e22b84973ad8cc054958a3c2 100644 (file)
@@ -33,10 +33,13 @@ static u32 crc32c_vpmsum(u32 crc, unsigned char const *p, size_t len)
        }
 
        if (len & ~VMX_ALIGN_MASK) {
+               preempt_disable();
                pagefault_disable();
                enable_kernel_altivec();
                crc = __crc32c_vpmsum(crc, p, len & ~VMX_ALIGN_MASK);
+               disable_kernel_altivec();
                pagefault_enable();
+               preempt_enable();
        }
 
        tail = len & VMX_ALIGN_MASK;
index cbc7c42cdb7494be1fc10d67fcb49381f3b40d05..ec7a8b099dd960b43896ad2cd50b1e5f87f420f7 100644 (file)
@@ -807,14 +807,25 @@ int fix_alignment(struct pt_regs *regs)
        nb = aligninfo[instr].len;
        flags = aligninfo[instr].flags;
 
-       /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
-       if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
-               nb = 8;
-               flags = LD+SW;
-       } else if (IS_XFORM(instruction) &&
-                  ((instruction >> 1) & 0x3ff) == 660) {
-               nb = 8;
-               flags = ST+SW;
+       /*
+        * Handle some cases which give overlaps in the DSISR values.
+        */
+       if (IS_XFORM(instruction)) {
+               switch (get_xop(instruction)) {
+               case 532:       /* ldbrx */
+                       nb = 8;
+                       flags = LD+SW;
+                       break;
+               case 660:       /* stdbrx */
+                       nb = 8;
+                       flags = ST+SW;
+                       break;
+               case 20:        /* lwarx */
+               case 84:        /* ldarx */
+               case 116:       /* lharx */
+               case 276:       /* lqarx */
+                       return 0;       /* not emulated ever */
+               }
        }
 
        /* Byteswap little endian loads and stores */
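
For reference, the X-form extended opcode that the new switch dispatches on occupies bits 1..10 of the instruction word, which is all get_xop() extracts. A standalone check of that decoding (the encodings are reconstructed here for illustration):

#include <stdio.h>
#include <stdint.h>

/* X-form extended opcode: bits 1..10 of the instruction word. */
static unsigned int get_xop(uint32_t insn)
{
        return (insn >> 1) & 0x3ff;
}

int main(void)
{
        uint32_t ldbrx  = (31u << 26) | (532u << 1);    /* primary op 31 */
        uint32_t stdbrx = (31u << 26) | (660u << 1);

        printf("%u %u\n", get_xop(ldbrx), get_xop(stdbrx)); /* 532 660 */
        return 0;
}
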
index ae179cb1bb3c02a21b19e2793ea71697c658c7cf..c119044cad0d58e94bb0d4a7557ed27da8f7a390 100644 (file)
@@ -67,7 +67,7 @@ PPC64_CACHES:
  *   flush all bytes from start through stop-1 inclusive
  */
 
-_GLOBAL(flush_icache_range)
+_GLOBAL_TOC(flush_icache_range)
 BEGIN_FTR_SECTION
        PURGE_PREFETCHED_INS
        blr
@@ -120,7 +120,7 @@ EXPORT_SYMBOL(flush_icache_range)
  *
  *    flush all bytes from start to stop-1 inclusive
  */
-_GLOBAL(flush_dcache_range)
+_GLOBAL_TOC(flush_dcache_range)
 
 /*
  * Flush the data cache to memory 
index 9cfaa8b69b5f32eb64d7adcd8504ccdb6cd87b32..f997154dfc41bb9dc12d514890d6c425f197048b 100644 (file)
@@ -236,6 +236,15 @@ static void cpu_ready_for_interrupts(void)
                mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
        }
 
+       /*
+        * Fixup HFSCR:TM based on CPU features. The bit is set by our
+        * early asm init because at that point we haven't updated our
+        * CPU features from firmware and device-tree. Here we have,
+        * so let's do it.
+        */
+       if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
+               mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
+
        /* Set IR and DR in PACA MSR */
        get_paca()->kernel_msr = MSR_KERNEL;
 }
index 8c68145ba1bd35f4e86f0a3da729ee5387a7c781..710e491206ed0a11ff96b4994d8dbb91f264b94a 100644 (file)
@@ -1487,6 +1487,10 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
        /* start new resize */
 
        resize = kzalloc(sizeof(*resize), GFP_KERNEL);
+       if (!resize) {
+               ret = -ENOMEM;
+               goto out;
+       }
        resize->order = shift;
        resize->kvm = kvm;
        INIT_WORK(&resize->work, resize_hpt_prepare_work);
index cc332608e65664f40f337cd4418c70a164b6259d..65bb8f33b399bf13bcedd115f4ce86ed1c0dad5b 100644 (file)
@@ -638,6 +638,10 @@ static void native_flush_hash_range(unsigned long number, int local)
        unsigned long psize = batch->psize;
        int ssize = batch->ssize;
        int i;
+       unsigned int use_local;
+
+       use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
+               mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
 
        local_irq_save(flags);
 
@@ -667,8 +671,7 @@ static void native_flush_hash_range(unsigned long number, int local)
                } pte_iterate_hashed_end();
        }
 
-       if (mmu_has_feature(MMU_FTR_TLBIEL) &&
-           mmu_psize_defs[psize].tlbiel && local) {
+       if (use_local) {
                asm volatile("ptesync":::"memory");
                for (i = 0; i < number; i++) {
                        vpn = batch->vpn[i];
index fa95041fa9f6844a2ab13ac4dee90967a6a0f84b..33ca29333e1808ae4dc0f9e8875902ae2bb307f5 100644 (file)
@@ -141,31 +141,34 @@ static void check_ipl_parmblock(void *start, unsigned long size)
 
 unsigned long decompress_kernel(void)
 {
-       unsigned long output_addr;
-       unsigned char *output;
+       void *output, *kernel_end;
 
-       output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
-       check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
-       memset(&_bss, 0, &_ebss - &_bss);
-       free_mem_ptr = (unsigned long)&_end;
-       free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-       output = (unsigned char *) output_addr;
+       output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
+       kernel_end = output + SZ__bss_start;
+       check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
 
 #ifdef CONFIG_BLK_DEV_INITRD
        /*
         * Move the initrd right behind the end of the decompressed
-        * kernel image.
+        * kernel image. This also prevents initrd corruption caused by
+        * bss clearing since kernel_end will always be located behind the
+        * current bss section.
         */
-       if (INITRD_START && INITRD_SIZE &&
-           INITRD_START < (unsigned long) output + SZ__bss_start) {
-               check_ipl_parmblock(output + SZ__bss_start,
-                                   INITRD_START + INITRD_SIZE);
-               memmove(output + SZ__bss_start,
-                       (void *) INITRD_START, INITRD_SIZE);
-               INITRD_START = (unsigned long) output + SZ__bss_start;
+       if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
+               check_ipl_parmblock(kernel_end, INITRD_SIZE);
+               memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
+               INITRD_START = (unsigned long) kernel_end;
        }
 #endif
 
+       /*
+        * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
+        * initialized afterwards since they reside in bss.
+        */
+       memset(&_bss, 0, &_ebss - &_bss);
+       free_mem_ptr = (unsigned long) &_end;
+       free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+
        puts("Uncompressing Linux... ");
        __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
        puts("Ok, booting the kernel.\n");
index 136932ff42502027820a94702a924d65b3049622..3ea1554d04b3776e90fa1c311ff227a9a201925c 100644 (file)
@@ -147,7 +147,7 @@ unsigned long __must_check __copy_to_user(void __user *to, const void *from,
                "       jg      2b\n"                           \
                ".popsection\n"                                 \
                EX_TABLE(0b,3b) EX_TABLE(1b,3b)                 \
-               : "=d" (__rc), "=Q" (*(to))                     \
+               : "=d" (__rc), "+Q" (*(to))                     \
                : "d" (size), "Q" (*(from)),                    \
                  "d" (__reg0), "K" (-EFAULT)                   \
                : "cc");                                        \
index 47a973b5b4f184adfa3855828d042bd73d33e61c..5dab859b0d543be205eaa5a176728e87f5e3bfc6 100644 (file)
@@ -909,13 +909,11 @@ void __init smp_prepare_boot_cpu(void)
 {
        struct pcpu *pcpu = pcpu_devices;
 
+       WARN_ON(!cpu_present(0) || !cpu_online(0));
        pcpu->state = CPU_STATE_CONFIGURED;
-       pcpu->address = stap();
        pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
-       set_cpu_present(0, true);
-       set_cpu_online(0, true);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
@@ -924,6 +922,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void __init smp_setup_processor_id(void)
 {
+       pcpu_devices[0].address = stap();
        S390_lowcore.cpu_nr = 0;
        S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
 }
index d55c829a5944c28449e734e227ec4621bcd67d9e..ddbffb715b40fd27a705396ffcd2221af74bab26 100644 (file)
@@ -168,8 +168,7 @@ union page_table_entry {
                unsigned long z  : 1; /* Zero Bit */
                unsigned long i  : 1; /* Page-Invalid Bit */
                unsigned long p  : 1; /* DAT-Protection Bit */
-               unsigned long co : 1; /* Change-Recording Override */
-               unsigned long    : 8;
+               unsigned long    : 9;
        };
 };
 
@@ -745,8 +744,6 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
                return PGM_PAGE_TRANSLATION;
        if (pte.z)
                return PGM_TRANSLATION_SPEC;
-       if (pte.co && !edat1)
-               return PGM_TRANSLATION_SPEC;
        dat_protection |= pte.p;
        raddr.pfra = pte.pfra;
 real_address:
@@ -1182,7 +1179,7 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
                rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
        if (!rc && pte.i)
                rc = PGM_PAGE_TRANSLATION;
-       if (!rc && (pte.z || (pte.co && sg->edat_level < 1)))
+       if (!rc && pte.z)
                rc = PGM_TRANSLATION_SPEC;
 shadow_page:
        pte.p |= dat_protection;
index f294dd42fc7d3833ccc5b92fa6077e3b7d869d26..5961b2d8398a9cdfa359483f009f0c30cfcca9a8 100644 (file)
@@ -17,6 +17,7 @@
 
 #define HPAGE_SHIFT            23
 #define REAL_HPAGE_SHIFT       22
+#define HPAGE_2GB_SHIFT                31
 #define HPAGE_256MB_SHIFT      28
 #define HPAGE_64K_SHIFT                16
 #define REAL_HPAGE_SIZE                (_AC(1,UL) << REAL_HPAGE_SHIFT)
@@ -27,7 +28,7 @@
 #define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #define REAL_HPAGE_PER_HPAGE   (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
-#define HUGE_MAX_HSTATE                3
+#define HUGE_MAX_HSTATE                4
 #endif
 
 #ifndef __ASSEMBLY__
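
Each HPAGE_*_SHIFT is simply log2 of the page size, so the new 2GB hstate corresponds to 1UL << 31 (and HUGE_MAX_HSTATE grows to 4 to make room for it). Trivial to verify standalone:

#include <stdio.h>

int main(void)
{
        printf("shift 31 -> %4lu MiB\n", (1UL << 31) >> 20);    /* 2048 */
        printf("shift 28 -> %4lu MiB\n", (1UL << 28) >> 20);    /*  256 */
        printf("shift 16 -> %4lu KiB\n", (1UL << 16) >> 10);    /*   64 */
        return 0;
}
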
index 8a598528ec1f0455508c1389982e832460c12b81..6fbd931f0570021fd297db9163ed170fc21aafe2 100644 (file)
@@ -679,26 +679,27 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
        return pte_pfn(pte);
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline unsigned long pmd_dirty(pmd_t pmd)
+#define __HAVE_ARCH_PMD_WRITE
+static inline unsigned long pmd_write(pmd_t pmd)
 {
        pte_t pte = __pte(pmd_val(pmd));
 
-       return pte_dirty(pte);
+       return pte_write(pte);
 }
 
-static inline unsigned long pmd_young(pmd_t pmd)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline unsigned long pmd_dirty(pmd_t pmd)
 {
        pte_t pte = __pte(pmd_val(pmd));
 
-       return pte_young(pte);
+       return pte_dirty(pte);
 }
 
-static inline unsigned long pmd_write(pmd_t pmd)
+static inline unsigned long pmd_young(pmd_t pmd)
 {
        pte_t pte = __pte(pmd_val(pmd));
 
-       return pte_write(pte);
+       return pte_young(pte);
 }
 
 static inline unsigned long pmd_trans_huge(pmd_t pmd)
index 365d4cb267b4397c8a108eaff279be292c4e52e5..dd27159819ebedce4d0479ec800e91d56706311f 100644 (file)
 #include <asm/signal.h>
 #include <asm/page.h>
 
-/*
- * The sparc has no problems with write protection
- */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
 /* Whee, this is STACK_TOP + PAGE_SIZE and the lowest kernel address too...
  * That one page is used to protect kernel from intruders, so that
  * we can make our access_ok test faster
index 6448cfc8292f72d329704c92c993e7864ee6ae3d..b58ee90184334224b756360e769c47a0d10e088a 100644 (file)
 #include <asm/ptrace.h>
 #include <asm/page.h>
 
-/* The sparc has no problems with write protection */
-#define wp_works_ok 1
-#define wp_works_ok__is_a_macro /* for versions in ksyms.c */
-
 /*
  * User lives in his very own context, and cannot reference us. Note
  * that TASK_SIZE is a misnomer, it really gives maximum user virtual
index 6aa3da152c20008a08e752c4f9708ff6a89e3d72..44101196d02b5dacb5e3c67248b3c3de77c4a9b4 100644 (file)
@@ -96,6 +96,7 @@ sparc64_boot:
        andn    %g1, PSTATE_AM, %g1
        wrpr    %g1, 0x0, %pstate
        ba,a,pt %xcc, 1f
+        nop
 
        .globl  prom_finddev_name, prom_chosen_path, prom_root_node
        .globl  prom_getprop_name, prom_mmu_name, prom_peer_name
@@ -613,6 +614,7 @@ niagara_tlb_fixup:
         nop
 
        ba,a,pt %xcc, 80f
+        nop
 niagara4_patch:
        call    niagara4_patch_copyops
         nop
@@ -622,6 +624,7 @@ niagara4_patch:
         nop
 
        ba,a,pt %xcc, 80f
+        nop
 
 niagara2_patch:
        call    niagara2_patch_copyops
@@ -632,6 +635,7 @@ niagara2_patch:
         nop
 
        ba,a,pt %xcc, 80f
+        nop
 
 niagara_patch:
        call    niagara_patch_copyops
index 34b4933900bf7665b30e791565795d5782351b60..9276d2f0dd8684edffa5a2a4f2f72ac4488a8c99 100644 (file)
@@ -82,6 +82,7 @@ do_stdfmna:
        call            handle_stdfmna
         add            %sp, PTREGS_OFF, %o0
        ba,a,pt         %xcc, rtrap
+        nop
        .size           do_stdfmna,.-do_stdfmna
 
        .type           breakpoint_trap,#function
index 216948ca43829d0c0a5be837ff183f86d03dfb49..709a82ebd294c07bd4b87698c695325e5093a7e4 100644 (file)
@@ -237,6 +237,7 @@ rt_continue:        ldx                     [%sp + PTREGS_OFF + PT_V9_G1], %g1
                bne,pt                  %xcc, user_rtt_fill_32bit
                 wrpr                   %g1, %cwp
                ba,a,pt                 %xcc, user_rtt_fill_64bit
+                nop
 
 user_rtt_fill_fixup_dax:
                ba,pt   %xcc, user_rtt_fill_fixup_common
index 4a73009f66a5727e43ad45dbe1a2f7bded8cd48f..d7e5408428098d1138b0c132baa2d43dff3196a4 100644 (file)
@@ -86,6 +86,7 @@ __spitfire_cee_trap_continue:
         rd             %pc, %g7
 
        ba,a,pt         %xcc, 2f
+        nop
 
 1:     ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
index 6179e19bc9b98ea4542b59bb4953c1f9f2718330..c19f352f46c7e2d6c08f203e4e926982fd00423c 100644 (file)
@@ -352,6 +352,7 @@ sun4v_mna:
        call    sun4v_do_mna
         add    %sp, PTREGS_OFF, %o0
        ba,a,pt %xcc, rtrap
+        nop
 
        /* Privileged Action.  */
 sun4v_privact:
index 5604a2b051d46bb822873a31b9b1f88a85253562..364af3250646817e58ce37c96f6c12faf8b37dde 100644 (file)
@@ -92,6 +92,7 @@ user_rtt_fill_fixup_common:
                call    sun4v_data_access_exception
                 nop
                ba,a,pt %xcc, rtrap
+                nop
 
 1:             call    spitfire_data_access_exception
                 nop
index 855019a8590ea5d556b71c9ae9356f7dcb629588..1ee173cc3c3943e4883670fd56537c63d199257b 100644 (file)
@@ -152,6 +152,8 @@ fill_fixup_dax:
        call    sun4v_data_access_exception
         nop
        ba,a,pt %xcc, rtrap
+        nop
 1:     call    spitfire_data_access_exception
         nop
        ba,a,pt %xcc, rtrap
+        nop
index c629dbd121b6e4fe64494c62bcad656747f05ef2..64dcd6cdb606c819e48efe0e5d755edcc816dbea 100644 (file)
@@ -326,11 +326,13 @@ FUNC_NAME:        /* %o0=dst, %o1=src, %o2=len */
        blu             170f
         nop
        ba,a,pt         %xcc, 180f
+        nop
 
 4:     /* 32 <= low bits < 48 */
        blu             150f
         nop
        ba,a,pt         %xcc, 160f
+        nop
 5:     /* 0 < low bits < 32 */
        blu,a           6f
         cmp            %g2, 8
@@ -338,6 +340,7 @@ FUNC_NAME:  /* %o0=dst, %o1=src, %o2=len */
        blu             130f
         nop
        ba,a,pt         %xcc, 140f
+        nop
 6:     /* 0 < low bits < 16 */
        bgeu            120f
         nop
@@ -475,6 +478,7 @@ FUNC_NAME:  /* %o0=dst, %o1=src, %o2=len */
        brz,pt          %o2, 85f
         sub            %o0, %o1, GLOBAL_SPARE
        ba,a,pt         %XCC, 90f
+        nop
 
        .align          64
 75: /* 16 < len <= 64 */
index 75bb93b1437f7f6f29ab17c96bc0b4c322f2a373..78ea962edcbee4c974481520de516e751d78a61d 100644 (file)
@@ -530,4 +530,5 @@ FUNC_NAME:  /* %o0=dst, %o1=src, %o2=len */
        bne,pt          %icc, 1b
         EX_ST(STORE(stb, %g1, %o0 - 0x01), NG4_retl_o2_plus_1)
        ba,a,pt         %icc, .Lexit
+        nop
        .size           FUNC_NAME, .-FUNC_NAME
index 41da4bdd95cbff451889992b06c8dd6893dfed30..7c0c81f18837e11902f8b473b7b480bfc13ab5a7 100644 (file)
@@ -102,4 +102,5 @@ NG4bzero:
        bne,pt          %icc, 1b
         add            %o0, 0x30, %o0
        ba,a,pt         %icc, .Lpostloop
+        nop
        .size           NG4bzero,.-NG4bzero
index d88c4ed50a0023cd8e2daa06689abc0d46d79f46..cd654a719b278b6c925a730a63195462d6e076df 100644 (file)
@@ -394,6 +394,7 @@ FUNC_NAME:  /* %i0=dst, %i1=src, %i2=len */
        brz,pt          %i2, 85f
         sub            %o0, %i1, %i3
        ba,a,pt         %XCC, 90f
+        nop
 
        .align          64
 70: /* 16 < len <= 64 */
index 323bc6b6e3ad0eceb0ea4cdb6a1cb6da898227b8..ee5273ad918de6302cb0f6644b2a4179f8c26351 100644 (file)
@@ -143,6 +143,10 @@ static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
        pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
 
        switch (shift) {
+       case HPAGE_2GB_SHIFT:
+               hugepage_size = _PAGE_SZ2GB_4V;
+               pte_val(entry) |= _PAGE_PMD_HUGE;
+               break;
        case HPAGE_256MB_SHIFT:
                hugepage_size = _PAGE_SZ256MB_4V;
                pte_val(entry) |= _PAGE_PMD_HUGE;
@@ -183,6 +187,9 @@ static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
        unsigned int shift;
 
        switch (tte_szbits) {
+       case _PAGE_SZ2GB_4V:
+               shift = HPAGE_2GB_SHIFT;
+               break;
        case _PAGE_SZ256MB_4V:
                shift = HPAGE_256MB_SHIFT;
                break;
@@ -261,7 +268,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
                if (!pmd)
                        return NULL;
 
-               if (sz == PMD_SHIFT)
+               if (sz >= PMD_SIZE)
                        pte = (pte_t *)pmd;
                else
                        pte = pte_alloc_map(mm, pmd, addr);
index ccd4553289899ee2e585e409189206f67c7e77f2..0cda653ae007645fa01f05b4c40518332159a6ac 100644 (file)
@@ -337,6 +337,10 @@ static int __init setup_hugepagesz(char *string)
        hugepage_shift = ilog2(hugepage_size);
 
        switch (hugepage_shift) {
+       case HPAGE_2GB_SHIFT:
+               hv_pgsz_mask = HV_PGSZ_MASK_2GB;
+               hv_pgsz_idx = HV_PGSZ_IDX_2GB;
+               break;
        case HPAGE_256MB_SHIFT:
                hv_pgsz_mask = HV_PGSZ_MASK_256MB;
                hv_pgsz_idx = HV_PGSZ_IDX_256MB;
@@ -1563,7 +1567,7 @@ bool kern_addr_valid(unsigned long addr)
        if ((long)addr < 0L) {
                unsigned long pa = __pa(addr);
 
-               if ((addr >> max_phys_bits) != 0UL)
+               if ((pa >> max_phys_bits) != 0UL)
                        return false;
 
                return pfn_valid(pa >> PAGE_SHIFT);
index def82f6d626f774772807427c6fe6e67fd343bf1..8e76ebba29863ecd18dd2b9ba29147072f3c4518 100644 (file)
@@ -54,6 +54,7 @@
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
 int vac_cache_size;
+EXPORT_SYMBOL(vac_cache_size);
 int vac_line_size;
 
 extern struct resource sparc_iomap;
index afda3bbf78542a0297849d65fe7470a5e73716f1..ee8066c3d96c95cea2d174dedf856c429aa6dbb9 100644 (file)
@@ -154,7 +154,7 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
                if (pte_val(*pte) & _PAGE_VALID) {
                        bool exec = pte_exec(*pte);
 
-                       tlb_batch_add_one(mm, vaddr, exec, false);
+                       tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
                }
                pte++;
                vaddr += PAGE_SIZE;
@@ -209,9 +209,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                        pte_t orig_pte = __pte(pmd_val(orig));
                        bool exec = pte_exec(orig_pte);
 
-                       tlb_batch_add_one(mm, addr, exec, true);
+                       tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
                        tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
-                                       true);
+                                         REAL_HPAGE_SHIFT);
                } else {
                        tlb_batch_pmd_scan(mm, addr, orig);
                }
index 0a04811f06b78ceb7851d70b56de63e4d40b6bf4..bedf08b22a4773c5a104b56f7de8b44c461630c1 100644 (file)
@@ -122,7 +122,7 @@ void flush_tsb_user(struct tlb_batch *tb)
 
        spin_lock_irqsave(&mm->context.lock, flags);
 
-       if (tb->hugepage_shift < HPAGE_SHIFT) {
+       if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
                base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
                nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
                if (tlb_type == cheetah_plus || tlb_type == hypervisor)
@@ -155,7 +155,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
 
        spin_lock_irqsave(&mm->context.lock, flags);
 
-       if (hugepage_shift < HPAGE_SHIFT) {
+       if (hugepage_shift < REAL_HPAGE_SHIFT) {
                base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
                nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
                if (tlb_type == cheetah_plus || tlb_type == hypervisor)
index 7853b53959cd35a8d555a5ddebfbb4af1f40562a..3f9d1a83891adf9f47cba058078b447fb1de7ecf 100644 (file)
@@ -30,8 +30,10 @@ static int __init vdso32_setup(char *s)
 {
        vdso32_enabled = simple_strtoul(s, NULL, 0);
 
-       if (vdso32_enabled > 1)
+       if (vdso32_enabled > 1) {
                pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
+               vdso32_enabled = 0;
+       }
 
        return 1;
 }
@@ -62,13 +64,18 @@ subsys_initcall(sysenter_setup);
 /* Register vsyscall32 into the ABI table */
 #include <linux/sysctl.h>
 
+static const int zero;
+static const int one = 1;
+
 static struct ctl_table abi_table2[] = {
        {
                .procname       = "vsyscall32",
                .data           = &vdso32_enabled,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = (int *)&zero,
+               .extra2         = (int *)&one,
        },
        {}
 };
index 81b321ace8e0194d3ce18b29fcb42c20b834a918..f924629836a8ec23eefe0642f48c2f898ea84f2b 100644 (file)
@@ -507,6 +507,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
                cpuc->lbr_entries[i].to         = msr_lastbranch.to;
                cpuc->lbr_entries[i].mispred    = 0;
                cpuc->lbr_entries[i].predicted  = 0;
+               cpuc->lbr_entries[i].in_tx      = 0;
+               cpuc->lbr_entries[i].abort      = 0;
+               cpuc->lbr_entries[i].cycles     = 0;
                cpuc->lbr_entries[i].reserved   = 0;
        }
        cpuc->lbr_stack.nr = i;
index 9d49c18b5ea9360feb5e5bb1fe378914154f34d5..3762536619f8cb83c17777e60bbd56830d792cde 100644 (file)
@@ -287,7 +287,7 @@ struct task_struct;
 
 #define        ARCH_DLINFO_IA32                                                \
 do {                                                                   \
-       if (vdso32_enabled) {                                           \
+       if (VDSO_CURRENT_BASE) {                                        \
                NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY);                    \
                NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE);        \
        }                                                               \
index 2c1ebeb4d7376db6350b7266048a1d37d800ac86..529bb4a6487a9e42d07d0c4ceb226e41b5808f0f 100644 (file)
@@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
  * @size:      number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
- * instruction.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
  */
 static inline void arch_wb_cache_pmem(void *addr, size_t size)
 {
@@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
                clwb(p);
 }
 
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
-       return iter_is_iovec(i) == false;
-}
-
 /**
  * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
  * @addr:      PMEM destination address
@@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
        /* TODO: skip the write-back by always using non-temporal stores */
        len = copy_from_iter_nocache(addr, bytes, i);
 
-       if (__iter_needs_pmem_wb(i))
+       /*
+        * In the iovec case on x86_64 copy_from_iter_nocache() uses
+        * non-temporal stores for the bulk of the transfer, but we need
+        * to manually flush if the transfer is unaligned. A cached
+        * memory copy is used when destination or size is not naturally
+        * aligned. That is:
+        *   - Require 8-byte alignment when size is 8 bytes or larger.
+        *   - Require 4-byte alignment when size is 4 bytes.
+        *
+        * In the non-iovec case the entire destination needs to be
+        * flushed.
+        */
+       if (iter_is_iovec(i)) {
+               unsigned long flushed, dest = (unsigned long) addr;
+
+               if (bytes < 8) {
+                       if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+                               arch_wb_cache_pmem(addr, 1);
+               } else {
+                       if (!IS_ALIGNED(dest, 8)) {
+                               dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+                               arch_wb_cache_pmem(addr, 1);
+                       }
+
+                       flushed = dest - (unsigned long) addr;
+                       if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+                               arch_wb_cache_pmem(addr + bytes - 1, 1);
+               }
+       } else
                arch_wb_cache_pmem(addr, bytes);
 
        return len;
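
The replacement logic above only issues manual write-backs where copy_from_iter_nocache() may have used cached stores: a short transfer that is not exactly an aligned 4-byte word, or the misaligned head/tail of a longer one. The head-of-transfer predicate is easy to exercise on its own (the helper name is illustrative; the tail check is analogous):

#include <stdio.h>
#include <stdbool.h>

#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

/* True when the start of the copy would have gone through the cache,
 * i.e. an explicit write-back of the first cache line is needed. */
static bool head_needs_wb(unsigned long dest, unsigned long bytes)
{
        if (bytes < 8)
                return !IS_ALIGNED(dest, 4) || bytes != 4;
        return !IS_ALIGNED(dest, 8);
}

int main(void)
{
        printf("%d\n", head_needs_wb(0x1000, 4));   /* 0: aligned word  */
        printf("%d\n", head_needs_wb(0x1001, 4));   /* 1: odd dest      */
        printf("%d\n", head_needs_wb(0x1004, 16));  /* 1: not 8-aligned */
        return 0;
}
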
index f369cb8db0d5b4332125745dd5a587148f109597..badd2b31a5605f3df24dd250d1f08c9df022b7b2 100644 (file)
@@ -200,11 +200,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
        }
 
 out:
-       rdtgroup_kn_unlock(of->kn);
        for_each_enabled_rdt_resource(r) {
                kfree(r->tmp_cbms);
                r->tmp_cbms = NULL;
        }
+       rdtgroup_kn_unlock(of->kn);
        return ret ?: nbytes;
 }
 
index 8e9725c607ea6acb7a91deed9b72b2c9a873803e..5accfbdee3f06fac48eaf4423d66d7bee1e3a0a3 100644 (file)
@@ -54,6 +54,8 @@
 
 static DEFINE_MUTEX(mce_chrdev_read_mutex);
 
+static int mce_chrdev_open_count;      /* #times opened */
+
 #define mce_log_get_idx_check(p) \
 ({ \
        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
@@ -598,6 +600,10 @@ static int mce_default_notifier(struct notifier_block *nb, unsigned long val,
        if (atomic_read(&num_notifiers) > 2)
                return NOTIFY_DONE;
 
+       /* Don't print when mcelog is running */
+       if (mce_chrdev_open_count > 0)
+               return NOTIFY_DONE;
+
        __print_mce(m);
 
        return NOTIFY_DONE;
@@ -1828,7 +1834,6 @@ void mcheck_cpu_clear(struct cpuinfo_x86 *c)
  */
 
 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
-static int mce_chrdev_open_count;      /* #times opened */
 static int mce_chrdev_open_exclu;      /* already open exclusive? */
 
 static int mce_chrdev_open(struct inode *inode, struct file *file)
index 396c042e9d0ee58873de5c68e6e7e22186070147..cc30a74e4adb2c3499b52488c49498dea1851ae7 100644 (file)
@@ -846,7 +846,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
                       task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
                       me->comm, me->pid, where, frame,
                       regs->ip, regs->sp, regs->orig_ax);
-               print_vma_addr(" in ", regs->ip);
+               print_vma_addr(KERN_CONT " in ", regs->ip);
                pr_cont("\n");
        }
 
index ec1f756f9dc9ace1badccd544b6032d64e520360..71beb28600d4531d93a9966401a1ca082a9f0c2e 100644 (file)
@@ -151,8 +151,8 @@ int __copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from,
 
                                if (from->si_signo == SIGSEGV) {
                                        if (from->si_code == SEGV_BNDERR) {
-                                               compat_uptr_t lower = (unsigned long)&to->si_lower;
-                                               compat_uptr_t upper = (unsigned long)&to->si_upper;
+                                               compat_uptr_t lower = (unsigned long)from->si_lower;
+                                               compat_uptr_t upper = (unsigned long)from->si_upper;
                                                put_user_ex(lower, &to->si_lower);
                                                put_user_ex(upper, &to->si_upper);
                                        }
index 948443e115c147c28a6445eca725fad698d1a56f..4e496379a871687281ddc0c69c0e10d7ec036f09 100644 (file)
@@ -255,7 +255,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
                pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
                        tsk->comm, tsk->pid, str,
                        regs->ip, regs->sp, error_code);
-               print_vma_addr(" in ", regs->ip);
+               print_vma_addr(KERN_CONT " in ", regs->ip);
                pr_cont("\n");
        }
 
@@ -519,7 +519,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
                pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
                        tsk->comm, task_pid_nr(tsk),
                        regs->ip, regs->sp, error_code);
-               print_vma_addr(" in ", regs->ip);
+               print_vma_addr(KERN_CONT " in ", regs->ip);
                pr_cont("\n");
        }
 
index 2ee00dbbbd5188ccd51324085d51b0de679666bb..259e9b28ccf8e7ff2434d6911ffcfbfc4ef89f42 100644 (file)
@@ -8198,6 +8198,9 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
                return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
        case EXIT_REASON_PREEMPTION_TIMER:
                return false;
+       case EXIT_REASON_PML_FULL:
+               /* We don't expose PML support to L1. */
+               return false;
        default:
                return true;
        }
@@ -10267,6 +10270,18 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 
        }
 
+       if (enable_pml) {
+               /*
+                * Conceptually we want to copy the PML address and index from
+                * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+                * since we always flush the log on each vmexit, this happens
+                * to be equivalent to simply resetting the fields in vmcs02.
+                */
+               ASSERT(vmx->pml_pg);
+               vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+               vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+       }
+
        if (nested_cpu_has_ept(vmcs12)) {
                kvm_mmu_unload(vcpu);
                nested_ept_init_mmu_context(vcpu);
index 22af912d66d258f413414c7355ac82da2814b074..889e7619a0914d87ad49dbb5b960933e5dd316ad 100644 (file)
@@ -643,21 +643,40 @@ void __init init_mem_mapping(void)
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
  *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
  */
 int devmem_is_allowed(unsigned long pagenr)
 {
-       if (pagenr < 256)
-               return 1;
-       if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+       if (page_is_ram(pagenr)) {
+               /*
+                * For disallowed memory regions in the low 1MB range,
+                * request that the page be shown as all zeros.
+                */
+               if (pagenr < 256)
+                       return 2;
+
+               return 0;
+       }
+
+       /*
+        * This must follow RAM test, since System RAM is considered a
+        * restricted resource under CONFIG_STRICT_IOMEM.
+        */
+       if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+               /* Low 1MB bypasses iomem restrictions. */
+               if (pagenr < 256)
+                       return 1;
+
                return 0;
-       if (!page_is_ram(pagenr))
-               return 1;
-       return 0;
+       }
+
+       return 1;
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
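
After this rewrite devmem_is_allowed() is tri-state: 0 rejects the page, 1 allows it, and 2 allows the mapping but has reads come back zero-filled (the disallowed low-1MB RAM case). A standalone mock of the same decision flow, with both predicates stubbed to arbitrary values and the pfn-to-address shift elided:

#include <stdio.h>
#include <stdbool.h>

/* Stubs standing in for page_is_ram() / iomem_is_exclusive(). */
static bool page_is_ram(unsigned long pfn)        { return pfn < 0x8000; }
static bool iomem_is_exclusive(unsigned long pfn) { return pfn == 0x9000; }

/* 0 = reject, 1 = allow, 2 = map as zero-filled (low 1MB of RAM). */
static int devmem_is_allowed(unsigned long pagenr)
{
        if (page_is_ram(pagenr))
                return pagenr < 256 ? 2 : 0;
        if (iomem_is_exclusive(pagenr))
                return pagenr < 256 ? 1 : 0;
        return 1;
}

int main(void)
{
        printf("%d %d %d %d\n",
               devmem_is_allowed(0x10),    /* 2: low-1MB RAM, zero-filled */
               devmem_is_allowed(0x1000),  /* 0: RAM above 1MB            */
               devmem_is_allowed(0x9000),  /* 0: exclusive iomem          */
               devmem_is_allowed(0xA000)); /* 1: ordinary mmio/ACPI area  */
        return 0;
}
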
index 30031d5293c483202c526d5045cda23be6617359..cdfe8c62895981029b8f69218b717b05d0b8961a 100644 (file)
@@ -201,6 +201,10 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
                return;
        }
 
+       /* No need to reserve regions that will never be freed. */
+       if (md.attribute & EFI_MEMORY_RUNTIME)
+               return;
+
        size += addr % EFI_PAGE_SIZE;
        size = round_up(size, EFI_PAGE_SIZE);
        addr = round_down(addr, EFI_PAGE_SIZE);
index 976b1d70edbc0a2d77016409fec93ec597df9cad..4ddbfd57a7c824c7d05aaa3d6d8824a849d03628 100644 (file)
@@ -164,8 +164,21 @@ void copy_user_highpage(struct page *to, struct page *from,
 
 #define ARCH_PFN_OFFSET                (PHYS_OFFSET >> PAGE_SHIFT)
 
+#ifdef CONFIG_MMU
+static inline unsigned long ___pa(unsigned long va)
+{
+       unsigned long off = va - PAGE_OFFSET;
+
+       if (off >= XCHAL_KSEG_SIZE)
+               off -= XCHAL_KSEG_SIZE;
+
+       return off + PHYS_OFFSET;
+}
+#define __pa(x)        ___pa((unsigned long)(x))
+#else
 #define __pa(x)        \
        ((unsigned long) (x) - PAGE_OFFSET + PHYS_OFFSET)
+#endif
 #define __va(x)        \
        ((void *)((unsigned long) (x) - PHYS_OFFSET + PAGE_OFFSET))
 #define pfn_valid(pfn) \
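
The new ___pa() above accounts for a second KSEG window aliasing the same physical range: any offset past XCHAL_KSEG_SIZE is folded back before adding PHYS_OFFSET. The arithmetic, with made-up configuration constants (the real values depend on the Xtensa core configuration):

#include <stdio.h>

/* Hypothetical configuration values, for illustration only. */
#define PAGE_OFFSET     0xd0000000UL
#define PHYS_OFFSET     0x00000000UL
#define XCHAL_KSEG_SIZE 0x08000000UL    /* 128 MiB */

static unsigned long ___pa(unsigned long va)
{
        unsigned long off = va - PAGE_OFFSET;

        /* Addresses in the second (e.g. uncached) KSEG window alias
         * the same physical range, so fold them back. */
        if (off >= XCHAL_KSEG_SIZE)
                off -= XCHAL_KSEG_SIZE;

        return off + PHYS_OFFSET;
}

int main(void)
{
        printf("%#lx\n", ___pa(0xd0100000UL));  /* 0x100000 */
        printf("%#lx\n", ___pa(0xd8100000UL));  /* 0x100000 as well */
        return 0;
}
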
index cd400af4a6b25597756cda04826278fea75ecf33..6be7eb27fd29d68b7c3a13643f628b7f2ae40c57 100644 (file)
@@ -774,7 +774,10 @@ __SYSCALL(349, sys_pkey_alloc, 2)
 #define __NR_pkey_free                         350
 __SYSCALL(350, sys_pkey_free, 1)
 
-#define __NR_syscall_count                     351
+#define __NR_statx                             351
+__SYSCALL(351, sys_statx, 5)
+
+#define __NR_syscall_count                     352
 
 /*
  * sysxtensa syscall handler
index c82c43bff2968cd3bab83688bc8a362671792150..bae697a06a984536bc51ce21cb3e402d5cfbd065 100644 (file)
@@ -483,10 +483,8 @@ void show_regs(struct pt_regs * regs)
 
 static int show_trace_cb(struct stackframe *frame, void *data)
 {
-       if (kernel_text_address(frame->pc)) {
-               pr_cont(" [<%08lx>]", frame->pc);
-               print_symbol(" %s\n", frame->pc);
-       }
+       if (kernel_text_address(frame->pc))
+               pr_cont(" [<%08lx>] %pB\n", frame->pc, (void *)frame->pc);
        return 0;
 }
 
index 09af8ff18719a42c662cbb9267812975b71666b0..c974a1bbf4cbad70ccea464e94d2788c62078dde 100644 (file)
@@ -171,7 +171,8 @@ void blk_mq_sched_put_request(struct request *rq)
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
-       struct elevator_queue *e = hctx->queue->elevator;
+       struct request_queue *q = hctx->queue;
+       struct elevator_queue *e = q->elevator;
        const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
        bool did_work = false;
        LIST_HEAD(rq_list);
@@ -203,10 +204,10 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
         */
        if (!list_empty(&rq_list)) {
                blk_mq_sched_mark_restart_hctx(hctx);
-               did_work = blk_mq_dispatch_rq_list(hctx, &rq_list);
+               did_work = blk_mq_dispatch_rq_list(q, &rq_list);
        } else if (!has_sched_dispatch) {
                blk_mq_flush_busy_ctxs(hctx, &rq_list);
-               blk_mq_dispatch_rq_list(hctx, &rq_list);
+               blk_mq_dispatch_rq_list(q, &rq_list);
        }
 
        /*
@@ -222,7 +223,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
                        if (!rq)
                                break;
                        list_add(&rq->queuelist, &rq_list);
-               } while (blk_mq_dispatch_rq_list(hctx, &rq_list));
+               } while (blk_mq_dispatch_rq_list(q, &rq_list));
        }
 }
 
@@ -317,25 +318,68 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
        return true;
 }
 
-static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
        if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
                clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-               if (blk_mq_hctx_has_pending(hctx))
+               if (blk_mq_hctx_has_pending(hctx)) {
                        blk_mq_run_hw_queue(hctx, true);
+                       return true;
+               }
        }
+       return false;
 }
 
-void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
-{
-       struct request_queue *q = hctx->queue;
-       unsigned int i;
+/**
+ * list_for_each_entry_rcu_rr - iterate in a round-robin fashion over rcu list
+ * @pos:    loop cursor.
+ * @skip:   the list element that will not be examined. Iteration starts at
+ *          @skip->next.
+ * @head:   head of the list to examine. This list must have at least one
+ *          element, namely @skip.
+ * @member: name of the list_head structure within typeof(*pos).
+ */
+#define list_for_each_entry_rcu_rr(pos, skip, head, member)            \
+       for ((pos) = (skip);                                            \
+            (pos = (pos)->member.next != (head) ? list_entry_rcu(      \
+                       (pos)->member.next, typeof(*pos), member) :     \
+             list_entry_rcu((pos)->member.next->next, typeof(*pos), member)), \
+            (pos) != (skip); )
 
-       if (test_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
-               if (test_and_clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags)) {
-                       queue_for_each_hw_ctx(q, hctx, i)
-                               blk_mq_sched_restart_hctx(hctx);
+/*
+ * Called after a driver tag has been freed to check whether a hctx needs to
+ * be restarted. Restarts @hctx if its tag set is not shared. Restarts hardware
+ * queues in a round-robin fashion if the tag set of @hctx is shared with other
+ * hardware queues.
+ */
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *const hctx)
+{
+       struct blk_mq_tags *const tags = hctx->tags;
+       struct blk_mq_tag_set *const set = hctx->queue->tag_set;
+       struct request_queue *const queue = hctx->queue, *q;
+       struct blk_mq_hw_ctx *hctx2;
+       unsigned int i, j;
+
+       if (set->flags & BLK_MQ_F_TAG_SHARED) {
+               rcu_read_lock();
+               list_for_each_entry_rcu_rr(q, queue, &set->tag_list,
+                                          tag_set_list) {
+                       queue_for_each_hw_ctx(q, hctx2, i)
+                               if (hctx2->tags == tags &&
+                                   blk_mq_sched_restart_hctx(hctx2))
+                                       goto done;
+               }
+               j = hctx->queue_num + 1;
+               for (i = 0; i < queue->nr_hw_queues; i++, j++) {
+                       if (j == queue->nr_hw_queues)
+                               j = 0;
+                       hctx2 = queue->queue_hw_ctx[j];
+                       if (hctx2->tags == tags &&
+                           blk_mq_sched_restart_hctx(hctx2))
+                               break;
                }
+done:
+               rcu_read_unlock();
        } else {
                blk_mq_sched_restart_hctx(hctx);
        }
@@ -431,11 +475,67 @@ static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
        }
 }
 
-int blk_mq_sched_setup(struct request_queue *q)
+static int blk_mq_sched_alloc_tags(struct request_queue *q,
+                                  struct blk_mq_hw_ctx *hctx,
+                                  unsigned int hctx_idx)
+{
+       struct blk_mq_tag_set *set = q->tag_set;
+       int ret;
+
+       hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
+                                              set->reserved_tags);
+       if (!hctx->sched_tags)
+               return -ENOMEM;
+
+       ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
+       if (ret)
+               blk_mq_sched_free_tags(set, hctx, hctx_idx);
+
+       return ret;
+}
+
+static void blk_mq_sched_tags_teardown(struct request_queue *q)
 {
        struct blk_mq_tag_set *set = q->tag_set;
        struct blk_mq_hw_ctx *hctx;
-       int ret, i;
+       int i;
+
+       queue_for_each_hw_ctx(q, hctx, i)
+               blk_mq_sched_free_tags(set, hctx, i);
+}
+
+int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+                          unsigned int hctx_idx)
+{
+       struct elevator_queue *e = q->elevator;
+
+       if (!e)
+               return 0;
+
+       return blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
+}
+
+void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+                           unsigned int hctx_idx)
+{
+       struct elevator_queue *e = q->elevator;
+
+       if (!e)
+               return;
+
+       blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
+}
+
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
+       int ret;
+
+       if (!e) {
+               q->elevator = NULL;
+               return 0;
+       }
 
        /*
         * Default to 256, since we don't split into sync/async like the
@@ -443,49 +543,30 @@ int blk_mq_sched_setup(struct request_queue *q)
         */
        q->nr_requests = 2 * BLKDEV_MAX_RQ;
 
-       /*
-        * We're switching to using an IO scheduler, so setup the hctx
-        * scheduler tags and switch the request map from the regular
-        * tags to scheduler tags. First allocate what we need, so we
-        * can safely fail and fallback, if needed.
-        */
-       ret = 0;
        queue_for_each_hw_ctx(q, hctx, i) {
-               hctx->sched_tags = blk_mq_alloc_rq_map(set, i,
-                               q->nr_requests, set->reserved_tags);
-               if (!hctx->sched_tags) {
-                       ret = -ENOMEM;
-                       break;
-               }
-               ret = blk_mq_alloc_rqs(set, hctx->sched_tags, i, q->nr_requests);
+               ret = blk_mq_sched_alloc_tags(q, hctx, i);
                if (ret)
-                       break;
+                       goto err;
        }
 
-       /*
-        * If we failed, free what we did allocate
-        */
-       if (ret) {
-               queue_for_each_hw_ctx(q, hctx, i) {
-                       if (!hctx->sched_tags)
-                               continue;
-                       blk_mq_sched_free_tags(set, hctx, i);
-               }
-
-               return ret;
-       }
+       ret = e->ops.mq.init_sched(q, e);
+       if (ret)
+               goto err;
 
        return 0;
+
+err:
+       blk_mq_sched_tags_teardown(q);
+       q->elevator = NULL;
+       return ret;
 }
 
-void blk_mq_sched_teardown(struct request_queue *q)
+void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
 {
-       struct blk_mq_tag_set *set = q->tag_set;
-       struct blk_mq_hw_ctx *hctx;
-       int i;
-
-       queue_for_each_hw_ctx(q, hctx, i)
-               blk_mq_sched_free_tags(set, hctx, i);
+       if (e->type->ops.mq.exit_sched)
+               e->type->ops.mq.exit_sched(e);
+       blk_mq_sched_tags_teardown(q);
+       q->elevator = NULL;
 }
 
 int blk_mq_sched_init(struct request_queue *q)
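
The list_for_each_entry_rcu_rr() helper introduced above starts at @skip->next, hops over the list head, and terminates on wrapping back around to @skip. Stripped of the RCU accessors and list_head plumbing, the traversal shape looks like this hypothetical userspace sketch:

#include <stdio.h>

struct node {
        int id;
        struct node *next;      /* circular: last node points at head */
};

/* Visit every node except skip, starting at skip->next and treating
 * head as a sentinel to hop over — a non-RCU rendition of the
 * round-robin iteration above. */
static void for_each_rr(struct node *head, struct node *skip)
{
        struct node *pos = skip->next;

        while (pos != skip) {
                if (pos != head)
                        printf("visit %d\n", pos->id);
                pos = pos->next;
        }
}

int main(void)
{
        struct node head = { 0 }, a = { 1 }, b = { 2 }, c = { 3 };

        head.next = &a; a.next = &b; b.next = &c; c.next = &head;
        for_each_rr(&head, &b);     /* visits 3, then 1 */
        return 0;
}

Restart probing therefore begins at the hctx after the one that freed the tag, which is what spreads restarts round-robin across queues sharing a tag set.
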
index a75b16b123f7aadac672651a7eef5c79f5553e16..3a9e6e40558b599dee22115be54f5f7ce9f11080 100644 (file)
@@ -19,7 +19,7 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
                                struct request **merged_request);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
-void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
+void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async, bool can_block);
@@ -32,8 +32,13 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
                        struct list_head *rq_list,
                        struct request *(*get_rq)(struct blk_mq_hw_ctx *));
 
-int blk_mq_sched_setup(struct request_queue *q);
-void blk_mq_sched_teardown(struct request_queue *q);
+int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
+void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+
+int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+                          unsigned int hctx_idx);
+void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
+                           unsigned int hctx_idx);
 
 int blk_mq_sched_init(struct request_queue *q);
 
@@ -131,20 +136,6 @@ static inline void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
                set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
 }
 
-/*
- * Mark a hardware queue and the request queue it belongs to as needing a
- * restart.
- */
-static inline void blk_mq_sched_mark_restart_queue(struct blk_mq_hw_ctx *hctx)
-{
-       struct request_queue *q = hctx->queue;
-
-       if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-               set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
-       if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
-               set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
-}
-
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 {
        return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
index 6b6e7bc041dbf3c4699ca9bfa8e36c58a8f119d0..572966f495966b7fe8ce486ad4e5912a0d044eb5 100644 (file)
@@ -321,7 +321,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 
        rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
 
-       blk_mq_put_ctx(alloc_data.ctx);
        blk_queue_exit(q);
 
        if (!rq)
@@ -349,7 +348,7 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
        if (sched_tag != -1)
                blk_mq_sched_completed_request(hctx, rq);
-       blk_mq_sched_restart_queues(hctx);
+       blk_mq_sched_restart(hctx);
        blk_queue_exit(q);
 }
 
@@ -846,12 +845,8 @@ bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
                .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
        };
 
-       if (rq->tag != -1) {
-done:
-               if (hctx)
-                       *hctx = data.hctx;
-               return true;
-       }
+       if (rq->tag != -1)
+               goto done;
 
        if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
                data.flags |= BLK_MQ_REQ_RESERVED;
@@ -863,10 +858,12 @@ done:
                        atomic_inc(&data.hctx->nr_active);
                }
                data.hctx->tags->rqs[rq->tag] = rq;
-               goto done;
        }
 
-       return false;
+done:
+       if (hctx)
+               *hctx = data.hctx;
+       return rq->tag != -1;
 }
 
 static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
@@ -963,14 +960,17 @@ static bool blk_mq_dispatch_wait_add(struct blk_mq_hw_ctx *hctx)
        return true;
 }
 
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
+bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 {
-       struct request_queue *q = hctx->queue;
+       struct blk_mq_hw_ctx *hctx;
        struct request *rq;
        LIST_HEAD(driver_list);
        struct list_head *dptr;
        int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
 
+       if (list_empty(list))
+               return false;
+
        /*
         * Start off with dptr being NULL, so we start the first request
         * immediately, even if we have more pending.
@@ -981,7 +981,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
         * Now process all the entries, sending them to the driver.
         */
        errors = queued = 0;
-       while (!list_empty(list)) {
+       do {
                struct blk_mq_queue_data bd;
 
                rq = list_first_entry(list, struct request, queuelist);
@@ -1052,7 +1052,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
                 */
                if (!dptr && list->next != list->prev)
                        dptr = &driver_list;
-       }
+       } while (!list_empty(list));
 
        hctx->dispatched[queued_to_index(queued)]++;
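
Note: blk_mq_dispatch_rq_list() now takes the request queue rather than one hctx, and hctx is presumably assigned per request inside the loop (context not shown), which the blk_mq_get_driver_tag() change above enables by always reporting the hctx it used. With the list_empty() guard hoisted to the top, the while loop becomes do/while, and an empty list now returns false immediately instead of executing the loop zero times. Control-flow sketch:

    #include <stdbool.h>

    struct rq_list { int pending; };

    static bool list_empty(const struct rq_list *l) { return l->pending == 0; }
    static void dispatch_one(struct rq_list *l)     { l->pending--; }

    static bool dispatch_rq_list(struct rq_list *list)
    {
            if (list_empty(list))          /* hoisted guard */
                    return false;
            do {
                    dispatch_one(list);    /* real code may break out on BUSY */
            } while (!list_empty(list));
            return true;                   /* simplified; the real return is richer */
    }
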
 
@@ -1135,7 +1135,8 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
        return hctx->next_cpu;
 }
 
-void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
+                                       unsigned long msecs)
 {
        if (unlikely(blk_mq_hctx_stopped(hctx) ||
                     !blk_mq_hw_queue_mapped(hctx)))
@@ -1152,7 +1153,24 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
                put_cpu();
        }
 
-       kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
+       if (msecs == 0)
+               kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
+                                        &hctx->run_work);
+       else
+               kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+                                                &hctx->delayed_run_work,
+                                                msecs_to_jiffies(msecs));
+}
+
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
+{
+       __blk_mq_delay_run_hw_queue(hctx, true, msecs);
+}
+EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
+
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+       __blk_mq_delay_run_hw_queue(hctx, async, 0);
 }
 
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
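
Note: __blk_mq_delay_run_hw_queue() folds the immediate and delayed cases together: msecs == 0 keeps the old kblockd_schedule_work_on() path, anything else lands on the new delayed_run_work initialized below. The exported blk_mq_delay_run_hw_queue() gives drivers a back-off primitive. A hypothetical driver fragment (device_busy() is an assumed helper, not part of this diff):

    /* Out of device resources: re-run the queue in 100 ms instead of
     * retrying in a tight loop. */
    if (device_busy(dev)) {
            blk_mq_delay_run_hw_queue(hctx, 100);
            return BLK_MQ_RQ_QUEUE_BUSY;
    }
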
@@ -1255,6 +1273,15 @@ static void blk_mq_run_work_fn(struct work_struct *work)
        __blk_mq_run_hw_queue(hctx);
 }
 
+static void blk_mq_delayed_run_work_fn(struct work_struct *work)
+{
+       struct blk_mq_hw_ctx *hctx;
+
+       hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
+
+       __blk_mq_run_hw_queue(hctx);
+}
+
 static void blk_mq_delay_work_fn(struct work_struct *work)
 {
        struct blk_mq_hw_ctx *hctx;
@@ -1924,6 +1951,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
                                       hctx->fq->flush_rq, hctx_idx,
                                       flush_start_tag + hctx_idx);
 
+       blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
+
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);
 
@@ -1960,6 +1989,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
                node = hctx->numa_node = set->numa_node;
 
        INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
+       INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
        INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
        spin_lock_init(&hctx->lock);
        INIT_LIST_HEAD(&hctx->dispatch);
@@ -1990,9 +2020,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
            set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
                goto free_bitmap;
 
+       if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
+               goto exit_hctx;
+
        hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
        if (!hctx->fq)
-               goto exit_hctx;
+               goto sched_exit_hctx;
 
        if (set->ops->init_request &&
            set->ops->init_request(set->driver_data,
@@ -2007,6 +2040,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
 
  free_fq:
        kfree(hctx->fq);
+ sched_exit_hctx:
+       blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
  exit_hctx:
        if (set->ops->exit_hctx)
                set->ops->exit_hctx(hctx, hctx_idx);
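
Note: the error path grows a sched_exit_hctx label that undoes blk_mq_sched_init_hctx(), slotting into the usual kernel unwind chain, where labels mirror init order in reverse. Standalone sketch of the pattern:

    #include <errno.h>

    static int  step_a(void) { return 0; }
    static int  step_b(void) { return 0; }
    static int  step_c(void) { return -1; }   /* pretend this one fails */
    static void undo_a(void) { }
    static void undo_b(void) { }

    static int init_all(void)
    {
            if (step_a())
                    goto fail;
            if (step_b())
                    goto err_a;
            if (step_c())
                    goto err_b;        /* unwind b, then a, in LIFO order */
            return 0;
    err_b:
            undo_b();
    err_a:
            undo_a();
    fail:
            return -ENOMEM;
    }
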
@@ -2233,8 +2268,6 @@ void blk_mq_release(struct request_queue *q)
        struct blk_mq_hw_ctx *hctx;
        unsigned int i;
 
-       blk_mq_sched_teardown(q);
-
        /* hctx kobj stays in hctx */
        queue_for_each_hw_ctx(q, hctx, i) {
                if (!hctx)
@@ -2565,6 +2598,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
        return 0;
 }
 
+static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+{
+       if (set->ops->map_queues)
+               return set->ops->map_queues(set);
+       else
+               return blk_mq_map_queues(set);
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the
@@ -2619,10 +2660,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
        if (!set->mq_map)
                goto out_free_tags;
 
-       if (set->ops->map_queues)
-               ret = set->ops->map_queues(set);
-       else
-               ret = blk_mq_map_queues(set);
+       ret = blk_mq_update_queue_map(set);
        if (ret)
                goto out_free_mq_map;
 
@@ -2714,6 +2752,7 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
                blk_mq_freeze_queue(q);
 
        set->nr_hw_queues = nr_hw_queues;
+       blk_mq_update_queue_map(set);
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                blk_mq_realloc_hw_ctxs(set, q);
 
index b79f9a7d8cf62010dd9a91d3b271e5d2474cb836..660a17e1d0331ce2a518722501b65ad6404bb73b 100644 (file)
@@ -31,7 +31,7 @@ void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
+bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
 bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
index c44b321335f3ebbcc662f0f70b7605f5019c60b7..37f0b3ad635ea63c2b176fff051646d600dd453d 100644 (file)
@@ -816,7 +816,7 @@ static void blk_release_queue(struct kobject *kobj)
 
        if (q->elevator) {
                ioc_clear_queue(q);
-               elevator_exit(q->elevator);
+               elevator_exit(q, q->elevator);
        }
 
        blk_exit_rl(&q->root_rl);
index 01139f549b5be73047f346153f5b8fedcb23b3d0..dbeecf7be719eaada4457ed3c7ac799fd0bbacec 100644 (file)
@@ -242,26 +242,21 @@ int elevator_init(struct request_queue *q, char *name)
                }
        }
 
-       if (e->uses_mq) {
-               err = blk_mq_sched_setup(q);
-               if (!err)
-                       err = e->ops.mq.init_sched(q, e);
-       } else
+       if (e->uses_mq)
+               err = blk_mq_init_sched(q, e);
+       else
                err = e->ops.sq.elevator_init_fn(q, e);
-       if (err) {
-               if (e->uses_mq)
-                       blk_mq_sched_teardown(q);
+       if (err)
                elevator_put(e);
-       }
        return err;
 }
 EXPORT_SYMBOL(elevator_init);
 
-void elevator_exit(struct elevator_queue *e)
+void elevator_exit(struct request_queue *q, struct elevator_queue *e)
 {
        mutex_lock(&e->sysfs_lock);
        if (e->uses_mq && e->type->ops.mq.exit_sched)
-               e->type->ops.mq.exit_sched(e);
+               blk_mq_exit_sched(q, e);
        else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
                e->type->ops.sq.elevator_exit_fn(e);
        mutex_unlock(&e->sysfs_lock);
@@ -946,6 +941,45 @@ void elv_unregister(struct elevator_type *e)
 }
 EXPORT_SYMBOL_GPL(elv_unregister);
 
+static int elevator_switch_mq(struct request_queue *q,
+                             struct elevator_type *new_e)
+{
+       int ret;
+
+       blk_mq_freeze_queue(q);
+       blk_mq_quiesce_queue(q);
+
+       if (q->elevator) {
+               if (q->elevator->registered)
+                       elv_unregister_queue(q);
+               ioc_clear_queue(q);
+               elevator_exit(q, q->elevator);
+       }
+
+       ret = blk_mq_init_sched(q, new_e);
+       if (ret)
+               goto out;
+
+       if (new_e) {
+               ret = elv_register_queue(q);
+               if (ret) {
+                       elevator_exit(q, q->elevator);
+                       goto out;
+               }
+       }
+
+       if (new_e)
+               blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
+       else
+               blk_add_trace_msg(q, "elv switch: none");
+
+out:
+       blk_mq_unfreeze_queue(q);
+       blk_mq_start_stopped_hw_queues(q, true);
+       return ret;
+}
+
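
Note: elevator_switch_mq() brackets the whole swap with blk_mq_freeze_queue() plus blk_mq_quiesce_queue(): freezing blocks new submissions and drains in-flight requests, quiescing waits out dispatch code that is already running, so the old scheduler's hooks cannot fire mid-teardown. Shape of the critical section, names as above:

    blk_mq_freeze_queue(q);           /* no new requests enter */
    blk_mq_quiesce_queue(q);          /* running dispatches finish */
    /* ... exit old elevator, init and register the new one ... */
    blk_mq_unfreeze_queue(q);
    blk_mq_start_stopped_hw_queues(q, true);
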
 /*
  * switch to new_e io scheduler. be careful not to introduce deadlocks -
  * we don't free the old io scheduler, before we have allocated what we
@@ -958,10 +992,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
        bool old_registered = false;
        int err;
 
-       if (q->mq_ops) {
-               blk_mq_freeze_queue(q);
-               blk_mq_quiesce_queue(q);
-       }
+       if (q->mq_ops)
+               return elevator_switch_mq(q, new_e);
 
        /*
         * Turn on BYPASS and drain all requests w/ elevator private data.
@@ -973,11 +1005,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
        if (old) {
                old_registered = old->registered;
 
-               if (old->uses_mq)
-                       blk_mq_sched_teardown(q);
-
-               if (!q->mq_ops)
-                       blk_queue_bypass_start(q);
+               blk_queue_bypass_start(q);
 
                /* unregister and clear all auxiliary data of the old elevator */
                if (old_registered)
@@ -987,56 +1015,32 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
        }
 
        /* allocate, init and register new elevator */
-       if (new_e) {
-               if (new_e->uses_mq) {
-                       err = blk_mq_sched_setup(q);
-                       if (!err)
-                               err = new_e->ops.mq.init_sched(q, new_e);
-               } else
-                       err = new_e->ops.sq.elevator_init_fn(q, new_e);
-               if (err)
-                       goto fail_init;
+       err = new_e->ops.sq.elevator_init_fn(q, new_e);
+       if (err)
+               goto fail_init;
 
-               err = elv_register_queue(q);
-               if (err)
-                       goto fail_register;
-       } else
-               q->elevator = NULL;
+       err = elv_register_queue(q);
+       if (err)
+               goto fail_register;
 
        /* done, kill the old one and finish */
        if (old) {
-               elevator_exit(old);
-               if (!q->mq_ops)
-                       blk_queue_bypass_end(q);
+               elevator_exit(q, old);
+               blk_queue_bypass_end(q);
        }
 
-       if (q->mq_ops) {
-               blk_mq_unfreeze_queue(q);
-               blk_mq_start_stopped_hw_queues(q, true);
-       }
-
-       if (new_e)
-               blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
-       else
-               blk_add_trace_msg(q, "elv switch: none");
+       blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
 
        return 0;
 
 fail_register:
-       if (q->mq_ops)
-               blk_mq_sched_teardown(q);
-       elevator_exit(q->elevator);
+       elevator_exit(q, q->elevator);
 fail_init:
        /* switch failed, restore and re-register old elevator */
        if (old) {
                q->elevator = old;
                elv_register_queue(q);
-               if (!q->mq_ops)
-                       blk_queue_bypass_end(q);
-       }
-       if (q->mq_ops) {
-               blk_mq_unfreeze_queue(q);
-               blk_mq_start_stopped_hw_queues(q, true);
+               blk_queue_bypass_end(q);
        }
 
        return err;
index e58c4970c22b7cc1cdb5e8f08875d9c1e7714568..826cd7ab4d4a2ec830438b40e987e230bd03f32f 100644 (file)
@@ -32,6 +32,7 @@ struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
+       u32 flags;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
 };
 
@@ -253,6 +254,8 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
+       priv->flags = req->base.flags;
+
        /*
         * WARNING: We do not backup req->priv here! The req->priv
         *          is for internal use of the Crypto API and the
@@ -267,38 +270,44 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
        return 0;
 }
 
-static void ahash_restore_req(struct ahash_request *req)
+static void ahash_restore_req(struct ahash_request *req, int err)
 {
        struct ahash_request_priv *priv = req->priv;
 
+       if (!err)
+               memcpy(priv->result, req->result,
+                      crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
        /* Restore the original crypto request. */
        req->result = priv->result;
-       req->base.complete = priv->complete;
-       req->base.data = priv->data;
+
+       ahash_request_set_callback(req, priv->flags,
+                                  priv->complete, priv->data);
        req->priv = NULL;
 
        /* Free the req->priv.priv from the ADJUSTED request. */
        kzfree(priv);
 }
 
-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+static void ahash_notify_einprogress(struct ahash_request *req)
 {
        struct ahash_request_priv *priv = req->priv;
+       struct crypto_async_request oreq;
 
-       if (err == -EINPROGRESS)
-               return;
-
-       if (!err)
-               memcpy(priv->result, req->result,
-                      crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+       oreq.data = priv->data;
 
-       ahash_restore_req(req);
+       priv->complete(&oreq, -EINPROGRESS);
 }
 
 static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
 {
        struct ahash_request *areq = req->data;
 
+       if (err == -EINPROGRESS) {
+               ahash_notify_einprogress(areq);
+               return;
+       }
+
        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
@@ -309,7 +318,7 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
         */
 
        /* First copy req->result into req->priv.result */
-       ahash_op_unaligned_finish(areq, err);
+       ahash_restore_req(areq, err);
 
        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
@@ -325,7 +334,12 @@ static int ahash_op_unaligned(struct ahash_request *req,
                return err;
 
        err = op(req);
-       ahash_op_unaligned_finish(req, err);
+       if (err == -EINPROGRESS ||
+           (err == -EBUSY && (ahash_request_flags(req) &
+                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err;
+
+       ahash_restore_req(req, err);
 
        return err;
 }
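
Note: the same guard now appears in ahash_op_unaligned() above and in the finup paths below: a request that is still in flight must not have its state restored yet, because its completion callback owns that step. Factored into a standalone predicate for clarity (hypothetical helper, not in the diff):

    /* "Pending" means the crypto layer took the request (-EINPROGRESS),
     * or it sits on the backlog (-EBUSY) and the caller allowed that
     * via CRYPTO_TFM_REQ_MAY_BACKLOG. */
    static bool ahash_op_pending(int err, u32 flags)
    {
            return err == -EINPROGRESS ||
                   (err == -EBUSY && (flags & CRYPTO_TFM_REQ_MAY_BACKLOG));
    }
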
@@ -360,25 +374,14 @@ int crypto_ahash_digest(struct ahash_request *req)
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
 {
-       struct ahash_request_priv *priv = req->priv;
+       struct ahash_request *areq = req->data;
 
        if (err == -EINPROGRESS)
                return;
 
-       if (!err)
-               memcpy(priv->result, req->result,
-                      crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
-       ahash_restore_req(req);
-}
-
-static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
-{
-       struct ahash_request *areq = req->data;
-
-       ahash_def_finup_finish2(areq, err);
+       ahash_restore_req(areq, err);
 
        areq->base.complete(&areq->base, err);
 }
@@ -389,11 +392,15 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
                goto out;
 
        req->base.complete = ahash_def_finup_done2;
-       req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
        err = crypto_ahash_reqtfm(req)->final(req);
+       if (err == -EINPROGRESS ||
+           (err == -EBUSY && (ahash_request_flags(req) &
+                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err;
 
 out:
-       ahash_def_finup_finish2(req, err);
+       ahash_restore_req(req, err);
        return err;
 }
 
@@ -401,7 +408,16 @@ static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
 {
        struct ahash_request *areq = req->data;
 
+       if (err == -EINPROGRESS) {
+               ahash_notify_einprogress(areq);
+               return;
+       }
+
+       areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
        err = ahash_def_finup_finish1(areq, err);
+       if (areq->priv)
+               return;
 
        areq->base.complete(&areq->base, err);
 }
@@ -416,6 +432,11 @@ static int ahash_def_finup(struct ahash_request *req)
                return err;
 
        err = tfm->update(req);
+       if (err == -EINPROGRESS ||
+           (err == -EBUSY && (ahash_request_flags(req) &
+                              CRYPTO_TFM_REQ_MAY_BACKLOG)))
+               return err;
+
        return ahash_def_finup_finish1(req, err);
 }
 
index 5a805375865731f4cc7e94790362c208dd58d05b..ef59d9926ee99bccfbdc6077451cb95008d07651 100644 (file)
@@ -40,6 +40,7 @@ struct aead_async_req {
        struct aead_async_rsgl first_rsgl;
        struct list_head list;
        struct kiocb *iocb;
+       struct sock *sk;
        unsigned int tsgls;
        char iv[];
 };
@@ -379,12 +380,10 @@ unlock:
 
 static void aead_async_cb(struct crypto_async_request *_req, int err)
 {
-       struct sock *sk = _req->data;
-       struct alg_sock *ask = alg_sk(sk);
-       struct aead_ctx *ctx = ask->private;
-       struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
-       struct aead_request *req = aead_request_cast(_req);
+       struct aead_request *req = _req->data;
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+       struct sock *sk = areq->sk;
        struct scatterlist *sg = areq->tsgl;
        struct aead_async_rsgl *rsgl;
        struct kiocb *iocb = areq->iocb;
@@ -447,11 +446,12 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
        memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
        INIT_LIST_HEAD(&areq->list);
        areq->iocb = msg->msg_iocb;
+       areq->sk = sk;
        memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
        aead_request_set_tfm(req, tfm);
        aead_request_set_ad(req, ctx->aead_assoclen);
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                 aead_async_cb, sk);
+                                 aead_async_cb, req);
        used -= ctx->aead_assoclen;
 
        /* take over all tx sgls from ctx */
index 3ea095adafd9af0067f695c4c70e2af702f70923..a8bfae4451bfcbfd26e25bbb39280818dcc139d4 100644 (file)
@@ -345,6 +345,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -352,6 +359,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
@@ -389,6 +397,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -396,6 +411,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
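
Note: these lrw callbacks (and the identical xts ones in the next file) become -EINPROGRESS-aware: the notification is passed up to the original request only while nothing has been consumed yet (rctx->left still equals req->cryptlen); intermediate -EINPROGRESS reports for later chunks are swallowed, and only the final completion is forwarded. Annotated shape:

    if (err == -EINPROGRESS) {
            if (rctx->left != req->cryptlen)
                    return;        /* later chunk: swallow the notification */
            goto out;              /* nothing consumed yet: pass it up */
    }
    /* ... normal per-chunk bookkeeping ... */
    out:
            skcipher_request_complete(req, err);
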
 
index c976bfac29da526844f6a5578fea1455945c26f3..89ace5ebc2da88df3e049e89e1d2640b2e361e3d 100644 (file)
@@ -286,6 +286,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -293,6 +300,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
@@ -330,6 +338,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        struct rctx *rctx;
 
        rctx = skcipher_request_ctx(req);
+
+       if (err == -EINPROGRESS) {
+               if (rctx->left != req->cryptlen)
+                       return;
+               goto out;
+       }
+
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -337,6 +352,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
        if (rctx->left)
                return;
 
+out:
        skcipher_request_complete(req, err);
 }
 
index c86bae7b1d0fc4d0fce5832a1f2716f59d91a7e6..ff096d9755b925d9f72105f42993ebcc7c0522e1 100644 (file)
@@ -421,10 +421,8 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
 
        ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
 
-       /*
-        * The absolute minimum resource template is one end_tag descriptor.
-        * However, we will treat a lone end_tag as just a simple buffer.
-        */
+       /* The absolute minimum resource template is one end_tag descriptor */
+
        if (aml_length < sizeof(struct aml_resource_end_tag)) {
                return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
        }
@@ -456,8 +454,9 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
                /* Invoke the user function */
 
                if (user_function) {
-                       status = user_function(aml, length, offset,
-                                              resource_index, context);
+                       status =
+                           user_function(aml, length, offset, resource_index,
+                                         context);
                        if (ACPI_FAILURE(status)) {
                                return_ACPI_STATUS(status);
                        }
@@ -481,12 +480,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
                                *context = aml;
                        }
 
-                       /* Check if buffer is defined to be longer than the resource length */
-
-                       if (aml_length > (offset + length)) {
-                               return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
-                       }
-
                        /* Normal exit */
 
                        return_ACPI_STATUS(AE_OK);
index fb19e1cdb6415ac8d1913e7017106911dcb7a7dd..edc8663b5db313dc83e47b58cdec33544f236f67 100644 (file)
@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
                return -ENODEV;
 
        /*
-        * If the device has a _HID (or _CID) returning a valid ACPI/PNP
-        * device ID, it is better to make it look less attractive here, so that
-        * the other device with the same _ADR value (that may not have a valid
-        * device ID) can be matched going forward.  [This means a second spec
-        * violation in a row, so whatever we do here is best effort anyway.]
+        * If the device has a _HID returning a valid ACPI/PNP device ID, it is
+        * better to make it look less attractive here, so that the other device
+        * with the same _ADR value (that may not have a valid device ID) can be
+        * matched going forward.  [This means a second spec violation in a row,
+        * so whatever we do here is best effort anyway.]
         */
-       return sta_present && list_empty(&adev->pnp.ids) ?
+       return sta_present && !adev->pnp.type.platform_id ?
                        FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
 }
 
index 662036bdc65eca8d531886cc658fd9829cc60c00..c8ea9d698cd0f30546d731df6429b3f556140a76 100644 (file)
@@ -1617,7 +1617,11 @@ static int cmp_map(const void *m0, const void *m1)
        const struct nfit_set_info_map *map0 = m0;
        const struct nfit_set_info_map *map1 = m1;
 
-       return map0->region_offset - map1->region_offset;
+       if (map0->region_offset < map1->region_offset)
+               return -1;
+       else if (map0->region_offset > map1->region_offset)
+               return 1;
+       return 0;
 }
 
 /* Retrieve the nth entry referencing this spa */
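
Note: the comparator fix above matters because region_offset is a 64-bit field; "return a - b;" truncates the difference to int, so a large difference can wrap or collapse to zero and misorder the sort. Standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    static int bad_cmp(uint64_t a, uint64_t b)  { return a - b; }
    static int good_cmp(uint64_t a, uint64_t b) { return (a > b) - (a < b); }

    int main(void)
    {
            /* difference is 2^32; truncated to a 32-bit int it reads as 0 */
            printf("bad=%d good=%d\n",
                   bad_cmp(0x100000001ULL, 1ULL),    /* 0: "equal" (wrong)  */
                   good_cmp(0x100000001ULL, 1ULL));  /* 1: greater (right) */
            return 0;
    }
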
index 192691880d55c9499e1d77e68058a56d8e5318f3..2433569b02ef5cf40dd82cebec6fc83186f4dc3b 100644 (file)
@@ -1857,15 +1857,20 @@ static void acpi_bus_attach(struct acpi_device *device)
                return;
 
        device->flags.match_driver = true;
-       if (!ret) {
-               ret = device_attach(&device->dev);
-               if (ret < 0)
-                       return;
-
-               if (!ret && device->pnp.type.platform_id)
-                       acpi_default_enumeration(device);
+       if (ret > 0) {
+               acpi_device_set_enumerated(device);
+               goto ok;
        }
 
+       ret = device_attach(&device->dev);
+       if (ret < 0)
+               return;
+
+       if (ret > 0 || !device->pnp.type.platform_id)
+               acpi_device_set_enumerated(device);
+       else
+               acpi_default_enumeration(device);
+
  ok:
        list_for_each_entry(child, &device->children, node)
                acpi_bus_attach(child);
index 6c9aa95a9a050cc070ab222a382f5dabd55ec7ea..49d705c9f0f7b9c6b2ef2549769b6901438c2854 100644 (file)
@@ -278,11 +278,6 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        };
        const struct ata_port_info *ppi[] = { &info, &info };
 
-       /* SB600/700 don't have secondary port wired */
-       if ((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE) ||
-               (pdev->device == PCI_DEVICE_ID_ATI_IXP700_IDE))
-               ppi[1] = &ata_dummy_port_info;
-
        return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
                                      ATA_HOST_PARALLEL_SCAN);
 }
index 0636d84fbefe0acc889004b39e83c0137fc0f6a0..f3f538eec7b3bb85b682368ffb42496989c97263 100644 (file)
@@ -644,14 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id,
                pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
        }
 
-       /* enable IRQ on hotplug */
-       pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
-       if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
-               dev_dbg(&pdev->dev,
-                       "enabling SATA hotplug (0x%x)\n",
-                       (int) tmp8);
-               tmp8 |= SATA_HOTPLUG;
-               pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+       if (board_id == vt6421) {
+               /* enable IRQ on hotplug */
+               pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
+               if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
+                       dev_dbg(&pdev->dev,
+                               "enabling SATA hotplug (0x%x)\n",
+                               (int) tmp8);
+                       tmp8 |= SATA_HOTPLUG;
+                       pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
+               }
        }
 
        /*
index dceb5edd1e5455f4c1b101e8ad3ce4dba46ac22f..0c09d42561081ebb393398ae21a6900283d8f007 100644 (file)
@@ -523,7 +523,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 
        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE) {
-               copy_page(mem, cmem);
+               memcpy(mem, cmem, PAGE_SIZE);
        } else {
                struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
 
@@ -717,7 +717,7 @@ compress_again:
 
        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
-               copy_page(cmem, src);
+               memcpy(cmem, src, PAGE_SIZE);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
@@ -928,7 +928,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
        }
 
        index = sector >> SECTORS_PER_PAGE_SHIFT;
-       offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;
+       offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
        bv.bv_page = page;
        bv.bv_len = PAGE_SIZE;
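
Note: two independent zram fixes here. First, copy_page() demands page-aligned addresses on some architectures, while zs_map_object() may return a pointer that is not page aligned, hence the switch to memcpy() with an explicit PAGE_SIZE. Second, an operator-precedence bug in zram_rw_page(): << binds tighter than &, so the old expression masked the sector with a shifted constant instead of shifting the masked sector. With SECTORS_PER_PAGE = 8 and SECTOR_SHIFT = 9:

    /* old: sector & ((SECTORS_PER_PAGE - 1) << SECTOR_SHIFT)
     *      == sector & 0xE00                (wrong bits entirely)
     * new: (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT
     *      == (sector % 8) * 512            (byte offset in the page) */
    offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
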
index 6d9cc2d39d22306fd68f30bac6f4a60e6cfa5a87..7e4a9d1296bb7fb666f6b37ced2757a8585b7d75 100644 (file)
@@ -60,6 +60,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 #endif
 
 #ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+       return devmem_is_allowed(pfn);
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
        u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -75,6 +79,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
        return 1;
 }
 #else
+static inline int page_is_allowed(unsigned long pfn)
+{
+       return 1;
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
        return 1;
@@ -122,23 +130,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 
        while (count > 0) {
                unsigned long remaining;
+               int allowed;
 
                sz = size_inside_page(p, count);
 
-               if (!range_is_allowed(p >> PAGE_SHIFT, count))
+               allowed = page_is_allowed(p >> PAGE_SHIFT);
+               if (!allowed)
                        return -EPERM;
+               if (allowed == 2) {
+                       /* Show zeros for restricted memory. */
+                       remaining = clear_user(buf, sz);
+               } else {
+                       /*
+                        * On ia64 if a page has been mapped somewhere as
+                        * uncached, then it must also be accessed uncached
+                        * by the kernel or data corruption may occur.
+                        */
+                       ptr = xlate_dev_mem_ptr(p);
+                       if (!ptr)
+                               return -EFAULT;
 
-               /*
-                * On ia64 if a page has been mapped somewhere as uncached, then
-                * it must also be accessed uncached by the kernel or data
-                * corruption may occur.
-                */
-               ptr = xlate_dev_mem_ptr(p);
-               if (!ptr)
-                       return -EFAULT;
+                       remaining = copy_to_user(buf, ptr, sz);
+
+                       unxlate_dev_mem_ptr(p, ptr);
+               }
 
-               remaining = copy_to_user(buf, ptr, sz);
-               unxlate_dev_mem_ptr(p, ptr);
                if (remaining)
                        return -EFAULT;
 
@@ -181,30 +197,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 #endif
 
        while (count > 0) {
+               int allowed;
+
                sz = size_inside_page(p, count);
 
-               if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+               allowed = page_is_allowed(p >> PAGE_SHIFT);
+               if (!allowed)
                        return -EPERM;
 
-               /*
-                * On ia64 if a page has been mapped somewhere as uncached, then
-                * it must also be accessed uncached by the kernel or data
-                * corruption may occur.
-                */
-               ptr = xlate_dev_mem_ptr(p);
-               if (!ptr) {
-                       if (written)
-                               break;
-                       return -EFAULT;
-               }
+               /* Skip actual writing when a page is marked as restricted. */
+               if (allowed == 1) {
+                       /*
+                        * On ia64 if a page has been mapped somewhere as
+                        * uncached, then it must also be accessed uncached
+                        * by the kernel or data corruption may occur.
+                        */
+                       ptr = xlate_dev_mem_ptr(p);
+                       if (!ptr) {
+                               if (written)
+                                       break;
+                               return -EFAULT;
+                       }
 
-               copied = copy_from_user(ptr, buf, sz);
-               unxlate_dev_mem_ptr(p, ptr);
-               if (copied) {
-                       written += sz - copied;
-                       if (written)
-                               break;
-                       return -EFAULT;
+                       copied = copy_from_user(ptr, buf, sz);
+                       unxlate_dev_mem_ptr(p, ptr);
+                       if (copied) {
+                               written += sz - copied;
+                               if (written)
+                                       break;
+                               return -EFAULT;
+                       }
                }
 
                buf += sz;
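
Note: page_is_allowed() hands back the raw devmem_is_allowed() verdict, which these hunks treat as a tri-state (inferred from the read and write paths shown): 0 denies with -EPERM, 1 grants real access, and 2 grants a pretend mapping where reads see zeros and writes are silently dropped. Reader-side shape, names from the diff:

    int allowed = page_is_allowed(p >> PAGE_SHIFT);

    if (!allowed)
            return -EPERM;                           /* 0: hard deny */
    if (allowed == 2)
            remaining = clear_user(buf, sz);         /* restricted: zeros */
    else
            remaining = copy_to_user(buf, ptr, sz);  /* 1: the real bytes */
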
index e9b7e0b3cabe60d3be3ab8a092159b137854d8ec..87fe111d0be6b03ec8157f0a688046d490d7e887 100644 (file)
@@ -2202,14 +2202,16 @@ static int virtcons_freeze(struct virtio_device *vdev)
 
        vdev->config->reset(vdev);
 
-       virtqueue_disable_cb(portdev->c_ivq);
+       if (use_multiport(portdev))
+               virtqueue_disable_cb(portdev->c_ivq);
        cancel_work_sync(&portdev->control_work);
        cancel_work_sync(&portdev->config_work);
        /*
         * Once more: if control_work_handler() was running, it would
         * enable the cb as the last step.
         */
-       virtqueue_disable_cb(portdev->c_ivq);
+       if (use_multiport(portdev))
+               virtqueue_disable_cb(portdev->c_ivq);
        remove_controlq_data(portdev);
 
        list_for_each_entry(port, &portdev->ports, list) {
index bc96d423781aa8a300725f8fbe0a052be12cd4b5..0e3f6496524d92c7c1717d8d2259684952d7acb8 100644 (file)
@@ -2398,6 +2398,20 @@ EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
  *********************************************************************/
 static enum cpuhp_state hp_online;
 
+static int cpuhp_cpufreq_online(unsigned int cpu)
+{
+       cpufreq_online(cpu);
+
+       return 0;
+}
+
+static int cpuhp_cpufreq_offline(unsigned int cpu)
+{
+       cpufreq_offline(cpu);
+
+       return 0;
+}
+
 /**
  * cpufreq_register_driver - register a CPU Frequency driver
  * @driver_data: A struct cpufreq_driver containing the values
@@ -2460,8 +2474,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
        }
 
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
-                                       cpufreq_online,
-                                       cpufreq_offline);
+                                       cpuhp_cpufreq_online,
+                                       cpuhp_cpufreq_offline);
        if (ret < 0)
                goto err_if_unreg;
        hp_online = ret;
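
Note: the wrappers exist to swallow the cpufreq return value. A non-zero return from a CPU hotplug callback makes the hotplug core treat the bring-up as failed and roll the CPU back, so propagating a cpufreq setup error would keep an otherwise healthy CPU offline; the wrappers always report success and leave error handling inside cpufreq_online()/cpufreq_offline():

    /* Always tell the hotplug state machine "ok"; cpufreq failures
     * degrade frequency scaling, they must not veto the CPU. */
    static int cpuhp_cpufreq_online(unsigned int cpu)
    {
            cpufreq_online(cpu);
            return 0;
    }
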
index 32100c4851dd4bd94b9b5b6814558812d8b13629..49cbdcba7883072decb2683857ebb8da856cd68c 100644 (file)
@@ -506,7 +506,7 @@ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
        ctx->dev = caam_jr_alloc();
 
        if (IS_ERR(ctx->dev)) {
-               dev_err(ctx->dev, "Job Ring Device allocation for transform failed\n");
+               pr_err("Job Ring Device allocation for transform failed\n");
                return PTR_ERR(ctx->dev);
        }
 
index fef39f9f41ee200c5ed7138edae76beaefdff3cb..5d7f73d60515effd80861603cb183595ea7fbb85 100644 (file)
@@ -281,7 +281,8 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
                        /* Try to run it through DECO0 */
                        ret = run_descriptor_deco0(ctrldev, desc, &status);
 
-                       if (ret || status) {
+                       if (ret ||
+                           (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
                                dev_err(ctrldev,
                                        "Failed to deinstantiate RNG4 SH%d\n",
                                        sh_idx);
@@ -301,15 +302,13 @@ static int caam_remove(struct platform_device *pdev)
        struct device *ctrldev;
        struct caam_drv_private *ctrlpriv;
        struct caam_ctrl __iomem *ctrl;
-       int ring;
 
        ctrldev = &pdev->dev;
        ctrlpriv = dev_get_drvdata(ctrldev);
        ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
 
-       /* Remove platform devices for JobRs */
-       for (ring = 0; ring < ctrlpriv->total_jobrs; ring++)
-               of_device_unregister(ctrlpriv->jrpdev[ring]);
+       /* Remove platform devices under the crypto node */
+       of_platform_depopulate(ctrldev);
 
        /* De-initialize RNG state handles initialized by this driver. */
        if (ctrlpriv->rng4_sh_init)
@@ -418,10 +417,21 @@ DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n");
 DEFINE_SIMPLE_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n");
 #endif
 
+static const struct of_device_id caam_match[] = {
+       {
+               .compatible = "fsl,sec-v4.0",
+       },
+       {
+               .compatible = "fsl,sec4.0",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, caam_match);
+
 /* Probe routine for CAAM top (controller) level */
 static int caam_probe(struct platform_device *pdev)
 {
-       int ret, ring, ridx, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
+       int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
        u64 caam_id;
        struct device *dev;
        struct device_node *nprop, *np;
@@ -597,47 +607,24 @@ static int caam_probe(struct platform_device *pdev)
                goto iounmap_ctrl;
        }
 
-       /*
-        * Detect and enable JobRs
-        * First, find out how many ring spec'ed, allocate references
-        * for all, then go probe each one.
-        */
-       rspec = 0;
-       for_each_available_child_of_node(nprop, np)
-               if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
-                   of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
-                       rspec++;
-
-       ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
-                                       sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
-       if (ctrlpriv->jrpdev == NULL) {
-               ret = -ENOMEM;
+       ret = of_platform_populate(nprop, caam_match, NULL, dev);
+       if (ret) {
+               dev_err(dev, "JR platform devices creation error\n");
                goto iounmap_ctrl;
        }
 
        ring = 0;
-       ridx = 0;
-       ctrlpriv->total_jobrs = 0;
        for_each_available_child_of_node(nprop, np)
                if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
                    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
-                       ctrlpriv->jrpdev[ring] =
-                               of_platform_device_create(np, NULL, dev);
-                       if (!ctrlpriv->jrpdev[ring]) {
-                               pr_warn("JR physical index %d: Platform device creation error\n",
-                                       ridx);
-                               ridx++;
-                               continue;
-                       }
                        ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
                                             ((__force uint8_t *)ctrl +
-                                            (ridx + JR_BLOCK_NUMBER) *
+                                            (ring + JR_BLOCK_NUMBER) *
                                              BLOCK_OFFSET
                                             );
                        ctrlpriv->total_jobrs++;
                        ring++;
-                       ridx++;
-       }
+               }
 
        /* Check to see if QI present. If so, enable */
        ctrlpriv->qi_present =
@@ -847,17 +834,6 @@ disable_caam_ipg:
        return ret;
 }
 
-static struct of_device_id caam_match[] = {
-       {
-               .compatible = "fsl,sec-v4.0",
-       },
-       {
-               .compatible = "fsl,sec4.0",
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, caam_match);
-
 static struct platform_driver caam_driver = {
        .driver = {
                .name = "caam",
index e2bcacc1a921675cf30f70a40816e1306a8c3ef9..dbed8baeebe5d4765b741f562b99df206bc4b47b 100644 (file)
@@ -66,7 +66,6 @@ struct caam_drv_private_jr {
 struct caam_drv_private {
 
        struct device *dev;
-       struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
        struct platform_device *pdev;
 
        /* Physical-presence section */
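
Note: with of_platform_populate()/of_platform_depopulate() managing the job-ring children, the hand-allocated jrpdev array (previously created one node at a time and unregistered one by one in caam_remove()) has no user left and is dropped from caam_drv_private. The pairing, as a generic probe/remove sketch (the my_* names are hypothetical):

    static int my_probe(struct platform_device *pdev)
    {
            /* creates a platform device for every matching child node */
            return of_platform_populate(pdev->dev.of_node, my_match_table,
                                        NULL, &pdev->dev);
    }

    static int my_remove(struct platform_device *pdev)
    {
            /* unregisters everything of_platform_populate() created */
            of_platform_depopulate(&pdev->dev);
            return 0;
    }
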
index 3e2ab3b14eea205f19e8b436291e5117cec9567d..9e95bf94eb13ff2e830905694e0ce4c045fc76b1 100644 (file)
@@ -2,6 +2,7 @@ menuconfig DEV_DAX
        tristate "DAX: direct access to differentiated memory"
        default m if NVDIMM_DAX
        depends on TRANSPARENT_HUGEPAGE
+       select SRCU
        help
          Support raw access to differentiated (persistence, bandwidth,
          latency...) memory via an mmap(2) capable character
index 80c6db279ae10cb8558b2e90a91a4c4dafa917e0..806f180c80d816b313319f960479a7ad2848c672 100644 (file)
@@ -25,6 +25,7 @@
 #include "dax.h"
 
 static dev_t dax_devt;
+DEFINE_STATIC_SRCU(dax_srcu);
 static struct class *dax_class;
 static DEFINE_IDA(dax_minor_ida);
 static int nr_dax = CONFIG_NR_DEV_DAX;
@@ -60,7 +61,7 @@ struct dax_region {
  * @region - parent region
  * @dev - device backing the character device
  * @cdev - core chardev data
- * @alive - !alive + rcu grace period == no new mappings can be established
+ * @alive - !alive + srcu grace period == no new mappings can be established
  * @id - child id in the region
  * @num_resources - number of physical address extents in this device
  * @res - array of physical address ranges
@@ -569,7 +570,7 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 static int dax_dev_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
 {
-       int rc;
+       int rc, id;
        struct file *filp = vmf->vma->vm_file;
        struct dax_dev *dax_dev = filp->private_data;
 
@@ -578,7 +579,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
                        ? "write" : "read",
                        vmf->vma->vm_start, vmf->vma->vm_end);
 
-       rcu_read_lock();
+       id = srcu_read_lock(&dax_srcu);
        switch (pe_size) {
        case PE_SIZE_PTE:
                rc = __dax_dev_pte_fault(dax_dev, vmf);
@@ -592,7 +593,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
        default:
                return VM_FAULT_FALLBACK;
        }
-       rcu_read_unlock();
+       srcu_read_unlock(&dax_srcu, id);
 
        return rc;
 }
@@ -713,11 +714,11 @@ static void unregister_dax_dev(void *dev)
         * Note, rcu is not protecting the liveness of dax_dev, rcu is
         * ensuring that any fault handlers that might have seen
         * dax_dev->alive == true, have completed.  Any fault handlers
-        * that start after synchronize_rcu() has started will abort
+        * that start after synchronize_srcu() has started will abort
         * upon seeing dax_dev->alive == false.
         */
        dax_dev->alive = false;
-       synchronize_rcu();
+       synchronize_srcu(&dax_srcu);
        unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
        cdev_del(cdev);
        device_unregister(dev);
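
Note: the switch from RCU to SRCU is about sleeping: the dax fault path may sleep (allocating page tables, for instance), which is illegal inside rcu_read_lock(), while SRCU read sections may sleep and synchronize_srcu() still guarantees that every fault handler which could have seen alive == true has finished. Reader/updater pairing, names from the diff:

    /* Reader (fault path): allowed to sleep under SRCU. */
    id = srcu_read_lock(&dax_srcu);
    rc = __dax_dev_pte_fault(dax_dev, vmf);   /* one of the cases above */
    srcu_read_unlock(&dax_srcu, id);

    /* Updater (teardown): waits for all such readers. */
    dax_dev->alive = false;
    synchronize_srcu(&dax_srcu);
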
index 932742e4cf23147e304d6df2a1c8f37c8f89d296..24c461dea7afb146a509e097b581aa2fdaede132 100644 (file)
@@ -149,7 +149,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
                status = __gop_query32(sys_table_arg, gop32, &info, &size,
                                       &current_fb_base);
-               if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+               if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+                   info->pixel_format != PIXEL_BLT_ONLY) {
                        /*
                         * Systems that use the UEFI Console Splitter may
                         * provide multiple GOP devices, not all of which are
@@ -266,7 +267,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
                status = __gop_query64(sys_table_arg, gop64, &info, &size,
                                       &current_fb_base);
-               if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+               if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+                   info->pixel_format != PIXEL_BLT_ONLY) {
                        /*
                         * Systems that use the UEFI Console Splitter may
                         * provide multiple GOP devices, not all of which are
index 9b37a3692b3feed18578c39f4acc22ba03182e8c..2bd683e2be022dd2c9a2b5c6cd7285d4c68e69eb 100644 (file)
@@ -266,6 +266,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
                goto fail_free_event;
        }
 
+       if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
+               enable_irq_wake(irq);
+
        list_add_tail(&event->node, &acpi_gpio->events);
        return AE_OK;
 
@@ -339,6 +342,9 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
        list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
                struct gpio_desc *desc;
 
+               if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
+                       disable_irq_wake(event->irq);
+
                free_irq(event->irq, event);
                desc = event->desc;
                if (WARN_ON(IS_ERR(desc)))
@@ -571,8 +577,10 @@ struct gpio_desc *acpi_find_gpio(struct device *dev,
                }
 
                desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
-               if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER))
+               if (!IS_ERR(desc))
                        break;
+               if (PTR_ERR(desc) == -EPROBE_DEFER)
+                       return ERR_CAST(desc);
        }
 
        /* Then from plain _CRS GPIOs */
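
Note: two separate gpiolib-acpi changes here. The wake hunks arm the IRQ as a wakeup source when the ACPI GpioInt resource is declared wake-capable, and disarm it on teardown. The acpi_find_gpio() hunk stops lumping -EPROBE_DEFER in with success: the old condition broke out of the loop either way, letting the _CRS fallback below clobber the deferral; now a deferral is returned to the caller at once so the probe is retried when the GPIO provider shows up:

    desc = acpi_get_gpiod_by_index(adev, propname, idx, &info);
    if (!IS_ERR(desc))
            break;                      /* found one: use it */
    if (PTR_ERR(desc) == -EPROBE_DEFER)
            return ERR_CAST(desc);      /* provider not ready: defer, don't fall back */
    /* other errors: keep scanning, then try plain _CRS GPIOs */
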
index da48819ff2e6550c0a7d6206569d85e8a880c0c5..b78d9239e48fb0fc3b02129fe97795a6b94bed70 100644 (file)
@@ -1317,7 +1317,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
        if (!fence) {
                event_free(gpu, event);
                ret = -ENOMEM;
-               goto out_pm_put;
+               goto out_unlock;
        }
 
        gpu->event[event].fence = fence;
@@ -1357,6 +1357,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
        hangcheck_timer_reset(gpu);
        ret = 0;
 
+out_unlock:
        mutex_unlock(&gpu->lock);
 
 out_pm_put:
index b7d7721e72faddc2a2d4fc76d69d795b8053cfad..40af17ec6312533d4080cc1581faa4683a0405b9 100644 (file)
@@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 {
        int ret;
 
-       if (vgpu->failsafe)
-               return 0;
-
        if (WARN_ON(bytes > 4))
                return -EINVAL;
 
index f1f426a97aa9d43826010d7be90f6bffdbe59426..d186c157f65fefe3c64b45b86f0ffa6ab824df2f 100644 (file)
@@ -775,7 +775,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
                        _EL_OFFSET_STATUS_PTR);
 
        ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
-       ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+       ctx_status_ptr.read_ptr = 0;
+       ctx_status_ptr.write_ptr = 0x7;
        vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
index 933a7c211a1c29ab77357119e37b0de2bb3dd521..dce8d15f706f58b4cf1019ddc1d5b609c903980c 100644 (file)
@@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
        struct gvt_firmware_header *h;
        void *firmware;
        void *p;
-       unsigned long size;
+       unsigned long size, crc32_start;
        int i;
        int ret;
 
-       size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+       size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
        firmware = vzalloc(size);
        if (!firmware)
                return -ENOMEM;
@@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 
        memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
+       crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+       h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
+
        firmware_attr.size = size;
        firmware_attr.private = firmware;
 
@@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 
        firmware->mmio = mem;
 
-       sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+       sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
                 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
                 pdev->revision);
 
index 3b9d59e457ba7dbf2a1baffff7cd4f4c9aa75f3f..ef3baa0c4754566319a4706d50a77d2e1c6e2255 100644 (file)
@@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
        .vgpu_create = intel_gvt_create_vgpu,
        .vgpu_destroy = intel_gvt_destroy_vgpu,
        .vgpu_reset = intel_gvt_reset_vgpu,
+       .vgpu_activate = intel_gvt_activate_vgpu,
+       .vgpu_deactivate = intel_gvt_deactivate_vgpu,
 };
 
 /**
index 6dfc48b63b718b4c4e6f5c62794db5ce279b18a4..becae2fa3b29d9956cf69469968c6a1c7c74b770 100644 (file)
@@ -382,7 +382,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                                 unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
-
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
 
 /* validating GM functions */
 #define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -449,6 +450,8 @@ struct intel_gvt_ops {
                                struct intel_vgpu_type *);
        void (*vgpu_destroy)(struct intel_vgpu *);
        void (*vgpu_reset)(struct intel_vgpu *);
+       void (*vgpu_activate)(struct intel_vgpu *);
+       void (*vgpu_deactivate)(struct intel_vgpu *);
 };
 
 
index d641214578a7dc6631e866bbc91c2d38f3e95a76..e466259034e24b2c62b82265978298c3500b79c5 100644 (file)
@@ -544,6 +544,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
        if (ret)
                goto undo_group;
 
+       intel_gvt_ops->vgpu_activate(vgpu);
+
        atomic_set(&vgpu->vdev.released, 0);
        return ret;
 
@@ -569,6 +571,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
        if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
                return;
 
+       intel_gvt_ops->vgpu_deactivate(vgpu);
+
        ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
                                        &vgpu->vdev.iommu_notifier);
        WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
@@ -1340,13 +1344,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-       struct intel_vgpu *vgpu = info->vgpu;
-
-       if (!info) {
-               gvt_vgpu_err("kvmgt_guest_info invalid\n");
-               return false;
-       }
-
        kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
        kvm_put_kvm(info->kvm);
        kvmgt_protect_table_destroy(info);
index 41cfa5ccae84ce4020c6b2ff4a051ce8e17f6298..649ef280cc9a5bc10f4bebdc2f43c27e5249bd7f 100644 (file)
@@ -72,7 +72,7 @@ static struct {
        char *name;
 } vgpu_types[] = {
 /* Fixed vGPU type table */
-       { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
+       { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, GVT_EDID_1024_768, "8" },
        { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
        { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
        { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
@@ -179,20 +179,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
 }
 
 /**
- * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * intel_gvt_activate_vgpu - activate a virtual GPU
  * @vgpu: virtual GPU
  *
- * This function is called when user wants to destroy a virtual GPU.
+ * This function is called when the user wants to activate a virtual GPU.
  *
  */
-void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
+{
+       mutex_lock(&vgpu->gvt->lock);
+       vgpu->active = true;
+       mutex_unlock(&vgpu->gvt->lock);
+}
+
+/**
+ * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when the user wants to deactivate a virtual GPU.
+ * All virtual GPU runtime information will be destroyed.
+ *
+ */
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 {
        struct intel_gvt *gvt = vgpu->gvt;
 
        mutex_lock(&gvt->lock);
 
        vgpu->active = false;
-       idr_remove(&gvt->vgpu_idr, vgpu->id);
 
        if (atomic_read(&vgpu->running_workload_num)) {
                mutex_unlock(&gvt->lock);
@@ -201,6 +215,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        }
 
        intel_vgpu_stop_schedule(vgpu);
+
+       mutex_unlock(&gvt->lock);
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when the user wants to destroy a virtual GPU.
+ *
+ */
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+
+       mutex_lock(&gvt->lock);
+
+       WARN(vgpu->active, "vGPU is still active!\n");
+
+       idr_remove(&gvt->vgpu_idr, vgpu->id);
        intel_vgpu_clean_sched_policy(vgpu);
        intel_vgpu_clean_gvt_context(vgpu);
        intel_vgpu_clean_execlist(vgpu);
@@ -277,7 +311,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_clean_shadow_ctx;
 
-       vgpu->active = true;
        mutex_unlock(&gvt->lock);
 
        return vgpu;
index 1c75402a59c1377e7abd410f2dc58dcec0df7094..5c089b3c2a7efdb29343de77a43dfc1e1f720057 100644 (file)
@@ -1434,8 +1434,6 @@ static int i915_drm_suspend(struct drm_device *dev)
                goto out;
        }
 
-       intel_guc_suspend(dev_priv);
-
        intel_display_suspend(dev);
 
        intel_dp_mst_suspend(dev);
index 1e53c31b6826ec996b2d153e1dd32232b77dd9d7..46fcd8b7080aafca8d589ca25ef6d57a9dc27a48 100644 (file)
@@ -806,6 +806,7 @@ struct intel_csr {
        func(has_resource_streamer); \
        func(has_runtime_pm); \
        func(has_snoop); \
+       func(unfenced_needs_alignment); \
        func(cursor_needs_physical); \
        func(hws_needs_physical); \
        func(overlay_needs_physical); \
index 67b1fc5a03313b80bc9459543b8dec3743ea953b..fe531f90406241bfa2cfc89e6155f1ef25802404 100644 (file)
@@ -4348,6 +4348,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
        i915_gem_context_lost(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
+       intel_guc_suspend(dev_priv);
+
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        cancel_delayed_work_sync(&dev_priv->gt.retire_work);
 
index 30e0675fd7dab7949d3cb3cb498be852d7448ac9..15a15d00a6bfa07cbe93ac2669cefaee3cb5ed2c 100644 (file)
@@ -888,6 +888,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
        struct list_head ordered_vmas;
        struct list_head pinned_vmas;
        bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
+       bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
        int retry;
 
        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -908,7 +909,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
                if (!has_fenced_gpu_access)
                        entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
                need_fence =
-                       entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+                       (entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
+                        needs_unfenced_map) &&
                        i915_gem_object_is_tiled(obj);
                need_mappable = need_fence || need_reloc_mappable(vma);
 
index 2801a4d5632491787009c4ae62130dc732215136..96e45a4d54410b085191e09ab33c6f4ceca0100a 100644 (file)
@@ -2704,7 +2704,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
        if (unlikely(ggtt->do_idle_maps)) {
-               if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
+               if (i915_gem_wait_for_idle(dev_priv, 0)) {
                        DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
index e7c3c0318ff60f2bf60b3c5afce405d11ce54a5c..da70bfe97ec5843adbdac276e5c86fa608266087 100644 (file)
@@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
 {
+       /* The timeline struct (as part of the ppgtt underneath a context)
+        * may be freed when the request is no longer in use by the GPU.
+        * We could extend the life of a context to beyond that of all
+        * fences, possibly keeping the hw resource around indefinitely,
+        * or we just give them a false name. Since
+        * dma_fence_ops.get_timeline_name is a debug feature, the occasional
+        * lie seems justifiable.
+        */
+       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+               return "signaled";
+
        return to_request(fence)->timeline->common->name;
 }
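
The new comment explains why a signaled fence reports a canned timeline name: once the request completes, the timeline it names may already be freed. A toy restatement of that guard, with made-up types:

#include <stdbool.h>
#include <stdio.h>

struct fence {
        bool signaled;
        const char *timeline_name;      /* may dangle once the request completes */
};

static const char *fence_timeline_name(const struct fence *f)
{
        /* after signaling, the timeline may be gone: return a stable
         * placeholder instead of chasing a possibly-dangling pointer
         */
        if (f->signaled)
                return "signaled";
        return f->timeline_name;
}

int main(void)
{
        struct fence f = { false, "render" };

        puts(fence_timeline_name(&f));  /* render */
        f.signaled = true;
        puts(fence_timeline_name(&f));  /* signaled */
        return 0;
}
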
 
index d5d2b4c6ed382d687719a088d943580ccacbba15..70b3832a79dd40d066a6f2deb34cbc6d436559e1 100644 (file)
@@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
        BUG();
 }
 
+static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
+{
+       if (!unlock)
+               return;
+
+       mutex_unlock(&dev->struct_mutex);
+
+       /* expedite the RCU grace period to free some request slabs */
+       synchronize_rcu_expedited();
+}
+
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
@@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                intel_runtime_pm_put(dev_priv);
 
        i915_gem_retire_requests(dev_priv);
-       if (unlock)
-               mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       /* expedite the RCU grace period to free some request slabs */
-       synchronize_rcu_expedited();
+       i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
 
        return count;
 }
@@ -293,8 +301,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
                        count += obj->base.size >> PAGE_SHIFT;
        }
 
-       if (unlock)
-               mutex_unlock(&dev->struct_mutex);
+       i915_gem_shrinker_unlock(dev, unlock);
 
        return count;
 }
@@ -321,8 +328,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
                                         sc->nr_to_scan - freed,
                                         I915_SHRINK_BOUND |
                                         I915_SHRINK_UNBOUND);
-       if (unlock)
-               mutex_unlock(&dev->struct_mutex);
+
+       i915_gem_shrinker_unlock(dev, unlock);
 
        return freed;
 }
@@ -364,8 +371,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
                                         struct shrinker_lock_uninterruptible *slu)
 {
        dev_priv->mm.interruptible = slu->was_interruptible;
-       if (slu->unlock)
-               mutex_unlock(&dev_priv->drm.struct_mutex);
+       i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
 }
 
 static int
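
All four unlock sites now funnel through i915_gem_shrinker_unlock(), so the expedited RCU grace period cannot be skipped on any path that took the lock. A small sketch of the consolidation, using a pthread mutex as a stand-in for struct_mutex (build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* One exit helper instead of several open-coded copies: callers cannot
 * forget the grace-period step that must follow the unlock.
 */
static void shrinker_unlock(bool unlock)
{
        if (!unlock)
                return;
        pthread_mutex_unlock(&big_lock);
        /* kernel: synchronize_rcu_expedited() to free request slabs */
        puts("grace period expedited");
}

int main(void)
{
        bool unlock = pthread_mutex_trylock(&big_lock) == 0;

        /* ... scan and free objects ... */
        shrinker_unlock(unlock);
        return 0;
}
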
index ecb487b5356fe68696b19d3054dc61339a61f406..9bbbd4e83e3c5d99cb4cdc9d4422f586159826bf 100644 (file)
@@ -60,6 +60,7 @@
        .has_overlay = 1, .overlay_needs_physical = 1, \
        .has_gmch_display = 1, \
        .hws_needs_physical = 1, \
+       .unfenced_needs_alignment = 1, \
        .ring_mask = RENDER_RING, \
        GEN_DEFAULT_PIPEOFFSETS, \
        CURSOR_OFFSETS
@@ -101,6 +102,7 @@ static const struct intel_device_info intel_i915g_info = {
        .platform = INTEL_I915G, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .hws_needs_physical = 1,
+       .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i915gm_info = {
@@ -112,6 +114,7 @@ static const struct intel_device_info intel_i915gm_info = {
        .supports_tv = 1,
        .has_fbc = 1,
        .hws_needs_physical = 1,
+       .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i945g_info = {
@@ -120,6 +123,7 @@ static const struct intel_device_info intel_i945g_info = {
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .hws_needs_physical = 1,
+       .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_i945gm_info = {
@@ -130,6 +134,7 @@ static const struct intel_device_info intel_i945gm_info = {
        .supports_tv = 1,
        .has_fbc = 1,
        .hws_needs_physical = 1,
+       .unfenced_needs_alignment = 1,
 };
 
 static const struct intel_device_info intel_g33_info = {
index a1b7eec58be2742e6d94e5566b09fbb61e54df9d..70964ca9251e04939225c87a773e9a6bf760b1ac 100644 (file)
@@ -1705,7 +1705,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
         */
        if (WARN_ON(stream->sample_flags != props->sample_flags)) {
                ret = -ENODEV;
-               goto err_alloc;
+               goto err_flags;
        }
 
        list_add(&stream->link, &dev_priv->perf.streams);
@@ -1728,6 +1728,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
 
 err_open:
        list_del(&stream->link);
+err_flags:
        if (stream->ops->destroy)
                stream->ops->destroy(stream);
 err_alloc:
@@ -1793,6 +1794,11 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
                if (ret)
                        return ret;
 
+               if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
+                       DRM_DEBUG("Unknown i915 perf property ID\n");
+                       return -EINVAL;
+               }
+
                switch ((enum drm_i915_perf_property_id)id) {
                case DRM_I915_PERF_PROP_CTX_HANDLE:
                        props->single_context = 1;
@@ -1862,9 +1868,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
                        props->oa_periodic = true;
                        props->oa_period_exponent = value;
                        break;
-               default:
+               case DRM_I915_PERF_PROP_MAX:
                        MISSING_CASE(id);
-                       DRM_DEBUG("Unknown i915 perf property ID\n");
                        return -EINVAL;
                }
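
The reworked validation rejects out-of-range property IDs up front and drops the default: case, so the compiler can flag any new enum value that gains no case. A compact illustration with a hypothetical enum:

#include <stdio.h>

enum perf_prop {
        PERF_PROP_CTX = 1,      /* 0 is reserved/invalid */
        PERF_PROP_PERIOD,
        PERF_PROP_MAX           /* sentinel, never a valid input */
};

static int read_prop(unsigned int id)
{
        /* range-check before the switch so unknown IDs are rejected... */
        if (id == 0 || id >= PERF_PROP_MAX)
                return -1;

        /* ...and omit "default:" so -Wswitch warns when a new enum
         * value is added without a matching case
         */
        switch ((enum perf_prop)id) {
        case PERF_PROP_CTX:
                return 10;
        case PERF_PROP_PERIOD:
                return 20;
        case PERF_PROP_MAX:     /* unreachable after the range check */
                return -1;
        }
        return -1;
}

int main(void)
{
        printf("%d %d %d\n", read_prop(0), read_prop(1), read_prop(99));
        return 0;
}
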
 
index 471af3b480adc38a3a48c27d1999805836cf5630..47517a02f0a439125b3b3a769e6848a4c4928ca2 100644 (file)
@@ -670,15 +670,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
 static struct intel_engine_cs *
 pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 {
-       struct intel_engine_cs *engine;
+       struct intel_engine_cs *engine =
+               container_of(pt, struct drm_i915_gem_request, priotree)->engine;
+
+       GEM_BUG_ON(!locked);
 
-       engine = container_of(pt,
-                             struct drm_i915_gem_request,
-                             priotree)->engine;
        if (engine != locked) {
-               if (locked)
-                       spin_unlock_irq(&locked->timeline->lock);
-               spin_lock_irq(&engine->timeline->lock);
+               spin_unlock(&locked->timeline->lock);
+               spin_lock(&engine->timeline->lock);
        }
 
        return engine;
@@ -686,7 +685,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 
 static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 {
-       struct intel_engine_cs *engine = NULL;
+       struct intel_engine_cs *engine;
        struct i915_dependency *dep, *p;
        struct i915_dependency stack;
        LIST_HEAD(dfs);
@@ -720,26 +719,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
        list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
                struct i915_priotree *pt = dep->signaler;
 
-               list_for_each_entry(p, &pt->signalers_list, signal_link)
+               /* Within an engine there can be no cycle, but we may
+                * refer to the same dependency chain multiple times
+                * (redundant dependencies are not eliminated), and we
+                * may refer to chains on other engines.
+                */
+               list_for_each_entry(p, &pt->signalers_list, signal_link) {
+                       GEM_BUG_ON(p->signaler->priority < pt->priority);
                        if (prio > READ_ONCE(p->signaler->priority))
                                list_move_tail(&p->dfs_link, &dfs);
+               }
 
                list_safe_reset_next(dep, p, dfs_link);
-               if (!RB_EMPTY_NODE(&pt->node))
-                       continue;
-
-               engine = pt_lock_engine(pt, engine);
-
-               /* If it is not already in the rbtree, we can update the
-                * priority inplace and skip over it (and its dependencies)
-                * if it is referenced *again* as we descend the dfs.
-                */
-               if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
-                       pt->priority = prio;
-                       list_del_init(&dep->dfs_link);
-               }
        }
 
+       engine = request->engine;
+       spin_lock_irq(&engine->timeline->lock);
+
        /* Fifo and depth-first replacement ensure our deps execute before us */
        list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
                struct i915_priotree *pt = dep->signaler;
@@ -751,16 +747,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
                if (prio <= pt->priority)
                        continue;
 
-               GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
-
                pt->priority = prio;
-               rb_erase(&pt->node, &engine->execlist_queue);
-               if (insert_request(pt, &engine->execlist_queue))
-                       engine->execlist_first = &pt->node;
+               if (!RB_EMPTY_NODE(&pt->node)) {
+                       rb_erase(&pt->node, &engine->execlist_queue);
+                       if (insert_request(pt, &engine->execlist_queue))
+                               engine->execlist_first = &pt->node;
+               }
        }
 
-       if (engine)
-               spin_unlock_irq(&engine->timeline->lock);
+       spin_unlock_irq(&engine->timeline->lock);
 
        /* XXX Do we need to preempt to make room for us and our deps? */
 }
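
The scheduling rework gathers the whole dependency list first and only then, under a single engine lock, applies the priority bumps; the rule for each node is "raise signalers to at least our priority, stop once a chain is already urgent enough". A drastically simplified single-chain sketch of that rule (the driver walks a DAG and juggles per-engine locks):

#include <stdio.h>

struct node {
        int prio;
        struct node *signaler;  /* single dependency; the driver has many */
};

static void bump_priority(struct node *n, int prio)
{
        for (; n; n = n->signaler) {
                if (prio <= n->prio)
                        break;  /* chain is already urgent enough */
                n->prio = prio; /* raise so our deps run before us */
        }
}

int main(void)
{
        struct node a = { 0, NULL }, b = { 1, &a }, c = { 2, &b };

        bump_priority(&c, 5);
        printf("a=%d b=%d c=%d\n", a.prio, b.prio, c.prio);    /* 5 5 5 */
        return 0;
}
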
@@ -1440,7 +1435,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
        GEM_BUG_ON(request->ctx != port[0].request->ctx);
 
        /* Reset WaIdleLiteRestore:bdw,skl as well */
-       request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
+       request->tail =
+               intel_ring_wrap(request->ring,
+                               request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
 }
 
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
index 13dccb18cd43ed85aee58a6a15618c515094a83b..8cb2078c5bfc4abc7aeaa7fe51974266edcc8016 100644 (file)
@@ -521,11 +521,17 @@ static inline void intel_ring_advance(struct intel_ring *ring)
         */
 }
 
+static inline u32
+intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+{
+       return pos & (ring->size - 1);
+}
+
 static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
 {
        /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
        u32 offset = addr - ring->vaddr;
-       return offset & (ring->size - 1);
+       return intel_ring_wrap(ring, offset);
 }
 
 int __intel_ring_space(int head, int tail, int size);
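
intel_ring_wrap() masks a position into a power-of-two sized ring, which is what lets reset_common_ring() above safely rewind the tail past zero. A standalone sketch of the mask trick:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* wrap a byte offset into a ring; size must be a power of two
 * for the mask to be equivalent to a modulo
 */
static uint32_t ring_wrap(uint32_t size, uint32_t pos)
{
        assert((size & (size - 1)) == 0);
        return pos & (size - 1);
}

int main(void)
{
        uint32_t size = 4096;

        printf("%u\n", ring_wrap(size, 4095));          /* 4095 */
        printf("%u\n", ring_wrap(size, 4096));          /* 0: wrapped */
        /* a tail computed as "wa_tail - N" can wrap below zero;
         * masking brings it back into [0, size)
         */
        printf("%u\n", ring_wrap(size, (uint32_t)-8));  /* 4088 */
        return 0;
}
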
index 4414cf73735d26ccb655756327af8226f539f4f5..36602ac7e24835fb9350b3040037524f4c95b7d1 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -534,7 +534,7 @@ static void a5xx_destroy(struct msm_gpu *gpu)
        }
 
        if (a5xx_gpu->gpmu_bo) {
-               if (a5xx_gpu->gpmu_bo)
+               if (a5xx_gpu->gpmu_iova)
                        msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
                drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
        }
@@ -860,7 +860,9 @@ static const struct adreno_gpu_funcs funcs = {
                .idle = a5xx_idle,
                .irq = a5xx_irq,
                .destroy = a5xx_destroy,
+#ifdef CONFIG_DEBUG_FS
                .show = a5xx_show,
+#endif
        },
        .get_timestamp = a5xx_get_timestamp,
 };
index c9bd1e6225f4f96e3b8a79a101508f244de17baf..5ae65426b4e5593c5f88b19627c10b403995ed73 100644 (file)
@@ -418,18 +418,27 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
        return 0;
 }
 
-void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
 {
-       if (gpu->memptrs_bo) {
-               if (gpu->memptrs)
-                       msm_gem_put_vaddr(gpu->memptrs_bo);
+       struct msm_gpu *gpu = &adreno_gpu->base;
+
+       if (adreno_gpu->memptrs_bo) {
+               if (adreno_gpu->memptrs)
+                       msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
+
+               if (adreno_gpu->memptrs_iova)
+                       msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
+
+               drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
+       }
+       release_firmware(adreno_gpu->pm4);
+       release_firmware(adreno_gpu->pfp);
 
-               if (gpu->memptrs_iova)
-                       msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+       msm_gpu_cleanup(gpu);
 
-               drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+       if (gpu->aspace) {
+               gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
+                       iommu_ports, ARRAY_SIZE(iommu_ports));
+               msm_gem_address_space_destroy(gpu->aspace);
        }
-       release_firmware(gpu->pm4);
-       release_firmware(gpu->pfp);
-       msm_gpu_cleanup(&gpu->base);
 }
index 921270ea6059debadab1a785ed19ca79b064811c..a879ffa534b4d845007d3f974f9ce9c7df69dff0 100644 (file)
@@ -171,7 +171,7 @@ dsi_mgr_phy_enable(int id,
                        }
                }
        } else {
-               msm_dsi_host_reset_phy(mdsi->host);
+               msm_dsi_host_reset_phy(msm_dsi->host);
                ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
                if (ret)
                        return ret;
index a54d3bb5baad9c01047a582ad3cad98a9efb7f66..8177e8511afd8c6827b1a731688e159da652a8d2 100644 (file)
 #include <linux/hdmi.h>
 #include "hdmi.h"
 
-
-/* Supported HDMI Audio channels */
-#define MSM_HDMI_AUDIO_CHANNEL_2               0
-#define MSM_HDMI_AUDIO_CHANNEL_4               1
-#define MSM_HDMI_AUDIO_CHANNEL_6               2
-#define MSM_HDMI_AUDIO_CHANNEL_8               3
-
 /* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
 static int nchannels[] = { 2, 4, 6, 8 };
 
index 611da7a660c9426ed6c165ae90c8257f7a777360..238901987e00b0c1bc74979647a6cfadcfee084a 100644 (file)
@@ -18,7 +18,8 @@
 #ifndef __MDP5_PIPE_H__
 #define __MDP5_PIPE_H__
 
-#define SSPP_MAX       (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+/* TODO: Add SSPP_MAX in mdp5.xml.h */
+#define SSPP_MAX       (SSPP_CURSOR1 + 1)
 
 /* represents a hw pipe, which is dynamically assigned to a plane */
 struct mdp5_hw_pipe {
index 59811f29607de60f2a22d53f9ec969e62bb39d98..68e509b3b9e4d08730e3901f46a397519c33e77c 100644 (file)
@@ -812,6 +812,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 
        size = PAGE_ALIGN(size);
 
+       /* Disallow zero-sized objects as they make the underlying
+        * infrastructure grumpy
+        */
+       if (size == 0)
+               return ERR_PTR(-EINVAL);
+
        ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
        if (ret)
                goto fail;
index 99e05aacbee181f4341625af8abbc91ad284f381..af5b6ba4095b06f6a24069f62e178618b547209e 100644 (file)
@@ -706,9 +706,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
                msm_ringbuffer_destroy(gpu->rb);
        }
 
-       if (gpu->aspace)
-               msm_gem_address_space_destroy(gpu->aspace);
-
        if (gpu->fctx)
                msm_fence_context_free(gpu->fctx);
 }
index 0b4440ffbeae21a3d33e67ed1c727e00cf3884b3..a9182d5e60117321a1a354b9b97288d11cc13cf4 100644 (file)
@@ -995,7 +995,6 @@ nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
 {
        struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
        __drm_atomic_helper_plane_destroy_state(&asyw->state);
-       dma_fence_put(asyw->state.fence);
        kfree(asyw);
 }
 
@@ -1007,7 +1006,6 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
        if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
                return NULL;
        __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
-       asyw->state.fence = NULL;
        asyw->interval = 1;
        asyw->sema = armw->sema;
        asyw->ntfy = armw->ntfy;
@@ -2036,6 +2034,7 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
        u32 vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
        u32 hfrontp =  mode->hsync_start - mode->hdisplay;
        u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+       u32 blankus;
        struct nv50_head_mode *m = &asyh->mode;
 
        m->h.active = mode->htotal;
@@ -2049,9 +2048,10 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
        m->v.blanks = m->v.active - vfrontp - 1;
 
        /*XXX: Safe underestimate, even "0" works */
-       m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
-       m->v.blankus *= 1000;
-       m->v.blankus /= mode->clock;
+       blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
+       blankus *= 1000;
+       blankus /= mode->clock;
+       m->v.blankus = blankus;
 
        if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
                m->v.blank2e =  m->v.active + m->v.synce + vbackp;
index 273562dd6bbdb1a138c2e73417fa0e071716749b..3b86a73995672220b5e71b0f39898129782abf28 100644 (file)
@@ -714,7 +714,7 @@ nv4a_chipset = {
        .i2c = nv04_i2c_new,
        .imem = nv40_instmem_new,
        .mc = nv44_mc_new,
-       .mmu = nv44_mmu_new,
+       .mmu = nv04_mmu_new,
        .pci = nv40_pci_new,
        .therm = nv40_therm_new,
        .timer = nv41_timer_new,
@@ -2271,6 +2271,35 @@ nv136_chipset = {
        .fifo = gp100_fifo_new,
 };
 
+static const struct nvkm_device_chip
+nv137_chipset = {
+       .name = "GP107",
+       .bar = gf100_bar_new,
+       .bios = nvkm_bios_new,
+       .bus = gf100_bus_new,
+       .devinit = gm200_devinit_new,
+       .fb = gp102_fb_new,
+       .fuse = gm107_fuse_new,
+       .gpio = gk104_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .ltc = gp100_ltc_new,
+       .mc = gp100_mc_new,
+       .mmu = gf100_mmu_new,
+       .pci = gp100_pci_new,
+       .pmu = gp102_pmu_new,
+       .timer = gk20a_timer_new,
+       .top = gk104_top_new,
+       .ce[0] = gp102_ce_new,
+       .ce[1] = gp102_ce_new,
+       .ce[2] = gp102_ce_new,
+       .ce[3] = gp102_ce_new,
+       .disp = gp102_disp_new,
+       .dma = gf119_dma_new,
+       .fifo = gp100_fifo_new,
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
                       struct nvkm_notify *notify)
@@ -2708,6 +2737,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x132: device->chip = &nv132_chipset; break;
                case 0x134: device->chip = &nv134_chipset; break;
                case 0x136: device->chip = &nv136_chipset; break;
+               case 0x137: device->chip = &nv137_chipset; break;
                default:
                        nvdev_error(device, "unknown chipset (%08x)\n", boot0);
                        goto done;
index 003ac915eaadad44c5b0ad2c1bbab9d18377596e..8a8895246d26a23be65eb6fb29cbb001f71f728b 100644 (file)
@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
                }
 
                if (type == 0x00000010) {
-                       if (!nv31_mpeg_mthd(mpeg, mthd, data))
+                       if (nv31_mpeg_mthd(mpeg, mthd, data))
                                show &= ~0x01000000;
                }
        }
index e536f37e24b0c75fbf7882eccae37c5b1d0aeae8..c3cf02ed468ea1ccf6212a5e9c42dbee00fa612e 100644 (file)
@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
                }
 
                if (type == 0x00000010) {
-                       if (!nv44_mpeg_mthd(subdev->device, mthd, data))
+                       if (nv44_mpeg_mthd(subdev->device, mthd, data))
                                show &= ~0x01000000;
                }
        }
index fdb451e3ec01184a4642e6facd6ddf8e5f0cad47..26a7ad0f478978205be87f9409f309021b610884 100644 (file)
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
        if (unlikely(ret != 0))
                goto out_err0;
 
-       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
        if (unlikely(ret != 0))
                goto out_err1;
 
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
 
 int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
-                      enum ttm_ref_type ref_type, bool *existed)
+                      enum ttm_ref_type ref_type, bool *existed,
+                      bool require_existed)
 {
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
                }
 
                rcu_read_unlock();
+               if (require_existed)
+                       return -EPERM;
+
                ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                                           false, false);
                if (unlikely(ret != 0))
@@ -449,10 +453,10 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
                ttm_ref_object_release(&ref->kref);
        }
 
+       spin_unlock(&tfile->lock);
        for (i = 0; i < TTM_REF_NUM; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);
 
-       spin_unlock(&tfile->lock);
        ttm_object_file_unref(&tfile);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
@@ -529,9 +533,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
 
        *p_tdev = NULL;
 
-       spin_lock(&tdev->object_lock);
        drm_ht_remove(&tdev->object_hash);
-       spin_unlock(&tdev->object_lock);
 
        kfree(tdev);
 }
@@ -635,7 +637,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
        prime = (struct ttm_prime_object *) dma_buf->priv;
        base = &prime->base;
        *handle = base->hash.key;
-       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
 
        dma_buf_put(dma_buf);
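
The extra require_existed argument lets ttm_ref_object_add() refuse to create a reference that does not already exist, failing with -EPERM instead of minting new access. A toy model of that opt-in policy:

#include <stdbool.h>
#include <stdio.h>

static int ref_add(bool *have_ref, bool require_existed)
{
        if (*have_ref)
                return 0;       /* existing reference: always fine */
        if (require_existed)
                return -1;      /* -EPERM: refuse to create one */
        *have_ref = true;       /* trusted path may create it */
        return 0;
}

int main(void)
{
        bool ref = false;

        printf("%d\n", ref_add(&ref, true));    /* -1: refused */
        printf("%d\n", ref_add(&ref, false));   /* 0: created */
        printf("%d\n", ref_add(&ref, true));    /* 0: already exists */
        return 0;
}
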
 
index 917dcb978c2ccc921c1dfcf90329973ff3044b59..0c87b1ac6b68f0d41cfd01851a14b9a092455f4f 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/fb.h>
 #include <linux/prefetch.h>
+#include <asm/unaligned.h>
 
 #include <drm/drmP.h>
 #include "udl_drv.h"
@@ -163,7 +164,7 @@ static void udl_compress_hline16(
                        const u8 *const start = pixel;
                        const uint16_t repeating_pixel_val16 = pixel_val16;
 
-                       *(uint16_t *)cmd = cpu_to_be16(pixel_val16);
+                       put_unaligned_be16(pixel_val16, cmd);
 
                        cmd += 2;
                        pixel += bpp;
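
put_unaligned_be16() replaces a cast-and-store that faults or silently misbehaves on strict-alignment machines. The byte-wise equivalent, as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* store a 16-bit value big-endian one byte at a time: safe at any
 * alignment, unlike casting the destination to uint16_t *
 */
static void put_be16(uint16_t val, uint8_t *p)
{
        p[0] = val >> 8;
        p[1] = val & 0xff;
}

int main(void)
{
        uint8_t buf[3] = { 0 };

        put_be16(0x1234, buf + 1);              /* deliberately misaligned */
        printf("%02x %02x\n", buf[1], buf[2]);  /* 12 34 */
        return 0;
}
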
index 6541dd8b82dc0747433403b64795843a29e0544c..6b2708b4eafe84c832d41a10f75c2be9e65fc0b9 100644 (file)
@@ -538,7 +538,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
                     struct vmw_fence_obj **p_fence)
 {
        struct vmw_fence_obj *fence;
-       int ret;
+       int ret;
 
        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (unlikely(fence == NULL))
@@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
 }
 
 
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+       struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+       if (!base) {
+               pr_err("Invalid fence object handle 0x%08lx.\n",
+                      (unsigned long)handle);
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (base->refcount_release != vmw_user_fence_base_release) {
+               pr_err("Invalid fence object handle 0x%08lx.\n",
+                      (unsigned long)handle);
+               ttm_base_object_unref(&base);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return base;
+}
+
+
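
vmw_fence_obj_lookup() folds the lookup, the type check, and the error reporting into one helper that returns ERR_PTR-style errors. A userspace imitation of that encode-the-errno-in-the-pointer idiom (constants simplified):

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err) { return (void *)err; }
static long ptr_err(const void *p) { return (long)p; }

/* the top MAX_ERRNO values of the pointer range are never valid
 * addresses, so they can carry a negative errno instead
 */
static int is_err(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *lookup(int handle)
{
        static int obj = 42;

        if (handle != 1)
                return err_ptr(-22);    /* -EINVAL */
        return &obj;
}

int main(void)
{
        void *p = lookup(7);

        if (is_err(p))
                printf("lookup failed: %ld\n", ptr_err(p));
        return 0;
}
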
 int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
 {
@@ -726,13 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                arg->kernel_cookie = jiffies + wait_timeout;
        }
 
-       base = ttm_base_object_lookup(tfile, arg->handle);
-       if (unlikely(base == NULL)) {
-               printk(KERN_ERR "Wait invalid fence object handle "
-                      "0x%08lx.\n",
-                      (unsigned long)arg->handle);
-               return -EINVAL;
-       }
+       base = vmw_fence_obj_lookup(tfile, arg->handle);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
 
        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
 
@@ -771,13 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);
 
-       base = ttm_base_object_lookup(tfile, arg->handle);
-       if (unlikely(base == NULL)) {
-               printk(KERN_ERR "Fence signaled invalid fence object handle "
-                      "0x%08lx.\n",
-                      (unsigned long)arg->handle);
-               return -EINVAL;
-       }
+       base = vmw_fence_obj_lookup(tfile, arg->handle);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
 
        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
        fman = fman_from_fence(fence);
@@ -1024,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
                (struct drm_vmw_fence_event_arg *) data;
        struct vmw_fence_obj *fence = NULL;
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+       struct ttm_object_file *tfile = vmw_fp->tfile;
        struct drm_vmw_fence_rep __user *user_fence_rep =
                (struct drm_vmw_fence_rep __user *)(unsigned long)
                arg->fence_rep;
@@ -1037,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
         */
        if (arg->handle) {
                struct ttm_base_object *base =
-                       ttm_base_object_lookup_for_ref(dev_priv->tdev,
-                                                      arg->handle);
-
-               if (unlikely(base == NULL)) {
-                       DRM_ERROR("Fence event invalid fence object handle "
-                                 "0x%08lx.\n",
-                                 (unsigned long)arg->handle);
-                       return -EINVAL;
-               }
+                       vmw_fence_obj_lookup(tfile, arg->handle);
+
+               if (IS_ERR(base))
+                       return PTR_ERR(base);
+
                fence = &(container_of(base, struct vmw_user_fence,
                                       base)->fence);
                (void) vmw_fence_obj_reference(fence);
 
                if (user_fence_rep != NULL) {
-                       bool existed;
-
                        ret = ttm_ref_object_add(vmw_fp->tfile, base,
-                                                TTM_REF_USAGE, &existed);
+                                                TTM_REF_USAGE, NULL, false);
                        if (unlikely(ret != 0)) {
                                DRM_ERROR("Failed to reference a fence "
                                          "object.\n");
@@ -1097,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
        return 0;
 out_no_create:
        if (user_fence_rep != NULL)
-               ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
-                                         handle, TTM_REF_USAGE);
+               ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
 out_no_ref_obj:
        vmw_fence_obj_unreference(&fence);
        return ret;
index b8c6a03c8c54df15def2d359ee5253f213f1816e..5ec24fd801cd2bb1b32b66540b91edcca2ef3b6d 100644 (file)
@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                param->value = dev_priv->has_dx;
                break;
        default:
-               DRM_ERROR("Illegal vmwgfx get param request: %d\n",
-                         param->param);
                return -EINVAL;
        }
 
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
        bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
-       if (unlikely(arg->pad64 != 0)) {
+       if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
                DRM_ERROR("Illegal GET_3D_CAP argument.\n");
                return -EINVAL;
        }
index 65b3f0369636710eda49086f72e250478dfe288d..bf23153d4f55515b54c66f5a250b8f04aa7f5051 100644 (file)
@@ -589,7 +589,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
                return ret;
 
        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
-                                TTM_REF_SYNCCPU_WRITE, &existed);
+                                TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->dma.base);
 
@@ -773,7 +773,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 
        *handle = user_bo->prime.base.hash.key;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
-                                 TTM_REF_USAGE, NULL);
+                                 TTM_REF_USAGE, NULL, false);
 }
 
 /*
index b445ce9b9757861ecc1ece1071f1c8c3a02a166f..05fa092c942beedb209b25777971a0c196d19d85 100644 (file)
@@ -713,11 +713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                        128;
 
        num_sizes = 0;
-       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+       for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+               if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+                       return -EINVAL;
                num_sizes += req->mip_levels[i];
+       }
 
-       if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
-           DRM_VMW_MAX_MIP_LEVELS)
+       if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+           num_sizes == 0)
                return -EINVAL;
 
        size = vmw_user_surface_size + 128 +
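
Checking each mip_levels[i] against DRM_VMW_MAX_MIP_LEVELS before summing bounds num_sizes and closes the integer-overflow hole, while rejecting zero closes the degenerate case. The same shape as a standalone sketch (face/level limits invented):

#include <limits.h>
#include <stdio.h>

#define FACES 6
#define MAX_LEVELS 16

/* bounding each element before summing caps the total at
 * FACES * MAX_LEVELS, so the sum cannot overflow or smuggle a
 * huge allocation size past the final check
 */
static int total_mip_levels(const unsigned int levels[FACES],
                            unsigned int *total)
{
        unsigned int i, sum = 0;

        for (i = 0; i < FACES; i++) {
                if (levels[i] > MAX_LEVELS)
                        return -1;
                sum += levels[i];
        }
        if (sum == 0 || sum > FACES * MAX_LEVELS)
                return -1;
        *total = sum;
        return 0;
}

int main(void)
{
        unsigned int bad[FACES] = { UINT_MAX, 1, 1, 1, 1, 1 };
        unsigned int total;

        printf("%d\n", total_mip_levels(bad, &total));  /* -1: rejected */
        return 0;
}
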
@@ -891,17 +894,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
        uint32_t handle;
        struct ttm_base_object *base;
        int ret;
+       bool require_exist = false;
 
        if (handle_type == DRM_VMW_HANDLE_PRIME) {
                ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
                if (unlikely(ret != 0))
                        return ret;
        } else {
-               if (unlikely(drm_is_render_client(file_priv))) {
-                       DRM_ERROR("Render client refused legacy "
-                                 "surface reference.\n");
-                       return -EACCES;
-               }
+               if (unlikely(drm_is_render_client(file_priv)))
+                       require_exist = true;
+
                if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
                        DRM_ERROR("Locked master refused legacy "
                                  "surface reference.\n");
@@ -929,17 +931,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 
                /*
                 * Make sure the surface creator has the same
-                * authenticating master.
+                * authenticating master, or is already registered with us.
                 */
                if (drm_is_primary_client(file_priv) &&
-                   user_srf->master != file_priv->master) {
-                       DRM_ERROR("Trying to reference surface outside of"
-                                 " master domain.\n");
-                       ret = -EACCES;
-                       goto out_bad_resource;
-               }
+                   user_srf->master != file_priv->master)
+                       require_exist = true;
 
-               ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+               ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+                                        require_exist);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not add a reference to a surface.\n");
                        goto out_bad_resource;
index 63ec1993eaaa905af1583f7169e6be812cf0486d..d162f0dc76e3f44e2134eafa5509137ddf0cc411 100644 (file)
@@ -819,8 +819,7 @@ static int hid_scan_report(struct hid_device *hid)
                hid->group = HID_GROUP_WACOM;
                break;
        case USB_VENDOR_ID_SYNAPTICS:
-               if (hid->group == HID_GROUP_GENERIC ||
-                   hid->group == HID_GROUP_MULTITOUCH_WIN_8)
+               if (hid->group == HID_GROUP_GENERIC)
                        if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
                            && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
                                /*
@@ -2096,6 +2095,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
index 4e2648c86c8c56142cd06b6a269433f72c5c07c9..b26c030926c188aff2dd054cd2ae9e9910837887 100644 (file)
 #define USB_DEVICE_ID_UGEE_TABLET_45           0x0045
 #define USB_DEVICE_ID_YIYNOVA_TABLET           0x004d
 
+#define USB_VENDOR_ID_UGEE             0x28bd
+#define USB_DEVICE_ID_UGEE_TABLET_EX07S                0x0071
+
 #define USB_VENDOR_ID_UNITEC   0x227d
 #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709    0x0709
 #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19    0x0a19
index 1509d7287ff3e60e79c845214e47e901fae650f8..e3e6e5c893cc05e0c934c8c9f505de9fdd06e26a 100644 (file)
@@ -977,6 +977,7 @@ static int uclogic_probe(struct hid_device *hdev,
                }
                break;
        case USB_DEVICE_ID_UGTIZER_TABLET_GP0610:
+       case USB_DEVICE_ID_UGEE_TABLET_EX07S:
                /* If this is the pen interface */
                if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
                        rc = uclogic_tablet_enable(hdev);
@@ -1069,6 +1070,7 @@ static const struct hid_device_id uclogic_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, uclogic_devices);
index ca5759c0c318eb807baf14cb09a1a6b5f3c1b748..43a6cb07819363e8409ed0c28e9f91ae0d49d06d 100644 (file)
@@ -370,10 +370,12 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
                name = "accel_3d";
                channel_spec = accel_3d_channels;
                channel_size = sizeof(accel_3d_channels);
+               indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
        } else {
                name = "gravity";
                channel_spec = gravity_channels;
                channel_size = sizeof(gravity_channels);
+               indio_dev->num_channels = ARRAY_SIZE(gravity_channels);
        }
        ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage,
                                        &accel_state->common_attributes);
@@ -395,7 +397,6 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
                goto error_free_dev_mem;
        }
 
-       indio_dev->num_channels = ARRAY_SIZE(accel_3d_channels);
        indio_dev->dev.parent = &pdev->dev;
        indio_dev->info = &accel_3d_info;
        indio_dev->name = name;
index d6c372bb433b4983bca0d99b448ccd5af4e3cdab..c17596f7ed2c30786f6765c091de121870be47d1 100644 (file)
@@ -61,7 +61,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
                ret = st->core.read_ec_sensors_data(indio_dev, 1 << idx, &data);
                if (ret < 0)
                        break;
-
+               ret = IIO_VAL_INT;
                *val = data;
                break;
        case IIO_CHAN_INFO_CALIBBIAS:
@@ -76,7 +76,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
                for (i = CROS_EC_SENSOR_X; i < CROS_EC_SENSOR_MAX_AXIS; i++)
                        st->core.calib[i] =
                                st->core.resp->sensor_offset.offset[i];
-
+               ret = IIO_VAL_INT;
                *val = st->core.calib[idx];
                break;
        case IIO_CHAN_INFO_SCALE:
index 7afdac42ed42d9d87d71c5627d85d0016b652f29..01e02b9926d410c566e1b598fcc392fc2594ff01 100644 (file)
@@ -379,6 +379,8 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
 {
 
        struct hid_sensor_hub_attribute_info timestamp;
+       s32 value;
+       int ret;
 
        hid_sensor_get_reporting_interval(hsdev, usage_id, st);
 
@@ -417,6 +419,14 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
                st->sensitivity.index, st->sensitivity.report_id,
                timestamp.index, timestamp.report_id);
 
+       ret = sensor_hub_get_feature(hsdev,
+                               st->power_state.report_id,
+                               st->power_state.index, sizeof(value), &value);
+       if (ret < 0)
+               return ret;
+       if (value < 0)
+               return -EINVAL;
+
        return 0;
 }
 EXPORT_SYMBOL(hid_sensor_parse_common_attributes);
index f7fcfa886f72181e36b89b38daa327dbf8d17cd0..821919dd245bc8597ce901fffe26001382a9a71c 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/triggered_buffer.h>
 #include <linux/regmap.h>
+#include <linux/delay.h>
 #include "bmg160.h"
 
 #define BMG160_IRQ_NAME                "bmg160_event"
@@ -52,6 +53,9 @@
 #define BMG160_DEF_BW                  100
 #define BMG160_REG_PMU_BW_RES          BIT(7)
 
+#define BMG160_GYRO_REG_RESET          0x14
+#define BMG160_GYRO_RESET_VAL          0xb6
+
 #define BMG160_REG_INT_MAP_0           0x17
 #define BMG160_INT_MAP_0_BIT_ANY       BIT(1)
 
@@ -236,6 +240,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
        int ret;
        unsigned int val;
 
+       /*
+        * Reset chip to get it in a known good state. A delay of 30ms after
+        * reset is required according to the datasheet.
+        */
+       regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
+                    BMG160_GYRO_RESET_VAL);
+       usleep_range(30000, 30700);
+
        ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
        if (ret < 0) {
                dev_err(dev, "Error reading reg_chip_id\n");
index d18ded45bedd98893dde42f182924b671b490e64..3ff91e02fee346a244cfbce3c2143ce4c3fd2bf0 100644 (file)
@@ -610,10 +610,9 @@ static ssize_t __iio_format_value(char *buf, size_t len, unsigned int type,
                tmp0 = (int)div_s64_rem(tmp, 1000000000, &tmp1);
                return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
        case IIO_VAL_FRACTIONAL_LOG2:
-               tmp = (s64)vals[0] * 1000000000LL >> vals[1];
-               tmp1 = do_div(tmp, 1000000000LL);
-               tmp0 = tmp;
-               return snprintf(buf, len, "%d.%09u", tmp0, tmp1);
+               tmp = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
+               tmp0 = (int)div_s64_rem(tmp, 1000000000LL, &tmp1);
+               return snprintf(buf, len, "%d.%09u", tmp0, abs(tmp1));
        case IIO_VAL_INT_MULTIPLE:
        {
                int i;
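
The IIO_VAL_FRACTIONAL_LOG2 fix swaps unsigned do_div() arithmetic for a signed split so negative readings format correctly. A standalone sketch of the signed quotient/remainder formatting (plain division stands in for shift_right()/div_s64_rem()):

#include <stdio.h>
#include <stdlib.h>

/* format val / 2^shift with 9 fractional digits; splitting with
 * signed division and printing the remainder's absolute value keeps
 * negative results such as -1.5 intact, which unsigned do_div-style
 * arithmetic would mangle
 */
static void format_frac_log2(long long val, unsigned int shift)
{
        long long scaled = val * 1000000000LL / (1LL << shift);
        long long whole = scaled / 1000000000LL;
        long long frac = scaled % 1000000000LL;

        printf("%lld.%09lld\n", whole, llabs(frac));
}

int main(void)
{
        format_frac_log2(3, 1);         /* 1.500000000 */
        format_frac_log2(-3, 1);        /* -1.500000000 */
        return 0;
}
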
index 5f2680855552875e1c9569d9bf6c429466d00fde..fd0edca0e656001b6b4487dc754695da4a81297f 100644 (file)
@@ -457,6 +457,7 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
                        .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
                },
                .multi_read_bit = true,
+               .bootime = 2,
        },
 };
 
index 91cbe86b25c8ec2d693bea5452c04d0ffa73ba0b..fcbed35e95a824979bb48fba9f05614591e97491 100644 (file)
@@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
                rx_wr->sg_list = &rx_desc->rx_sg;
                rx_wr->num_sge = 1;
                rx_wr->next = rx_wr + 1;
+               rx_desc->in_use = false;
        }
        rx_wr--;
        rx_wr->next = NULL; /* mark end of work requests list */
@@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
        struct ib_recv_wr *rx_wr_failed, rx_wr;
        int ret;
 
+       if (!rx_desc->in_use) {
+               /*
+                * if the descriptor is not in-use we already reposted it
+                * for recv, so just silently return
+                */
+               return 0;
+       }
+
+       rx_desc->in_use = false;
        rx_wr.wr_cqe = &rx_desc->rx_cqe;
        rx_wr.sg_list = &rx_desc->rx_sg;
        rx_wr.num_sge = 1;
@@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
                return;
        }
 
+       rx_desc->in_use = true;
+
        ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
                        ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 
@@ -1659,10 +1671,23 @@ isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
        ret = isert_check_pi_status(cmd, isert_cmd->rw.sig->sig_mr);
        isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
 
-       if (ret)
-               transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0);
-       else
-               isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+       if (ret) {
+               /*
+                * transport_generic_request_failure() expects two extra
+                * references to handle queue-full, so re-add one here as
+                * target-core will have already dropped one after the
+                * first isert_put_datain() callback.
+                */
+               kref_get(&cmd->cmd_kref);
+               transport_generic_request_failure(cmd, cmd->pi_err);
+       } else {
+               /*
+                * XXX: isert_put_response() failure is not retried.
+                */
+               ret = isert_put_response(isert_conn->conn, isert_cmd->iscsi_cmd);
+               if (ret)
+                       pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
+       }
 }
 
 static void
@@ -1699,13 +1724,15 @@ isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
        cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
        spin_unlock_bh(&cmd->istate_lock);
 
-       if (ret) {
-               target_put_sess_cmd(se_cmd);
-               transport_send_check_condition_and_sense(se_cmd,
-                                                        se_cmd->pi_err, 0);
-       } else {
+       /*
+        * transport_generic_request_failure() will drop the extra
+        * se_cmd->cmd_kref reference after T10-PI error, and handle
+        * any non-zero ->queue_status() callback error retries.
+        */
+       if (ret)
+               transport_generic_request_failure(se_cmd, se_cmd->pi_err);
+       else
                target_execute_cmd(se_cmd);
-       }
 }
 
 static void
@@ -2171,26 +2198,28 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
                chain_wr = &isert_cmd->tx_desc.send_wr;
        }
 
-       isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
-       isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n", isert_cmd);
-       return 1;
+       rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
+       isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
+                 isert_cmd, rc);
+       return rc;
 }
 
 static int
 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
 {
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+       int ret;
 
        isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
                 isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
 
        isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
-       isert_rdma_rw_ctx_post(isert_cmd, conn->context,
-                       &isert_cmd->tx_desc.tx_cqe, NULL);
+       ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
+                                    &isert_cmd->tx_desc.tx_cqe, NULL);
 
-       isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
-                isert_cmd);
-       return 0;
+       isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
+                isert_cmd, ret);
+       return ret;
 }
 
 static int
index c02ada57d7f5c4fa5b765a6401ac8e7bbd6096bb..87d994de8c910d998c12a4205c0e66eb1220b12d 100644 (file)
@@ -60,7 +60,7 @@
 
 #define ISER_RX_PAD_SIZE       (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
                (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
-                sizeof(struct ib_cqe)))
+                sizeof(struct ib_cqe) + sizeof(bool)))
 
 #define ISCSI_ISER_SG_TABLESIZE                256
 
@@ -85,6 +85,7 @@ struct iser_rx_desc {
        u64             dma_addr;
        struct ib_sge   rx_sg;
        struct ib_cqe   rx_cqe;
+       bool            in_use;
        char            pad[ISER_RX_PAD_SIZE];
 } __packed;
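
Adding in_use to iser_rx_desc only works because ISER_RX_PAD_SIZE subtracts sizeof(bool) as well, keeping the descriptor size, and therefore the receive-ring layout, unchanged. A sketch of pinning a struct size with a computed pad (sizes invented):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_SIZE 64  /* hypothetical fixed descriptor size */

/* every member added to the descriptor must also be subtracted from
 * the pad, or the overall size silently changes
 */
struct rx_desc {
        uint64_t dma_addr;
        bool in_use;
        char pad[TARGET_SIZE - (sizeof(uint64_t) + sizeof(bool))];
};

int main(void)
{
        static_assert(sizeof(struct rx_desc) == TARGET_SIZE,
                      "descriptor size drifted");
        printf("sizeof = %zu\n", sizeof(struct rx_desc));
        return 0;
}
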
 
index 155fcb3b6230a01da6d1de3ab772e508b56fef78..153b1ee13e03eccd764d4a92a85bd579c0f89946 100644 (file)
@@ -202,6 +202,7 @@ static const struct xpad_device {
        { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
        { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
        { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+       { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
        { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
        { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
        { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
@@ -326,6 +327,7 @@ static struct usb_device_id xpad_table[] = {
        XPAD_XBOX360_VENDOR(0x1430),            /* RedOctane X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x146b),            /* BigBen Interactive Controllers */
        XPAD_XBOX360_VENDOR(0x1532),            /* Razer Sabertooth */
+       XPAD_XBOXONE_VENDOR(0x1532),            /* Razer Wildcat */
        XPAD_XBOX360_VENDOR(0x15e4),            /* Numark X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x162e),            /* Joytech X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x1689),            /* Razer Onza */
index 15af9a9753e582b797a58de3c9713226800999f8..2d203b422129e53554d5be023595c6946877037c 100644 (file)
@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
                return -ENOMEM;
        }
 
+       raw_spin_lock_init(&cd->rlock);
+
        cd->gpc_base = of_iomap(node, 0);
        if (!cd->gpc_base) {
                pr_err("fsl-gpcv2: unable to map gpc registers\n");
index 1dfd1085a04f87a016a2405e68200f9c09a2263e..9ca691d6c13b4d31cdb5b29221214f377a7d490a 100644 (file)
@@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
                                                     sizeof(avmb1_carddef))))
                                return -EFAULT;
                        cdef.cardtype = AVM_CARDTYPE_B1;
+                       cdef.cardnr = 0;
                } else {
                        if ((retval = copy_from_user(&cdef, data,
                                                     sizeof(avmb1_extcarddef))))
index e4c2c1a1e9933282fa2b971999270199c8d9c224..6735c8d6a44551a6dd9cc25fa9b505316bfde060 100644 (file)
@@ -932,7 +932,7 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
        *result = true;
 
        r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
-                                  from_cblock(begin), &cmd->dirty_cursor);
+                                  from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
        if (r) {
                DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
                return r;
@@ -959,14 +959,16 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
                        return 0;
                }
 
+               begin = to_cblock(from_cblock(begin) + 1);
+               if (begin == end)
+                       break;
+
                r = dm_bitset_cursor_next(&cmd->dirty_cursor);
                if (r) {
                        DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
                        dm_bitset_cursor_end(&cmd->dirty_cursor);
                        return r;
                }
-
-               begin = to_cblock(from_cblock(begin) + 1);
        }
 
        dm_bitset_cursor_end(&cmd->dirty_cursor);
index f8564d63982f43f9e3fb453fc9adedf3f14be51a..1e217ba84d090d1c51453c419c4cba62ba82892b 100644 (file)
@@ -3726,7 +3726,7 @@ static int raid_preresume(struct dm_target *ti)
                return r;
 
        /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
-       if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) &&
+       if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
            mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
                r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
                                  to_bytes(rs->requested_bitmap_chunk_sectors), 0);
index 28955b94d2b26f47d7c54217d84c2a8a11af692a..0b081d170087debcbd6067051999cda91f979889 100644 (file)
@@ -755,6 +755,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                /* Undo dm_start_request() before requeuing */
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
+               blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
                return BLK_MQ_RQ_QUEUE_BUSY;
        }
 
index 0f0eb8a3d922a212583ba491f3899a69a2b91bd6..78f36012eacacb3ca16888b880c0416f8234a14d 100644 (file)
@@ -146,8 +146,6 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
                block = fec_buffer_rs_block(v, fio, n, i);
                res = fec_decode_rs8(v, fio, block, &par[offset], neras);
                if (res < 0) {
-                       dm_bufio_release(buf);
-
                        r = res;
                        goto error;
                }
@@ -172,6 +170,8 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
 done:
        r = corrected;
 error:
+       dm_bufio_release(buf);
+
        if (r < 0 && neras)
                DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
                            v->data_dev->name, (unsigned long long)rsb, r);
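
Moving dm_bufio_release() to the shared exit path means the early decode failure and the normal completion both release the buffer exactly once. The goto-cleanup shape, in miniature:

#include <stdio.h>
#include <stdlib.h>

static int decode_bufs(int fail)
{
        char *buf = malloc(64);
        int r = 0;

        if (!buf)
                return -12;     /* -ENOMEM: nothing to release yet */

        if (fail) {
                r = -74;        /* -EBADMSG */
                goto error;     /* no per-branch release... */
        }
        /* ... successful decode ... */
error:
        free(buf);              /* ...the shared exit releases exactly once */
        return r;
}

int main(void)
{
        printf("%d %d\n", decode_bufs(0), decode_bufs(1));
        return 0;
}
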
@@ -269,7 +269,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
                                          &is_zero) == 0) {
                        /* skip known zero blocks entirely */
                        if (is_zero)
-                               continue;
+                               goto done;
 
                        /*
                         * skip if we have already found the theoretical
@@ -439,6 +439,13 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
        if (!verity_fec_is_enabled(v))
                return -EOPNOTSUPP;
 
+       if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
+               DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
+               return -EIO;
+       }
+
+       fio->level++;
+
        if (type == DM_VERITY_BLOCK_TYPE_METADATA)
                block += v->data_blocks;
 
@@ -470,7 +477,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
        if (r < 0) {
                r = fec_decode_rsb(v, io, fio, rsb, offset, true);
                if (r < 0)
-                       return r;
+                       goto done;
        }
 
        if (dest)
@@ -480,6 +487,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
                r = verity_for_bv_block(v, io, iter, fec_bv_copy);
        }
 
+done:
+       fio->level--;
        return r;
 }
 
@@ -520,6 +529,7 @@ void verity_fec_init_io(struct dm_verity_io *io)
        memset(fio->bufs, 0, sizeof(fio->bufs));
        fio->nbufs = 0;
        fio->output = NULL;
+       fio->level = 0;
 }
 
 /*
index 7fa0298b995e9e3f1ac25c2356c758fad8b65d34..bb31ce87a933b80d11140f31607f804ff81d209a 100644 (file)
@@ -27,6 +27,9 @@
 #define DM_VERITY_FEC_BUF_MAX \
        (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
 
+/* maximum recursion level for verity_fec_decode */
+#define DM_VERITY_FEC_MAX_RECURSION    4
+
 #define DM_VERITY_OPT_FEC_DEV          "use_fec_from_device"
 #define DM_VERITY_OPT_FEC_BLOCKS       "fec_blocks"
 #define DM_VERITY_OPT_FEC_START                "fec_start"
@@ -58,6 +61,7 @@ struct dm_verity_fec_io {
        unsigned nbufs;         /* number of buffers allocated */
        u8 *output;             /* buffer for corrected output */
        size_t output_pos;
+       unsigned level;         /* recursion level */
 };
 
 #ifdef CONFIG_DM_VERITY_FEC
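
DM_VERITY_FEC_MAX_RECURSION plus the per-io level counter bound the recursion that can occur when correcting a block requires first correcting the blocks it depends on. A minimal sketch of such a depth guard:

#include <stdio.h>

#define MAX_RECURSION 4

struct io { unsigned int level; };

/* a per-io depth counter turns potentially unbounded recursion
 * (and a stack overflow) into a clean error
 */
static int decode(struct io *io, int depth_wanted)
{
        int r = 0;

        if (io->level >= MAX_RECURSION)
                return -5;      /* -EIO */
        io->level++;

        if (depth_wanted > 0)
                r = decode(io, depth_wanted - 1);

        io->level--;
        return r;
}

int main(void)
{
        struct io io = { 0 };

        printf("%d\n", decode(&io, 2));         /* 0 */
        printf("%d\n", decode(&io, 10));        /* -5 */
        return 0;
}
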
index 138f5ae75c0bc6fcf912d30975197caedb0e0b3e..4d1fe8d9504234f436a0b9dd739b41089f563225 100644 (file)
@@ -557,7 +557,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
        int work_done = 0;
 
        u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
-       u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
+       u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
        u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
 
        /* Handle bus state changes */
index caed4e6960f8c77fdca254f5ed4e7ccbe69792fc..11662f479e760ba77f613c90bfc8026b005da3ea 100644 (file)
@@ -826,8 +826,7 @@ static int rcar_can_probe(struct platform_device *pdev)
 
        devm_can_led_init(ndev);
 
-       dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
-                priv->regs, ndev->irq);
+       dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq);
 
        return 0;
 fail_candev:
index d05fbfdce5e52e640042f36ba3eb831a3a42ce8a..5d6c40d86775dd0189de49173210f4d38ee6934c 100644 (file)
@@ -100,11 +100,6 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
                goto err_exit;
        ndev->mtu = new_mtu;
 
-       if (netif_running(ndev)) {
-               aq_ndev_close(ndev);
-               aq_ndev_open(ndev);
-       }
-
 err_exit:
        return err;
 }
index ee78444bfb8851214709920795e26d658c4ca9b5..cdb02991f249c6354b7095d9d777316617c2be42 100644 (file)
@@ -487,6 +487,9 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
                dx_buff->mss = skb_shinfo(skb)->gso_size;
                dx_buff->is_txc = 1U;
 
+               dx_buff->is_ipv6 =
+                       (ip_hdr(skb)->version == 6) ? 1U : 0U;
+
                dx = aq_ring_next_dx(ring, dx);
                dx_buff = &ring->buff_ring[dx];
                ++ret;
@@ -510,10 +513,22 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
                        1U : 0U;
-               dx_buff->is_tcp_cso =
-                       (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U;
-               dx_buff->is_udp_cso =
-                       (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U;
+
+               if (ip_hdr(skb)->version == 4) {
+                       dx_buff->is_tcp_cso =
+                               (ip_hdr(skb)->protocol == IPPROTO_TCP) ?
+                                       1U : 0U;
+                       dx_buff->is_udp_cso =
+                               (ip_hdr(skb)->protocol == IPPROTO_UDP) ?
+                                       1U : 0U;
+               } else if (ip_hdr(skb)->version == 6) {
+                       dx_buff->is_tcp_cso =
+                               (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
+                                       1U : 0U;
+                       dx_buff->is_udp_cso =
+                               (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
+                                       1U : 0U;
+               }
        }
 
        for (; nr_frags--; ++frag_count) {
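
The aq_nic hunk selects the checksum-offload flags from the correct header field for each IP version: the IPv4 protocol byte or the IPv6 next-header byte, which share the TCP/UDP values. A small self-contained sketch of that dispatch (the tx_flags struct and raw header offsets are assumptions for illustration, and extension headers are not handled, mirroring the patch's direct nexthdr test):

#include <stdint.h>
#include <stdio.h>

#define PROTO_TCP 6			/* IPPROTO_TCP == NEXTHDR_TCP == 6 */
#define PROTO_UDP 17			/* IPPROTO_UDP == NEXTHDR_UDP == 17 */

struct tx_flags {			/* hypothetical, not Aquantia's struct */
	unsigned is_ipv6:1, is_tcp_cso:1, is_udp_cso:1;
};

/* l3 points at the IP header; the version is its top nibble */
static void set_cso_flags(const uint8_t *l3, struct tx_flags *f)
{
	unsigned version = l3[0] >> 4;
	/* protocol: byte 9 of an IPv4 header; next-header: byte 6 of IPv6 */
	uint8_t proto = (version == 4) ? l3[9] : l3[6];

	f->is_ipv6 = (version == 6);
	f->is_tcp_cso = (proto == PROTO_TCP);
	f->is_udp_cso = (proto == PROTO_UDP);
}

int main(void)
{
	uint8_t v6[40] = { 0x60 };	/* version 6 in the top nibble */
	struct tx_flags f = { 0 };

	v6[6] = PROTO_TCP;		/* next-header: TCP */
	set_cso_flags(v6, &f);
	printf("ipv6=%u tcp=%u udp=%u\n", f.is_ipv6, f.is_tcp_cso, f.is_udp_cso);
	return 0;
}
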
index 0358e6072d45ab94181409de0dd17e80106fbf2d..3a8a4aa13687ff42510e7a260b1de6715a55d8d5 100644 (file)
@@ -101,6 +101,7 @@ int aq_ring_init(struct aq_ring_s *self)
        self->hw_head = 0;
        self->sw_head = 0;
        self->sw_tail = 0;
+       spin_lock_init(&self->header.lock);
        return 0;
 }
 
index 2572546450685d25c8ca4beb71150c356e7c82f6..eecd6d1c4d731a4e648e6811a5615498ee3a8965 100644 (file)
@@ -58,7 +58,8 @@ struct __packed aq_ring_buff_s {
                        u8 len_l2;
                        u8 len_l3;
                        u8 len_l4;
-                       u8 rsvd2;
+                       u8 is_ipv6:1;
+                       u8 rsvd2:7;
                        u32 len_pkt;
                };
        };
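
The descriptor change above carves a one-bit is_ipv6 flag out of a previously reserved byte; because the 1-bit and 7-bit fields share a single u8, the __packed layout is unchanged. A sketch that checks this invariant, with hypothetical struct names:

#include <stdint.h>

struct desc_old {
	uint8_t len_l4;
	uint8_t rsvd2;			/* whole byte reserved */
	uint32_t len_pkt;
} __attribute__((packed));

struct desc_new {
	uint8_t len_l4;
	uint8_t is_ipv6:1;		/* flag carved out of rsvd2 */
	uint8_t rsvd2:7;		/* remaining reserved bits */
	uint32_t len_pkt;
} __attribute__((packed));

/* the split must not move anything: both layouts are 6 bytes */
_Static_assert(sizeof(struct desc_old) == sizeof(struct desc_new),
	       "bitfield split changed the descriptor layout");

int main(void) { return 0; }
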
index a2b746a2dd50b8825250f5ab1de8a01b5afb32c9..4ee15ff06a448b72dbd6763427d9d02dfadad268 100644 (file)
@@ -433,6 +433,9 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
                                    buff->len_l3 +
                                    buff->len_l2);
                        is_gso = true;
+
+                       if (buff->is_ipv6)
+                               txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
                } else {
                        buff_pa_len = buff->len;
 
@@ -458,6 +461,7 @@ static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
                        if (unlikely(buff->is_eop)) {
                                txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
+                               is_gso = false;
                        }
                }
 
index cab2931dab9ac354821e4a30bb8517ccfe0041eb..42150708191dbf67d91b33218a2a275e9b5fd45d 100644 (file)
@@ -471,6 +471,9 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
                                    buff->len_l3 +
                                    buff->len_l2);
                        is_gso = true;
+
+                       if (buff->is_ipv6)
+                               txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPV6;
                } else {
                        buff_pa_len = buff->len;
 
@@ -496,6 +499,7 @@ static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
                        if (unlikely(buff->is_eop)) {
                                txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
                                txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
+                               is_gso = false;
                        }
                }
 
index 0a23034bbe3ff8d392483e7a5a201caf30c76526..352beff796ae5b090d8e3fa831078cc7182d3a2c 100644 (file)
@@ -2277,7 +2277,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
                                 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
                                 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
 
-#define HW_INTERRUT_ASSERT_SET_0 \
+#define HW_INTERRUPT_ASSERT_SET_0 \
                                (AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
                                 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
                                 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
@@ -2290,7 +2290,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
                                 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
                                 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
                                 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
-#define HW_INTERRUT_ASSERT_SET_1 \
+#define HW_INTERRUPT_ASSERT_SET_1 \
                                (AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
                                 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
                                 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
@@ -2318,7 +2318,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
                                 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
                                 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
                                 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
-#define HW_INTERRUT_ASSERT_SET_2 \
+#define HW_INTERRUPT_ASSERT_SET_2 \
                                (AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
                                 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
                                 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
index ac76fc251d268e4ec5f9ca2904345557fb2cd55a..a851f95c307a3331a889972bc3c60273219f8a7b 100644 (file)
@@ -4166,14 +4166,14 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
                bnx2x_release_phy_lock(bp);
        }
 
-       if (attn & HW_INTERRUT_ASSERT_SET_0) {
+       if (attn & HW_INTERRUPT_ASSERT_SET_0) {
 
                val = REG_RD(bp, reg_offset);
-               val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
+               val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
                REG_WR(bp, reg_offset, val);
 
                BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
-                         (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
+                         (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
                bnx2x_panic();
        }
 }
@@ -4191,7 +4191,7 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
                        BNX2X_ERR("FATAL error from DORQ\n");
        }
 
-       if (attn & HW_INTERRUT_ASSERT_SET_1) {
+       if (attn & HW_INTERRUPT_ASSERT_SET_1) {
 
                int port = BP_PORT(bp);
                int reg_offset;
@@ -4200,11 +4200,11 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
                                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
 
                val = REG_RD(bp, reg_offset);
-               val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
+               val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
                REG_WR(bp, reg_offset, val);
 
                BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
-                         (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
+                         (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
                bnx2x_panic();
        }
 }
@@ -4235,7 +4235,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
                }
        }
 
-       if (attn & HW_INTERRUT_ASSERT_SET_2) {
+       if (attn & HW_INTERRUPT_ASSERT_SET_2) {
 
                int port = BP_PORT(bp);
                int reg_offset;
@@ -4244,11 +4244,11 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
                                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
 
                val = REG_RD(bp, reg_offset);
-               val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
+               val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
                REG_WR(bp, reg_offset, val);
 
                BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
-                         (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
+                         (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
                bnx2x_panic();
        }
 }
index 32de4589d16a2cde27f2e5674b234e2b7185f00d..1f1e54ba0ecb31ffd053161e458b4d6817cb510b 100644 (file)
@@ -1983,20 +1983,25 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 
                for (j = 0; j < max_idx; j++) {
                        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
+                       dma_addr_t mapping = rx_buf->mapping;
                        void *data = rx_buf->data;
 
                        if (!data)
                                continue;
 
-                       dma_unmap_single(&pdev->dev, rx_buf->mapping,
-                                        bp->rx_buf_use_size, bp->rx_dir);
-
                        rx_buf->data = NULL;
 
-                       if (BNXT_RX_PAGE_MODE(bp))
+                       if (BNXT_RX_PAGE_MODE(bp)) {
+                               mapping -= bp->rx_dma_offset;
+                               dma_unmap_page(&pdev->dev, mapping,
+                                              PAGE_SIZE, bp->rx_dir);
                                __free_page(data);
-                       else
+                       } else {
+                               dma_unmap_single(&pdev->dev, mapping,
+                                                bp->rx_buf_use_size,
+                                                bp->rx_dir);
                                kfree(data);
+                       }
                }
 
                for (j = 0; j < max_agg_idx; j++) {
@@ -2455,6 +2460,18 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
        return 0;
 }
 
+static void bnxt_init_cp_rings(struct bnxt *bp)
+{
+       int i;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
+               struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+
+               ring->fw_ring_id = INVALID_HW_RING_ID;
+       }
+}
+
 static int bnxt_init_rx_rings(struct bnxt *bp)
 {
        int i, rc = 0;
@@ -4732,7 +4749,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
                rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
                if (rc) {
                        netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
-                                  rc, i);
+                                  i, rc);
                        return rc;
                }
        }
@@ -5006,6 +5023,7 @@ static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
 
 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
 {
+       bnxt_init_cp_rings(bp);
        bnxt_init_rx_rings(bp);
        bnxt_init_tx_rings(bp);
        bnxt_init_ring_grps(bp, irq_re_init);
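
The bnxt teardown fix above matches the unmap call to how the buffer was mapped: in page mode the stored DMA address includes rx_dma_offset, which must be rewound before dma_unmap_page(), while kmalloc'd buffers keep dma_unmap_single() plus kfree(). Roughly, as a hypothetical helper (not bnxt's API):

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* hypothetical helper: undo the mapping for one rx buffer */
static void rx_buf_unmap(struct device *dev, bool page_mode,
			 dma_addr_t mapping, unsigned int dma_offset,
			 void *data, size_t buf_len,
			 enum dma_data_direction dir)
{
	if (page_mode) {
		/* the stored address points past the headroom offset */
		dma_unmap_page(dev, mapping - dma_offset, PAGE_SIZE, dir);
		__free_page(data);
	} else {
		dma_unmap_single(dev, mapping, buf_len, dir);
		kfree(data);
	}
}
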
index 9e59663a6eadb012de6f4a4474484800401fce3b..0f6811860ad51de9b871e806f3f254a1abfcf2eb 100644 (file)
@@ -1930,13 +1930,13 @@ static void
 bfa_ioc_send_enable(struct bfa_ioc *ioc)
 {
        struct bfi_ioc_ctrl_req enable_req;
-       struct timeval tv;
 
        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
                    bfa_ioc_portid(ioc));
        enable_req.clscode = htons(ioc->clscode);
-       do_gettimeofday(&tv);
-       enable_req.tv_sec = ntohl(tv.tv_sec);
+       enable_req.rsvd = htons(0);
+       /* overflow in 2106 */
+       enable_req.tv_sec = ntohl(ktime_get_real_seconds());
        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
 }
 
@@ -1947,6 +1947,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc)
 
        bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
                    bfa_ioc_portid(ioc));
+       disable_req.clscode = htons(ioc->clscode);
+       disable_req.rsvd = htons(0);
+       /* overflow in 2106 */
+       disable_req.tv_sec = ntohl(ktime_get_real_seconds());
        bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
 }
 
index 30e855004c57592f9ab6c0cea2eb73f63b59b7ca..02dd5246dfae9a99b20f2bb4b2b13185d3239a3c 100644 (file)
@@ -4939,8 +4939,9 @@ static int
 __be_cmd_set_logical_link_config(struct be_adapter *adapter,
                                 int link_state, int version, u8 domain)
 {
-       struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_ll_link *req;
+       struct be_mcc_wrb *wrb;
+       u32 link_config = 0;
        int status;
 
        mutex_lock(&adapter->mcc_lock);
@@ -4962,10 +4963,12 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
 
        if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
            link_state == IFLA_VF_LINK_STATE_AUTO)
-               req->link_config |= PLINK_ENABLE;
+               link_config |= PLINK_ENABLE;
 
        if (link_state == IFLA_VF_LINK_STATE_AUTO)
-               req->link_config |= PLINK_TRACK;
+               link_config |= PLINK_TRACK;
+
+       req->link_config = cpu_to_le32(link_config);
 
        status = be_mcc_notify_wait(adapter);
 err:
index 992ebe973d25bfbccff7b5c42dc1801ea41fc9ea..f819843e2bae73564e090b8fd9c7a8dfcec1fa12 100644 (file)
@@ -189,11 +189,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 
        nps_enet_tx_handler(ndev);
        work_done = nps_enet_rx_handler(ndev);
-       if (work_done < budget) {
+       if ((work_done < budget) && napi_complete_done(napi, work_done)) {
                u32 buf_int_enable_value = 0;
 
-               napi_complete_done(napi, work_done);
-
                /* set tx_done and rx_rdy bits */
                buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
                buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
index 928b0df2b8e033e2b784759e32a0218e0b7e16f2..ade6b3e4ed1326a42aa39d52b2465f1b271c02f8 100644 (file)
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
+#include <linux/of.h>
 #include <linux/phy.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <net/ip.h>
 #include <net/ncsi.h>
 
index 3239d27143b935dc0056490b32f700093163c74a..bdd8cdd732fb588930f2cc085b7a0fddd9f1a263 100644 (file)
@@ -82,9 +82,12 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
        else
                *link_status = 0;
 
-       ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt);
-       if (!ret)
-               *link_status = *link_status && sfp_prsnt;
+       if (mac_cb->media_type == HNAE_MEDIA_TYPE_FIBER) {
+               ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb,
+                                                              &sfp_prsnt);
+               if (!ret)
+                       *link_status = *link_status && sfp_prsnt;
+       }
 
        mac_cb->link = *link_status;
 }
@@ -855,7 +858,7 @@ static int  hns_mac_get_info(struct hns_mac_cb *mac_cb)
                of_node_put(np);
 
                np = of_parse_phandle(to_of_node(mac_cb->fw_port),
-                                       "serdes-syscon", 0);
+                                     "serdes-syscon", 0);
                syscon = syscon_node_to_regmap(np);
                of_node_put(np);
                if (IS_ERR_OR_NULL(syscon)) {
index 90dbda7926144a41120d18c28a2c7d033f245f8c..403ea9db6dbd15a6384ed845480fe8cf18c52163 100644 (file)
@@ -1519,6 +1519,7 @@ static void hns_dsaf_set_mac_key(
        mac_key->high.bits.mac_3 = addr[3];
        mac_key->low.bits.mac_4 = addr[4];
        mac_key->low.bits.mac_5 = addr[5];
+       mac_key->low.bits.port_vlan = 0;
        dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M,
                       DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
        dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
@@ -2924,10 +2925,11 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
        /* find the tcam entry index for promisc */
        entry_index = dsaf_promisc_tcam_entry(port);
 
+       memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
+       memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
+
        /* config key mask */
        if (enable) {
-               memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
-               memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
                dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
                               DSAF_TBL_TCAM_KEY_PORT_M,
                               DSAF_TBL_TCAM_KEY_PORT_S, port);
index a2c22d084ce90cb03337ee09e4b4f1b723046ef4..e13aa064a8e943da7c9538e88bc425f299e944ca 100644 (file)
@@ -461,6 +461,32 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
        return 0;
 }
 
+int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+       union acpi_object *obj;
+       union acpi_object obj_args, argv4;
+
+       obj_args.integer.type = ACPI_TYPE_INTEGER;
+       obj_args.integer.value = mac_cb->mac_id;
+
+       argv4.type = ACPI_TYPE_PACKAGE;
+       argv4.package.count = 1;
+       argv4.package.elements = &obj_args;
+
+       obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+                               hns_dsaf_acpi_dsm_uuid, 0,
+                               HNS_OP_GET_SFP_STAT_FUNC, &argv4);
+
+       if (!obj || obj->type != ACPI_TYPE_INTEGER)
+               return -ENODEV;
+
+       *sfp_prsnt = obj->integer.value;
+
+       ACPI_FREE(obj);
+
+       return 0;
+}
+
 /**
  * hns_mac_config_sds_loopback - set loop back for serdes
  * @mac_cb: mac control block
@@ -592,7 +618,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
                misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
 
                misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
-               misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+               misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
 
                misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
        } else {
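
The new ACPI path queries SFP presence through a _DSM method in the usual shape: wrap the argument in an ACPI package, evaluate, unwrap an integer, free the returned object. A condensed kernel-style sketch of that shape (dsm_get_int() and its parameters are hypothetical; it also frees a non-NULL object of the wrong type, which the hunk above does not; in v4.11 acpi_evaluate_dsm() still took a const u8 *uuid):

#include <linux/acpi.h>

/* hypothetical helper: evaluate a _DSM function returning one integer */
static int dsm_get_int(struct device *dev, const u8 *uuid, u64 func,
		       u64 arg, int *out)
{
	union acpi_object in, argv4, *obj;

	in.integer.type = ACPI_TYPE_INTEGER;
	in.integer.value = arg;

	argv4.type = ACPI_TYPE_PACKAGE;
	argv4.package.count = 1;
	argv4.package.elements = &in;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), uuid, 0, func, &argv4);
	if (!obj)
		return -ENODEV;
	if (obj->type != ACPI_TYPE_INTEGER) {
		ACPI_FREE(obj);		/* wrong-type results must still be freed */
		return -ENODEV;
	}

	*out = obj->integer.value;
	ACPI_FREE(obj);
	return 0;
}
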
index 2175cced402f7fe84dd260eecc5f3f0ab712132a..e9af89ad039c6f0e227878b9de85ea7819cd19d9 100644 (file)
@@ -6274,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev)
                /* Quiesce the device without resetting the hardware */
                e1000e_down(adapter, false);
                e1000_free_irq(adapter);
-               e1000e_reset_interrupt_capability(adapter);
        }
+       e1000e_reset_interrupt_capability(adapter);
 
        /* Allow time for pending master requests to run */
        e1000e_disable_pcie_master(&adapter->hw);
index e8a8351c8ea998a141bd8fb5f27d619b5b477b67..82a95cc2c8ee386c725dfd01e5367bc95e26ca0a 100644 (file)
@@ -4438,8 +4438,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
        if (!vsi->netdev)
                return;
 
-       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-               napi_enable(&vsi->q_vectors[q_idx]->napi);
+       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+               struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+               if (q_vector->rx.ring || q_vector->tx.ring)
+                       napi_enable(&q_vector->napi);
+       }
 }
 
 /**
@@ -4453,8 +4457,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
        if (!vsi->netdev)
                return;
 
-       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-               napi_disable(&vsi->q_vectors[q_idx]->napi);
+       for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+               struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+               if (q_vector->rx.ring || q_vector->tx.ring)
+                       napi_disable(&q_vector->napi);
+       }
 }
 
 /**
index 55957246c0e844826a5a7f18c42c4678fb6c5be5..b5d5519542e87380b064de5578e327f0d55ba9cf 100644 (file)
@@ -294,7 +294,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
                                         struct netdev_notifier_changeupper_info *info)
 {
        struct net_device *upper = info->upper_dev, *ndev_tmp;
-       struct netdev_lag_upper_info *lag_upper_info;
+       struct netdev_lag_upper_info *lag_upper_info = NULL;
        bool is_bonded;
        int bond_status = 0;
        int num_slaves = 0;
@@ -303,7 +303,8 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
        if (!netif_is_lag_master(upper))
                return 0;
 
-       lag_upper_info = info->upper_info;
+       if (info->linking)
+               lag_upper_info = info->upper_info;
 
        /* The event may still be of interest if the slave does not belong to
         * us, but is enslaved to a master which has one or more of our netdevs
index 06c9f4100cb9bd8c0abecada5fa922c7e779fc51..6ad44be08b3307ac147b6f25c8d04bc0706f65a5 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/of_irq.h>
 #include <linux/crc32.h>
 #include <linux/crc32c.h>
+#include <linux/circ_buf.h>
 
 #include "moxart_ether.h"
 
@@ -278,6 +279,13 @@ rx_next:
        return rx;
 }
 
+static int moxart_tx_queue_space(struct net_device *ndev)
+{
+       struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+       return CIRC_SPACE(priv->tx_head, priv->tx_tail, TX_DESC_NUM);
+}
+
 static void moxart_tx_finished(struct net_device *ndev)
 {
        struct moxart_mac_priv_t *priv = netdev_priv(ndev);
@@ -297,6 +305,9 @@ static void moxart_tx_finished(struct net_device *ndev)
                tx_tail = TX_NEXT(tx_tail);
        }
        priv->tx_tail = tx_tail;
+       if (netif_queue_stopped(ndev) &&
+           moxart_tx_queue_space(ndev) >= TX_WAKE_THRESHOLD)
+               netif_wake_queue(ndev);
 }
 
 static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
@@ -324,13 +335,18 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        struct moxart_mac_priv_t *priv = netdev_priv(ndev);
        void *desc;
        unsigned int len;
-       unsigned int tx_head = priv->tx_head;
+       unsigned int tx_head;
        u32 txdes1;
        int ret = NETDEV_TX_BUSY;
 
+       spin_lock_irq(&priv->txlock);
+
+       tx_head = priv->tx_head;
        desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
 
-       spin_lock_irq(&priv->txlock);
+       if (moxart_tx_queue_space(ndev) == 1)
+               netif_stop_queue(ndev);
+
        if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
                net_dbg_ratelimited("no TX space for packet\n");
                priv->stats.tx_dropped++;
index 93a9563ac7c6730eec8240ac187a86b9831c9741..afc32ec998c043957f0b472080d22d01600edf2a 100644 (file)
@@ -59,6 +59,7 @@
 #define TX_NEXT(N)             (((N) + 1) & (TX_DESC_NUM_MASK))
 #define TX_BUF_SIZE            1600
 #define TX_BUF_SIZE_MAX                (TX_DESC1_BUF_SIZE_MASK+1)
+#define TX_WAKE_THRESHOLD      16
 
 #define RX_DESC_NUM            64
 #define RX_DESC_NUM_MASK       (RX_DESC_NUM-1)
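
The moxart changes add classic ring-based TX flow control: stop the queue when CIRC_SPACE() reports a single free slot, wake it from the completion path once space climbs back to TX_WAKE_THRESHOLD. The arithmetic is easy to verify standalone (CIRC_SPACE assumes a power-of-two ring size, which TX_DESC_NUM satisfies):

#include <stdio.h>

/* same formula as include/linux/circ_buf.h; size must be a power of two */
#define CIRC_SPACE(head, tail, size) \
	(((tail) - ((head) + 1)) & ((size) - 1))

int main(void)
{
	unsigned head = 0, tail = 0, size = 64;

	printf("%u\n", CIRC_SPACE(head, tail, size));	/* 63: one slot kept free */
	head = 63;
	printf("%u\n", CIRC_SPACE(head, tail, size));	/* 0: producer must stop */
	return 0;
}

CIRC_SPACE always keeps one slot unused so that head == tail unambiguously means empty, which is why the driver stops the queue at space == 1 rather than 0.
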
index 9179a99563afa86f4ed7bbcb41b045c2568243de..a41377e26c07d038aa6c8716ef52d7152e92bcf0 100644 (file)
@@ -3275,9 +3275,10 @@ void nfp_net_netdev_clean(struct net_device *netdev)
 {
        struct nfp_net *nn = netdev_priv(netdev);
 
+       unregister_netdev(nn->netdev);
+
        if (nn->xdp_prog)
                bpf_prog_put(nn->xdp_prog);
        if (nn->bpf_offload_xdp)
                nfp_net_xdp_offload(nn, NULL);
-       unregister_netdev(nn->netdev);
 }
index 7cd76b6b5cb9f6c1c05f09b509be7e11a79b0478..2ae85245478087d2d640617bd79bfbfabd5f0763 100644 (file)
@@ -2216,18 +2216,15 @@ static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port,
 {
        bool want[OFDPA_CTRL_MAX] = { 0, };
        bool prev_ctrls[OFDPA_CTRL_MAX];
-       u8 uninitialized_var(prev_state);
+       u8 prev_state;
        int err;
        int i;
 
-       if (switchdev_trans_ph_prepare(trans)) {
-               memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
-               prev_state = ofdpa_port->stp_state;
-       }
-
-       if (ofdpa_port->stp_state == state)
+       prev_state = ofdpa_port->stp_state;
+       if (prev_state == state)
                return 0;
 
+       memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls));
        ofdpa_port->stp_state = state;
 
        switch (state) {
index 9f3d9c67e3fe0f50b2d1119e74b7eac4b93e8bae..fa674a8bda0c8ff43d19699fefdd0ba718e75c90 100644 (file)
@@ -1267,6 +1267,7 @@ static void soft_reset_slave(struct cpsw_slave *slave)
 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 {
        u32 slave_port;
+       struct phy_device *phy;
        struct cpsw_common *cpsw = priv->cpsw;
 
        soft_reset_slave(slave);
@@ -1300,27 +1301,28 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
                                   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
 
        if (slave->data->phy_node) {
-               slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
+               phy = of_phy_connect(priv->ndev, slave->data->phy_node,
                                 &cpsw_adjust_link, 0, slave->data->phy_if);
-               if (!slave->phy) {
+               if (!phy) {
                        dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
                                slave->data->phy_node->full_name,
                                slave->slave_num);
                        return;
                }
        } else {
-               slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
+               phy = phy_connect(priv->ndev, slave->data->phy_id,
                                 &cpsw_adjust_link, slave->data->phy_if);
-               if (IS_ERR(slave->phy)) {
+               if (IS_ERR(phy)) {
                        dev_err(priv->dev,
                                "phy \"%s\" not found on slave %d, err %ld\n",
                                slave->data->phy_id, slave->slave_num,
-                               PTR_ERR(slave->phy));
-                       slave->phy = NULL;
+                               PTR_ERR(phy));
                        return;
                }
        }
 
+       slave->phy = phy;
+
        phy_attached_info(slave->phy);
 
        phy_start(slave->phy);
@@ -1817,6 +1819,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
        }
 
        cpsw_intr_enable(cpsw);
+       netif_trans_update(ndev);
+       netif_tx_wake_all_queues(ndev);
 }
 
 static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
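
The cpsw fix above builds the phy_device in a local variable and assigns slave->phy only once the lookup has fully succeeded, so no error path ever leaves a NULL or ERR_PTR value half-published. The publish-after-init pattern in miniature (hypothetical names):

#include <stddef.h>

struct phy { int id; };
struct slave { struct phy *phy; };

/* stand-in for of_phy_connect()/phy_connect(); may fail */
static struct phy *phy_lookup(const char *id)
{
	(void)id;
	return NULL;			/* simulate the failure path */
}

static int slave_open(struct slave *s, const char *id)
{
	struct phy *phy = phy_lookup(id);	/* work on a local first */

	if (!phy)
		return -1;		/* s->phy never sees a failed attempt */

	s->phy = phy;			/* publish only a fully valid pointer */
	return 0;
}

int main(void)
{
	struct slave s = { .phy = NULL };

	slave_open(&s, "phy0");
	return s.phy == NULL ? 0 : 1;	/* failure left s.phy untouched */
}
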
index ffedad2a360afb8154f0e1e8b9be0c1d388fa5d9..15b920086251633e2ee0ee0b26ba0046859e5299 100644 (file)
@@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
                memset(rd, 0, sizeof(*rd));
                rd->hw = hwmap + i;
                rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
-               if (rd->buf == NULL ||
-                   !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
+               if (rd->buf)
+                       busaddr = pci_map_single(pdev, rd->buf, len, dir);
+               if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
                        if (rd->buf) {
                                net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
                                                    __func__, rd->buf);
@@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
                                rd = r->rd + j;
                                busaddr = rd_get_addr(rd);
                                rd_set_addr_status(rd, 0, 0);
-                               if (busaddr)
-                                       pci_unmap_single(pdev, busaddr, len, dir);
+                               pci_unmap_single(pdev, busaddr, len, dir);
                                kfree(rd->buf);
                                rd->buf = NULL;
                        }
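
The vlsi_ir fix replaces the old assumption that a DMA address of 0 means failure with a proper pci_dma_mapping_error() check; on some platforms 0 is a valid bus address. The corrected setup, reshaped as a hypothetical kernel-style helper:

#include <linux/pci.h>
#include <linux/slab.h>

/* hypothetical ring-slot setup mirroring the corrected vlsi_ir logic */
static int map_slot(struct pci_dev *pdev, size_t len, int dir,
		    void **bufp, dma_addr_t *addrp)
{
	void *buf = kmalloc(len, GFP_KERNEL | GFP_DMA);
	dma_addr_t busaddr = 0;

	if (buf)
		busaddr = pci_map_single(pdev, buf, len, dir);
	/* 0 can be a valid bus address: ask the DMA API, never test !busaddr */
	if (!buf || pci_dma_mapping_error(pdev, busaddr)) {
		kfree(buf);		/* kfree(NULL) is a no-op */
		return -ENOMEM;
	}
	*bufp = buf;
	*addrp = busaddr;
	return 0;
}
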
index 6b988f77da08fca5ba9e7efec8c4af354ad51ecc..61941e29daae85abe7cc8a5726d669eae3d29137 100644 (file)
@@ -84,3 +84,4 @@ int mdiobus_register_board_info(const struct mdio_board_info *info,
 
        return 0;
 }
+EXPORT_SYMBOL(mdiobus_register_board_info);
index 1be69d8bc90948e82f92736b8f7ee9d274b9bd2b..a2bfc82e95d70ba645a1f0975ad835ca7c0991cf 100644 (file)
@@ -681,7 +681,7 @@ void phy_stop_machine(struct phy_device *phydev)
        cancel_delayed_work_sync(&phydev->state_queue);
 
        mutex_lock(&phydev->lock);
-       if (phydev->state > PHY_UP)
+       if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
                phydev->state = PHY_UP;
        mutex_unlock(&phydev->lock);
 }
index 1b52520715aec6f972626361a6aa93accf809301..f8c81f12d988907d42786eedb6cd9e6ca3ca0015 100644 (file)
@@ -990,7 +990,7 @@ static void team_port_disable(struct team *team,
 #define TEAM_ENC_FEATURES      (NETIF_F_HW_CSUM | NETIF_F_SG | \
                                 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
 
-static void ___team_compute_features(struct team *team)
+static void __team_compute_features(struct team *team)
 {
        struct team_port *port;
        u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
@@ -1023,16 +1023,10 @@ static void ___team_compute_features(struct team *team)
                team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
 }
 
-static void __team_compute_features(struct team *team)
-{
-       ___team_compute_features(team);
-       netdev_change_features(team->dev);
-}
-
 static void team_compute_features(struct team *team)
 {
        mutex_lock(&team->lock);
-       ___team_compute_features(team);
+       __team_compute_features(team);
        mutex_unlock(&team->lock);
        netdev_change_features(team->dev);
 }
@@ -1641,6 +1635,7 @@ static void team_uninit(struct net_device *dev)
        team_notify_peers_fini(team);
        team_queue_override_fini(team);
        mutex_unlock(&team->lock);
+       netdev_change_features(dev);
 }
 
 static void team_destructor(struct net_device *dev)
@@ -1928,6 +1923,10 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
        mutex_lock(&team->lock);
        err = team_port_add(team, port_dev);
        mutex_unlock(&team->lock);
+
+       if (!err)
+               netdev_change_features(dev);
+
        return err;
 }
 
@@ -1939,6 +1938,10 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
        mutex_lock(&team->lock);
        err = team_port_del(team, port_dev);
        mutex_unlock(&team->lock);
+
+       if (!err)
+               netdev_change_features(dev);
+
        return err;
 }
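
The team changes resolve a lock-ordering hazard: netdev_change_features() can take locks of its own, so it must not run under team->lock. Each path now mutates state under the mutex and fires the notification only after dropping it; team_uninit() likewise defers the call until after mutex_unlock(). The resulting shape, condensed from the hunks above:

static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);	/* mutate under the lock */
	mutex_unlock(&team->lock);

	if (!err)
		netdev_change_features(dev);	/* notify after unlocking */

	return err;
}
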
 
index f5552aaaa77a59bf558da6c22218a919bf99ec94..f3ae88fdf332e890ac8273e3df1ca7dd53092c07 100644 (file)
@@ -532,6 +532,7 @@ static const struct driver_info wwan_info = {
 #define LENOVO_VENDOR_ID       0x17ef
 #define NVIDIA_VENDOR_ID       0x0955
 #define HP_VENDOR_ID           0x03f0
+#define MICROSOFT_VENDOR_ID    0x045e
 
 static const struct usb_device_id      products[] = {
 /* BLACKLIST !!
@@ -761,6 +762,20 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* Microsoft Surface 2 dock (based on Realtek RTL8152) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07ab, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
+/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
index 156f7f85e4860d682d679df68bfe8cfe2a3d4b3b..2474618404f5e592c0fe56d38c30c8988e1ed8ef 100644 (file)
@@ -908,7 +908,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
-       {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},    /* Telit LE920 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
        {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},    /* XS Stick W100-2 from 4G Systems */
        {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)},    /* Olivetti Olicard 100 */
        {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)},    /* Olivetti Olicard 120 */
index 0b1b9188625d527e01d098d3ca09389dcfd7bf4d..07f788c49d573fe9d4dc15e24b8f29449b4ecbe2 100644 (file)
@@ -517,6 +517,7 @@ enum rtl8152_flags {
 
 /* Define these values to match your device */
 #define VENDOR_ID_REALTEK              0x0bda
+#define VENDOR_ID_MICROSOFT            0x045e
 #define VENDOR_ID_SAMSUNG              0x04e8
 #define VENDOR_ID_LENOVO               0x17ef
 #define VENDOR_ID_NVIDIA               0x0955
@@ -1294,6 +1295,7 @@ static void intr_callback(struct urb *urb)
                }
        } else {
                if (netif_carrier_ok(tp->netdev)) {
+                       netif_stop_queue(tp->netdev);
                        set_bit(RTL8152_LINK_CHG, &tp->flags);
                        schedule_delayed_work(&tp->schedule, 0);
                }
@@ -3169,6 +3171,9 @@ static void set_carrier(struct r8152 *tp)
                        napi_enable(&tp->napi);
                        netif_wake_queue(netdev);
                        netif_info(tp, link, netdev, "carrier on\n");
+               } else if (netif_queue_stopped(netdev) &&
+                          skb_queue_len(&tp->tx_queue) < tp->tx_qlen) {
+                       netif_wake_queue(netdev);
                }
        } else {
                if (netif_carrier_ok(netdev)) {
@@ -3702,8 +3707,18 @@ static int rtl8152_resume(struct usb_interface *intf)
                        tp->rtl_ops.autosuspend_en(tp, false);
                        napi_disable(&tp->napi);
                        set_bit(WORK_ENABLE, &tp->flags);
-                       if (netif_carrier_ok(tp->netdev))
-                               rtl_start_rx(tp);
+
+                       if (netif_carrier_ok(tp->netdev)) {
+                               if (rtl8152_get_speed(tp) & LINK_STATUS) {
+                                       rtl_start_rx(tp);
+                               } else {
+                                       netif_carrier_off(tp->netdev);
+                                       tp->rtl_ops.disable(tp);
+                                       netif_info(tp, link, tp->netdev,
+                                                  "linking down\n");
+                               }
+                       }
+
                        napi_enable(&tp->napi);
                        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
                        smp_mb__after_atomic();
@@ -4507,6 +4522,8 @@ static void rtl8152_disconnect(struct usb_interface *intf)
 static struct usb_device_id rtl8152_table[] = {
        {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
        {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
        {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3062)},
index 3de65ea6531a8add927c0a2d7c74e8923c0f3274..453244805c52e570394673d7a3ebd71cc62fd5ca 100644 (file)
@@ -1929,7 +1929,7 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
                   " value=0x%04x index=0x%04x size=%d\n",
                   cmd, reqtype, value, index, size);
 
-       if (data) {
+       if (size) {
                buf = kmalloc(size, GFP_KERNEL);
                if (!buf)
                        goto out;
@@ -1938,8 +1938,13 @@ static int __usbnet_read_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
        err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
                              cmd, reqtype, value, index, buf, size,
                              USB_CTRL_GET_TIMEOUT);
-       if (err > 0 && err <= size)
-               memcpy(data, buf, err);
+       if (err > 0 && err <= size) {
+               if (data)
+                       memcpy(data, buf, err);
+               else
+                       netdev_dbg(dev->net,
+                                  "Huh? Data requested but thrown away.\n");
+       }
        kfree(buf);
 out:
        return err;
@@ -1960,7 +1965,13 @@ static int __usbnet_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
                buf = kmemdup(data, size, GFP_KERNEL);
                if (!buf)
                        goto out;
-       }
+       } else {
+               if (size) {
+                       WARN_ON_ONCE(1);
+                       err = -EINVAL;
+                       goto out;
+               }
+       }
 
        err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
                              cmd, reqtype, value, index, buf, size,
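
The usbnet hardening above allocates the bounce buffer based on size rather than data, copies back only when a destination was supplied, and rejects writes that pass a non-zero size with no data. A userspace-flavoured distillation of those guards (hypothetical names, plain malloc standing in for kmalloc):

#include <stdlib.h>
#include <string.h>

/* read path: 'data' may be NULL when a caller only probes the device */
static int read_cmd(void *data, const void *src, size_t size)
{
	if (size) {
		void *buf = malloc(size);	/* allocate by size, not by data */

		if (!buf)
			return -1;
		memcpy(buf, src, size);		/* stands in for usb_control_msg() */
		if (data)			/* copy back only when asked to */
			memcpy(data, buf, size);
		free(buf);
	}
	return 0;
}

/* write path: a non-zero size with no data is a caller bug; fail loudly */
static int write_cmd(const void *data, size_t size)
{
	if (!data && size)
		return -22;			/* -EINVAL */
	return 0;
}
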
index ea9890d619670e1abfba75fe608c2925d824cb1c..f36584616e7d6825c7e69137b4a31a3d55779688 100644 (file)
@@ -2230,14 +2230,8 @@ static bool virtnet_validate_features(struct virtio_device *vdev)
 #define MIN_MTU ETH_MIN_MTU
 #define MAX_MTU ETH_MAX_MTU
 
-static int virtnet_probe(struct virtio_device *vdev)
+static int virtnet_validate(struct virtio_device *vdev)
 {
-       int i, err;
-       struct net_device *dev;
-       struct virtnet_info *vi;
-       u16 max_queue_pairs;
-       int mtu;
-
        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
@@ -2247,6 +2241,25 @@ static int virtnet_probe(struct virtio_device *vdev)
        if (!virtnet_validate_features(vdev))
                return -EINVAL;
 
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
+               int mtu = virtio_cread16(vdev,
+                                        offsetof(struct virtio_net_config,
+                                                 mtu));
+               if (mtu < MIN_MTU)
+                       __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
+       }
+
+       return 0;
+}
+
+static int virtnet_probe(struct virtio_device *vdev)
+{
+       int i, err;
+       struct net_device *dev;
+       struct virtnet_info *vi;
+       u16 max_queue_pairs;
+       int mtu;
+
        /* Find if host supports multiqueue virtio_net device */
        err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
                                   struct virtio_net_config,
@@ -2362,11 +2375,20 @@ static int virtnet_probe(struct virtio_device *vdev)
                                     offsetof(struct virtio_net_config,
                                              mtu));
                if (mtu < dev->min_mtu) {
-                       __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
-               } else {
-                       dev->mtu = mtu;
-                       dev->max_mtu = mtu;
+                       /* Should never trigger: MTU was previously validated
+                        * in virtnet_validate.
+                        */
+                       dev_err(&vdev->dev, "device MTU appears to have changed, "
+                               "it is now %d < %d", mtu, dev->min_mtu);
+                       goto free_stats;
                }
+
+               dev->mtu = mtu;
+               dev->max_mtu = mtu;
+
+               /* TODO: size buffers correctly in this case. */
+               if (dev->mtu > ETH_DATA_LEN)
+                       vi->big_packets = true;
        }
 
        if (vi->any_header_sg)
@@ -2544,6 +2566,7 @@ static struct virtio_driver virtio_net_driver = {
        .driver.name =  KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table =     id_table,
+       .validate =     virtnet_validate,
        .probe =        virtnet_probe,
        .remove =       virtnet_remove,
        .config_changed = virtnet_config_changed,
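
virtio_net now registers a .validate callback alongside .probe: the virtio core finalizes feature negotiation between the two, so clearing a bogus VIRTIO_NET_F_MTU bit is only safe from virtnet_validate(), and the re-check left in virtnet_probe() should never fire. The registration, reduced to the relevant fields (a sketch of the v4.11 virtio_driver layout):

static struct virtio_driver virtio_net_driver = {
	.driver.name	= KBUILD_MODNAME,
	.driver.owner	= THIS_MODULE,
	.id_table	= id_table,
	.validate	= virtnet_validate,	/* may still clear feature bits */
	.probe		= virtnet_probe,	/* sees only the sanitized set */
	.remove		= virtnet_remove,
	.config_changed	= virtnet_config_changed,
};
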
index de19c7c92bc6c095b3b111abfc280fe228e588ac..85d949e03f79f7c9566c7b00a9bbe99853df7f16 100644 (file)
@@ -2238,14 +2238,16 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
        struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy);
        struct brcmf_p2p_info *p2p = &cfg->p2p;
        struct brcmf_cfg80211_vif *vif;
+       enum nl80211_iftype iftype;
        bool wait_for_disable = false;
        int err;
 
        brcmf_dbg(TRACE, "delete P2P vif\n");
        vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
 
+       iftype = vif->wdev.iftype;
        brcmf_cfg80211_arm_vif_event(cfg, vif);
-       switch (vif->wdev.iftype) {
+       switch (iftype) {
        case NL80211_IFTYPE_P2P_CLIENT:
                if (test_bit(BRCMF_VIF_STATUS_DISCONNECTING, &vif->sme_state))
                        wait_for_disable = true;
@@ -2275,7 +2277,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
                                            BRCMF_P2P_DISABLE_TIMEOUT);
 
        err = 0;
-       if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) {
+       if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
                brcmf_vif_clear_mgmt_ies(vif);
                err = brcmf_p2p_release_p2p_if(vif);
        }
@@ -2291,7 +2293,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
        brcmf_remove_interface(vif->ifp, true);
 
        brcmf_cfg80211_arm_vif_event(cfg, NULL);
-       if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE)
+       if (iftype != NL80211_IFTYPE_P2P_DEVICE)
                p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif = NULL;
 
        return err;
index a260cd5032005bcbf520e98f8be188750c987439..077bfd8f4c0cd85505a3ef7de375bfbaf4cddef5 100644 (file)
@@ -1056,6 +1056,8 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
 
        if (ret)
                return ret;
+       if (count == 0)
+               return 0;
 
        iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf,
                               (count - 1), NULL);
index 99132ea16ede08e0e7ebd5f8734eeb0ab204e0fa..c5734e1a02d27ee3d15ec59ef6dd19430e6aed71 100644 (file)
@@ -216,7 +216,8 @@ u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
                        qmask |= BIT(vif->hw_queue[ac]);
        }
 
-       if (vif->type == NL80211_IFTYPE_AP)
+       if (vif->type == NL80211_IFTYPE_AP ||
+           vif->type == NL80211_IFTYPE_ADHOC)
                qmask |= BIT(vif->cab_queue);
 
        return qmask;
index 6927caecd48e515557d011491463c48a3b1ea95c..486dcceed17a4f3a5b1ff0084d8a1f6b768199cd 100644 (file)
@@ -2401,7 +2401,7 @@ void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
                return;
 
        rcu_read_lock();
-       sta = mvm->fw_id_to_mac_id[notif->sta_id];
+       sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
        if (WARN_ON(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
                return;
index b51a2853cc804a7ad8416423c3cea4554808125d..9d28db7f56aa2404825c530559df62afbca5d21a 100644 (file)
@@ -1806,7 +1806,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                        iwl_mvm_get_wd_timeout(mvm, vif, false, false);
                int queue;
 
-               if (vif->type == NL80211_IFTYPE_AP)
+               if (vif->type == NL80211_IFTYPE_AP ||
+                   vif->type == NL80211_IFTYPE_ADHOC)
                        queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
                else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
                        queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
@@ -1837,7 +1838,8 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
         * enabled-cab_queue to the mask)
         */
        if (iwl_mvm_is_dqa_supported(mvm) &&
-           vif->type == NL80211_IFTYPE_AP) {
+           (vif->type == NL80211_IFTYPE_AP ||
+            vif->type == NL80211_IFTYPE_ADHOC)) {
                struct iwl_trans_txq_scd_cfg cfg = {
                        .fifo = IWL_MVM_TX_FIFO_MCAST,
                        .sta_id = mvmvif->bcast_sta.sta_id,
@@ -1862,7 +1864,8 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (vif->type == NL80211_IFTYPE_AP)
+       if (vif->type == NL80211_IFTYPE_AP ||
+           vif->type == NL80211_IFTYPE_ADHOC)
                iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
                                    IWL_MAX_TID_COUNT, 0);
 
index 3f37075f4cde3cf21da485cd94b1a3f705abac62..1ba0a6f55503665d14b1c1e9a129e37503b9f33f 100644 (file)
@@ -506,6 +506,7 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
 
        switch (info->control.vif->type) {
        case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_ADHOC:
                /*
                 * Handle legacy hostapd as well, where station may be added
                 * only after assoc. Take care of the case where we send a
@@ -517,7 +518,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
                if (info->hw_queue == info->control.vif->cab_queue)
                        return info->hw_queue;
 
-               WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
+               WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
+                         "fc=0x%02x", le16_to_cpu(fc));
                return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
        case NL80211_IFTYPE_P2P_DEVICE:
                if (ieee80211_is_mgmt(fc))
@@ -584,7 +586,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                        iwl_mvm_vif_from_mac80211(info.control.vif);
 
                if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-                   info.control.vif->type == NL80211_IFTYPE_AP) {
+                   info.control.vif->type == NL80211_IFTYPE_AP ||
+                   info.control.vif->type == NL80211_IFTYPE_ADHOC) {
                        sta_id = mvmvif->bcast_sta.sta_id;
                        queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
                                                           hdr->frame_control);
index caea350f05aac7b2e3dc7137b0b4363abcd8c2d4..bdc379178e87955c5456028a43657f97862670af 100644 (file)
@@ -1742,12 +1742,14 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val)
        unsigned long flags;
        struct rtl_c2hcmd *c2hcmd;
 
-       c2hcmd = kmalloc(sizeof(*c2hcmd), GFP_KERNEL);
+       c2hcmd = kmalloc(sizeof(*c2hcmd),
+                        in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 
        if (!c2hcmd)
                goto label_err;
 
-       c2hcmd->val = kmalloc(len, GFP_KERNEL);
+       c2hcmd->val = kmalloc(len,
+                             in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
 
        if (!c2hcmd->val)
                goto label_err2;
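
GFP_KERNEL allocations may sleep, which is illegal in interrupt context; the rtlwifi fix therefore chooses the mask at run time. As a tiny kernel-style sketch (the in_interrupt() test matches the patch, though having callers pass their context down explicitly is the more robust long-term design):

#include <linux/hardirq.h>
#include <linux/slab.h>

/* hypothetical wrapper: pick a GFP mask that is safe for the caller's context */
static void *c2h_alloc(size_t len)
{
	gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

	return kmalloc(len, flags);	/* GFP_ATOMIC never sleeps */
}
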
index 23d4a1728cdfa4993903b6b4504c6a70f5a7fff4..351bac8f65031edf831741159c01e860c89bb4a5 100644 (file)
@@ -934,8 +934,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
        rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
        if (rc < 0)
                goto out_unlock;
+       nvdimm_bus_unlock(&nvdimm_bus->dev);
+
        if (copy_to_user(p, buf, buf_len))
                rc = -EFAULT;
+
+       vfree(buf);
+       return rc;
+
  out_unlock:
        nvdimm_bus_unlock(&nvdimm_bus->dev);
  out:
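
The nvdimm ioctl fix moves copy_to_user() out from under the bus lock: copying to userspace can fault and sleep, so the lock is dropped first, and the success path frees the buffer and returns on its own. In outline (a sketch; the lock type and unwind labels are simplified from the hunk above):

	rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
	if (rc < 0)
		goto out_unlock;
	nvdimm_bus_unlock(&nvdimm_bus->dev);	/* drop before a faulting copy */

	if (copy_to_user(p, buf, buf_len))	/* may sleep on a page fault */
		rc = -EFAULT;

	vfree(buf);
	return rc;

 out_unlock:
	nvdimm_bus_unlock(&nvdimm_bus->dev);
	/* ... original error unwind continues ... */
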
index b3323c0697f6239ebbfe757137cde8352fe3c480..ca6d572c48fcb62d7c8a83e4874f18b625caaeb8 100644 (file)
@@ -243,7 +243,15 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
        }
 
        if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
-               if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) {
+               /*
+                * FIXME: nsio_rw_bytes() may be called from atomic
+                * context in the btt case and nvdimm_clear_poison()
+                * takes a sleeping lock. Until the locking can be
+                * reworked this capability requires that the namespace
+                * is not claimed by btt.
+                */
+               if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
+                               && (!ndns->claim || !is_nd_btt(ndns->claim))) {
                        long cleared;
 
                        cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
index 0eedc49e0d473ed36b5ef9832760aa8498b9f146..8b721321be5b1cb291e780ae9a5ed7ea5ad67e09 100644 (file)
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create);
 
 int alias_dpa_busy(struct device *dev, void *data)
 {
-       resource_size_t map_end, blk_start, new, busy;
+       resource_size_t map_end, blk_start, new;
        struct blk_alloc_info *info = data;
        struct nd_mapping *nd_mapping;
        struct nd_region *nd_region;
@@ -436,29 +436,19 @@ int alias_dpa_busy(struct device *dev, void *data)
  retry:
        /*
         * Find the free dpa from the end of the last pmem allocation to
-        * the end of the interleave-set mapping that is not already
-        * covered by a blk allocation.
+        * the end of the interleave-set mapping.
         */
-       busy = 0;
        for_each_dpa_resource(ndd, res) {
+               if (strncmp(res->name, "pmem", 4) != 0)
+                       continue;
                if ((res->start >= blk_start && res->start < map_end)
                                || (res->end >= blk_start
                                        && res->end <= map_end)) {
-                       if (strncmp(res->name, "pmem", 4) == 0) {
-                               new = max(blk_start, min(map_end + 1,
-                                                       res->end + 1));
-                               if (new != blk_start) {
-                                       blk_start = new;
-                                       goto retry;
-                               }
-                       } else
-                               busy += min(map_end, res->end)
-                                       - max(nd_mapping->start, res->start) + 1;
-               } else if (nd_mapping->start > res->start
-                               && map_end < res->end) {
-                       /* total eclipse of the PMEM region mapping */
-                       busy += nd_mapping->size;
-                       break;
+                       new = max(blk_start, min(map_end + 1, res->end + 1));
+                       if (new != blk_start) {
+                               blk_start = new;
+                               goto retry;
+                       }
                }
        }
 
@@ -470,52 +460,11 @@ int alias_dpa_busy(struct device *dev, void *data)
                return 1;
        }
 
-       info->available -= blk_start - nd_mapping->start + busy;
+       info->available -= blk_start - nd_mapping->start;
 
        return 0;
 }
 
-static int blk_dpa_busy(struct device *dev, void *data)
-{
-       struct blk_alloc_info *info = data;
-       struct nd_mapping *nd_mapping;
-       struct nd_region *nd_region;
-       resource_size_t map_end;
-       int i;
-
-       if (!is_nd_pmem(dev))
-               return 0;
-
-       nd_region = to_nd_region(dev);
-       for (i = 0; i < nd_region->ndr_mappings; i++) {
-               nd_mapping  = &nd_region->mapping[i];
-               if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
-                       break;
-       }
-
-       if (i >= nd_region->ndr_mappings)
-               return 0;
-
-       map_end = nd_mapping->start + nd_mapping->size - 1;
-       if (info->res->start >= nd_mapping->start
-                       && info->res->start < map_end) {
-               if (info->res->end <= map_end) {
-                       info->busy = 0;
-                       return 1;
-               } else {
-                       info->busy -= info->res->end - map_end;
-                       return 0;
-               }
-       } else if (info->res->end >= nd_mapping->start
-                       && info->res->end <= map_end) {
-               info->busy -= nd_mapping->start - info->res->start;
-               return 0;
-       } else {
-               info->busy -= nd_mapping->size;
-               return 0;
-       }
-}
-
 /**
  * nd_blk_available_dpa - account the unused dpa of BLK region
  * @nd_mapping: container of dpa-resource-root + labels
@@ -545,11 +494,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
        for_each_dpa_resource(ndd, res) {
                if (strncmp(res->name, "blk", 3) != 0)
                        continue;
-
-               info.res = res;
-               info.busy = resource_size(res);
-               device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
-               info.available -= info.busy;
+               info.available -= resource_size(res);
        }
 
        return info.available;
index 9b3b57fef446dc753c966c90fc2529bc9f846dd8..9583a5f58a1ddc498b89d1b6cbaa14cf1a60bea2 100644 (file)
@@ -270,7 +270,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
        memset(cmnd, 0, sizeof(*cmnd));
        cmnd->dsm.opcode = nvme_cmd_dsm;
        cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
-       cmnd->dsm.nr = segments - 1;
+       cmnd->dsm.nr = cpu_to_le32(segments - 1);
        cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
        req->special_vec.bv_page = virt_to_page(range);
index 9690beb15e69ab47bb04345da5f142ec56141035..d996ca73d3be37c2332352fdc45767e738d72822 100644 (file)
@@ -2023,7 +2023,7 @@ nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
        }
 
        ctrl->ctrl.sqsize =
-               min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+               min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
        if (error)
index 47a479f26e5d7de3605c0263d1a0cdbba1b7e1c1..16f84eb0b95e8608b73b196763f8bbd886087b22 100644 (file)
@@ -1606,7 +1606,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
        }
 
        ctrl->ctrl.sqsize =
-               min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+               min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
        if (error)
index a7bcff45f4376d3b0c375bc1df6af3d2ca5c8c2e..76450b0c55f1d12104f3349c11d9c3a9f8e779a4 100644 (file)
@@ -100,7 +100,7 @@ static u16 nvmet_get_smart_log(struct nvmet_req *req,
        u16 status;
 
        WARN_ON(req == NULL || slog == NULL);
-       if (req->cmd->get_log_page.nsid == 0xFFFFFFFF)
+       if (req->cmd->get_log_page.nsid == cpu_to_le32(0xFFFFFFFF))
                status = nvmet_get_smart_log_all(req, slog);
        else
                status = nvmet_get_smart_log_nsid(req, slog);
index 4195115c7e5493be02acc7b4daddbfd2427c831e..6b0baa9caab9fdf524944b0be6e8d011d05c552b 100644 (file)
@@ -180,7 +180,7 @@ static void nvmet_execute_write_zeroes(struct nvmet_req *req)
 
        sector = le64_to_cpu(write_zeroes->slba) <<
                (req->ns->blksize_shift - 9);
-       nr_sector = (((sector_t)le32_to_cpu(write_zeroes->length)) <<
+       nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length)) <<
                (req->ns->blksize_shift - 9)) + 1;
 
        if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
@@ -230,7 +230,7 @@ int nvmet_parse_io_cmd(struct nvmet_req *req)
                return 0;
        case nvme_cmd_dsm:
                req->execute = nvmet_execute_dsm;
-               req->data_len = le32_to_cpu(cmd->dsm.nr + 1) *
+               req->data_len = (le32_to_cpu(cmd->dsm.nr) + 1) *
                        sizeof(struct nvme_dsm_range);
                return 0;
        case nvme_cmd_write_zeroes:
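
The dsm.nr change above fixes an operator-placement bug: le32_to_cpu(x + 1) adds 1 to the little-endian representation before swapping, which only happens to work on little-endian hosts (the write_zeroes hunk similarly corrects the field width to le16). A userspace demonstration with glibc's byte-order helpers:

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t wire = htole32(3);	/* device sent nr = 3, i.e. 4 ranges */

		uint32_t bad  = le32toh(wire + 1);	/* pre-patch: add before the swap */
		uint32_t good = le32toh(wire) + 1;	/* patched: swap first, then add */

		/* On an LE host both print 4, which is why the bug went
		 * unnoticed; on a BE host 'bad' decodes to a huge bogus count. */
		printf("bad=%u good=%u\n", bad, good);
		return 0;
	}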
index 22f7bc6bac7fa77dd48198cde3a31ef60ead531b..c7b0b6a527083f7d865e752c7e953b2e1411b808 100644 (file)
@@ -392,7 +392,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        }
 
        ctrl->ctrl.sqsize =
-               min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+               min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
        if (error)
index dfb8a69afc28f5b26a86bb024356c3ea03fdaf31..d2d2ba5b8a68e4eabcf9afe5c28f3fe1b29f3d5b 100644 (file)
@@ -89,6 +89,7 @@ config PCI_HISI
        depends on PCI_MSI_IRQ_DOMAIN
        select PCIEPORTBUS
        select PCIE_DW_HOST
+       select PCI_HOST_COMMON
        help
          Say Y here if you want PCIe controller support on HiSilicon
          Hip05 and Hip06 SoCs
index fcd3ef845883555648e0b2eeda907427d7811144..6d23683c0892f59646b4142242fa80bf0e85be98 100644 (file)
@@ -234,6 +234,9 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
        return 0;
 }
 
+static const struct dw_pcie_ops dw_pcie_ops = {
+};
+
 static int artpec6_pcie_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -252,6 +255,7 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        pci->dev = dev;
+       pci->ops = &dw_pcie_ops;
 
        artpec6_pcie->pci = pci;
 
index b6c832ba39dd6905a4640e770b69aff47e51b738..f20d494922ab890badd99dd96af9bcad866f2ca2 100644 (file)
@@ -86,6 +86,9 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp,
        return 0;
 }
 
+static const struct dw_pcie_ops dw_pcie_ops = {
+};
+
 static int dw_plat_pcie_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -103,6 +106,7 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        pci->dev = dev;
+       pci->ops = &dw_pcie_ops;
 
        dw_plat_pcie->pci = pci;
 
index b89c373555c553d6042571b1b780d04b5cef0960..6e031b522529daec0240022cf5221e92be3ea29b 100644 (file)
@@ -375,7 +375,6 @@ static void thunder_pem_legacy_fw(struct acpi_pci_root *root,
        index -= node * PEM_MAX_DOM_IN_NODE;
        res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) |
                                        FIELD_PREP(PEM_INDX_MASK, index);
-       res_pem->end = res_pem->start + SZ_16M - 1;
        res_pem->flags = IORESOURCE_MEM;
 }
 
@@ -399,8 +398,15 @@ static int thunder_pem_acpi_init(struct pci_config_window *cfg)
         */
        if (ret) {
                thunder_pem_legacy_fw(root, res_pem);
-               /* Reserve PEM-specific resources and PCI configuration space */
+               /*
+                * Reserve a 64K-sized PEM-specific resource. The full 16M
+                * range is still required for the thunder_pem_init() call.
+                */
+               res_pem->end = res_pem->start + SZ_64K - 1;
                thunder_pem_reserve_range(dev, root->segment, res_pem);
+               res_pem->end = res_pem->start + SZ_16M - 1;
+
+               /* Reserve PCI configuration space as well. */
                thunder_pem_reserve_range(dev, root->segment, &cfg->res);
        }
 
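
A sketch of the reserve-small/operate-large pattern above (the base address is hypothetical): only 64K is reserved so the reservation cannot collide with neighbouring regions on legacy firmware, while the struct is widened back to 16M for the offset arithmetic in thunder_pem_init():

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SZ_64K  0x10000ULL
	#define SZ_16M 0x1000000ULL

	struct resource { uint64_t start, end; };

	int main(void)
	{
		struct resource res = { .start = 0x87e0c0000000ULL, .end = 0 };

		/* Claim only 64K so the reservation stays conflict-free... */
		res.end = res.start + SZ_64K - 1;
		printf("reserved  %" PRIx64 "-%" PRIx64 "\n", res.start, res.end);

		/* ...then widen back to the full 16M window the init code
		 * uses for register offset arithmetic. */
		res.end = res.start + SZ_16M - 1;
		printf("init sees %" PRIx64 "-%" PRIx64 "\n", res.start, res.end);
		return 0;
	}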
index d69046537b75f31d0b1c881686677967865001e0..32822b0d9cd0f03f76eb5b96307c8b6f0ea1558a 100644 (file)
@@ -2010,29 +2010,57 @@ out_err:
        return ERR_PTR(ret);
 }
 
-static int pinctrl_create_and_start(struct pinctrl_dev *pctldev)
+static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
 {
        pctldev->p = create_pinctrl(pctldev->dev, pctldev);
-       if (!IS_ERR(pctldev->p)) {
-               kref_get(&pctldev->p->users);
-               pctldev->hog_default =
-                       pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
-               if (IS_ERR(pctldev->hog_default)) {
-                       dev_dbg(pctldev->dev,
-                               "failed to lookup the default state\n");
-               } else {
-                       if (pinctrl_select_state(pctldev->p,
-                                               pctldev->hog_default))
-                               dev_err(pctldev->dev,
-                                       "failed to select default state\n");
-               }
+       if (PTR_ERR(pctldev->p) == -ENODEV) {
+               dev_dbg(pctldev->dev, "no hogs found\n");
 
-               pctldev->hog_sleep =
-                       pinctrl_lookup_state(pctldev->p,
-                                                   PINCTRL_STATE_SLEEP);
-               if (IS_ERR(pctldev->hog_sleep))
-                       dev_dbg(pctldev->dev,
-                               "failed to lookup the sleep state\n");
+               return 0;
+       }
+
+       if (IS_ERR(pctldev->p)) {
+               dev_err(pctldev->dev, "error claiming hogs: %li\n",
+                       PTR_ERR(pctldev->p));
+
+               return PTR_ERR(pctldev->p);
+       }
+
+       kref_get(&pctldev->p->users);
+       pctldev->hog_default =
+               pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
+       if (IS_ERR(pctldev->hog_default)) {
+               dev_dbg(pctldev->dev,
+                       "failed to lookup the default state\n");
+       } else {
+               if (pinctrl_select_state(pctldev->p,
+                                        pctldev->hog_default))
+                       dev_err(pctldev->dev,
+                               "failed to select default state\n");
+       }
+
+       pctldev->hog_sleep =
+               pinctrl_lookup_state(pctldev->p,
+                                    PINCTRL_STATE_SLEEP);
+       if (IS_ERR(pctldev->hog_sleep))
+               dev_dbg(pctldev->dev,
+                       "failed to lookup the sleep state\n");
+
+       return 0;
+}
+
+int pinctrl_enable(struct pinctrl_dev *pctldev)
+{
+       int error;
+
+       error = pinctrl_claim_hogs(pctldev);
+       if (error) {
+               dev_err(pctldev->dev, "could not claim hogs: %i\n",
+                       error);
+               mutex_destroy(&pctldev->mutex);
+               kfree(pctldev);
+
+               return error;
        }
 
        mutex_lock(&pinctrldev_list_mutex);
@@ -2043,6 +2071,7 @@ static int pinctrl_create_and_start(struct pinctrl_dev *pctldev)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(pinctrl_enable);
 
 /**
  * pinctrl_register() - register a pin controller device
@@ -2065,25 +2094,30 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
        if (IS_ERR(pctldev))
                return pctldev;
 
-       error = pinctrl_create_and_start(pctldev);
-       if (error) {
-               mutex_destroy(&pctldev->mutex);
-               kfree(pctldev);
-
+       error = pinctrl_enable(pctldev);
+       if (error)
                return ERR_PTR(error);
-       }
 
        return pctldev;
 
 }
 EXPORT_SYMBOL_GPL(pinctrl_register);
 
+/**
+ * pinctrl_register_and_init() - register and init pin controller device
+ * @pctldesc: descriptor for this pin controller
+ * @dev: parent device for this pin controller
+ * @driver_data: private pin controller data for this pin controller
+ * @pctldev: pin controller device
+ *
+ * Note that pinctrl_enable() still needs to be manually called after
+ * this once the driver is ready.
+ */
 int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
                              struct device *dev, void *driver_data,
                              struct pinctrl_dev **pctldev)
 {
        struct pinctrl_dev *p;
-       int error;
 
        p = pinctrl_init_controller(pctldesc, dev, driver_data);
        if (IS_ERR(p))
@@ -2097,15 +2131,6 @@ int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
         */
        *pctldev = p;
 
-       error = pinctrl_create_and_start(p);
-       if (error) {
-               mutex_destroy(&p->mutex);
-               kfree(p);
-               *pctldev = NULL;
-
-               return error;
-       }
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(pinctrl_register_and_init);
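
A hedged sketch of the intended two-step registration from a driver's probe() (the foo_* names are hypothetical), mirroring the imx, pinctrl-single, and sh-pfc conversions later in this diff: register first, finish hardware setup, then let pinctrl_enable() claim the hogs:

	#include <linux/pinctrl/pinctrl.h>
	#include <linux/platform_device.h>

	static struct pinctrl_desc foo_desc;	/* hypothetical, filled in elsewhere */

	static int foo_pinctrl_probe(struct platform_device *pdev)
	{
		struct pinctrl_dev *pctl;
		int ret;

		ret = devm_pinctrl_register_and_init(&pdev->dev, &foo_desc,
						     NULL, &pctl);
		if (ret)
			return ret;

		/* ...finish hardware setup; no hog pins are claimed yet... */

		return pinctrl_enable(pctl);
	}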
index a7ace9e1ad81f24f571d34ca155ba5d03c5f9a36..74bd90dfd7b1650acde5dc19f369b2cfc897362b 100644 (file)
@@ -790,7 +790,7 @@ int imx_pinctrl_probe(struct platform_device *pdev,
 
        dev_info(&pdev->dev, "initialized IMX pinctrl driver\n");
 
-       return 0;
+       return pinctrl_enable(ipctl->pctl);
 
 free:
        imx_free_resources(ipctl);
index f80134e3e0b68aba9f8b94771702cdd3df83a678..9ff790174906e46962ec9b9fa00f4f04de14aa18 100644 (file)
@@ -13,6 +13,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/dmi.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -1524,10 +1525,31 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
+/*
+ * Certain machines seem to hardcode Linux IRQ numbers in their ACPI
+ * tables. Since we leave GPIOs that are not capable of generating
+ * interrupts out of the irqdomain, the numbering will be different and
+ * cause devices using the hardcoded IRQ numbers to fail. In order not to
+ * break such machines, we only mask pins from the irqdomain if the
+ * machine is not listed below.
+ */
+static const struct dmi_system_id chv_no_valid_mask[] = {
+       {
+               /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */
+               .ident = "Acer Chromebook (CYAN)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"),
+                       DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"),
+               },
+       },
+       {}
+};
+
 static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
 {
        const struct chv_gpio_pinrange *range;
        struct gpio_chip *chip = &pctrl->chip;
+       bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
        int ret, i, offset;
 
        *chip = chv_gpio_chip;
@@ -1536,7 +1558,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
        chip->label = dev_name(pctrl->dev);
        chip->parent = pctrl->dev;
        chip->base = -1;
-       chip->irq_need_valid_mask = true;
+       chip->irq_need_valid_mask = need_valid_mask;
 
        ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
        if (ret) {
@@ -1567,7 +1589,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
                intsel &= CHV_PADCTRL0_INTSEL_MASK;
                intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
 
-               if (intsel >= pctrl->community->nirqs)
+               if (need_valid_mask && intsel >= pctrl->community->nirqs)
                        clear_bit(i, chip->irq_valid_mask);
        }
 
index 8b2d45e85baea612451812fb7cc82b88b75d8d9c..9c267dcda094a224888c69ed5aae8e143fc2a63a 100644 (file)
@@ -1781,7 +1781,7 @@ static int pcs_probe(struct platform_device *pdev)
        dev_info(pcs->dev, "%i pins at pa %p size %u\n",
                 pcs->desc.npins, pcs->base, pcs->size);
 
-       return 0;
+       return pinctrl_enable(pcs->pctl);
 
 free:
        pcs_free_resources(pcs);
index f9b49967f512b52cec5447ae0baee9e146745d1b..63e51b56a22a94c528489a8d18aaf38795f0072e 100644 (file)
@@ -1468,82 +1468,82 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
 
 /* pin banks of exynos5433 pin-controller - ALIVE */
 static const struct samsung_pin_bank_data exynos5433_pin_banks0[] __initconst = {
-       EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
-       EXYNOS_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
-       EXYNOS_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
-       EXYNOS_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
-       EXYNOS_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
-       EXYNOS_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x020, "gpa1", 0x04),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x040, "gpa2", 0x08),
+       EXYNOS5433_PIN_BANK_EINTW(8, 0x060, "gpa3", 0x0c),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x020, "gpf1", 0x1004, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x040, "gpf2", 0x1008, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(4, 0x060, "gpf3", 0x100c, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x080, "gpf4", 0x1010, 1),
+       EXYNOS5433_PIN_BANK_EINTW_EXT(8, 0x0a0, "gpf5", 0x1014, 1),
 };
 
 /* pin banks of exynos5433 pin-controller - AUD */
 static const struct samsung_pin_bank_data exynos5433_pin_banks1[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
-       EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
+       EXYNOS5433_PIN_BANK_EINTG(7, 0x000, "gpz0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
 };
 
 /* pin banks of exynos5433 pin-controller - CPIF */
 static const struct samsung_pin_bank_data exynos5433_pin_banks2[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x000, "gpv6", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - eSE */
 static const struct samsung_pin_bank_data exynos5433_pin_banks3[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj2", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - FINGER */
 static const struct samsung_pin_bank_data exynos5433_pin_banks4[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(4, 0x000, "gpd5", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - FSYS */
 static const struct samsung_pin_bank_data exynos5433_pin_banks5[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
-       EXYNOS_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
-       EXYNOS_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
-       EXYNOS_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
-       EXYNOS_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
-       EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gph1", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(7, 0x020, "gpr4", 0x04),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x040, "gpr0", 0x08),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x060, "gpr1", 0x0c),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x080, "gpr2", 0x10),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpr3", 0x14),
 };
 
 /* pin banks of exynos5433 pin-controller - IMEM */
 static const struct samsung_pin_bank_data exynos5433_pin_banks6[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x000, "gpf0", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - NFC */
 static const struct samsung_pin_bank_data exynos5433_pin_banks7[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj0", 0x00),
 };
 
 /* pin banks of exynos5433 pin-controller - PERIC */
 static const struct samsung_pin_bank_data exynos5433_pin_banks8[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
-       EXYNOS_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
-       EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
-       EXYNOS_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
-       EXYNOS_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
-       EXYNOS_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
-       EXYNOS_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
-       EXYNOS_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
-       EXYNOS_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
-       EXYNOS_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
-       EXYNOS_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
-       EXYNOS_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
-       EXYNOS_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
-       EXYNOS_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
-       EXYNOS_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
-       EXYNOS_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
-       EXYNOS_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x000, "gpv7", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x020, "gpb0", 0x04),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x040, "gpc0", 0x08),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x060, "gpc1", 0x0c),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x080, "gpc2", 0x10),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x0a0, "gpc3", 0x14),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x0c0, "gpg0", 0x18),
+       EXYNOS5433_PIN_BANK_EINTG(4, 0x0e0, "gpd0", 0x1c),
+       EXYNOS5433_PIN_BANK_EINTG(6, 0x100, "gpd1", 0x20),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x120, "gpd2", 0x24),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x140, "gpd4", 0x28),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x160, "gpd8", 0x2c),
+       EXYNOS5433_PIN_BANK_EINTG(7, 0x180, "gpd6", 0x30),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x1a0, "gpd7", 0x34),
+       EXYNOS5433_PIN_BANK_EINTG(5, 0x1c0, "gpg1", 0x38),
+       EXYNOS5433_PIN_BANK_EINTG(2, 0x1e0, "gpg2", 0x3c),
+       EXYNOS5433_PIN_BANK_EINTG(8, 0x200, "gpg3", 0x40),
 };
 
 /* pin banks of exynos5433 pin-controller - TOUCH */
 static const struct samsung_pin_bank_data exynos5433_pin_banks9[] __initconst = {
-       EXYNOS_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
+       EXYNOS5433_PIN_BANK_EINTG(3, 0x000, "gpj1", 0x00),
 };
 
 /*
index a473092fb8d2362f11a1a48beb7f545b7c9f25ce..cd046eb7d705682fae1293abf432b9d04465328f 100644 (file)
                .name           = id                    \
        }
 
-#define EXYNOS_PIN_BANK_EINTW_EXT(pins, reg, id, offs, pctl_idx) \
-       {                                               \
-               .type           = &bank_type_alive,     \
-               .pctl_offset    = reg,                  \
-               .nr_pins        = pins,                 \
-               .eint_type      = EINT_TYPE_WKUP,       \
-               .eint_offset    = offs,                 \
-               .name           = id,                   \
-               .pctl_res_idx   = pctl_idx,             \
-       }                                               \
-
 #define EXYNOS5433_PIN_BANK_EINTG(pins, reg, id, offs)         \
        {                                                       \
                .type           = &exynos5433_bank_type_off,    \
index 08150a321be6f1e2df11fce59745430b1ead1213..a70157f0acf4ecf7b55fb89a8bb346b537bb1903 100644 (file)
@@ -816,6 +816,13 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
        pmx->pctl_desc.pins = pmx->pins;
        pmx->pctl_desc.npins = pfc->info->nr_pins;
 
-       return devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx,
-                                             &pmx->pctl);
+       ret = devm_pinctrl_register_and_init(pfc->dev, &pmx->pctl_desc, pmx,
+                                            &pmx->pctl);
+       if (ret) {
+               dev_err(pfc->dev, "could not register: %i\n", ret);
+
+               return ret;
+       }
+
+       return pinctrl_enable(pmx->pctl);
 }
index 717e3404900ca414325086268e55d7ee9f5eb42f..362c50918c13a6252e7c363c2474a26a437605c8 100644 (file)
@@ -893,6 +893,8 @@ static int ti_iodelay_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, iod);
 
+       return pinctrl_enable(iod->pctl);
+
 exit_out:
        of_node_put(np);
        return ret;
index 053088b9b66edb4c50c3b47c5e942b6e2e343c00..c1527cb645be8d0c038742559e270d1914ac8726 100644 (file)
@@ -36,6 +36,14 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
        .clk_rate = 19200000,
        .npwm = 4,
        .base_unit_bits = 22,
+       .bypass = true,
+};
+
+/* Tangier */
+static const struct pwm_lpss_boardinfo pwm_lpss_tng_info = {
+       .clk_rate = 19200000,
+       .npwm = 4,
+       .base_unit_bits = 22,
 };
 
 static int pwm_lpss_probe_pci(struct pci_dev *pdev,
@@ -97,7 +105,7 @@ static const struct pci_device_id pwm_lpss_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x0ac8), (unsigned long)&pwm_lpss_bxt_info},
        { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
        { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
-       { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_bxt_info},
+       { PCI_VDEVICE(INTEL, 0x11a5), (unsigned long)&pwm_lpss_tng_info},
        { PCI_VDEVICE(INTEL, 0x1ac8), (unsigned long)&pwm_lpss_bxt_info},
        { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
        { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
index b22b6fdadb9ae14e0e55f28ecbfcece7e971eb1f..5d6ed1507d29284f2ba28f2cc781f4b797067f01 100644 (file)
@@ -37,6 +37,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bxt_info = {
        .clk_rate = 19200000,
        .npwm = 4,
        .base_unit_bits = 22,
+       .bypass = true,
 };
 
 static int pwm_lpss_probe_platform(struct platform_device *pdev)
index 689d2c1cbead80f5b6540dac13f1f0e56edfecfa..8db0d40ccacde84a61d292936f6bbdeeed7ac358 100644 (file)
@@ -57,7 +57,7 @@ static inline void pwm_lpss_write(const struct pwm_device *pwm, u32 value)
        writel(value, lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM);
 }
 
-static int pwm_lpss_update(struct pwm_device *pwm)
+static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
 {
        struct pwm_lpss_chip *lpwm = to_lpwm(pwm->chip);
        const void __iomem *addr = lpwm->regs + pwm->hwpwm * PWM_SIZE + PWM;
@@ -65,8 +65,6 @@ static int pwm_lpss_update(struct pwm_device *pwm)
        u32 val;
        int err;
 
-       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
-
        /*
         * PWM Configuration register has SW_UPDATE bit that is set when a new
         * configuration is written to the register. The bit is automatically
@@ -122,6 +120,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
        pwm_lpss_write(pwm, ctrl);
 }
 
+static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
+{
+       if (cond)
+               pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
+}
+
 static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                          struct pwm_state *state)
 {
@@ -137,18 +141,21 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                                return ret;
                        }
                        pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
-                       ret = pwm_lpss_update(pwm);
+                       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
+                       pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
+                       ret = pwm_lpss_wait_for_update(pwm);
                        if (ret) {
                                pm_runtime_put(chip->dev);
                                return ret;
                        }
-                       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_ENABLE);
+                       pwm_lpss_cond_enable(pwm, lpwm->info->bypass == true);
                } else {
                        ret = pwm_lpss_is_updating(pwm);
                        if (ret)
                                return ret;
                        pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
-                       return pwm_lpss_update(pwm);
+                       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
+                       return pwm_lpss_wait_for_update(pwm);
                }
        } else if (pwm_is_enabled(pwm)) {
                pwm_lpss_write(pwm, pwm_lpss_read(pwm) & ~PWM_ENABLE);
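
A userspace sketch of the enable ordering introduced by the new ->bypass flag (printf stands in for the register writes): non-bypass parts must be enabled before waiting for SW_UPDATE to clear, bypass parts only after it has latched:

	#include <stdbool.h>
	#include <stdio.h>

	static void apply_enabled(bool bypass)
	{
		printf("write SW_UPDATE\n");
		if (!bypass)
			printf("set PWM_ENABLE (before the wait)\n");
		printf("wait for SW_UPDATE to clear\n");
		if (bypass)
			printf("set PWM_ENABLE (after the wait)\n");
	}

	int main(void)
	{
		apply_enabled(false);	/* Baytrail/Braswell-like parts */
		apply_enabled(true);	/* Broxton-like parts */
		return 0;
	}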
index c94cd7c2695da72181830f35d52c2ce12c6afc18..98306bb02cfe71c0775eb430e7cf623fdc431889 100644 (file)
@@ -22,6 +22,7 @@ struct pwm_lpss_boardinfo {
        unsigned long clk_rate;
        unsigned int npwm;
        unsigned long base_unit_bits;
+       bool bypass;
 };
 
 struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
index ef89df1f7336c77a70c3a4de42b6d71ec96162ba..744d56197286a45eb148de437d31a3c3ca8fbd1a 100644 (file)
@@ -191,6 +191,28 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        return 0;
 }
 
+static int rockchip_pwm_enable(struct pwm_chip *chip,
+                        struct pwm_device *pwm,
+                        bool enable,
+                        enum pwm_polarity polarity)
+{
+       struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+       int ret;
+
+       if (enable) {
+               ret = clk_enable(pc->clk);
+               if (ret)
+                       return ret;
+       }
+
+       pc->data->set_enable(chip, pwm, enable, polarity);
+
+       if (!enable)
+               clk_disable(pc->clk);
+
+       return 0;
+}
+
 static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                              struct pwm_state *state)
 {
@@ -207,22 +229,26 @@ static int rockchip_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                return ret;
 
        if (state->polarity != curstate.polarity && enabled) {
-               pc->data->set_enable(chip, pwm, false, state->polarity);
+               ret = rockchip_pwm_enable(chip, pwm, false, state->polarity);
+               if (ret)
+                       goto out;
                enabled = false;
        }
 
        ret = rockchip_pwm_config(chip, pwm, state->duty_cycle, state->period);
        if (ret) {
                if (enabled != curstate.enabled)
-                       pc->data->set_enable(chip, pwm, !enabled,
-                                            state->polarity);
-
+                       rockchip_pwm_enable(chip, pwm, !enabled,
+                                     state->polarity);
                goto out;
        }
 
-       if (state->enabled != enabled)
-               pc->data->set_enable(chip, pwm, state->enabled,
-                                    state->polarity);
+       if (state->enabled != enabled) {
+               ret = rockchip_pwm_enable(chip, pwm, state->enabled,
+                                   state->polarity);
+               if (ret)
+                       goto out;
+       }
 
        /*
         * Update the state with the real hardware, which can differ a bit
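
A userspace sketch of the wrapper's contract (the clk_* functions here are fake stubs, not the kernel API): the clock must be running while the enable bit is flipped, and the reference is dropped on the disable path so enable/disable stay balanced:

	#include <stdio.h>

	static int clk_enable(void)   { printf("clk on\n");  return 0; }
	static void clk_disable(void) { printf("clk off\n"); }

	static int pwm_enable(int enable)
	{
		int ret;

		if (enable) {
			ret = clk_enable();	/* may fail; propagate */
			if (ret)
				return ret;
		}

		printf("set_enable(%d)\n", enable);

		if (!enable)
			clk_disable();		/* drop the reference again */

		return 0;
	}

	int main(void)
	{
		pwm_enable(1);
		pwm_enable(0);
		return 0;
	}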
index f1e5e65388bb525b186f9257794afcf3564d2c3f..cd739d2fa160387c91b08e1b4d4f470394599c57 100644 (file)
@@ -275,7 +275,7 @@ int reset_control_status(struct reset_control *rstc)
 }
 EXPORT_SYMBOL_GPL(reset_control_status);
 
-static struct reset_control *__reset_control_get(
+static struct reset_control *__reset_control_get_internal(
                                struct reset_controller_dev *rcdev,
                                unsigned int index, bool shared)
 {
@@ -308,7 +308,7 @@ static struct reset_control *__reset_control_get(
        return rstc;
 }
 
-static void __reset_control_put(struct reset_control *rstc)
+static void __reset_control_put_internal(struct reset_control *rstc)
 {
        lockdep_assert_held(&reset_list_mutex);
 
@@ -377,7 +377,7 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
        }
 
        /* reset_list_mutex also protects the rcdev's reset_control list */
-       rstc = __reset_control_get(rcdev, rstc_id, shared);
+       rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
 
        mutex_unlock(&reset_list_mutex);
 
@@ -385,6 +385,17 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
 }
 EXPORT_SYMBOL_GPL(__of_reset_control_get);
 
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+                                         int index, bool shared, bool optional)
+{
+       if (dev->of_node)
+               return __of_reset_control_get(dev->of_node, id, index, shared,
+                                             optional);
+
+       return optional ? NULL : ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(__reset_control_get);
+
 /**
  * reset_control_put - free the reset controller
  * @rstc: reset controller
@@ -396,7 +407,7 @@ void reset_control_put(struct reset_control *rstc)
                return;
 
        mutex_lock(&reset_list_mutex);
-       __reset_control_put(rstc);
+       __reset_control_put_internal(rstc);
        mutex_unlock(&reset_list_mutex);
 }
 EXPORT_SYMBOL_GPL(reset_control_put);
@@ -417,8 +428,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
        if (!ptr)
                return ERR_PTR(-ENOMEM);
 
-       rstc = __of_reset_control_get(dev ? dev->of_node : NULL,
-                                     id, index, shared, optional);
+       rstc = __reset_control_get(dev, id, index, shared, optional);
        if (!IS_ERR(rstc)) {
                *ptr = rstc;
                devres_add(dev, ptr);
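
A simplified sketch (stand-in types, not the kernel's) of the new __reset_control_get() dispatch: DT devices go through the OF lookup, while non-DT devices get NULL for an optional request and an error for a required one:

	#include <stddef.h>
	#include <stdio.h>

	struct device { void *of_node; };

	static const char *get_reset(struct device *dev, int optional)
	{
		if (dev->of_node)
			return "via __of_reset_control_get()";
		return optional ? "NULL" : "ERR_PTR(-EINVAL)";
	}

	int main(void)
	{
		struct device dt = { .of_node = (void *)1 };
		struct device plain = { .of_node = NULL };

		printf("DT, required:     %s\n", get_reset(&dt, 0));
		printf("non-DT, required: %s\n", get_reset(&plain, 0));
		printf("non-DT, optional: %s\n", get_reset(&plain, 1));
		return 0;
	}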
index 40f1136f55688981d70767bcf4eb55524a603cc6..058db724b5a28a8390856aeb51da30a639f3cf6b 100644 (file)
@@ -572,6 +572,12 @@ int pkey_sec2protkey(u16 cardnr, u16 domain,
                rc = -EIO;
                goto out;
        }
+       if (prepcblk->ccp_rscode != 0) {
+               DEBUG_WARN(
+                       "pkey_sec2protkey unwrap secure key warning, card response %d/%d\n",
+                       (int) prepcblk->ccp_rtcode,
+                       (int) prepcblk->ccp_rscode);
+       }
 
        /* process response cprb param block */
        prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
@@ -761,9 +767,10 @@ out:
 }
 
 /*
- * Fetch just the mkvp value via query_crypto_facility from adapter.
+ * Fetch the current and old mkvp values via
+ * query_crypto_facility from adapter.
  */
-static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
+static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2])
 {
        int rc, found = 0;
        size_t rlen, vlen;
@@ -779,9 +786,10 @@ static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp)
        rc = query_crypto_facility(cardnr, domain, "STATICSA",
                                   rarray, &rlen, varray, &vlen);
        if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
-               if (rarray[64] == '2') {
+               if (rarray[8*8] == '2') {
                        /* current master key state is valid */
-                       *mkvp = *((u64 *)(varray + 184));
+                       mkvp[0] = *((u64 *)(varray + 184));
+                       mkvp[1] = *((u64 *)(varray + 172));
                        found = 1;
                }
        }
@@ -796,14 +804,14 @@ struct mkvp_info {
        struct list_head list;
        u16 cardnr;
        u16 domain;
-       u64 mkvp;
+       u64 mkvp[2];
 };
 
 /* a list with mkvp_info entries */
 static LIST_HEAD(mkvp_list);
 static DEFINE_SPINLOCK(mkvp_list_lock);
 
-static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
+static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2])
 {
        int rc = -ENOENT;
        struct mkvp_info *ptr;
@@ -812,7 +820,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
        list_for_each_entry(ptr, &mkvp_list, list) {
                if (ptr->cardnr == cardnr &&
                    ptr->domain == domain) {
-                       *mkvp = ptr->mkvp;
+                       memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64));
                        rc = 0;
                        break;
                }
@@ -822,7 +830,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp)
        return rc;
 }
 
-static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
+static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2])
 {
        int found = 0;
        struct mkvp_info *ptr;
@@ -831,7 +839,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
        list_for_each_entry(ptr, &mkvp_list, list) {
                if (ptr->cardnr == cardnr &&
                    ptr->domain == domain) {
-                       ptr->mkvp = mkvp;
+                       memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
                        found = 1;
                        break;
                }
@@ -844,7 +852,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp)
                }
                ptr->cardnr = cardnr;
                ptr->domain = domain;
-               ptr->mkvp = mkvp;
+               memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
                list_add(&ptr->list, &mkvp_list);
        }
        spin_unlock_bh(&mkvp_list_lock);
@@ -888,8 +896,8 @@ int pkey_findcard(const struct pkey_seckey *seckey,
        struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
        struct zcrypt_device_matrix *device_matrix;
        u16 card, dom;
-       u64 mkvp;
-       int i, rc;
+       u64 mkvp[2];
+       int i, rc, oi = -1;
 
        /* mkvp must not be zero */
        if (t->mkvp == 0)
@@ -910,14 +918,14 @@ int pkey_findcard(const struct pkey_seckey *seckey,
                    device_matrix->device[i].functions & 0x04) {
                        /* an enabled CCA Coprocessor card */
                        /* try cached mkvp */
-                       if (mkvp_cache_fetch(card, dom, &mkvp) == 0 &&
-                           t->mkvp == mkvp) {
+                       if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
+                           t->mkvp == mkvp[0]) {
                                if (!verify)
                                        break;
                                /* verify: fetch mkvp from adapter */
-                               if (fetch_mkvp(card, dom, &mkvp) == 0) {
+                               if (fetch_mkvp(card, dom, mkvp) == 0) {
                                        mkvp_cache_update(card, dom, mkvp);
-                                       if (t->mkvp == mkvp)
+                                       if (t->mkvp == mkvp[0])
                                                break;
                                }
                        }
@@ -936,14 +944,21 @@ int pkey_findcard(const struct pkey_seckey *seckey,
                        card = AP_QID_CARD(device_matrix->device[i].qid);
                        dom = AP_QID_QUEUE(device_matrix->device[i].qid);
                        /* fresh fetch mkvp from adapter */
-                       if (fetch_mkvp(card, dom, &mkvp) == 0) {
+                       if (fetch_mkvp(card, dom, mkvp) == 0) {
                                mkvp_cache_update(card, dom, mkvp);
-                               if (t->mkvp == mkvp)
+                               if (t->mkvp == mkvp[0])
                                        break;
+                               if (t->mkvp == mkvp[1] && oi < 0)
+                                       oi = i;
                        }
                }
+               if (i >= MAX_ZDEV_ENTRIES && oi >= 0) {
+                       /* old mkvp matched, use this card then */
+                       card = AP_QID_CARD(device_matrix->device[oi].qid);
+                       dom = AP_QID_QUEUE(device_matrix->device[oi].qid);
+               }
        }
-       if (i < MAX_ZDEV_ENTRIES) {
+       if (i < MAX_ZDEV_ENTRIES || oi >= 0) {
                if (pcardnr)
                        *pcardnr = card;
                if (pdomain)
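
A userspace sketch of the new matching policy above (the values are made up): prefer a card whose current master key verification pattern matches the secure key, but remember the first card whose old pattern matches as a fallback:

	#include <stdint.h>
	#include <stdio.h>

	static int find_card(const uint64_t mkvps[][2], int n, uint64_t want)
	{
		int i, oi = -1;

		for (i = 0; i < n; i++) {
			if (mkvps[i][0] == want)
				return i;	/* current MK matches */
			if (mkvps[i][1] == want && oi < 0)
				oi = i;		/* old MK matches, keep as fallback */
		}
		return oi;			/* -1 if nothing matched */
	}

	int main(void)
	{
		const uint64_t cards[][2] = { { 0x11, 0x22 }, { 0x33, 0x44 } };

		printf("card for mkvp 0x33: %d\n", find_card(cards, 2, 0x33));
		printf("card for mkvp 0x22: %d\n", find_card(cards, 2, 0x22));
		return 0;
	}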
index e7addea8741b799066644052cba4e9a99f3a3335..d9561e39c3b237078a2c1fe888042e1321be7883 100644 (file)
@@ -961,7 +961,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
 int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
 int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
 int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
-int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
+int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb,
+                        int extra_elems, int data_offset);
 int qeth_get_elements_for_frags(struct sk_buff *);
 int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
                        struct sk_buff *, struct qeth_hdr *, int, int, int);
index 315d8a2db7c066a0b8eb3739021edc9fde698c19..9a5f99ccb122bab3918f136f403bdec04b98d2f4 100644 (file)
@@ -3837,6 +3837,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
  * @card:                      qeth card structure, to check max. elems.
  * @skb:                       SKB address
  * @extra_elems:               extra elems needed, to check against max.
+ * @data_offset:               range starts at skb->data + data_offset
  *
  * Returns the number of pages, and thus QDIO buffer elements, needed to cover
  * skb data, including linear part and fragments. Checks if the result plus
@@ -3844,10 +3845,10 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
  * Note: extra_elems is not included in the returned result.
  */
 int qeth_get_elements_no(struct qeth_card *card,
-                    struct sk_buff *skb, int extra_elems)
+                    struct sk_buff *skb, int extra_elems, int data_offset)
 {
        int elements = qeth_get_elements_for_range(
-                               (addr_t)skb->data,
+                               (addr_t)skb->data + data_offset,
                                (addr_t)skb->data + skb_headlen(skb)) +
                        qeth_get_elements_for_frags(skb);
 
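
A userspace sketch of why data_offset matters for the element count (PAGE_SIZE and the addresses are illustrative): the count is the number of pages the byte range touches, so skipping the offset bytes at the front can drop it by one:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	static unsigned long elements_for_range(uintptr_t start, uintptr_t end)
	{
		/* pages touched by [start, end) */
		return (end - 1) / PAGE_SIZE - start / PAGE_SIZE + 1;
	}

	int main(void)
	{
		uintptr_t data = 4000;		/* hypothetical skb->data */
		unsigned long len = 200;

		printf("no offset: %lu\n", elements_for_range(data, data + len));
		printf("offset 96: %lu\n",
		       elements_for_range(data + 96, data + len));
		return 0;
	}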
index bea483307618996240cb90cc3382950ab8b38354..af4e6a639fecf20fc5d4c15b5a398ae4b460c4d5 100644 (file)
@@ -849,7 +849,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * chaining we can not send long frag lists
         */
        if ((card->info.type != QETH_CARD_TYPE_IQD) &&
-           !qeth_get_elements_no(card, new_skb, 0)) {
+           !qeth_get_elements_no(card, new_skb, 0, 0)) {
                int lin_rc = skb_linearize(new_skb);
 
                if (card->options.performance_stats) {
@@ -894,7 +894,8 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
        }
 
-       elements = qeth_get_elements_no(card, new_skb, elements_needed);
+       elements = qeth_get_elements_no(card, new_skb, elements_needed,
+                                       (data_offset > 0) ? data_offset : 0);
        if (!elements) {
                if (data_offset >= 0)
                        kmem_cache_free(qeth_core_header_cache, hdr);
index 06d0addcc058dcccd4333a8f3edcdd96fccb2de4..653f0fb76573ab66c0178615ebcd81a5fe04c16e 100644 (file)
@@ -2609,17 +2609,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
        char daddr[16];
        struct af_iucv_trans_hdr *iucv_hdr;
 
-       skb_pull(skb, 14);
-       card->dev->header_ops->create(skb, card->dev, 0,
-                                     card->dev->dev_addr, card->dev->dev_addr,
-                                     card->dev->addr_len);
-       skb_pull(skb, 14);
-       iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
        memset(hdr, 0, sizeof(struct qeth_hdr));
        hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
        hdr->hdr.l3.ext_flags = 0;
-       hdr->hdr.l3.length = skb->len;
+       hdr->hdr.l3.length = skb->len - ETH_HLEN;
        hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+
+       iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
        memset(daddr, 0, sizeof(daddr));
        daddr[0] = 0xfe;
        daddr[1] = 0x80;
@@ -2823,10 +2819,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if ((card->info.type == QETH_CARD_TYPE_IQD) &&
            !skb_is_nonlinear(skb)) {
                new_skb = skb;
-               if (new_skb->protocol == ETH_P_AF_IUCV)
-                       data_offset = 0;
-               else
-                       data_offset = ETH_HLEN;
+               data_offset = ETH_HLEN;
                hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
                if (!hdr)
                        goto tx_drop;
@@ -2867,7 +2860,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        if ((card->info.type != QETH_CARD_TYPE_IQD) &&
            ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
-            (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) {
+            (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
                int lin_rc = skb_linearize(new_skb);
 
                if (card->options.performance_stats) {
@@ -2909,7 +2902,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        elements = use_tso ?
                   qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
-                  qeth_get_elements_no(card, new_skb, hdr_elements);
+                  qeth_get_elements_no(card, new_skb, hdr_elements,
+                                       (data_offset > 0) ? data_offset : 0);
        if (!elements) {
                if (data_offset >= 0)
                        kmem_cache_free(qeth_core_header_cache, hdr);
index d036a806f31c47917e2a35cac4a2666bfffdb3eb..d281492009fb4457131b5ef87b04b58d8f3d94c3 100644 (file)
@@ -1690,9 +1690,6 @@ struct aac_dev
 #define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
        (dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
 
-#define aac_adapter_check_health(dev) \
-       (dev)->a_ops.adapter_check_health(dev)
-
 #define aac_adapter_restart(dev, bled, reset_type) \
        ((dev)->a_ops.adapter_restart(dev, bled, reset_type))
 
@@ -2615,6 +2612,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
        return capacity;
 }
 
+static inline int aac_adapter_check_health(struct aac_dev *dev)
+{
+       if (unlikely(pci_channel_offline(dev->pdev)))
+               return -1;
+
+       return (dev)->a_ops.adapter_check_health(dev);
+}
+
 /* SCp.phase values */
 #define AAC_OWNER_MIDLEVEL     0x101
 #define AAC_OWNER_LOWLEVEL     0x102
index c8172f16cf33cd6454ae571c3310f6451afa8b79..1f4918355fdb00a9abab06da1e154b710fb86b0e 100644 (file)
@@ -1873,7 +1873,8 @@ int aac_check_health(struct aac_dev * aac)
        spin_unlock_irqrestore(&aac->fib_lock, flagv);
 
        if (BlinkLED < 0) {
-               printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
+               printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
+                               aac->name, BlinkLED);
                goto out;
        }
 
index b29afafc28857e95bffd8946598748907ab77b17..5d5e272fd815a3ed076eb52e2df47d3ff765fd3a 100644 (file)
@@ -6293,7 +6293,12 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
                break;
        case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
        case IPR_IOASA_IR_DUAL_IOA_DISABLED:
-               scsi_cmd->result |= (DID_PASSTHROUGH << 16);
+               /*
+                * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
+                * so that the SCSI mid-layer and upper layers can handle
+                * it accordingly.
+                */
+               if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
+                       scsi_cmd->result |= (DID_PASSTHROUGH << 16);
                break;
        case IPR_IOASC_BUS_WAS_RESET:
        case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
index ed58b9104f58b8894b3dfd924c6facfaba29be67..e10b91cc3c62388ccee0db756f1371d7c8037ff9 100644 (file)
@@ -99,7 +99,8 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
                qedf_set_vlan_id(qedf, vid);
 
                /* Inform waiter that it's ok to call fcoe_ctlr_link up() */
-               complete(&qedf->fipvlan_compl);
+               if (!completion_done(&qedf->fipvlan_compl))
+                       complete(&qedf->fipvlan_compl);
        }
 }
 
index 8e2a160490e66a747e75bf9c0c1a149e34d474a3..cceddd995a4bf46605ae94143cfcff9df693fb83 100644 (file)
@@ -2803,6 +2803,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
                atomic_set(&qedf->num_offloads, 0);
                qedf->stop_io_on_error = false;
                pci_set_drvdata(pdev, qedf);
+               init_completion(&qedf->fipvlan_compl);
 
                QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
                   "QLogic FastLinQ FCoE Module qedf %s, "
index 3e7011757c8267022744e19778f49cc4db286822..83d61d2142e98d9c48ad3a6dcc5acf4183e293ff 100644 (file)
@@ -1160,8 +1160,13 @@ static inline
 uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha)
 {
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+       struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
 
-       return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT);
+       if (IS_P3P_TYPE(ha))
+               return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT);
+       else
+               return ((RD_REG_DWORD(&reg->host_status)) ==
+                       ISP_REG_DISCONNECT);
 }
 
 /**************************************************************************
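
A sketch of the presence-check idiom behind the fix (assuming, as in qla2xxx, that ISP_REG_DISCONNECT is the all-ones pattern a surprise-removed PCI device reads back): a single register read doubles as a liveness test, and P3P (82xx) parts need a different register than ISP24xx ones:

	#include <stdint.h>
	#include <stdio.h>

	#define ISP_REG_DISCONNECT 0xffffffffU	/* assumed all-ones readback */

	static int reg_stat(uint32_t readback)
	{
		return readback == ISP_REG_DISCONNECT;
	}

	int main(void)
	{
		printf("healthy: %d\n", reg_stat(0x00000040));
		printf("gone:    %d\n", reg_stat(0xffffffff));
		return 0;
	}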
index 19125d72f322c934d84d2d71c748ca9c08418846..e5a2d590a104d5cf7e63c1a7acfb94d7639a51da 100644 (file)
@@ -496,7 +496,7 @@ static void scsi_run_queue(struct request_queue *q)
                scsi_starved_list_run(sdev->host);
 
        if (q->mq_ops)
-               blk_mq_start_stopped_hw_queues(q, false);
+               blk_mq_run_hw_queues(q, false);
        else
                blk_run_queue(q);
 }
@@ -667,7 +667,7 @@ static bool scsi_end_request(struct request *req, int error,
                    !list_empty(&sdev->host->starved_list))
                        kblockd_schedule_work(&sdev->requeue_work);
                else
-                       blk_mq_start_stopped_hw_queues(q, true);
+                       blk_mq_run_hw_queues(q, true);
        } else {
                unsigned long flags;
 
@@ -1974,7 +1974,7 @@ out:
        case BLK_MQ_RQ_QUEUE_BUSY:
                if (atomic_read(&sdev->device_busy) == 0 &&
                    !scsi_device_blocked(sdev))
-                       blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
+                       blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
                break;
        case BLK_MQ_RQ_QUEUE_ERROR:
                /*
index fcfeddc79331bbf32a71e296cf606513ae5b3d78..35ad5e8a31ab3b7214cdfb5b57be90c3c7f9d306 100644 (file)
@@ -2102,6 +2102,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
 
 #define READ_CAPACITY_RETRIES_ON_RESET 10
 
+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
+       u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+       if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+               return false;
+
+       return true;
+}
+
 static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
                                                unsigned char *buffer)
 {
@@ -2167,7 +2183,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
                return -ENODEV;
        }
 
-       if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+       if (!sd_addressable_capacity(lba, sector_size)) {
                sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
                        "kernel compiled with support for large block "
                        "devices.\n");
@@ -2256,7 +2272,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
                return sector_size;
        }
 
-       if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+       if (!sd_addressable_capacity(lba, sector_size)) {
                sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
                        "kernel compiled with support for large block "
                        "devices.\n");
@@ -2956,7 +2972,8 @@ static int sd_revalidate_disk(struct gendisk *disk)
                q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
                rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
        } else
-               rw_max = BLK_DEF_MAX_SECTORS;
+               rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
+                                     (sector_t)BLK_DEF_MAX_SECTORS);
 
        /* Combine with controller limits */
        q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
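
A userspace restatement of sd_addressable_capacity() (illustrative, with ilog2 open-coded): with a 32-bit sector_t the last 512-byte sector index must fit in u32, which the old "lba >= 0xffffffff" test missed for block sizes larger than 512 bytes:

	#include <stdint.h>
	#include <stdio.h>

	static int addressable(uint64_t lba, unsigned int sector_size,
			       unsigned int sector_t_bytes)
	{
		unsigned int shift = 0;
		uint64_t last_sector;

		while ((1u << (shift + 1)) <= sector_size)
			shift++;		/* shift = ilog2(sector_size) */

		/* device blocks -> 512-byte sectors */
		last_sector = (lba + 1) << (shift - 9);
		if (sector_t_bytes == 4 && last_sector > UINT32_MAX)
			return 0;
		return 1;
	}

	int main(void)
	{
		/* A 4K-sector disk with 2^31 blocks has 2^34 512-byte
		 * sectors: too big for a 32-bit sector_t even though the
		 * lba itself fits in 32 bits. */
		printf("32-bit sector_t: %d\n",
		       addressable((1ULL << 31) - 1, 4096, 4));
		printf("64-bit sector_t: %d\n",
		       addressable((1ULL << 31) - 1, 4096, 8));
		return 0;
	}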
index 0b29b9329b1c2f5c8207188884498f314e0a414a..a8f630213a1a0fce250e23b00a9452676389b159 100644 (file)
@@ -836,6 +836,7 @@ static void get_capabilities(struct scsi_cd *cd)
        unsigned char *buffer;
        struct scsi_mode_data data;
        struct scsi_sense_hdr sshdr;
+       unsigned int ms_len = 128;
        int rc, n;
 
        static const char *loadmech[] =
@@ -862,10 +863,11 @@ static void get_capabilities(struct scsi_cd *cd)
        scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
 
        /* ask for mode page 0x2a */
-       rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+       rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
                             SR_TIMEOUT, 3, &data, NULL);
 
-       if (!scsi_status_is_good(rc)) {
+       if (!scsi_status_is_good(rc) || data.length > ms_len ||
+           data.header_length + data.block_descriptor_length > data.length) {
                /* failed, drive doesn't have capabilities mode page */
                cd->cdi.speed = 1;
                cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
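
A sketch of the added sanity check (simplified types): a MODE SENSE reply must not claim more data than was requested, and its header plus block-descriptor lengths must fit inside the claimed length:

	#include <stdio.h>

	struct mode_data { int length, header_length, block_descriptor_length; };

	static int mode_reply_sane(const struct mode_data *d, int ms_len)
	{
		if (d->length > ms_len)
			return 0;	/* claims more than the buffer holds */
		if (d->header_length + d->block_descriptor_length > d->length)
			return 0;	/* internal lengths overrun the reply */
		return 1;
	}

	int main(void)
	{
		struct mode_data ok = { 40, 8, 8 }, evil = { 200, 8, 8 };

		printf("ok:   %d\n", mode_reply_sane(&ok, 128));
		printf("evil: %d\n", mode_reply_sane(&evil, 128));
		return 0;
	}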
index 7cbad0d45b9c8053dc77b7ec696966ed6318324d..6ba270e0494ddc689eb7be7a12eecc819bc1ac7b 100644 (file)
@@ -409,6 +409,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
                        ret = PTR_ERR(vmfile);
                        goto out;
                }
+               vmfile->f_mode |= FMODE_LSEEK;
                asma->file = vmfile;
        }
        get_file(asma->file);
index a91802432f2f47d1b163ba9f8e2da90dabe28e62..e3f9ed3690b7a86103472de987c03fd76becd59b 100644 (file)
@@ -485,8 +485,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *);
 
 int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
-       iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
-       return 0;
+       return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 }
 EXPORT_SYMBOL(iscsit_queue_rsp);
 
index bf40f03755ddc50697652ccde864d40df840fa0b..344e8448869c15c9e078ef708debf98cac1aae1a 100644 (file)
@@ -1398,11 +1398,10 @@ static u32 lio_sess_get_initiator_sid(
 static int lio_queue_data_in(struct se_cmd *se_cmd)
 {
        struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+       struct iscsi_conn *conn = cmd->conn;
 
        cmd->i_state = ISTATE_SEND_DATAIN;
-       cmd->conn->conn_transport->iscsit_queue_data_in(cmd->conn, cmd);
-
-       return 0;
+       return conn->conn_transport->iscsit_queue_data_in(conn, cmd);
 }
 
 static int lio_write_pending(struct se_cmd *se_cmd)
@@ -1431,16 +1430,14 @@ static int lio_write_pending_status(struct se_cmd *se_cmd)
 static int lio_queue_status(struct se_cmd *se_cmd)
 {
        struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+       struct iscsi_conn *conn = cmd->conn;
 
        cmd->i_state = ISTATE_SEND_STATUS;
 
        if (cmd->se_cmd.scsi_status || cmd->sense_reason) {
-               iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
-               return 0;
+               return iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
        }
-       cmd->conn->conn_transport->iscsit_queue_status(cmd->conn, cmd);
-
-       return 0;
+       return conn->conn_transport->iscsit_queue_status(conn, cmd);
 }
 
 static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
index e65bf78ceef3740fc1923c1b3ed446aa2996b82d..fce627628200cf9917a8212da7a17ccff3d6136c 100644 (file)
@@ -781,22 +781,6 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
        } else if (IS_TYPE_NUMBER(param)) {
                if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
                        SET_PSTATE_REPLY_OPTIONAL(param);
-               /*
-                * The GlobalSAN iSCSI Initiator for MacOSX does
-                * not respond to MaxBurstLength, FirstBurstLength,
-                * DefaultTime2Wait or DefaultTime2Retain parameter keys.
-                * So, we set them to 'reply optional' here, and assume the
-                * the defaults from iscsi_parameters.h if the initiator
-                * is not RFC compliant and the keys are not negotiated.
-                */
-               if (!strcmp(param->name, MAXBURSTLENGTH))
-                       SET_PSTATE_REPLY_OPTIONAL(param);
-               if (!strcmp(param->name, FIRSTBURSTLENGTH))
-                       SET_PSTATE_REPLY_OPTIONAL(param);
-               if (!strcmp(param->name, DEFAULTTIME2WAIT))
-                       SET_PSTATE_REPLY_OPTIONAL(param);
-               if (!strcmp(param->name, DEFAULTTIME2RETAIN))
-                       SET_PSTATE_REPLY_OPTIONAL(param);
                /*
                 * Required for gPXE iSCSI boot client
                 */
index 5041a9c8bdcbfd9bf9eb9368e850bdb8792a6be9..7d3e2fcc26a0da82629102693a99750622afed95 100644 (file)
@@ -567,7 +567,7 @@ static void iscsit_remove_cmd_from_immediate_queue(
        }
 }
 
-void iscsit_add_cmd_to_response_queue(
+int iscsit_add_cmd_to_response_queue(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn,
        u8 state)
@@ -578,7 +578,7 @@ void iscsit_add_cmd_to_response_queue(
        if (!qr) {
                pr_err("Unable to allocate memory for"
                        " struct iscsi_queue_req\n");
-               return;
+               return -ENOMEM;
        }
        INIT_LIST_HEAD(&qr->qr_list);
        qr->cmd = cmd;
@@ -590,6 +590,7 @@ void iscsit_add_cmd_to_response_queue(
        spin_unlock_bh(&conn->response_queue_lock);
 
        wake_up(&conn->queues_wq);
+       return 0;
 }
 
 struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
@@ -737,21 +738,23 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
 {
        struct se_cmd *se_cmd = NULL;
        int rc;
+       bool op_scsi = false;
        /*
         * Determine if a struct se_cmd is associated with
         * this struct iscsi_cmd.
         */
        switch (cmd->iscsi_opcode) {
        case ISCSI_OP_SCSI_CMD:
-               se_cmd = &cmd->se_cmd;
-               __iscsit_free_cmd(cmd, true, shutdown);
+               op_scsi = true;
                /*
                 * Fallthrough
                 */
        case ISCSI_OP_SCSI_TMFUNC:
-               rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
-               if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
-                       __iscsit_free_cmd(cmd, true, shutdown);
+               se_cmd = &cmd->se_cmd;
+               __iscsit_free_cmd(cmd, op_scsi, shutdown);
+               rc = transport_generic_free_cmd(se_cmd, shutdown);
+               if (!rc && shutdown && se_cmd->se_sess) {
+                       __iscsit_free_cmd(cmd, op_scsi, shutdown);
                        target_put_sess_cmd(se_cmd);
                }
                break;
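
A userspace sketch of the void-to-int conversion above: the allocation failure in the response-queue path now surfaces as -ENOMEM to callers such as lio_queue_data_in()/lio_queue_status() instead of being silently dropped:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	static int queue_rsp(int fail_alloc)
	{
		void *qr = fail_alloc ? NULL : malloc(16);

		if (!qr)
			return -ENOMEM;	/* previously lost in a void return */
		/* ...list_add + wake_up in the real code... */
		free(qr);
		return 0;
	}

	int main(void)
	{
		printf("%d\n", queue_rsp(0));
		printf("%d\n", queue_rsp(1));
		return 0;
	}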
index 8ff08856516aba68394fc07661ec71b635c8b6a2..9e4197af8708e1056a08f4c01d423581a075bef1 100644 (file)
@@ -31,7 +31,7 @@ extern int iscsit_find_cmd_for_recovery(struct iscsi_session *, struct iscsi_cmd
                        struct iscsi_conn_recovery **, itt_t);
 extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *);
-extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
+extern int iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
 extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
 extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
index fd7c16a7ca6e06ad53e6d6df54ab739550ae4a4a..fc4a9c303d559f95b1216857efd8ae6ce77b96ca 100644 (file)
@@ -197,8 +197,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
                /*
                 * Set the ASYMMETRIC ACCESS State
                 */
-               buf[off++] |= (atomic_read(
-                       &tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
+               buf[off++] |= tg_pt_gp->tg_pt_gp_alua_access_state & 0xff;
                /*
                 * Set supported ASYMMETRIC ACCESS State bits
                 */
@@ -710,7 +709,7 @@ target_alua_state_check(struct se_cmd *cmd)
 
        spin_lock(&lun->lun_tg_pt_gp_lock);
        tg_pt_gp = lun->lun_tg_pt_gp;
-       out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+       out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
        nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
 
        // XXX: keeps using tg_pt_gp without reference after unlock
@@ -911,7 +910,7 @@ static int core_alua_write_tpg_metadata(
 }
 
 /*
- * Called with tg_pt_gp->tg_pt_gp_md_mutex held
+ * Called with tg_pt_gp->tg_pt_gp_transition_mutex held
  */
 static int core_alua_update_tpg_primary_metadata(
        struct t10_alua_tg_pt_gp *tg_pt_gp)
@@ -934,7 +933,7 @@ static int core_alua_update_tpg_primary_metadata(
                        "alua_access_state=0x%02x\n"
                        "alua_access_status=0x%02x\n",
                        tg_pt_gp->tg_pt_gp_id,
-                       tg_pt_gp->tg_pt_gp_alua_pending_state,
+                       tg_pt_gp->tg_pt_gp_alua_access_state,
                        tg_pt_gp->tg_pt_gp_alua_access_status);
 
        snprintf(path, ALUA_METADATA_PATH_LEN,
@@ -1013,93 +1012,41 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
        spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
 }
 
-static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
-{
-       struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
-               struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
-       struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
-       bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
-                        ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
-
-       /*
-        * Update the ALUA metadata buf that has been allocated in
-        * core_alua_do_port_transition(), this metadata will be written
-        * to struct file.
-        *
-        * Note that there is the case where we do not want to update the
-        * metadata when the saved metadata is being parsed in userspace
-        * when setting the existing port access state and access status.
-        *
-        * Also note that the failure to write out the ALUA metadata to
-        * struct file does NOT affect the actual ALUA transition.
-        */
-       if (tg_pt_gp->tg_pt_gp_write_metadata) {
-               mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
-               core_alua_update_tpg_primary_metadata(tg_pt_gp);
-               mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
-       }
-       /*
-        * Set the current primary ALUA access state to the requested new state
-        */
-       atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-                  tg_pt_gp->tg_pt_gp_alua_pending_state);
-
-       pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
-               " from primary access state %s to %s\n", (explicit) ? "explicit" :
-               "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
-               tg_pt_gp->tg_pt_gp_id,
-               core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state),
-               core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
-
-       core_alua_queue_state_change_ua(tg_pt_gp);
-
-       spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-       atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
-       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
-
-       if (tg_pt_gp->tg_pt_gp_transition_complete)
-               complete(tg_pt_gp->tg_pt_gp_transition_complete);
-}
-
 static int core_alua_do_transition_tg_pt(
        struct t10_alua_tg_pt_gp *tg_pt_gp,
        int new_state,
        int explicit)
 {
-       struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
-       DECLARE_COMPLETION_ONSTACK(wait);
+       int prev_state;
 
+       mutex_lock(&tg_pt_gp->tg_pt_gp_transition_mutex);
        /* Nothing to be done here */
-       if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
+       if (tg_pt_gp->tg_pt_gp_alua_access_state == new_state) {
+               mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return 0;
+       }
 
-       if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
+       if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) {
+               mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return -EAGAIN;
-
-       /*
-        * Flush any pending transitions
-        */
-       if (!explicit)
-               flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
+       }
 
        /*
         * Save the old primary ALUA access state, and set the current state
         * to ALUA_ACCESS_STATE_TRANSITION.
         */
-       atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-                       ALUA_ACCESS_STATE_TRANSITION);
+       prev_state = tg_pt_gp->tg_pt_gp_alua_access_state;
+       tg_pt_gp->tg_pt_gp_alua_access_state = ALUA_ACCESS_STATE_TRANSITION;
        tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
                                ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG :
                                ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA;
 
        core_alua_queue_state_change_ua(tg_pt_gp);
 
-       if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+       if (new_state == ALUA_ACCESS_STATE_TRANSITION) {
+               mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
                return 0;
-
-       tg_pt_gp->tg_pt_gp_alua_previous_state =
-               atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
-       tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+       }
 
        /*
         * Check for the optional ALUA primary state transition delay
@@ -1108,19 +1055,36 @@ static int core_alua_do_transition_tg_pt(
                msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
 
        /*
-        * Take a reference for workqueue item
+        * Set the current primary ALUA access state to the requested new state
         */
-       spin_lock(&dev->t10_alua.tg_pt_gps_lock);
-       atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
-       spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
+       tg_pt_gp->tg_pt_gp_alua_access_state = new_state;
 
-       schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
-       if (explicit) {
-               tg_pt_gp->tg_pt_gp_transition_complete = &wait;
-               wait_for_completion(&wait);
-               tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+       /*
+        * Update the ALUA metadata buf that has been allocated in
+        * core_alua_do_port_transition(), this metadata will be written
+        * to struct file.
+        *
+        * Note that there is the case where we do not want to update the
+        * metadata when the saved metadata is being parsed in userspace
+        * when setting the existing port access state and access status.
+        *
+        * Also note that the failure to write out the ALUA metadata to
+        * struct file does NOT affect the actual ALUA transition.
+        */
+       if (tg_pt_gp->tg_pt_gp_write_metadata) {
+               core_alua_update_tpg_primary_metadata(tg_pt_gp);
        }
 
+       pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
+               " from primary access state %s to %s\n", (explicit) ? "explicit" :
+               "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
+               tg_pt_gp->tg_pt_gp_id,
+               core_alua_dump_state(prev_state),
+               core_alua_dump_state(new_state));
+
+       core_alua_queue_state_change_ua(tg_pt_gp);
+
+       mutex_unlock(&tg_pt_gp->tg_pt_gp_transition_mutex);
        return 0;
 }
 
@@ -1685,14 +1649,12 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
        }
        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
        INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_lun_list);
-       mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
+       mutex_init(&tg_pt_gp->tg_pt_gp_transition_mutex);
        spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
        atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
-       INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
-                 core_alua_do_transition_tg_pt_work);
        tg_pt_gp->tg_pt_gp_dev = dev;
-       atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
-               ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
+       tg_pt_gp->tg_pt_gp_alua_access_state =
+                       ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
        /*
         * Enable both explicit and implicit ALUA support by default
         */
@@ -1797,8 +1759,6 @@ void core_alua_free_tg_pt_gp(
        dev->t10_alua.alua_tg_pt_gps_counter--;
        spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
 
-       flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
-
        /*
         * Allow a struct t10_alua_tg_pt_gp_member * referenced by
         * core_alua_get_tg_pt_gp_by_name() in
@@ -1938,8 +1898,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
                        "Primary Access Status: %s\nTG Port Secondary Access"
                        " State: %s\nTG Port Secondary Access Status: %s\n",
                        config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
-                       core_alua_dump_state(atomic_read(
-                                       &tg_pt_gp->tg_pt_gp_alua_access_state)),
+                       core_alua_dump_state(
+                               tg_pt_gp->tg_pt_gp_alua_access_state),
                        core_alua_dump_status(
                                tg_pt_gp->tg_pt_gp_alua_access_status),
                        atomic_read(&lun->lun_tg_pt_secondary_offline) ?
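
The ALUA rework above drops the atomic access-state plus workqueue/completion
machinery in favour of one mutex held across the whole transition. A compact
pthreads model of that locking pattern (hypothetical, user-space only):

#include <pthread.h>

static pthread_mutex_t transition_mutex = PTHREAD_MUTEX_INITIALIZER;
static int access_state;

/* Returns the previous state; the lock makes the check-then-set atomic,
 * so readers can never observe a half-finished transition. */
static int do_transition(int new_state)
{
        int prev;

        pthread_mutex_lock(&transition_mutex);
        prev = access_state;
        if (prev != new_state)          /* "nothing to be done" short-cut */
                access_state = new_state;
        pthread_mutex_unlock(&transition_mutex);
        return prev;
}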
index 38b5025e4c7a877f9e5c0bcfa6995262b6330e32..70657fd564406b3c137b02c3f61824f387f554aa 100644 (file)
@@ -2392,7 +2392,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
                char *page)
 {
        return sprintf(page, "%d\n",
-               atomic_read(&to_tg_pt_gp(item)->tg_pt_gp_alua_access_state));
+                      to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
 }
 
 static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
index d8a16ca6baa507b235cbec2fbff56a648874fd2e..d1e6cab8e3d3f0a95cb801f2680c2b8e2474de1d 100644 (file)
@@ -92,6 +92,11 @@ static int target_fabric_mappedlun_link(
                pr_err("Source se_lun->lun_se_dev does not exist\n");
                return -EINVAL;
        }
+       if (lun->lun_shutdown) {
+               pr_err("Unable to create mappedlun symlink because"
+                       " lun->lun_shutdown=true\n");
+               return -EINVAL;
+       }
        se_tpg = lun->lun_tpg;
 
        nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
index 6fb191914f458f7889508652e19b860355387491..dfaef4d3b2d2698088754c08f1846bc893815237 100644 (file)
@@ -642,6 +642,8 @@ void core_tpg_remove_lun(
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
 
+       lun->lun_shutdown = true;
+
        core_clear_lun_from_tpg(lun, tpg);
        /*
         * Wait for any active I/O references to percpu se_lun->lun_ref to
@@ -663,6 +665,8 @@ void core_tpg_remove_lun(
        }
        if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
                hlist_del_rcu(&lun->link);
+
+       lun->lun_shutdown = false;
        mutex_unlock(&tpg->tpg_lun_mutex);
 
        percpu_ref_exit(&lun->lun_ref);
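
The lun_shutdown flag added above closes a race: once core_tpg_remove_lun() has
begun, target_fabric_mappedlun_link() refuses new symlinks. A sketch of the
handshake, assuming both sides are serialized by the same tpg_lun_mutex
(user-space rendering, names invented):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t tpg_lun_mutex = PTHREAD_MUTEX_INITIALIZER;
static int lun_shutdown;

static int mappedlun_link(void)
{
        int ret = 0;

        pthread_mutex_lock(&tpg_lun_mutex);
        if (lun_shutdown)
                ret = -EINVAL;  /* teardown already in progress */
        /* ... otherwise create the symlink ... */
        pthread_mutex_unlock(&tpg_lun_mutex);
        return ret;
}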
index b1a3cdb29468cf84e7eb48d6c8c41934c0b5b4cb..a0cd56ee5fe984f7ddf27c41157f7314343d23c1 100644 (file)
@@ -64,8 +64,9 @@ struct kmem_cache *t10_alua_lba_map_cache;
 struct kmem_cache *t10_alua_lba_map_mem_cache;
 
 static void transport_complete_task_attr(struct se_cmd *cmd);
+static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
 static void transport_handle_queue_full(struct se_cmd *cmd,
-               struct se_device *dev);
+               struct se_device *dev, int err, bool write_pending);
 static int transport_put_cmd(struct se_cmd *cmd);
 static void target_complete_ok_work(struct work_struct *work);
 
@@ -804,7 +805,8 @@ void target_qf_do_work(struct work_struct *work)
 
                if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
                        transport_write_pending_qf(cmd);
-               else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+               else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
+                        cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
                        transport_complete_qf(cmd);
        }
 }
@@ -1719,7 +1721,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
                }
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_status(cmd);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
                goto check_stop;
        default:
@@ -1730,7 +1732,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        }
 
        ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
-       if (ret == -EAGAIN || ret == -ENOMEM)
+       if (ret)
                goto queue_full;
 
 check_stop:
@@ -1739,8 +1741,7 @@ check_stop:
        return;
 
 queue_full:
-       cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-       transport_handle_queue_full(cmd, cmd->se_dev);
+       transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 EXPORT_SYMBOL(transport_generic_request_failure);
 
@@ -1977,13 +1978,29 @@ static void transport_complete_qf(struct se_cmd *cmd)
        int ret = 0;
 
        transport_complete_task_attr(cmd);
+       /*
+        * If a fabric driver ->write_pending() or ->queue_data_in() callback
+        * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
+        * the same callbacks should not be retried.  Return CHECK_CONDITION
+        * if a scsi_status is not already set.
+        *
+        * If a fabric driver ->queue_status() has returned nonzero, always
+        * keep retrying no matter what.
+        */
+       if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
+               if (cmd->scsi_status)
+                       goto queue_status;
 
-       if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
-               trace_target_cmd_complete(cmd);
-               ret = cmd->se_tfo->queue_status(cmd);
-               goto out;
+               cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
+               cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+               cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER;
+               translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
+               goto queue_status;
        }
 
+       if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+               goto queue_status;
+
        switch (cmd->data_direction) {
        case DMA_FROM_DEVICE:
                if (cmd->scsi_status)
@@ -2007,19 +2024,33 @@ queue_status:
                break;
        }
 
-out:
        if (ret < 0) {
-               transport_handle_queue_full(cmd, cmd->se_dev);
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
                return;
        }
        transport_lun_remove_cmd(cmd);
        transport_cmd_check_stop_to_fabric(cmd);
 }
 
-static void transport_handle_queue_full(
-       struct se_cmd *cmd,
-       struct se_device *dev)
+static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
+                                       int err, bool write_pending)
 {
+       /*
+        * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
+        * ->queue_data_in() callbacks from a new process context.
+        *
+        * Otherwise for other errors, transport_complete_qf() will send
+        * CHECK_CONDITION via ->queue_status() instead of attempting to
+        * retry associated fabric driver data-transfer callbacks.
+        */
+       if (err == -EAGAIN || err == -ENOMEM) {
+               cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
+                                                TRANSPORT_COMPLETE_QF_OK;
+       } else {
+               pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
+               cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
+       }
+
        spin_lock_irq(&dev->qf_cmd_lock);
        list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
        atomic_inc_mb(&dev->dev_qf_count);
@@ -2083,7 +2114,7 @@ static void target_complete_ok_work(struct work_struct *work)
                WARN_ON(!cmd->scsi_status);
                ret = transport_send_check_condition_and_sense(
                                        cmd, 0, 1);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
 
                transport_lun_remove_cmd(cmd);
@@ -2109,7 +2140,7 @@ static void target_complete_ok_work(struct work_struct *work)
                } else if (rc) {
                        ret = transport_send_check_condition_and_sense(cmd,
                                                rc, 0);
-                       if (ret == -EAGAIN || ret == -ENOMEM)
+                       if (ret)
                                goto queue_full;
 
                        transport_lun_remove_cmd(cmd);
@@ -2134,7 +2165,7 @@ queue_rsp:
                if (target_read_prot_action(cmd)) {
                        ret = transport_send_check_condition_and_sense(cmd,
                                                cmd->pi_err, 0);
-                       if (ret == -EAGAIN || ret == -ENOMEM)
+                       if (ret)
                                goto queue_full;
 
                        transport_lun_remove_cmd(cmd);
@@ -2144,7 +2175,7 @@ queue_rsp:
 
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_data_in(cmd);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
                break;
        case DMA_TO_DEVICE:
@@ -2157,7 +2188,7 @@ queue_rsp:
                        atomic_long_add(cmd->data_length,
                                        &cmd->se_lun->lun_stats.tx_data_octets);
                        ret = cmd->se_tfo->queue_data_in(cmd);
-                       if (ret == -EAGAIN || ret == -ENOMEM)
+                       if (ret)
                                goto queue_full;
                        break;
                }
@@ -2166,7 +2197,7 @@ queue_rsp:
 queue_status:
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_status(cmd);
-               if (ret == -EAGAIN || ret == -ENOMEM)
+               if (ret)
                        goto queue_full;
                break;
        default:
@@ -2180,8 +2211,8 @@ queue_status:
 queue_full:
        pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
                " data_direction: %d\n", cmd, cmd->data_direction);
-       cmd->t_state = TRANSPORT_COMPLETE_QF_OK;
-       transport_handle_queue_full(cmd, cmd->se_dev);
+
+       transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 void target_free_sgl(struct scatterlist *sgl, int nents)
@@ -2449,18 +2480,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
        ret = cmd->se_tfo->write_pending(cmd);
-       if (ret == -EAGAIN || ret == -ENOMEM)
+       if (ret)
                goto queue_full;
 
-       /* fabric drivers should only return -EAGAIN or -ENOMEM as error */
-       WARN_ON(ret);
-
-       return (!ret) ? 0 : TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return 0;
 
 queue_full:
        pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
-       cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
-       transport_handle_queue_full(cmd, cmd->se_dev);
+       transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
        return 0;
 }
 EXPORT_SYMBOL(transport_generic_new_cmd);
@@ -2470,10 +2497,10 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
        int ret;
 
        ret = cmd->se_tfo->write_pending(cmd);
-       if (ret == -EAGAIN || ret == -ENOMEM) {
+       if (ret) {
                pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n",
                         cmd);
-               transport_handle_queue_full(cmd, cmd->se_dev);
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
        }
 }
 
@@ -3011,6 +3038,8 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
        __releases(&cmd->t_state_lock)
        __acquires(&cmd->t_state_lock)
 {
+       int ret;
+
        assert_spin_locked(&cmd->t_state_lock);
        WARN_ON_ONCE(!irqs_disabled());
 
@@ -3034,7 +3063,9 @@ static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
        trace_target_cmd_complete(cmd);
 
        spin_unlock_irq(&cmd->t_state_lock);
-       cmd->se_tfo->queue_status(cmd);
+       ret = cmd->se_tfo->queue_status(cmd);
+       if (ret)
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
        spin_lock_irq(&cmd->t_state_lock);
 
        return 1;
@@ -3055,6 +3086,7 @@ EXPORT_SYMBOL(transport_check_aborted_status);
 void transport_send_task_abort(struct se_cmd *cmd)
 {
        unsigned long flags;
+       int ret;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
@@ -3090,7 +3122,9 @@ send_abort:
                 cmd->t_task_cdb[0], cmd->tag);
 
        trace_target_cmd_complete(cmd);
-       cmd->se_tfo->queue_status(cmd);
+       ret = cmd->se_tfo->queue_status(cmd);
+       if (ret)
+               transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
 }
 
 static void target_tmr_work(struct work_struct *work)
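
The transport changes above make every nonzero fabric-callback return route
through transport_handle_queue_full(), which now classifies the error. A small
model of that classification (hypothetical names; mirrors the logic, not the API):

#include <errno.h>

enum qf_state { QF_OK, QF_WP, QF_ERR };

/* Only -EAGAIN/-ENOMEM mean "retry the same callback from process
 * context later"; anything else escalates to CHECK_CONDITION via
 * the QF_ERR path in transport_complete_qf(). */
static enum qf_state classify_queue_full(int err, int write_pending)
{
        if (err == -EAGAIN || err == -ENOMEM)
                return write_pending ? QF_WP : QF_OK;
        return QF_ERR;
}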
index c6874c38a10bc45e86beae58ddfed175664d51cf..f615c3bbb73e8b7a2a7bf3f5039efd84c724cdf2 100644 (file)
@@ -311,24 +311,50 @@ static void free_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd)
                   DATA_BLOCK_BITS);
 }
 
-static void gather_data_area(struct tcmu_dev *udev, unsigned long *cmd_bitmap,
-               struct scatterlist *data_sg, unsigned int data_nents)
+static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+                            bool bidi)
 {
+       struct se_cmd *se_cmd = cmd->se_cmd;
        int i, block;
        int block_remaining = 0;
        void *from, *to;
        size_t copy_bytes, from_offset;
-       struct scatterlist *sg;
+       struct scatterlist *sg, *data_sg;
+       unsigned int data_nents;
+       DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
+
+       bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
+
+       if (!bidi) {
+               data_sg = se_cmd->t_data_sg;
+               data_nents = se_cmd->t_data_nents;
+       } else {
+               uint32_t count;
+
+               /*
+                * In the bidi case, the first count blocks belong to the
+                * Data-Out buffer, so skip them before gathering the
+                * Data-In buffer.
+                */
+               count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+               while (count--) {
+                       block = find_first_bit(bitmap, DATA_BLOCK_BITS);
+                       clear_bit(block, bitmap);
+               }
+
+               data_sg = se_cmd->t_bidi_data_sg;
+               data_nents = se_cmd->t_bidi_data_nents;
+       }
 
        for_each_sg(data_sg, sg, data_nents, i) {
                int sg_remaining = sg->length;
                to = kmap_atomic(sg_page(sg)) + sg->offset;
                while (sg_remaining > 0) {
                        if (block_remaining == 0) {
-                               block = find_first_bit(cmd_bitmap,
+                               block = find_first_bit(bitmap,
                                                DATA_BLOCK_BITS);
                                block_remaining = DATA_BLOCK_SIZE;
-                               clear_bit(block, cmd_bitmap);
+                               clear_bit(block, bitmap);
                        }
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
@@ -394,6 +420,27 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t d
        return true;
 }
 
+static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+{
+       struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+       size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
+               BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
+               data_length += round_up(se_cmd->t_bidi_data_sg->length,
+                               DATA_BLOCK_SIZE);
+       }
+
+       return data_length;
+}
+
+static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+{
+       size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+
+       return data_length / DATA_BLOCK_SIZE;
+}
+
 static sense_reason_t
 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 {
@@ -407,7 +454,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        uint32_t cmd_head;
        uint64_t cdb_off;
        bool copy_to_data_area;
-       size_t data_length;
+       size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
        DECLARE_BITMAP(old_bitmap, DATA_BLOCK_BITS);
 
        if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
@@ -421,8 +468,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
         * expensive to tell how many regions are freed in the bitmap
        */
        base_command_size = max(offsetof(struct tcmu_cmd_entry,
-                               req.iov[se_cmd->t_bidi_data_nents +
-                                       se_cmd->t_data_nents]),
+                               req.iov[tcmu_cmd_get_block_cnt(tcmu_cmd)]),
                                sizeof(struct tcmu_cmd_entry));
        command_size = base_command_size
                + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
@@ -433,11 +479,6 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
 
        mb = udev->mb_addr;
        cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
-       data_length = se_cmd->data_length;
-       if (se_cmd->se_cmd_flags & SCF_BIDI) {
-               BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
-               data_length += se_cmd->t_bidi_data_sg->length;
-       }
        if ((command_size > (udev->cmdr_size / 2)) ||
            data_length > udev->data_size) {
                pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
@@ -511,11 +552,14 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
        entry->req.iov_dif_cnt = 0;
 
        /* Handle BIDI commands */
-       iov_cnt = 0;
-       alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
-               se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false);
-       entry->req.iov_bidi_cnt = iov_cnt;
-
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
+               iov_cnt = 0;
+               iov++;
+               alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg,
+                               se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
+                               false);
+               entry->req.iov_bidi_cnt = iov_cnt;
+       }
        /* cmd's data_bitmap is what changed in process */
        bitmap_xor(tcmu_cmd->data_bitmap, old_bitmap, udev->data_bitmap,
                        DATA_BLOCK_BITS);
@@ -592,19 +636,11 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                               se_cmd->scsi_sense_length);
                free_data_area(udev, cmd);
        } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
-               DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
                /* Get Data-In buffer before clean up */
-               bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-               gather_data_area(udev, bitmap,
-                       se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
+               gather_data_area(udev, cmd, true);
                free_data_area(udev, cmd);
        } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-               DECLARE_BITMAP(bitmap, DATA_BLOCK_BITS);
-
-               bitmap_copy(bitmap, cmd->data_bitmap, DATA_BLOCK_BITS);
-               gather_data_area(udev, bitmap,
-                       se_cmd->t_data_sg, se_cmd->t_data_nents);
+               gather_data_area(udev, cmd, false);
                free_data_area(udev, cmd);
        } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
                free_data_area(udev, cmd);
@@ -1196,11 +1232,6 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
        if (ret < 0)
                return ret;
 
-       if (!val) {
-               pr_err("Illegal value for cmd_time_out\n");
-               return -EINVAL;
-       }
-
        udev->cmd_time_out = val * MSEC_PER_SEC;
        return count;
 }
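
tcmu_cmd_get_data_length() above sizes the data area in whole DATA_BLOCK_SIZE
blocks, adding the first bidi scatterlist entry when SCF_BIDI is set. A
stand-alone model of that arithmetic (DATA_BLOCK_SIZE value assumed):

#include <stddef.h>
#include <stdint.h>

#define DATA_BLOCK_SIZE 4096    /* assumed block size for the example */

static size_t blocks_for(size_t len)
{
        return (len + DATA_BLOCK_SIZE - 1) / DATA_BLOCK_SIZE;
}

/* e.g. data_len = 5000, no bidi -> 2 blocks reserved in the iov estimate */
static uint32_t cmd_block_cnt(size_t data_len, size_t bidi_sg_len, int bidi)
{
        size_t blocks = blocks_for(data_len);

        if (bidi)
                blocks += blocks_for(bidi_sg_len);
        return (uint32_t)blocks;
}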
index b0500a0a87b86161b8cf8befcee9753ff6cda74d..e4603b09863a8fa5ffd4467e81a463538f0533bb 100644 (file)
@@ -491,6 +491,41 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
        tty_ldisc_debug(tty, "%p: closed\n", ld);
 }
 
+/**
+ *     tty_ldisc_restore       -       helper for tty ldisc change
+ *     @tty: tty to recover
+ *     @old: previous ldisc
+ *
+ *     Restore the previous line discipline or N_TTY when a line discipline
+ *     change fails due to an open error
+ */
+
+static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
+{
+       struct tty_ldisc *new_ldisc;
+       int r;
+
+       /* There is an outstanding reference here so this is safe */
+       old = tty_ldisc_get(tty, old->ops->num);
+       WARN_ON(IS_ERR(old));
+       tty->ldisc = old;
+       tty_set_termios_ldisc(tty, old->ops->num);
+       if (tty_ldisc_open(tty, old) < 0) {
+               tty_ldisc_put(old);
+               /* This driver is always present */
+               new_ldisc = tty_ldisc_get(tty, N_TTY);
+               if (IS_ERR(new_ldisc))
+                       panic("n_tty: get");
+               tty->ldisc = new_ldisc;
+               tty_set_termios_ldisc(tty, N_TTY);
+               r = tty_ldisc_open(tty, new_ldisc);
+               if (r < 0)
+                       panic("Couldn't open N_TTY ldisc for "
+                             "%s --- error %d.",
+                             tty_name(tty), r);
+       }
+}
+
 /**
  *     tty_set_ldisc           -       set line discipline
  *     @tty: the terminal to set
@@ -504,7 +539,12 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
 
 int tty_set_ldisc(struct tty_struct *tty, int disc)
 {
-       int retval, old_disc;
+       int retval;
+       struct tty_ldisc *old_ldisc, *new_ldisc;
+
+       new_ldisc = tty_ldisc_get(tty, disc);
+       if (IS_ERR(new_ldisc))
+               return PTR_ERR(new_ldisc);
 
        tty_lock(tty);
        retval = tty_ldisc_lock(tty, 5 * HZ);
@@ -517,8 +557,7 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
        }
 
        /* Check the no-op case */
-       old_disc = tty->ldisc->ops->num;
-       if (old_disc == disc)
+       if (tty->ldisc->ops->num == disc)
                goto out;
 
        if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -527,25 +566,34 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
                goto out;
        }
 
-       retval = tty_ldisc_reinit(tty, disc);
+       old_ldisc = tty->ldisc;
+
+       /* Shutdown the old discipline. */
+       tty_ldisc_close(tty, old_ldisc);
+
+       /* Now set up the new line discipline. */
+       tty->ldisc = new_ldisc;
+       tty_set_termios_ldisc(tty, disc);
+
+       retval = tty_ldisc_open(tty, new_ldisc);
        if (retval < 0) {
                /* Back to the old one or N_TTY if we can't */
-               if (tty_ldisc_reinit(tty, old_disc) < 0) {
-                       pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n");
-                       if (tty_ldisc_reinit(tty, N_TTY) < 0) {
-                               /* At this point we have tty->ldisc == NULL. */
-                               pr_err("tty: reinitializing N_TTY failed\n");
-                       }
-               }
+               tty_ldisc_put(new_ldisc);
+               tty_ldisc_restore(tty, old_ldisc);
        }
 
-       if (tty->ldisc && tty->ldisc->ops->num != old_disc &&
-           tty->ops->set_ldisc) {
+       if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) {
                down_read(&tty->termios_rwsem);
                tty->ops->set_ldisc(tty);
                up_read(&tty->termios_rwsem);
        }
 
+       /*
+        * At this point we hold a reference to the new ldisc and a
+        * reference to the old ldisc, or we hold two references to
+        * the old ldisc (if it was restored as part of error cleanup
+        * above). In either case, releasing a single reference from
+        * the old ldisc is correct.
+        */
+       new_ldisc = old_ldisc;
 out:
        tty_ldisc_unlock(tty);
 
@@ -553,6 +601,7 @@ out:
           already running */
        tty_buffer_restart_work(tty->port);
 err:
+       tty_ldisc_put(new_ldisc);       /* drop the extra reference */
        tty_unlock(tty);
        return retval;
 }
@@ -613,8 +662,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
        int retval;
 
        ld = tty_ldisc_get(tty, disc);
-       if (IS_ERR(ld))
+       if (IS_ERR(ld)) {
+               BUG_ON(disc == N_TTY);
                return PTR_ERR(ld);
+       }
 
        if (tty->ldisc) {
                tty_ldisc_close(tty, tty->ldisc);
@@ -626,8 +677,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
        tty_set_termios_ldisc(tty, disc);
        retval = tty_ldisc_open(tty, tty->ldisc);
        if (retval) {
-               tty_ldisc_put(tty->ldisc);
-               tty->ldisc = NULL;
+               if (!WARN_ON(disc == N_TTY)) {
+                       tty_ldisc_put(tty->ldisc);
+                       tty->ldisc = NULL;
+               }
        }
        return retval;
 }
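
The reference bookkeeping in the tty_set_ldisc() rewrite is the subtle part:
the success path and the restore-on-failure path must both end with exactly one
put on the old ldisc. A toy counter model of that invariant (hypothetical):

#include <assert.h>

static int refs;                        /* stand-in for an ldisc refcount */

static void ldisc_get(void) { refs++; }
static void ldisc_put(void) { refs--; }

static void set_ldisc(int open_fails)
{
        ldisc_get();                    /* reference on the new ldisc */
        if (open_fails) {
                ldisc_put();            /* drop the failed new ldisc ... */
                ldisc_get();            /* ... restore re-takes the old one */
        }
        ldisc_put();                    /* the single unconditional put */
}

int main(void)
{
        set_ldisc(0); assert(refs == 0);        /* success path balances */
        set_ldisc(1); assert(refs == 0);        /* failure path balances too */
        return 0;
}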
index d2351139342f6200209078e769e04f5ea1eb2d1f..a82e2bd5ea34d97996cb79ba5b72d09aed86e09a 100644 (file)
@@ -373,7 +373,7 @@ static void bot_cleanup_old_alt(struct f_uas *fu)
        usb_ep_free_request(fu->ep_in, fu->bot_req_in);
        usb_ep_free_request(fu->ep_out, fu->bot_req_out);
        usb_ep_free_request(fu->ep_out, fu->cmd.req);
-       usb_ep_free_request(fu->ep_out, fu->bot_status.req);
+       usb_ep_free_request(fu->ep_in, fu->bot_status.req);
 
        kfree(fu->cmd.buf);
 
index 8c4dc1e1f94fdb53ad7acf2808fa400c03787857..b827a8113e26803d8caee69e4ea59d6ceae56ea0 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/efi.h>
 #include <linux/errno.h>
 #include <linux/fb.h>
+#include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/screen_info.h>
 #include <video/vga.h>
@@ -143,6 +144,8 @@ static struct attribute *efifb_attrs[] = {
 };
 ATTRIBUTE_GROUPS(efifb);
 
+static bool pci_dev_disabled;  /* FB base matches BAR of a disabled device */
+
 static int efifb_probe(struct platform_device *dev)
 {
        struct fb_info *info;
@@ -152,7 +155,7 @@ static int efifb_probe(struct platform_device *dev)
        unsigned int size_total;
        char *option = NULL;
 
-       if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+       if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
                return -ENODEV;
 
        if (fb_get_options("efifb", &option))
@@ -360,3 +363,64 @@ static struct platform_driver efifb_driver = {
 };
 
 builtin_platform_driver(efifb_driver);
+
+#if defined(CONFIG_PCI) && !defined(CONFIG_X86)
+
+static bool pci_bar_found;     /* did we find a BAR matching the efifb base? */
+
+static void claim_efifb_bar(struct pci_dev *dev, int idx)
+{
+       u16 word;
+
+       pci_bar_found = true;
+
+       pci_read_config_word(dev, PCI_COMMAND, &word);
+       if (!(word & PCI_COMMAND_MEMORY)) {
+               pci_dev_disabled = true;
+               dev_err(&dev->dev,
+                       "BAR %d: assigned to efifb but device is disabled!\n",
+                       idx);
+               return;
+       }
+
+       if (pci_claim_resource(dev, idx)) {
+               pci_dev_disabled = true;
+               dev_err(&dev->dev,
+                       "BAR %d: failed to claim resource for efifb!\n", idx);
+               return;
+       }
+
+       dev_info(&dev->dev, "BAR %d: assigned to efifb\n", idx);
+}
+
+static void efifb_fixup_resources(struct pci_dev *dev)
+{
+       u64 base = screen_info.lfb_base;
+       u64 size = screen_info.lfb_size;
+       int i;
+
+       if (pci_bar_found || screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+               return;
+
+       if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+               base |= (u64)screen_info.ext_lfb_base << 32;
+
+       if (!base)
+               return;
+
+       for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+               struct resource *res = &dev->resource[i];
+
+               if (!(res->flags & IORESOURCE_MEM))
+                       continue;
+
+               if (res->start <= base && res->end >= base + size - 1) {
+                       claim_efifb_bar(dev, i);
+                       break;
+               }
+       }
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY,
+                              16, efifb_fixup_resources);
+
+#endif
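
efifb_fixup_resources() above claims the BAR whose range contains the EFI
framebuffer. The containment test itself is plain interval arithmetic; a
runnable model (addresses invented for the example):

#include <stdint.h>
#include <stdio.h>

/* A BAR [start, end] (inclusive) owns the framebuffer iff it contains
 * the whole range [base, base + size - 1]. */
static int bar_covers_fb(uint64_t start, uint64_t end,
                         uint64_t base, uint64_t size)
{
        return start <= base && end >= base + size - 1;
}

int main(void)
{
        /* 16 MiB BAR at 0x80000000 covering an 8 MiB framebuffer */
        printf("%d\n", bar_covers_fb(0x80000000, 0x80ffffff,
                                     0x80000000, 8 << 20));
        return 0;
}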
index 1abba07b84b3efd014b28bfd0ef61f3cad7cbeda..f4cbfb3b8a0980030e2a0c2bbd78348968d6b8a1 100644 (file)
@@ -1608,19 +1608,6 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev)
        return 0;
 }
 
-static void check_required_callbacks(struct omapfb_device *fbdev)
-{
-#define _C(x) (fbdev->ctrl->x != NULL)
-#define _P(x) (fbdev->panel->x != NULL)
-       BUG_ON(fbdev->ctrl == NULL || fbdev->panel == NULL);
-       BUG_ON(!(_C(init) && _C(cleanup) && _C(get_caps) &&
-                _C(set_update_mode) && _C(setup_plane) && _C(enable_plane) &&
-                _P(init) && _P(cleanup) && _P(enable) && _P(disable) &&
-                _P(get_caps)));
-#undef _P
-#undef _C
-}
-
 /*
  * Called by LDM binding to probe and attach a new device.
  * Initialization sequence:
@@ -1705,8 +1692,6 @@ static int omapfb_do_probe(struct platform_device *pdev,
                omapfb_ops.fb_mmap = omapfb_mmap;
        init_state++;
 
-       check_required_callbacks(fbdev);
-
        r = planes_init(fbdev);
        if (r)
                goto cleanup;
index bd017b57c47f8af4ff1558cedaa8589a5f0ce9ff..f599520374ddf575bba1236b81bec2c4c2d21c49 100644 (file)
@@ -578,10 +578,14 @@ static int ssd1307fb_probe(struct i2c_client *client,
 
        par->vbat_reg = devm_regulator_get_optional(&client->dev, "vbat");
        if (IS_ERR(par->vbat_reg)) {
-               dev_err(&client->dev, "failed to get VBAT regulator: %ld\n",
-                       PTR_ERR(par->vbat_reg));
                ret = PTR_ERR(par->vbat_reg);
-               goto fb_alloc_error;
+               if (ret == -ENODEV) {
+                       par->vbat_reg = NULL;
+               } else {
+                       dev_err(&client->dev, "failed to get VBAT regulator: %d\n",
+                               ret);
+                       goto fb_alloc_error;
+               }
        }
 
        if (of_property_read_u32(node, "solomon,width", &par->width))
@@ -668,10 +672,13 @@ static int ssd1307fb_probe(struct i2c_client *client,
                udelay(4);
        }
 
-       ret = regulator_enable(par->vbat_reg);
-       if (ret) {
-               dev_err(&client->dev, "failed to enable VBAT: %d\n", ret);
-               goto reset_oled_error;
+       if (par->vbat_reg) {
+               ret = regulator_enable(par->vbat_reg);
+               if (ret) {
+                       dev_err(&client->dev, "failed to enable VBAT: %d\n",
+                               ret);
+                       goto reset_oled_error;
+               }
        }
 
        ret = ssd1307fb_init(par);
@@ -710,7 +717,8 @@ panel_init_error:
                pwm_put(par->pwm);
        };
 regulator_enable_error:
-       regulator_disable(par->vbat_reg);
+       if (par->vbat_reg)
+               regulator_disable(par->vbat_reg);
 reset_oled_error:
        fb_deferred_io_cleanup(info);
 fb_alloc_error:
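
The ssd1307fb hunks above make the VBAT regulator optional: -ENODEV from
devm_regulator_get_optional() now means "no supply described", not a probe
failure. A sketch of the pattern (user-space model, names invented):

#include <errno.h>
#include <stdint.h>

struct regulator;                       /* opaque handle, as in the kernel */

static int probe_vbat(struct regulator **out, intptr_t err_or_ptr)
{
        if (err_or_ptr == -ENODEV) {
                *out = NULL;            /* absent supply: run without it */
                return 0;
        }
        if (err_or_ptr < 0)
                return (int)err_or_ptr; /* any other error aborts probe */
        *out = (struct regulator *)err_or_ptr;
        return 0;
}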
index d0115a7af0a9ae0f3171a66bf716f0f21c3e91b1..3ee309c50b2d015fb3d463dd88f6b99253beea12 100644 (file)
@@ -643,7 +643,6 @@ static void xenfb_backend_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateInitWait:
-InitWait:
                xenbus_switch_state(dev, XenbusStateConnected);
                break;
 
@@ -654,7 +653,8 @@ InitWait:
                 * get Connected twice here.
                 */
                if (dev->state != XenbusStateConnected)
-                       goto InitWait; /* no InitWait seen yet, fudge it */
+                       /* no InitWait seen yet, fudge it */
+                       xenbus_switch_state(dev, XenbusStateConnected);
 
                if (xenbus_read_unsigned(info->xbdev->otherend,
                                         "request-update", 0))
index 400d70b6937948cc5a8aac698efc64fcf49c1dde..48230a5e12f262b67d28d87adc713f462e8ec5fc 100644 (file)
@@ -232,6 +232,12 @@ static int virtio_dev_probe(struct device *_d)
                if (device_features & (1ULL << i))
                        __virtio_set_bit(dev, i);
 
+       if (drv->validate) {
+               err = drv->validate(dev);
+               if (err)
+                       goto err;
+       }
+
        err = virtio_finalize_features(dev);
        if (err)
                goto err;
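
The new ->validate() hook above runs after the device's feature bits are read
but before virtio_finalize_features(), so a driver can veto an inconsistent
offer. A hypothetical driver-side sketch (kernel context assumed; the demo_*
names are invented):

static int demo_validate(struct virtio_device *vdev)
{
        /* refuse devices that do not offer the modern interface */
        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                return -EINVAL;
        return 0;
}

static struct virtio_driver demo_driver = {
        /* ... .driver.name, .id_table, .probe, .remove ... */
        .validate = demo_validate,
};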
index 590534910dc617836e18c91b6576410a0299de26..698d5d06fa039ca1a27b151a3dcf5d322784e3f0 100644 (file)
@@ -33,8 +33,10 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;
 
-       synchronize_irq(pci_irq_vector(vp_dev->pci_dev, 0));
-       for (i = 1; i < vp_dev->msix_vectors; i++)
+       if (vp_dev->intx_enabled)
+               synchronize_irq(vp_dev->pci_dev->irq);
+
+       for (i = 0; i < vp_dev->msix_vectors; ++i)
                synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
 }
 
@@ -60,13 +62,16 @@ static irqreturn_t vp_config_changed(int irq, void *opaque)
 static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
 {
        struct virtio_pci_device *vp_dev = opaque;
+       struct virtio_pci_vq_info *info;
        irqreturn_t ret = IRQ_NONE;
-       struct virtqueue *vq;
+       unsigned long flags;
 
-       list_for_each_entry(vq, &vp_dev->vdev.vqs, list) {
-               if (vq->callback && vring_interrupt(irq, vq) == IRQ_HANDLED)
+       spin_lock_irqsave(&vp_dev->lock, flags);
+       list_for_each_entry(info, &vp_dev->virtqueues, node) {
+               if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }
+       spin_unlock_irqrestore(&vp_dev->lock, flags);
 
        return ret;
 }
@@ -97,186 +102,244 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
        return vp_vring_interrupt(irq, opaque);
 }
 
-static void vp_remove_vqs(struct virtio_device *vdev)
+static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
+                                  bool per_vq_vectors, struct irq_affinity *desc)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-       struct virtqueue *vq, *n;
+       const char *name = dev_name(&vp_dev->vdev.dev);
+       unsigned i, v;
+       int err = -ENOMEM;
 
-       list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
-               if (vp_dev->msix_vector_map) {
-                       int v = vp_dev->msix_vector_map[vq->index];
+       vp_dev->msix_vectors = nvectors;
 
-                       if (v != VIRTIO_MSI_NO_VECTOR)
-                               free_irq(pci_irq_vector(vp_dev->pci_dev, v),
-                                       vq);
-               }
-               vp_dev->del_vq(vq);
+       vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
+                                    GFP_KERNEL);
+       if (!vp_dev->msix_names)
+               goto error;
+       vp_dev->msix_affinity_masks
+               = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
+                         GFP_KERNEL);
+       if (!vp_dev->msix_affinity_masks)
+               goto error;
+       for (i = 0; i < nvectors; ++i)
+               if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+                                       GFP_KERNEL))
+                       goto error;
+
+       err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
+                                            nvectors, PCI_IRQ_MSIX |
+                                            (desc ? PCI_IRQ_AFFINITY : 0),
+                                            desc);
+       if (err < 0)
+               goto error;
+       vp_dev->msix_enabled = 1;
+
+       /* Set the vector used for configuration */
+       v = vp_dev->msix_used_vectors;
+       snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+                "%s-config", name);
+       err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+                         vp_config_changed, 0, vp_dev->msix_names[v],
+                         vp_dev);
+       if (err)
+               goto error;
+       ++vp_dev->msix_used_vectors;
+
+       v = vp_dev->config_vector(vp_dev, v);
+       /* Verify we had enough resources to assign the vector */
+       if (v == VIRTIO_MSI_NO_VECTOR) {
+               err = -EBUSY;
+               goto error;
        }
+
+       if (!per_vq_vectors) {
+               /* Shared vector for all VQs */
+               v = vp_dev->msix_used_vectors;
+               snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
+                        "%s-virtqueues", name);
+               err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
+                                 vp_vring_interrupt, 0, vp_dev->msix_names[v],
+                                 vp_dev);
+               if (err)
+                       goto error;
+               ++vp_dev->msix_used_vectors;
+       }
+       return 0;
+error:
+       return err;
+}
+
+static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
+                                    void (*callback)(struct virtqueue *vq),
+                                    const char *name,
+                                    u16 msix_vec)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
+       struct virtqueue *vq;
+       unsigned long flags;
+
+       /* fill out our structure that represents an active queue */
+       if (!info)
+               return ERR_PTR(-ENOMEM);
+
+       vq = vp_dev->setup_vq(vp_dev, info, index, callback, name,
+                             msix_vec);
+       if (IS_ERR(vq))
+               goto out_info;
+
+       info->vq = vq;
+       if (callback) {
+               spin_lock_irqsave(&vp_dev->lock, flags);
+               list_add(&info->node, &vp_dev->virtqueues);
+               spin_unlock_irqrestore(&vp_dev->lock, flags);
+       } else {
+               INIT_LIST_HEAD(&info->node);
+       }
+
+       vp_dev->vqs[index] = info;
+       return vq;
+
+out_info:
+       kfree(info);
+       return vq;
+}
+
+static void vp_del_vq(struct virtqueue *vq)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
+       struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+       unsigned long flags;
+
+       spin_lock_irqsave(&vp_dev->lock, flags);
+       list_del(&info->node);
+       spin_unlock_irqrestore(&vp_dev->lock, flags);
+
+       vp_dev->del_vq(info);
+       kfree(info);
 }
 
 /* the config->del_vqs() implementation */
 void vp_del_vqs(struct virtio_device *vdev)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtqueue *vq, *n;
        int i;
 
-       if (WARN_ON_ONCE(list_empty_careful(&vdev->vqs)))
-               return;
+       list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
+               if (vp_dev->per_vq_vectors) {
+                       int v = vp_dev->vqs[vq->index]->msix_vector;
 
-       vp_remove_vqs(vdev);
+                       if (v != VIRTIO_MSI_NO_VECTOR) {
+                               int irq = pci_irq_vector(vp_dev->pci_dev, v);
+
+                               irq_set_affinity_hint(irq, NULL);
+                               free_irq(irq, vq);
+                       }
+               }
+               vp_del_vq(vq);
+       }
+       vp_dev->per_vq_vectors = false;
+
+       if (vp_dev->intx_enabled) {
+               free_irq(vp_dev->pci_dev->irq, vp_dev);
+               vp_dev->intx_enabled = 0;
+       }
 
-       if (vp_dev->pci_dev->msix_enabled) {
-               for (i = 0; i < vp_dev->msix_vectors; i++)
+       for (i = 0; i < vp_dev->msix_used_vectors; ++i)
+               free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);
+
+       for (i = 0; i < vp_dev->msix_vectors; i++)
+               if (vp_dev->msix_affinity_masks[i])
                        free_cpumask_var(vp_dev->msix_affinity_masks[i]);
 
+       if (vp_dev->msix_enabled) {
                /* Disable the vector used for configuration */
                vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
 
-               kfree(vp_dev->msix_affinity_masks);
-               kfree(vp_dev->msix_names);
-               kfree(vp_dev->msix_vector_map);
+               pci_free_irq_vectors(vp_dev->pci_dev);
+               vp_dev->msix_enabled = 0;
        }
 
-       free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-       pci_free_irq_vectors(vp_dev->pci_dev);
+       vp_dev->msix_vectors = 0;
+       vp_dev->msix_used_vectors = 0;
+       kfree(vp_dev->msix_names);
+       vp_dev->msix_names = NULL;
+       kfree(vp_dev->msix_affinity_masks);
+       vp_dev->msix_affinity_masks = NULL;
+       kfree(vp_dev->vqs);
+       vp_dev->vqs = NULL;
 }
 
 static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
-               const char * const names[], struct irq_affinity *desc)
+               const char * const names[], bool per_vq_vectors,
+               struct irq_affinity *desc)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-       const char *name = dev_name(&vp_dev->vdev.dev);
-       int i, j, err = -ENOMEM, allocated_vectors, nvectors;
-       unsigned flags = PCI_IRQ_MSIX;
-       bool shared = false;
        u16 msix_vec;
+       int i, err, nvectors, allocated_vectors;
 
-       if (desc) {
-               flags |= PCI_IRQ_AFFINITY;
-               desc->pre_vectors++; /* virtio config vector */
-       }
-
-       nvectors = 1;
-       for (i = 0; i < nvqs; i++)
-               if (callbacks[i])
-                       nvectors++;
-
-       /* Try one vector per queue first. */
-       err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
-                       nvectors, flags, desc);
-       if (err < 0) {
-               /* Fallback to one vector for config, one shared for queues. */
-               shared = true;
-               err = pci_alloc_irq_vectors(vp_dev->pci_dev, 2, 2,
-                               PCI_IRQ_MSIX);
-               if (err < 0)
-                       return err;
-       }
-       if (err < 0)
-               return err;
-
-       vp_dev->msix_vectors = nvectors;
-       vp_dev->msix_names = kmalloc_array(nvectors,
-                       sizeof(*vp_dev->msix_names), GFP_KERNEL);
-       if (!vp_dev->msix_names)
-               goto out_free_irq_vectors;
-
-       vp_dev->msix_affinity_masks = kcalloc(nvectors,
-                       sizeof(*vp_dev->msix_affinity_masks), GFP_KERNEL);
-       if (!vp_dev->msix_affinity_masks)
-               goto out_free_msix_names;
+       vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+       if (!vp_dev->vqs)
+               return -ENOMEM;
 
-       for (i = 0; i < nvectors; ++i) {
-               if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
-                               GFP_KERNEL))
-                       goto out_free_msix_affinity_masks;
+       if (per_vq_vectors) {
+               /* Best option: one for change interrupt, one per vq. */
+               nvectors = 1;
+               for (i = 0; i < nvqs; ++i)
+                       if (callbacks[i])
+                               ++nvectors;
+       } else {
+               /* Second best: one for change, shared for all vqs. */
+               nvectors = 2;
        }
 
-       /* Set the vector used for configuration */
-       snprintf(vp_dev->msix_names[0], sizeof(*vp_dev->msix_names),
-                "%s-config", name);
-       err = request_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_config_changed,
-                       0, vp_dev->msix_names[0], vp_dev);
+       err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
+                                     per_vq_vectors ? desc : NULL);
        if (err)
-               goto out_free_msix_affinity_masks;
+               goto error_find;
 
-       /* Verify we had enough resources to assign the vector */
-       if (vp_dev->config_vector(vp_dev, 0) == VIRTIO_MSI_NO_VECTOR) {
-               err = -EBUSY;
-               goto out_free_config_irq;
-       }
-
-       vp_dev->msix_vector_map = kmalloc_array(nvqs,
-                       sizeof(*vp_dev->msix_vector_map), GFP_KERNEL);
-       if (!vp_dev->msix_vector_map)
-               goto out_disable_config_irq;
-
-       allocated_vectors = j = 1; /* vector 0 is the config interrupt */
+       vp_dev->per_vq_vectors = per_vq_vectors;
+       allocated_vectors = vp_dev->msix_used_vectors;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }
 
-               if (callbacks[i])
-                       msix_vec = allocated_vectors;
-               else
+               if (!callbacks[i])
                        msix_vec = VIRTIO_MSI_NO_VECTOR;
-
-               vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
-                               msix_vec);
+               else if (vp_dev->per_vq_vectors)
+                       msix_vec = allocated_vectors++;
+               else
+                       msix_vec = VP_MSIX_VQ_VECTOR;
+               vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+                                    msix_vec);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
-                       goto out_remove_vqs;
+                       goto error_find;
                }
 
-               if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
-                       vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
+               if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
                        continue;
-               }
 
-               snprintf(vp_dev->msix_names[j],
-                        sizeof(*vp_dev->msix_names), "%s-%s",
+               /* allocate per-vq irq if available and necessary */
+               snprintf(vp_dev->msix_names[msix_vec],
+                        sizeof *vp_dev->msix_names,
+                        "%s-%s",
                         dev_name(&vp_dev->vdev.dev), names[i]);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
-                                 vring_interrupt, IRQF_SHARED,
-                                 vp_dev->msix_names[j], vqs[i]);
-               if (err) {
-                       /* don't free this irq on error */
-                       vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR;
-                       goto out_remove_vqs;
-               }
-               vp_dev->msix_vector_map[i] = msix_vec;
-               j++;
-
-               /*
-                * Use a different vector for each queue if they are available,
-                * else share the same vector for all VQs.
-                */
-               if (!shared)
-                       allocated_vectors++;
+                                 vring_interrupt, 0,
+                                 vp_dev->msix_names[msix_vec],
+                                 vqs[i]);
+               if (err)
+                       goto error_find;
        }
-
        return 0;
 
-out_remove_vqs:
-       vp_remove_vqs(vdev);
-       kfree(vp_dev->msix_vector_map);
-out_disable_config_irq:
-       vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);
-out_free_config_irq:
-       free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
-out_free_msix_affinity_masks:
-       for (i = 0; i < nvectors; i++) {
-               if (vp_dev->msix_affinity_masks[i])
-                       free_cpumask_var(vp_dev->msix_affinity_masks[i]);
-       }
-       kfree(vp_dev->msix_affinity_masks);
-out_free_msix_names:
-       kfree(vp_dev->msix_names);
-out_free_irq_vectors:
-       pci_free_irq_vectors(vp_dev->pci_dev);
+error_find:
+       vp_del_vqs(vdev);
        return err;
 }
 
@@ -287,29 +350,33 @@ static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i, err;
 
+       vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
+       if (!vp_dev->vqs)
+               return -ENOMEM;
+
        err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
                        dev_name(&vdev->dev), vp_dev);
        if (err)
-               return err;
+               goto out_del_vqs;
 
+       vp_dev->intx_enabled = 1;
+       vp_dev->per_vq_vectors = false;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }
-               vqs[i] = vp_dev->setup_vq(vp_dev, i, callbacks[i], names[i],
-                               VIRTIO_MSI_NO_VECTOR);
+               vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
+                                    VIRTIO_MSI_NO_VECTOR);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
-                       goto out_remove_vqs;
+                       goto out_del_vqs;
                }
        }
 
        return 0;
-
-out_remove_vqs:
-       vp_remove_vqs(vdev);
-       free_irq(pci_irq_vector(vp_dev->pci_dev, 0), vp_dev);
+out_del_vqs:
+       vp_del_vqs(vdev);
        return err;
 }
 
@@ -320,9 +387,15 @@ int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 {
        int err;
 
-       err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, desc);
+       /* Try MSI-X with one vector per queue. */
+       err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc);
        if (!err)
                return 0;
+       /* Fallback: MSI-X with one vector for config, one shared for queues. */
+       err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc);
+       if (!err)
+               return 0;
+       /* Finally fall back to regular interrupts. */
        return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
 }
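The function above encodes the whole of the new strategy: try the richest interrupt mode first and degrade on failure. A self-contained toy model of that ladder, with hypothetical stand-ins for the three setup paths (illustration only, not the driver's code):

#include <stdio.h>

/* Toy model of vp_find_vqs(): try per-VQ MSI-X, then shared MSI-X,
 * then legacy INTx. Each setup function is a hypothetical stand-in
 * returning 0 on success, nonzero on failure. */
static int setup_per_vq_msix(void) { return -1; /* pretend vectors ran out */ }
static int setup_shared_msix(void) { return 0;  /* pretend this succeeds   */ }
static int setup_intx(void)        { return 0; }

int main(void)
{
	int (*tiers[])(void) = { setup_per_vq_msix, setup_shared_msix, setup_intx };
	const char *names[]  = { "per-VQ MSI-X", "shared MSI-X", "legacy INTx" };

	for (unsigned int i = 0; i < 3; i++) {
		if (tiers[i]() == 0) {
			printf("using %s\n", names[i]);
			return 0;
		}
	}
	printf("no interrupt mode available\n");
	return 1;
}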
 
@@ -342,15 +415,16 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 {
        struct virtio_device *vdev = vq->vdev;
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+       struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
+       struct cpumask *mask;
+       unsigned int irq;
 
        if (!vq->callback)
                return -EINVAL;
 
-       if (vp_dev->pci_dev->msix_enabled) {
-               int vec = vp_dev->msix_vector_map[vq->index];
-               struct cpumask *mask = vp_dev->msix_affinity_masks[vec];
-               unsigned int irq = pci_irq_vector(vp_dev->pci_dev, vec);
-
+       if (vp_dev->msix_enabled) {
+               mask = vp_dev->msix_affinity_masks[info->msix_vector];
+               irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
                if (cpu == -1)
                        irq_set_affinity_hint(irq, NULL);
                else {
@@ -365,12 +439,13 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
 const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
 {
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-       unsigned int *map = vp_dev->msix_vector_map;
 
-       if (!map || map[index] == VIRTIO_MSI_NO_VECTOR)
+       if (!vp_dev->per_vq_vectors ||
+           vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
                return NULL;
 
-       return pci_irq_get_affinity(vp_dev->pci_dev, map[index]);
+       return pci_irq_get_affinity(vp_dev->pci_dev,
+                                   vp_dev->vqs[index]->msix_vector);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -441,6 +516,8 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
        vp_dev->vdev.dev.parent = &pci_dev->dev;
        vp_dev->vdev.dev.release = virtio_pci_release_dev;
        vp_dev->pci_dev = pci_dev;
+       INIT_LIST_HEAD(&vp_dev->virtqueues);
+       spin_lock_init(&vp_dev->lock);
 
        /* enable the device */
        rc = pci_enable_device(pci_dev);
index ac8c9d7889646ab3cc28bb51accd0d3840d5a34f..e96334aec1e0d70842d1a9fc53462ab728be87c0 100644 (file)
 #include <linux/highmem.h>
 #include <linux/spinlock.h>
 
+struct virtio_pci_vq_info {
+       /* the actual virtqueue */
+       struct virtqueue *vq;
+
+       /* the list node for the virtqueues list */
+       struct list_head node;
+
+       /* MSI-X vector (or none) */
+       unsigned msix_vector;
+};
+
 /* Our device structure */
 struct virtio_pci_device {
        struct virtio_device vdev;
@@ -64,25 +75,47 @@ struct virtio_pci_device {
        /* the IO mapping for the PCI config space */
        void __iomem *ioaddr;
 
+       /* a list of queues so we can dispatch IRQs */
+       spinlock_t lock;
+       struct list_head virtqueues;
+
+       /* array of all queues for house-keeping */
+       struct virtio_pci_vq_info **vqs;
+
+       /* MSI-X support */
+       int msix_enabled;
+       int intx_enabled;
        cpumask_var_t *msix_affinity_masks;
        /* Name strings for interrupts. This size should be enough,
         * and I'm too lazy to allocate each name separately. */
        char (*msix_names)[256];
-       /* Total Number of MSI-X vectors (including per-VQ ones). */
-       int msix_vectors;
-       /* Map of per-VQ MSI-X vectors, may be NULL */
-       unsigned *msix_vector_map;
+       /* Number of available vectors */
+       unsigned msix_vectors;
+       /* Vectors allocated, excluding per-vq vectors if any */
+       unsigned msix_used_vectors;
+
+       /* Whether we have a vector per vq */
+       bool per_vq_vectors;
 
        struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
+                                     struct virtio_pci_vq_info *info,
                                      unsigned idx,
                                      void (*callback)(struct virtqueue *vq),
                                      const char *name,
                                      u16 msix_vec);
-       void (*del_vq)(struct virtqueue *vq);
+       void (*del_vq)(struct virtio_pci_vq_info *info);
 
        u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
 };
 
+/* Constants for MSI-X */
+/* Use first vector for configuration changes, second and the rest for
+ * virtqueues. Thus, we need at least 2 vectors for MSI. */
+enum {
+       VP_MSIX_CONFIG_VECTOR = 0,
+       VP_MSIX_VQ_VECTOR = 1,
+};
+
 /* Convert a generic virtio device to our structure */
 static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
 {
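The `virtqueues` list and `lock` reintroduced in this header exist so that one shared vector (or legacy INTx) can be fanned out to every queue: the handler walks the list and lets each virtqueue decide whether the interrupt was for it. A sketch of such a handler, consistent with these fields (kernel context assumed; `vring_interrupt()` is the existing vring entry point, and the driver-local header shown above is needed for struct virtio_pci_device):

#include <linux/interrupt.h>   /* irqreturn_t */
#include <linux/spinlock.h>
#include <linux/virtio_ring.h> /* vring_interrupt() */

/* Sketch: a shared-vector handler walks the bookkeeping list and lets
 * each virtqueue check whether it has work pending. */
static irqreturn_t vp_shared_vector_handler(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}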
index f7362c5fe18a96a902bc81138b8ea796e03e79d9..4bfa48fb1324660f82ae6272d2e1ecc33522ba3e 100644 (file)
@@ -112,6 +112,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+                                 struct virtio_pci_vq_info *info,
                                  unsigned index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
@@ -129,6 +130,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
        if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
                return ERR_PTR(-ENOENT);
 
+       info->msix_vector = msix_vec;
+
        /* create the vring */
        vq = vring_create_virtqueue(index, num,
                                    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
@@ -159,13 +162,14 @@ out_deactivate:
        return ERR_PTR(err);
 }
 
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
 {
+       struct virtqueue *vq = info->vq;
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
        iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
 
-       if (vp_dev->pci_dev->msix_enabled) {
+       if (vp_dev->msix_enabled) {
                iowrite16(VIRTIO_MSI_NO_VECTOR,
                          vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
                /* Flush the write out to device */
index 7bc3004b840ef3e3dabb5c2e24af8a935f552eba..8978f109d2d79828e5b0c12649debc481dfacd7f 100644 (file)
@@ -293,6 +293,7 @@ static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
 }
 
 static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
+                                 struct virtio_pci_vq_info *info,
                                  unsigned index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
@@ -322,6 +323,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
        /* get offset of notification word for this vq */
        off = vp_ioread16(&cfg->queue_notify_off);
 
+       info->msix_vector = msix_vec;
+
        /* create the vring */
        vq = vring_create_virtqueue(index, num,
                                    SMP_CACHE_BYTES, &vp_dev->vdev,
@@ -405,13 +408,14 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
        return 0;
 }
 
-static void del_vq(struct virtqueue *vq)
+static void del_vq(struct virtio_pci_vq_info *info)
 {
+       struct virtqueue *vq = info->vq;
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 
        vp_iowrite16(vq->index, &vp_dev->common->queue_select);
 
-       if (vp_dev->pci_dev->msix_enabled) {
+       if (vp_dev->msix_enabled) {
                vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
                             &vp_dev->common->queue_msix_vector);
                /* Flush the write out to device */
index 1f4733b80c877426fa337e67eebf708b5fb9b41c..f3b089b7c0b62ab22fd2aba11111f9bcecea50cd 100644 (file)
@@ -442,8 +442,10 @@ static int xenbus_write_transaction(unsigned msg_type,
                return xenbus_command_reply(u, XS_ERROR, "ENOENT");
 
        rc = xenbus_dev_request_and_reply(&u->u.msg, u);
-       if (rc)
+       if (rc && trans) {
+               list_del(&trans->list);
                kfree(trans);
+       }
 
 out:
        return rc;
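The fix above is the classic unlink-before-free rule: the transaction was freed while still chained on the per-user transaction list, leaving a dangling node for later list walks. The same rule in a minimal self-contained form:

#include <stdio.h>
#include <stdlib.h>

/* Minimal intrusive list showing why a node must be unlinked before free. */
struct node { struct node *prev, *next; int id; };

static void list_del_node(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

int main(void)
{
	struct node head = { &head, &head, 0 };
	struct node *t = malloc(sizeof(*t));

	t->id = 1;
	t->prev = &head; t->next = head.next;   /* list_add */
	head.next->prev = t; head.next = t;

	list_del_node(t);   /* the fix: unlink first...                    */
	free(t);            /* ...then free; the list no longer points here */

	printf("list empty: %s\n", head.next == &head ? "yes" : "no");
	return 0;
}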
index a18510be76c141e5d4b4d687c2eb5498cc273c56..5e71f1ea3391b034dc8e6f55f62d82dbe76e9811 100644 (file)
@@ -7910,7 +7910,6 @@ struct btrfs_retry_complete {
 static void btrfs_retry_endio_nocsum(struct bio *bio)
 {
        struct btrfs_retry_complete *done = bio->bi_private;
-       struct inode *inode;
        struct bio_vec *bvec;
        int i;
 
@@ -7918,12 +7917,12 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
                goto end;
 
        ASSERT(bio->bi_vcnt == 1);
-       inode = bio->bi_io_vec->bv_page->mapping->host;
-       ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+       ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
 
        done->uptodate = 1;
        bio_for_each_segment_all(bvec, bio, i)
-       clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0);
+               clean_io_failure(BTRFS_I(done->inode), done->start,
+                                bvec->bv_page, 0);
 end:
        complete(&done->done);
        bio_put(bio);
@@ -7973,8 +7972,10 @@ next_block_or_try_again:
 
                start += sectorsize;
 
-               if (nr_sectors--) {
+               nr_sectors--;
+               if (nr_sectors) {
                        pgoff += sectorsize;
+                       ASSERT(pgoff < PAGE_SIZE);
                        goto next_block_or_try_again;
                }
        }
@@ -7986,9 +7987,7 @@ static void btrfs_retry_endio(struct bio *bio)
 {
        struct btrfs_retry_complete *done = bio->bi_private;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
-       struct inode *inode;
        struct bio_vec *bvec;
-       u64 start;
        int uptodate;
        int ret;
        int i;
@@ -7998,11 +7997,8 @@ static void btrfs_retry_endio(struct bio *bio)
 
        uptodate = 1;
 
-       start = done->start;
-
        ASSERT(bio->bi_vcnt == 1);
-       inode = bio->bi_io_vec->bv_page->mapping->host;
-       ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(inode));
+       ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));
 
        bio_for_each_segment_all(bvec, bio, i) {
                ret = __readpage_endio_check(done->inode, io_bio, i,
@@ -8080,8 +8076,10 @@ next:
 
                ASSERT(nr_sectors);
 
-               if (--nr_sectors) {
+               nr_sectors--;
+               if (nr_sectors) {
                        pgoff += sectorsize;
+                       ASSERT(pgoff < PAGE_SIZE);
                        goto next_block;
                }
        }
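The first hunk above fixes a real off-by-one: `if (nr_sectors--)` tests the value before the decrement, so with one sector left the code still jumped back for another iteration. (The second hunk's `if (--nr_sectors)` was already correct; it is only rewritten in the same explicit style and gains the `ASSERT(pgoff < PAGE_SIZE)` guard.) The difference in miniature:

#include <stdio.h>

/* Demonstrates why "if (nr_sectors--)" loops once too often: the test
 * sees the value before the decrement. */
int main(void)
{
	int a = 1, b = 1;

	if (a--)   /* tests 1: taken, although no sectors remain */
		printf("post-decrement test: extra iteration (a is now %d)\n", a);

	b--;
	if (b)     /* tests 0: correctly not taken */
		printf("never printed\n");
	else
		printf("decrement-then-test: loop ends (b is %d)\n", b);

	return 0;
}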
index da687dc79cce6155a278038a15775637c97ce3cc..9530a333d302c0c13c3e8463814b642a023afc23 100644 (file)
@@ -549,16 +549,19 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                case Opt_ssd:
                        btrfs_set_and_info(info, SSD,
                                           "use ssd allocation scheme");
+                       btrfs_clear_opt(info->mount_opt, NOSSD);
                        break;
                case Opt_ssd_spread:
                        btrfs_set_and_info(info, SSD_SPREAD,
                                           "use spread ssd allocation scheme");
                        btrfs_set_opt(info->mount_opt, SSD);
+                       btrfs_clear_opt(info->mount_opt, NOSSD);
                        break;
                case Opt_nossd:
                        btrfs_set_and_info(info, NOSSD,
                                             "not using ssd allocation scheme");
                        btrfs_clear_opt(info->mount_opt, SSD);
+                       btrfs_clear_opt(info->mount_opt, SSD_SPREAD);
                        break;
                case Opt_barrier:
                        btrfs_clear_and_info(info, NOBARRIER,
index 73d56eef5e60f311225b06ad7adccedeed54a0db..ab8a66d852f91cb04206361a551b8c57760c9c40 100644 (file)
@@ -6213,7 +6213,7 @@ int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
        for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
                dev = bbio->stripes[dev_nr].dev;
                if (!dev || !dev->bdev ||
-                   (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) {
+                   (bio_op(first_bio) == REQ_OP_WRITE && !dev->writeable)) {
                        bbio_error(bbio, first_bio, logical);
                        continue;
                }
index 15e1db8738aecad0c8a86888c0fa1ada5f9b7623..dd3f5fabfdf6a35e25f9dae28020dcc9bba65007 100644 (file)
@@ -972,6 +972,86 @@ out:
        return rc;
 }
 
+ssize_t cifs_file_copychunk_range(unsigned int xid,
+                               struct file *src_file, loff_t off,
+                               struct file *dst_file, loff_t destoff,
+                               size_t len, unsigned int flags)
+{
+       struct inode *src_inode = file_inode(src_file);
+       struct inode *target_inode = file_inode(dst_file);
+       struct cifsFileInfo *smb_file_src;
+       struct cifsFileInfo *smb_file_target;
+       struct cifs_tcon *src_tcon;
+       struct cifs_tcon *target_tcon;
+       ssize_t rc;
+
+       cifs_dbg(FYI, "copychunk range\n");
+
+       if (src_inode == target_inode) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       if (!src_file->private_data || !dst_file->private_data) {
+               rc = -EBADF;
+               cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+               goto out;
+       }
+
+       rc = -EXDEV;
+       smb_file_target = dst_file->private_data;
+       smb_file_src = src_file->private_data;
+       src_tcon = tlink_tcon(smb_file_src->tlink);
+       target_tcon = tlink_tcon(smb_file_target->tlink);
+
+       if (src_tcon->ses != target_tcon->ses) {
+               cifs_dbg(VFS, "source and target of copy not on same server\n");
+               goto out;
+       }
+
+       /*
+        * Note: the cifs case is easier than btrfs since the server is
+        * responsible for checking for proper open modes and file type,
+        * and if it wants, the server could even support copying a range
+        * where source = target.
+        */
+       lock_two_nondirectories(target_inode, src_inode);
+
+       cifs_dbg(FYI, "about to flush pages\n");
+       /* should we flush the first and last pages first? */
+       truncate_inode_pages(&target_inode->i_data, 0);
+
+       if (target_tcon->ses->server->ops->copychunk_range)
+               rc = target_tcon->ses->server->ops->copychunk_range(xid,
+                       smb_file_src, smb_file_target, off, len, destoff);
+       else
+               rc = -EOPNOTSUPP;
+
+       /* force revalidate of size and timestamps of target file now
+        * that target is updated on the server
+        */
+       CIFS_I(target_inode)->time = 0;
+       /* although unlocking in the reverse order from locking is not
+        * strictly necessary here, it is a little cleaner to be consistent
+        */
+       unlock_two_nondirectories(src_inode, target_inode);
+
+out:
+       return rc;
+}
+
+static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
+                               struct file *dst_file, loff_t destoff,
+                               size_t len, unsigned int flags)
+{
+       unsigned int xid = get_xid();
+       ssize_t rc;
+
+       rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
+                                       len, flags);
+       free_xid(xid);
+       return rc;
+}
+
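With `.copy_file_range` wired into every file_operations table below, the `copy_file_range(2)` syscall on a CIFS mount turns into a server-side copychunk instead of a read/write round trip through the client. A minimal userspace caller (the glibc wrapper appeared in 2.27; older systems need `syscall(__NR_copy_file_range, ...)`):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Copies src to dst one chunk per syscall; on a CIFS/SMB2 mount the
 * kernel offloads each chunk to the server. */
int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <src> <dst>\n", argv[0]);
		return 1;
	}

	int in = open(argv[1], O_RDONLY);
	int out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}

	ssize_t n;
	do {
		n = copy_file_range(in, NULL, out, NULL, 1 << 20, 0);
	} while (n > 0);

	if (n < 0)
		perror("copy_file_range");
	return n < 0;
}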
 const struct file_operations cifs_file_ops = {
        .read_iter = cifs_loose_read_iter,
        .write_iter = cifs_file_write_iter,
@@ -984,6 +1064,7 @@ const struct file_operations cifs_file_ops = {
        .splice_read = generic_file_splice_read,
        .llseek = cifs_llseek,
        .unlocked_ioctl = cifs_ioctl,
+       .copy_file_range = cifs_copy_file_range,
        .clone_file_range = cifs_clone_file_range,
        .setlease = cifs_setlease,
        .fallocate = cifs_fallocate,
@@ -1001,6 +1082,7 @@ const struct file_operations cifs_file_strict_ops = {
        .splice_read = generic_file_splice_read,
        .llseek = cifs_llseek,
        .unlocked_ioctl = cifs_ioctl,
+       .copy_file_range = cifs_copy_file_range,
        .clone_file_range = cifs_clone_file_range,
        .setlease = cifs_setlease,
        .fallocate = cifs_fallocate,
@@ -1018,6 +1100,7 @@ const struct file_operations cifs_file_direct_ops = {
        .mmap = cifs_file_mmap,
        .splice_read = generic_file_splice_read,
        .unlocked_ioctl  = cifs_ioctl,
+       .copy_file_range = cifs_copy_file_range,
        .clone_file_range = cifs_clone_file_range,
        .llseek = cifs_llseek,
        .setlease = cifs_setlease,
@@ -1035,6 +1118,7 @@ const struct file_operations cifs_file_nobrl_ops = {
        .splice_read = generic_file_splice_read,
        .llseek = cifs_llseek,
        .unlocked_ioctl = cifs_ioctl,
+       .copy_file_range = cifs_copy_file_range,
        .clone_file_range = cifs_clone_file_range,
        .setlease = cifs_setlease,
        .fallocate = cifs_fallocate,
@@ -1051,6 +1135,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
        .splice_read = generic_file_splice_read,
        .llseek = cifs_llseek,
        .unlocked_ioctl = cifs_ioctl,
+       .copy_file_range = cifs_copy_file_range,
        .clone_file_range = cifs_clone_file_range,
        .setlease = cifs_setlease,
        .fallocate = cifs_fallocate,
@@ -1067,6 +1152,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
        .mmap = cifs_file_mmap,
        .splice_read = generic_file_splice_read,
        .unlocked_ioctl  = cifs_ioctl,
+       .copy_file_range = cifs_copy_file_range,
        .clone_file_range = cifs_clone_file_range,
        .llseek = cifs_llseek,
        .setlease = cifs_setlease,
@@ -1078,6 +1164,7 @@ const struct file_operations cifs_dir_ops = {
        .release = cifs_closedir,
        .read    = generic_read_dir,
        .unlocked_ioctl  = cifs_ioctl,
+       .copy_file_range = cifs_copy_file_range,
        .clone_file_range = cifs_clone_file_range,
        .llseek = generic_file_llseek,
 };
index da717fee30260be533f0992d5ed93114907f1c31..30bf89b1fd9a789ec02070f534d9d9f68b593650 100644 (file)
@@ -139,6 +139,11 @@ extern ssize_t     cifs_listxattr(struct dentry *, char *, size_t);
 # define cifs_listxattr NULL
 #endif
 
+extern ssize_t cifs_file_copychunk_range(unsigned int xid,
+                                       struct file *src_file, loff_t off,
+                                       struct file *dst_file, loff_t destoff,
+                                       size_t len, unsigned int flags);
+
 extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 #ifdef CONFIG_CIFS_NFSD_EXPORT
 extern const struct export_operations cifs_export_ops;
index d42dd3288647808216a6c22c56731dc960e525f8..37f5a41cc50cc523cd76c790100398d4db9e80ca 100644 (file)
@@ -243,6 +243,7 @@ struct smb_version_operations {
        /* verify the message */
        int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
        bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+       int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
        void (*downgrade_oplock)(struct TCP_Server_Info *,
                                        struct cifsInodeInfo *, bool);
        /* process transaction2 response */
@@ -407,9 +408,10 @@ struct smb_version_operations {
        char * (*create_lease_buf)(u8 *, u8);
        /* parse lease context buffer and return oplock/epoch info */
        __u8 (*parse_lease_buf)(void *, unsigned int *);
-       int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file,
-                       struct cifsFileInfo *target_file, u64 src_off, u64 len,
-                       u64 dest_off);
+       ssize_t (*copychunk_range)(const unsigned int,
+                       struct cifsFileInfo *src_file,
+                       struct cifsFileInfo *target_file,
+                       u64 src_off, u64 len, u64 dest_off);
        int (*duplicate_extents)(const unsigned int, struct cifsFileInfo *src,
                        struct cifsFileInfo *target_file, u64 src_off, u64 len,
                        u64 dest_off);
@@ -946,7 +948,6 @@ struct cifs_tcon {
        bool use_persistent:1; /* use persistent instead of durable handles */
 #ifdef CONFIG_CIFS_SMB2
        bool print:1;           /* set if connection to printer share */
-       bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
        __le32 capabilities;
        __u32 share_flags;
        __u32 maximal_access;
@@ -1343,6 +1344,7 @@ struct mid_q_entry {
        void *callback_data;      /* general purpose pointer for callback */
        void *resp_buf;         /* pointer to received SMB header */
        int mid_state;  /* wish this were enum but can not pass to wait_event */
+       unsigned int mid_flags;
        __le16 command;         /* smb command code */
        bool large_buf:1;       /* if valid response, is pointer to large buf */
        bool multiRsp:1;        /* multiple trans2 responses for one request  */
@@ -1350,6 +1352,12 @@ struct mid_q_entry {
        bool decrypted:1;       /* decrypted entry */
 };
 
+struct close_cancelled_open {
+       struct cifs_fid         fid;
+       struct cifs_tcon        *tcon;
+       struct work_struct      work;
+};
+
 /*     Make code in transport.c a little cleaner by moving
        update of optional stats into function below */
 #ifdef CONFIG_CIFS_STATS2
@@ -1481,6 +1489,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
 #define   MID_RESPONSE_MALFORMED 0x10
 #define   MID_SHUTDOWN          0x20
 
+/* Flags */
+#define   MID_WAIT_CANCELLED    1 /* Cancelled while waiting for response */
+
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
 #define   CIFS_SMALL_BUFFER     1
index 066950671929682399974fb4fb8641cf78c60855..5d21f00ae341b4ec002eb33dd79c7a16a1e1dfa4 100644 (file)
@@ -1428,6 +1428,8 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 
        length = cifs_discard_remaining_data(server);
        dequeue_mid(mid, rdata->result);
+       mid->resp_buf = server->smallbuf;
+       server->smallbuf = NULL;
        return length;
 }
 
@@ -1541,6 +1543,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
                return cifs_readv_discard(server, mid);
 
        dequeue_mid(mid, false);
+       mid->resp_buf = server->smallbuf;
+       server->smallbuf = NULL;
        return length;
 }
 
index 9ae695ae3ed7be3788db2a889e34cde8a3224c9c..d82467cfb0e2df5ef7e655bc601f4f17f3b890b1 100644 (file)
@@ -904,10 +904,19 @@ cifs_demultiplex_thread(void *p)
 
                server->lstrp = jiffies;
                if (mid_entry != NULL) {
+                       if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
+                            mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
+                                       server->ops->handle_cancelled_mid)
+                               server->ops->handle_cancelled_mid(
+                                                       mid_entry->resp_buf,
+                                                       server);
+
                        if (!mid_entry->multiRsp || mid_entry->multiEnd)
                                mid_entry->callback(mid_entry);
-               } else if (!server->ops->is_oplock_break ||
-                          !server->ops->is_oplock_break(buf, server)) {
+               } else if (server->ops->is_oplock_break &&
+                          server->ops->is_oplock_break(buf, server)) {
+                       cifs_dbg(FYI, "Received oplock break\n");
+               } else {
                        cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
                                 atomic_read(&midCount));
                        cifs_dump_mem("Received Data is: ", buf,
@@ -3744,6 +3753,9 @@ try_mount_again:
        if (IS_ERR(tcon)) {
                rc = PTR_ERR(tcon);
                tcon = NULL;
+               if (rc == -EACCES)
+                       goto mount_fail_check;
+
                goto remote_path_check;
        }
 
index aa3debbba82648944a1e1426516abebb268b7ca0..21d4045357397de647b8fe0282ed1a436b7e3a7f 100644 (file)
@@ -2597,7 +2597,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
                wdata->credits = credits;
 
                if (!wdata->cfile->invalidHandle ||
-                   !cifs_reopen_file(wdata->cfile, false))
+                   !(rc = cifs_reopen_file(wdata->cfile, false)))
                        rc = server->ops->async_writev(wdata,
                                        cifs_uncached_writedata_release);
                if (rc) {
@@ -3022,7 +3022,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
                rdata->credits = credits;
 
                if (!rdata->cfile->invalidHandle ||
-                   !cifs_reopen_file(rdata->cfile, true))
+                   !(rc = cifs_reopen_file(rdata->cfile, true)))
                        rc = server->ops->async_readv(rdata);
 error:
                if (rc) {
@@ -3617,7 +3617,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                }
 
                if (!rdata->cfile->invalidHandle ||
-                   !cifs_reopen_file(rdata->cfile, true))
+                   !(rc = cifs_reopen_file(rdata->cfile, true)))
                        rc = server->ops->async_readv(rdata);
                if (rc) {
                        add_credits_and_wake_if(server, rdata->credits, 0);
index 001528781b6b0f80552c33f3d8e93fb9cfc27b86..265c45fe4ea5e5246f5304480c2b4b66bf9d6c73 100644 (file)
 #include "cifs_ioctl.h"
 #include <linux/btrfs.h>
 
-static int cifs_file_clone_range(unsigned int xid, struct file *src_file,
-                         struct file *dst_file)
-{
-       struct inode *src_inode = file_inode(src_file);
-       struct inode *target_inode = file_inode(dst_file);
-       struct cifsFileInfo *smb_file_src;
-       struct cifsFileInfo *smb_file_target;
-       struct cifs_tcon *src_tcon;
-       struct cifs_tcon *target_tcon;
-       int rc;
-
-       cifs_dbg(FYI, "ioctl clone range\n");
-
-       if (!src_file->private_data || !dst_file->private_data) {
-               rc = -EBADF;
-               cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
-               goto out;
-       }
-
-       rc = -EXDEV;
-       smb_file_target = dst_file->private_data;
-       smb_file_src = src_file->private_data;
-       src_tcon = tlink_tcon(smb_file_src->tlink);
-       target_tcon = tlink_tcon(smb_file_target->tlink);
-
-       if (src_tcon->ses != target_tcon->ses) {
-               cifs_dbg(VFS, "source and target of copy not on same server\n");
-               goto out;
-       }
-
-       /*
-        * Note: cifs case is easier than btrfs since server responsible for
-        * checks for proper open modes and file type and if it wants
-        * server could even support copy of range where source = target
-        */
-       lock_two_nondirectories(target_inode, src_inode);
-
-       cifs_dbg(FYI, "about to flush pages\n");
-       /* should we flush first and last page first */
-       truncate_inode_pages(&target_inode->i_data, 0);
-
-       if (target_tcon->ses->server->ops->clone_range)
-               rc = target_tcon->ses->server->ops->clone_range(xid,
-                       smb_file_src, smb_file_target, 0, src_inode->i_size, 0);
-       else
-               rc = -EOPNOTSUPP;
-
-       /* force revalidate of size and timestamps of target file now
-          that target is updated on the server */
-       CIFS_I(target_inode)->time = 0;
-       /* although unlocking in the reverse order from locking is not
-          strictly necessary here it is a little cleaner to be consistent */
-       unlock_two_nondirectories(src_inode, target_inode);
-out:
-       return rc;
-}
-
-static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
+static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
                        unsigned long srcfd)
 {
        int rc;
        struct fd src_file;
        struct inode *src_inode;
 
-       cifs_dbg(FYI, "ioctl clone range\n");
+       cifs_dbg(FYI, "ioctl copychunk range\n");
        /* the destination must be opened for writing */
        if (!(dst_file->f_mode & FMODE_WRITE)) {
                cifs_dbg(FYI, "file target not open for write\n");
@@ -129,7 +72,8 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
        if (S_ISDIR(src_inode->i_mode))
                goto out_fput;
 
-       rc = cifs_file_clone_range(xid, src_file.file, dst_file);
+       rc = cifs_file_copychunk_range(xid, src_file.file, 0, dst_file, 0,
+                                       src_inode->i_size, 0);
 
 out_fput:
        fdput(src_file);
@@ -251,7 +195,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                        }
                        break;
                case CIFS_IOC_COPYCHUNK_FILE:
-                       rc = cifs_ioctl_clone(xid, filep, arg);
+                       rc = cifs_ioctl_copychunk(xid, filep, arg);
                        break;
                case CIFS_IOC_SET_INTEGRITY:
                        if (pSMBFile == NULL)
index fd516ea8b8f89c46cb1d9977d53fbbb9d16de9d3..1a04b3a5beb164cb22c166f5c7ac03b65e70d00b 100644 (file)
@@ -659,3 +659,49 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
        cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
        return false;
 }
+
+void
+smb2_cancelled_close_fid(struct work_struct *work)
+{
+       struct close_cancelled_open *cancelled = container_of(work,
+                                       struct close_cancelled_open, work);
+
+       cifs_dbg(VFS, "Close unmatched open\n");
+
+       SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
+                  cancelled->fid.volatile_fid);
+       cifs_put_tcon(cancelled->tcon);
+       kfree(cancelled);
+}
+
+int
+smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+{
+       struct smb2_sync_hdr *sync_hdr = get_sync_hdr(buffer);
+       struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
+       struct cifs_tcon *tcon;
+       struct close_cancelled_open *cancelled;
+
+       if (sync_hdr->Command != SMB2_CREATE ||
+           sync_hdr->Status != STATUS_SUCCESS)
+               return 0;
+
+       cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+       if (!cancelled)
+               return -ENOMEM;
+
+       tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId,
+                                 sync_hdr->TreeId);
+       if (!tcon) {
+               kfree(cancelled);
+               return -ENOENT;
+       }
+
+       cancelled->fid.persistent_fid = rsp->PersistentFileId;
+       cancelled->fid.volatile_fid = rsp->VolatileFileId;
+       cancelled->tcon = tcon;
+       INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+       queue_work(cifsiod_wq, &cancelled->work);
+
+       return 0;
+}
index 0231108d9387a4af3448a2d89451a4c86a0bc17c..152e37f2ad9213462a2ae439296a6edb49d653f9 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/vfs.h>
 #include <linux/falloc.h>
 #include <linux/scatterlist.h>
+#include <linux/uuid.h>
 #include <crypto/aead.h>
 #include "cifsglob.h"
 #include "smb2pdu.h"
@@ -592,8 +593,8 @@ req_res_key_exit:
        return rc;
 }
 
-static int
-smb2_clone_range(const unsigned int xid,
+static ssize_t
+smb2_copychunk_range(const unsigned int xid,
                        struct cifsFileInfo *srcfile,
                        struct cifsFileInfo *trgtfile, u64 src_off,
                        u64 len, u64 dest_off)
@@ -605,13 +606,14 @@ smb2_clone_range(const unsigned int xid,
        struct cifs_tcon *tcon;
        int chunks_copied = 0;
        bool chunk_sizes_updated = false;
+       ssize_t bytes_written, total_bytes_written = 0;
 
        pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
 
        if (pcchunk == NULL)
                return -ENOMEM;
 
-       cifs_dbg(FYI, "in smb2_clone_range - about to call request res key\n");
+       cifs_dbg(FYI, "in smb2_copychunk_range - about to call request res key\n");
        /* Request a key from the server to identify the source of the copy */
        rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
                                srcfile->fid.persistent_fid,
@@ -669,14 +671,16 @@ smb2_clone_range(const unsigned int xid,
                        }
                        chunks_copied++;
 
-                       src_off += le32_to_cpu(retbuf->TotalBytesWritten);
-                       dest_off += le32_to_cpu(retbuf->TotalBytesWritten);
-                       len -= le32_to_cpu(retbuf->TotalBytesWritten);
+                       bytes_written = le32_to_cpu(retbuf->TotalBytesWritten);
+                       src_off += bytes_written;
+                       dest_off += bytes_written;
+                       len -= bytes_written;
+                       total_bytes_written += bytes_written;
 
-                       cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %d\n",
+                       cifs_dbg(FYI, "Chunks %d PartialChunk %d Total %zu\n",
                                le32_to_cpu(retbuf->ChunksWritten),
                                le32_to_cpu(retbuf->ChunkBytesWritten),
-                               le32_to_cpu(retbuf->TotalBytesWritten));
+                               bytes_written);
                } else if (rc == -EINVAL) {
                        if (ret_data_len != sizeof(struct copychunk_ioctl_rsp))
                                goto cchunk_out;
@@ -713,7 +717,10 @@ smb2_clone_range(const unsigned int xid,
 cchunk_out:
        kfree(pcchunk);
        kfree(retbuf);
-       return rc;
+       if (rc)
+               return rc;
+       else
+               return total_bytes_written;
 }
 
 static int
@@ -2322,6 +2329,7 @@ struct smb_version_operations smb20_operations = {
        .clear_stats = smb2_clear_stats,
        .print_stats = smb2_print_stats,
        .is_oplock_break = smb2_is_valid_oplock_break,
+       .handle_cancelled_mid = smb2_handle_cancelled_mid,
        .downgrade_oplock = smb2_downgrade_oplock,
        .need_neg = smb2_need_neg,
        .negotiate = smb2_negotiate,
@@ -2377,7 +2385,7 @@ struct smb_version_operations smb20_operations = {
        .set_oplock_level = smb2_set_oplock_level,
        .create_lease_buf = smb2_create_lease_buf,
        .parse_lease_buf = smb2_parse_lease_buf,
-       .clone_range = smb2_clone_range,
+       .copychunk_range = smb2_copychunk_range,
        .wp_retry_size = smb2_wp_retry_size,
        .dir_needs_close = smb2_dir_needs_close,
        .get_dfs_refer = smb2_get_dfs_refer,
@@ -2404,6 +2412,7 @@ struct smb_version_operations smb21_operations = {
        .clear_stats = smb2_clear_stats,
        .print_stats = smb2_print_stats,
        .is_oplock_break = smb2_is_valid_oplock_break,
+       .handle_cancelled_mid = smb2_handle_cancelled_mid,
        .downgrade_oplock = smb2_downgrade_oplock,
        .need_neg = smb2_need_neg,
        .negotiate = smb2_negotiate,
@@ -2459,7 +2468,7 @@ struct smb_version_operations smb21_operations = {
        .set_oplock_level = smb21_set_oplock_level,
        .create_lease_buf = smb2_create_lease_buf,
        .parse_lease_buf = smb2_parse_lease_buf,
-       .clone_range = smb2_clone_range,
+       .copychunk_range = smb2_copychunk_range,
        .wp_retry_size = smb2_wp_retry_size,
        .dir_needs_close = smb2_dir_needs_close,
        .enum_snapshots = smb3_enum_snapshots,
@@ -2488,6 +2497,7 @@ struct smb_version_operations smb30_operations = {
        .print_stats = smb2_print_stats,
        .dump_share_caps = smb2_dump_share_caps,
        .is_oplock_break = smb2_is_valid_oplock_break,
+       .handle_cancelled_mid = smb2_handle_cancelled_mid,
        .downgrade_oplock = smb2_downgrade_oplock,
        .need_neg = smb2_need_neg,
        .negotiate = smb2_negotiate,
@@ -2545,7 +2555,7 @@ struct smb_version_operations smb30_operations = {
        .set_oplock_level = smb3_set_oplock_level,
        .create_lease_buf = smb3_create_lease_buf,
        .parse_lease_buf = smb3_parse_lease_buf,
-       .clone_range = smb2_clone_range,
+       .copychunk_range = smb2_copychunk_range,
        .duplicate_extents = smb2_duplicate_extents,
        .validate_negotiate = smb3_validate_negotiate,
        .wp_retry_size = smb2_wp_retry_size,
@@ -2582,6 +2592,7 @@ struct smb_version_operations smb311_operations = {
        .print_stats = smb2_print_stats,
        .dump_share_caps = smb2_dump_share_caps,
        .is_oplock_break = smb2_is_valid_oplock_break,
+       .handle_cancelled_mid = smb2_handle_cancelled_mid,
        .downgrade_oplock = smb2_downgrade_oplock,
        .need_neg = smb2_need_neg,
        .negotiate = smb2_negotiate,
@@ -2639,7 +2650,7 @@ struct smb_version_operations smb311_operations = {
        .set_oplock_level = smb3_set_oplock_level,
        .create_lease_buf = smb3_create_lease_buf,
        .parse_lease_buf = smb3_parse_lease_buf,
-       .clone_range = smb2_clone_range,
+       .copychunk_range = smb2_copychunk_range,
        .duplicate_extents = smb2_duplicate_extents,
 /*     .validate_negotiate = smb3_validate_negotiate, */ /* not used in 3.11 */
        .wp_retry_size = smb2_wp_retry_size,
index 7446496850a3bd5f21fb36e12b65ba5c78532612..02da648041fcd58d6e37431a60d596a5bfd4ca06 100644 (file)
@@ -562,8 +562,10 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
         * but for time being this is our only auth choice so doesn't matter.
         * We just found a server which sets blob length to zero expecting raw.
         */
-       if (blob_length == 0)
+       if (blob_length == 0) {
                cifs_dbg(FYI, "missing security blob on negprot\n");
+               server->sec_ntlmssp = true;
+       }
 
        rc = cifs_enable_signing(server, ses->sign);
        if (rc)
@@ -1171,9 +1173,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        else
                return -EIO;
 
-       if (tcon && tcon->bad_network_name)
-               return -ENOENT;
-
        unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
        if (unc_path == NULL)
                return -ENOMEM;
@@ -1185,6 +1184,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
                return -EINVAL;
        }
 
+       /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
+       if (tcon)
+               tcon->tid = 0;
+
        rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
        if (rc) {
                kfree(unc_path);
@@ -1273,8 +1276,6 @@ tcon_exit:
 tcon_error_exit:
        if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
                cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
-               if (tcon)
-                       tcon->bad_network_name = true;
        }
        goto tcon_exit;
 }
@@ -2177,6 +2178,9 @@ void smb2_reconnect_server(struct work_struct *work)
        struct cifs_tcon *tcon, *tcon2;
        struct list_head tmp_list;
        int tcon_exist = false;
+       int rc;
+       bool resched = false;
 
        /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
        mutex_lock(&server->reconnect_mutex);
@@ -2204,13 +2208,18 @@ void smb2_reconnect_server(struct work_struct *work)
        spin_unlock(&cifs_tcp_ses_lock);
 
        list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
-               if (!smb2_reconnect(SMB2_INTERNAL_CMD, tcon))
+               rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
+               if (!rc)
                        cifs_reopen_persistent_handles(tcon);
+               else
+                       resched = true;
                list_del_init(&tcon->rlist);
                cifs_put_tcon(tcon);
        }
 
        cifs_dbg(FYI, "Reconnecting tcons finished\n");
+       if (resched)
+               queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
        mutex_unlock(&server->reconnect_mutex);
 
        /* now we can safely release srv struct */
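The `resched` logic above retries the whole reconnect pass two seconds later instead of silently dropping tcons that failed to reconnect. The underlying idiom, a delayed work item that re-queues itself until it succeeds, sketched with hypothetical names (kernel context assumed):

#include <linux/workqueue.h>

/* Sketch: retry with queue_delayed_work() until the operation sticks.
 * `wq` and try_the_operation() are hypothetical placeholders. */
static void retry_handler(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	if (try_the_operation() == 0)
		return;                          /* done, stop rescheduling */

	queue_delayed_work(wq, dwork, 2 * HZ);   /* try again in ~2s */
}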
index 69e35873b1de734991fbe027fd68b97b505d01c3..6853454fc871a7947591effb0c98d55accd955dc 100644 (file)
@@ -48,6 +48,10 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
                              struct smb_rqst *rqst);
 extern struct mid_q_entry *smb2_setup_async_request(
                        struct TCP_Server_Info *server, struct smb_rqst *rqst);
+extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+                                          __u64 ses_id);
+extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+                                               __u64 ses_id, __u32  tid);
 extern int smb2_calc_signature(struct smb_rqst *rqst,
                                struct TCP_Server_Info *server);
 extern int smb3_calc_signature(struct smb_rqst *rqst,
@@ -164,6 +168,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
 extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
                             const u64 persistent_fid, const u64 volatile_fid,
                             const __u8 oplock_level);
+extern int smb2_handle_cancelled_mid(char *buffer,
+                                       struct TCP_Server_Info *server);
+extern void smb2_cancelled_close_fid(struct work_struct *work);
 extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
                         u64 persistent_file_id, u64 volatile_file_id,
                         struct kstatfs *FSData);
index 7c3bb1bd7eedfd044e0a7716951a39181a9e1bae..506b67fc93d9611ea859cf738126fb2d67e52e16 100644 (file)
@@ -115,23 +115,70 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
        return 0;
 }
 
-struct cifs_ses *
-smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+static struct cifs_ses *
+smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
 {
        struct cifs_ses *ses;
 
-       spin_lock(&cifs_tcp_ses_lock);
        list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
                if (ses->Suid != ses_id)
                        continue;
-               spin_unlock(&cifs_tcp_ses_lock);
                return ses;
        }
+
+       return NULL;
+}
+
+struct cifs_ses *
+smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+{
+       struct cifs_ses *ses;
+
+       spin_lock(&cifs_tcp_ses_lock);
+       ses = smb2_find_smb_ses_unlocked(server, ses_id);
        spin_unlock(&cifs_tcp_ses_lock);
 
+       return ses;
+}
+
+static struct cifs_tcon *
+smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32  tid)
+{
+       struct cifs_tcon *tcon;
+
+       list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+               if (tcon->tid != tid)
+                       continue;
+               ++tcon->tc_count;
+               return tcon;
+       }
+
        return NULL;
 }
 
+/*
+ * Obtain tcon corresponding to the tid in the given
+ * cifs_ses
+ */
+
+struct cifs_tcon *
+smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32  tid)
+{
+       struct cifs_ses *ses;
+       struct cifs_tcon *tcon;
+
+       spin_lock(&cifs_tcp_ses_lock);
+       ses = smb2_find_smb_ses_unlocked(server, ses_id);
+       if (!ses) {
+               spin_unlock(&cifs_tcp_ses_lock);
+               return NULL;
+       }
+       tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+       spin_unlock(&cifs_tcp_ses_lock);
+
+       return tcon;
+}
+
 int
 smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
index 526f0533cb4e8ca0eb1209561bd4bf59db9af864..f6e13a977fc8e907e3fa89fc0a5d14a4ab0eccd5 100644 (file)
@@ -752,9 +752,11 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 
        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
+               cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
                send_cancel(ses->server, rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+                       midQ->mid_flags |= MID_WAIT_CANCELLED;
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(ses->server, 1, optype);
index de622d4282a6507a9c4e4eb082ac6ff8286efb48..85abd741253d4b89a42c6c5f2b4dd7bcf456e843 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -373,6 +373,22 @@ restart:
                }
                spin_lock_irq(&mapping->tree_lock);
 
+               if (!entry) {
+                       /*
+                        * We needed to drop the page_tree lock while calling
+                        * radix_tree_preload() and we didn't have an entry to
+                        * lock.  See if another thread inserted an entry at
+                        * our index during this time.
+                        */
+                       entry = __radix_tree_lookup(&mapping->page_tree, index,
+                                       NULL, &slot);
+                       if (entry) {
+                               radix_tree_preload_end();
+                               spin_unlock_irq(&mapping->tree_lock);
+                               goto restart;
+                       }
+               }
+
                if (pmd_downgrade) {
                        radix_tree_delete(&mapping->page_tree, index);
                        mapping->nrexceptional--;
@@ -388,19 +404,12 @@ restart:
                if (err) {
                        spin_unlock_irq(&mapping->tree_lock);
                        /*
-                        * Someone already created the entry?  This is a
-                        * normal failure when inserting PMDs in a range
-                        * that already contains PTEs.  In that case we want
-                        * to return -EEXIST immediately.
-                        */
-                       if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
-                               goto restart;
-                       /*
-                        * Our insertion of a DAX PMD entry failed, most
-                        * likely because it collided with a PTE sized entry
-                        * at a different index in the PMD range.  We haven't
-                        * inserted anything into the radix tree and have no
-                        * waiters to wake.
+                        * Our insertion of a DAX entry failed, most likely
+                        * because we were inserting a PMD entry and it
+                        * collided with a PTE sized entry at a different
+                        * index in the PMD range.  We haven't inserted
+                        * anything into the radix tree and have no waiters to
+                        * wake.
                         */
                        return ERR_PTR(err);
                }
index f493af66659134dafce0e6078437834968ef2eb7..fb69ee2388dba0b83b4f29c57b37ec5cb0aefb34 100644 (file)
@@ -2466,6 +2466,7 @@ extern int  ext4_setattr(struct dentry *, struct iattr *);
 extern int  ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
 extern void ext4_evict_inode(struct inode *);
 extern void ext4_clear_inode(struct inode *);
+extern int  ext4_file_getattr(const struct path *, struct kstat *, u32, unsigned int);
 extern int  ext4_sync_inode(handle_t *, struct inode *);
 extern void ext4_dirty_inode(struct inode *, int);
 extern int ext4_change_inode_journal_flag(struct inode *, int);
index 8210c1f43556f4358e9b602a93d158e5c0780c44..cefa9835f275d9b062ae9b13765ea743edb53f64 100644 (file)
@@ -744,7 +744,7 @@ const struct file_operations ext4_file_operations = {
 
 const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
-       .getattr        = ext4_getattr,
+       .getattr        = ext4_file_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
index 4247d8d25687814dd1b844ea5555feeb43854d99..b9ffa9f4191f4cb3a122c215894ef76689fd9604 100644 (file)
@@ -5390,11 +5390,46 @@ err_out:
 int ext4_getattr(const struct path *path, struct kstat *stat,
                 u32 request_mask, unsigned int query_flags)
 {
-       struct inode *inode;
-       unsigned long long delalloc_blocks;
+       struct inode *inode = d_inode(path->dentry);
+       struct ext4_inode *raw_inode;
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       unsigned int flags;
+
+       if (EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
+               stat->result_mask |= STATX_BTIME;
+               stat->btime.tv_sec = ei->i_crtime.tv_sec;
+               stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
+       }
+
+       flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
+       if (flags & EXT4_APPEND_FL)
+               stat->attributes |= STATX_ATTR_APPEND;
+       if (flags & EXT4_COMPR_FL)
+               stat->attributes |= STATX_ATTR_COMPRESSED;
+       if (flags & EXT4_ENCRYPT_FL)
+               stat->attributes |= STATX_ATTR_ENCRYPTED;
+       if (flags & EXT4_IMMUTABLE_FL)
+               stat->attributes |= STATX_ATTR_IMMUTABLE;
+       if (flags & EXT4_NODUMP_FL)
+               stat->attributes |= STATX_ATTR_NODUMP;
+
+       stat->attributes_mask |= (STATX_ATTR_APPEND |
+                                 STATX_ATTR_COMPRESSED |
+                                 STATX_ATTR_ENCRYPTED |
+                                 STATX_ATTR_IMMUTABLE |
+                                 STATX_ATTR_NODUMP);
 
-       inode = d_inode(path->dentry);
        generic_fillattr(inode, stat);
+       return 0;
+}
+
+int ext4_file_getattr(const struct path *path, struct kstat *stat,
+                     u32 request_mask, unsigned int query_flags)
+{
+       struct inode *inode = d_inode(path->dentry);
+       u64 delalloc_blocks;
+
+       ext4_getattr(path, stat, request_mask, query_flags);
 
        /*
         * If there is inline data in the inode, the inode will normally not
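With `ext4_getattr()` now reporting creation time and the `STATX_ATTR_*` bits, userspace can retrieve them through `statx(2)`, added in this same release cycle. A minimal query, assuming a glibc new enough (2.28+) to provide the wrapper; older systems need `syscall(__NR_statx, ...)`:

#define _GNU_SOURCE
#include <fcntl.h>      /* AT_FDCWD */
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc != 2)
		return 1;
	if (statx(AT_FDCWD, argv[1], 0, STATX_BTIME, &stx) != 0) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_BTIME)   /* filesystem reported btime */
		printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
	else
		printf("btime not available on this filesystem\n");
	return 0;
}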
index 6ad612c576fc733f8a6d95540c20d8e75995e1c6..07e5e140577176e13db84089eaa048713ddb4797 100644 (file)
@@ -3912,6 +3912,7 @@ const struct inode_operations ext4_dir_inode_operations = {
        .tmpfile        = ext4_tmpfile,
        .rename         = ext4_rename2,
        .setattr        = ext4_setattr,
+       .getattr        = ext4_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
@@ -3920,6 +3921,7 @@ const struct inode_operations ext4_dir_inode_operations = {
 
 const struct inode_operations ext4_special_inode_operations = {
        .setattr        = ext4_setattr,
+       .getattr        = ext4_getattr,
        .listxattr      = ext4_listxattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
index 73b184d161fc98dc6c870758243b1a03932cb85f..5c8fc53cb0e5a3127366b6324514407668f150b3 100644 (file)
@@ -85,17 +85,20 @@ errout:
 const struct inode_operations ext4_encrypted_symlink_inode_operations = {
        .get_link       = ext4_encrypted_get_link,
        .setattr        = ext4_setattr,
+       .getattr        = ext4_getattr,
        .listxattr      = ext4_listxattr,
 };
 
 const struct inode_operations ext4_symlink_inode_operations = {
        .get_link       = page_get_link,
        .setattr        = ext4_setattr,
+       .getattr        = ext4_getattr,
        .listxattr      = ext4_listxattr,
 };
 
 const struct inode_operations ext4_fast_symlink_inode_operations = {
        .get_link       = simple_get_link,
        .setattr        = ext4_setattr,
+       .getattr        = ext4_getattr,
        .listxattr      = ext4_listxattr,
 };
index 7163fe014b57f4e15813c1969958d5764b18e5af..dde861387a407810b35f73ff37c3ce6389048326 100644 (file)
@@ -136,17 +136,26 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
        vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
        vma->vm_ops = &hugetlb_vm_ops;
 
+       /*
+        * Offset passed to mmap (before page shift) could have been
+        * negative when represented as a (l)off_t.
+        */
+       if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
+               return -EINVAL;
+
        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;
 
        vma_len = (loff_t)(vma->vm_end - vma->vm_start);
+       len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+       /* check for overflow */
+       if (len < vma_len)
+               return -EINVAL;
 
        inode_lock(inode);
        file_accessed(file);
 
        ret = -ENOMEM;
-       len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-
        if (hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
@@ -155,7 +164,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 
        ret = 0;
        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
-               inode->i_size = len;
+               i_size_write(inode, len);
 out:
        inode_unlock(inode);
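The two checks added above reject a page offset so large that, once shifted into bytes, it turns negative or wraps when the mapping length is added. The arithmetic in isolation, using fixed-width stand-ins for the kernel types:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the two hugetlbfs checks; converting a value with bit 63 set
 * to int64_t is implementation-defined but wraps on mainstream
 * compilers, which is the behavior the kernel check relies on. */
int main(void)
{
	uint64_t pgoff = UINT64_C(1) << 51;      /* hostile mmap page offset */
	int64_t off = (int64_t)(pgoff << 12);    /* bit 63 set: negative */
	int64_t vma_len = 2 * 1024 * 1024;       /* a 2 MiB mapping */

	if (off < 0) {
		printf("rejected: byte offset went negative\n");
		return 0;
	}
	/* overflow check done in unsigned space to stay well-defined */
	if ((uint64_t)off + (uint64_t)vma_len < (uint64_t)vma_len)
		printf("rejected: length wrapped\n");
	else
		printf("accepted\n");
	return 0;
}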
 
index d41fab78798b2e2510ca4f8b54925ef304a14c7d..19dcf62133cc95162d364f7ef43c17d280ee6448 100644 (file)
@@ -2145,6 +2145,9 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
        int retval = 0;
        const char *s = nd->name->name;
 
+       if (!*s)
+               flags &= ~LOOKUP_RCU;
+
        nd->last_type = LAST_ROOT; /* if there are only slashes... */
        nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
        nd->depth = 0;
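The empty string guarded against here is the AT_EMPTY_PATH case, where the lookup resolves the passed-in fd rather than walking a path, so RCU-walk is simply dropped for it. A minimal caller that exercises that path, assuming a glibc recent enough (>= 2.28) to provide the statx(2) wrapper:

#define _GNU_SOURCE
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        struct statx stx;
        int fd = open("/etc", O_RDONLY);

        if (fd < 0)
                return 1;
        /* empty pathname: resolve the fd itself, no path walk */
        if (statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS, &stx) == 0)
                printf("mode: %o\n", stx.stx_mode);
        return 0;
}
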
index c4ab6fdf17a01426db5d6e2bb9638130147bee61..e1534c9bab16ce69e30eefd6614ca2872595fb04 100644 (file)
@@ -208,14 +208,19 @@ restart:
                                continue;
                        /*
                         * Skip ops whose filesystem we don't know about unless
-                        * it is being mounted.
+                        * it is being mounted or unmounted.  It is possible for
+                        * a filesystem we don't know about to be unmounted if
+                        * it fails to mount in the kernel after userspace has
+                        * been sent the mount request.
                         */
                        /* XXX: is there a better way to detect this? */
                        } else if (ret == -1 &&
                                   !(op->upcall.type ==
                                        ORANGEFS_VFS_OP_FS_MOUNT ||
                                     op->upcall.type ==
-                                       ORANGEFS_VFS_OP_GETATTR)) {
+                                       ORANGEFS_VFS_OP_GETATTR ||
+                                    op->upcall.type ==
+                                       ORANGEFS_VFS_OP_FS_UMOUNT)) {
                                gossip_debug(GOSSIP_DEV_DEBUG,
                                    "orangefs: skipping op tag %llu %s\n",
                                    llu(op->tag), get_opname_string(op));
index 5e48a0be976194f466b654fc1aa11dc670cdd084..8afac46fcc87a1e1d3ea8c658e0ae60c336c3d5d 100644 (file)
@@ -249,6 +249,7 @@ struct orangefs_sb_info_s {
        char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
        struct super_block *sb;
        int mount_pending;
+       int no_list;
        struct list_head list;
 };
 
index 67c24351a67f8d38e7e4eb1b95d94b9b80cf12a2..629d8c917fa679886715360fcfe8f6cee1b5a37f 100644 (file)
@@ -263,8 +263,13 @@ int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb)
                if (!new_op)
                        return -ENOMEM;
                new_op->upcall.req.features.features = 0;
-               ret = service_operation(new_op, "orangefs_features", 0);
-               orangefs_features = new_op->downcall.resp.features.features;
+               ret = service_operation(new_op, "orangefs_features",
+                   ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX);
+               if (!ret)
+                       orangefs_features =
+                           new_op->downcall.resp.features.features;
+               else
+                       orangefs_features = 0;
                op_release(new_op);
        } else {
                orangefs_features = 0;
@@ -488,7 +493,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 
        if (ret) {
                d = ERR_PTR(ret);
-               goto free_op;
+               goto free_sb_and_op;
        }
 
        /*
@@ -514,6 +519,9 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
        spin_unlock(&orangefs_superblocks_lock);
        op_release(new_op);
 
+       /* Must be removed from the list now. */
+       ORANGEFS_SB(sb)->no_list = 0;
+
        if (orangefs_userspace_version >= 20906) {
                new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES);
                if (!new_op)
@@ -528,6 +536,10 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
 
        return dget(sb->s_root);
 
+free_sb_and_op:
+       /* Will call orangefs_kill_sb with sb not in list. */
+       ORANGEFS_SB(sb)->no_list = 1;
+       deactivate_locked_super(sb);
 free_op:
        gossip_err("orangefs_mount: mount request failed with %d\n", ret);
        if (ret == -EINVAL) {
@@ -553,12 +565,14 @@ void orangefs_kill_sb(struct super_block *sb)
         */
         orangefs_unmount_sb(sb);
 
-       /* remove the sb from our list of orangefs specific sb's */
-
-       spin_lock(&orangefs_superblocks_lock);
-       __list_del_entry(&ORANGEFS_SB(sb)->list);       /* not list_del_init */
-       ORANGEFS_SB(sb)->list.prev = NULL;
-       spin_unlock(&orangefs_superblocks_lock);
+       if (!ORANGEFS_SB(sb)->no_list) {
+               /* remove the sb from our list of orangefs specific sb's */
+               spin_lock(&orangefs_superblocks_lock);
+               /* not list_del_init */
+               __list_del_entry(&ORANGEFS_SB(sb)->list);
+               ORANGEFS_SB(sb)->list.prev = NULL;
+               spin_unlock(&orangefs_superblocks_lock);
+       }
 
        /*
         * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
index 8f91ec66baa3261299430c47fce487bf5164246c..d04ea43499096e4fc02eec0f363683e3713f65ba 100644 (file)
@@ -1074,6 +1074,7 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
 
                if ((table->proc_handler == proc_dostring) ||
                    (table->proc_handler == proc_dointvec) ||
+                   (table->proc_handler == proc_douintvec) ||
                    (table->proc_handler == proc_dointvec_minmax) ||
                    (table->proc_handler == proc_dointvec_jiffies) ||
                    (table->proc_handler == proc_dointvec_userhz_jiffies) ||
index f08bd31c1081cc0536602db9d865f4bf491440c9..312578089544dbd652aac303d0cbabee8fbcb968 100644 (file)
@@ -900,7 +900,14 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
 {
-       pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
+       pmd_t pmd = *pmdp;
+
+       /* See comment in change_huge_pmd() */
+       pmdp_invalidate(vma, addr, pmdp);
+       if (pmd_dirty(*pmdp))
+               pmd = pmd_mkdirty(pmd);
+       if (pmd_young(*pmdp))
+               pmd = pmd_mkyoung(pmd);
 
        pmd = pmd_wrprotect(pmd);
        pmd = pmd_clear_soft_dirty(pmd);
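The replaced pmdp_huge_get_and_clear() left a window where the huge PMD was gone from the page tables; the new sequence keeps a snapshot, invalidates, then folds back any dirty/young bits the hardware set in the meantime. A compressed model of that merge step, with flag bits standing in for pmd_t and the actual pmdp_invalidate() call elided (all names hypothetical, not kernel API):

#include <stdint.h>

#define PMD_DIRTY       0x1
#define PMD_YOUNG       0x2
#define PMD_WRITE       0x4
#define PMD_SOFT_DIRTY  0x8

typedef uint64_t pmd_demo_t;

/* snapshot, invalidate (elided), fold hardware-set A/D bits back in */
static pmd_demo_t clear_soft_dirty_model(volatile pmd_demo_t *pmdp)
{
        pmd_demo_t pmd = *pmdp;                 /* snapshot */

        /* pmdp_invalidate() would go here; hardware may have raced */
        if (*pmdp & PMD_DIRTY)
                pmd |= PMD_DIRTY;
        if (*pmdp & PMD_YOUNG)
                pmd |= PMD_YOUNG;

        return pmd & ~(PMD_WRITE | PMD_SOFT_DIRTY);
}

int main(void)
{
        pmd_demo_t e = PMD_WRITE | PMD_SOFT_DIRTY;

        e |= PMD_DIRTY;                         /* set "after" the snapshot */
        return clear_soft_dirty_model(&e) == PMD_DIRTY ? 0 : 1;
}
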
index fa0be59340cc9136eb8c508434e10289acc8fc97..c6c963b2546b4f2d0301f40706064498c31fd3b6 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -130,9 +130,13 @@ EXPORT_SYMBOL(vfs_getattr);
 int vfs_statx_fd(unsigned int fd, struct kstat *stat,
                 u32 request_mask, unsigned int query_flags)
 {
-       struct fd f = fdget_raw(fd);
+       struct fd f;
        int error = -EBADF;
 
+       if (query_flags & ~KSTAT_QUERY_FLAGS)
+               return -EINVAL;
+
+       f = fdget_raw(fd);
        if (f.file) {
                error = vfs_getattr(&f.file->f_path, stat,
                                    request_mask, query_flags);
@@ -155,9 +159,6 @@ EXPORT_SYMBOL(vfs_statx_fd);
  * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
  * at the given name from being referenced.
  *
- * The caller must have preset stat->request_mask as for vfs_getattr().  The
- * flags are also used to load up stat->query_flags.
- *
  * 0 will be returned on success, and a -ve error code if unsuccessful.
  */
 int vfs_statx(int dfd, const char __user *filename, int flags,
@@ -509,46 +510,38 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
 }
 #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
 
-static inline int __put_timestamp(struct timespec *kts,
-                                 struct statx_timestamp __user *uts)
-{
-       return (__put_user(kts->tv_sec,         &uts->tv_sec            ) ||
-               __put_user(kts->tv_nsec,        &uts->tv_nsec           ) ||
-               __put_user(0,                   &uts->__reserved        ));
-}
-
-/*
- * Set the statx results.
- */
-static long statx_set_result(struct kstat *stat, struct statx __user *buffer)
+static noinline_for_stack int
+cp_statx(const struct kstat *stat, struct statx __user *buffer)
 {
-       uid_t uid = from_kuid_munged(current_user_ns(), stat->uid);
-       gid_t gid = from_kgid_munged(current_user_ns(), stat->gid);
-
-       if (__put_user(stat->result_mask,       &buffer->stx_mask       ) ||
-           __put_user(stat->mode,              &buffer->stx_mode       ) ||
-           __clear_user(&buffer->__spare0, sizeof(buffer->__spare0))     ||
-           __put_user(stat->nlink,             &buffer->stx_nlink      ) ||
-           __put_user(uid,                     &buffer->stx_uid        ) ||
-           __put_user(gid,                     &buffer->stx_gid        ) ||
-           __put_user(stat->attributes,        &buffer->stx_attributes ) ||
-           __put_user(stat->blksize,           &buffer->stx_blksize    ) ||
-           __put_user(MAJOR(stat->rdev),       &buffer->stx_rdev_major ) ||
-           __put_user(MINOR(stat->rdev),       &buffer->stx_rdev_minor ) ||
-           __put_user(MAJOR(stat->dev),        &buffer->stx_dev_major  ) ||
-           __put_user(MINOR(stat->dev),        &buffer->stx_dev_minor  ) ||
-           __put_timestamp(&stat->atime,       &buffer->stx_atime      ) ||
-           __put_timestamp(&stat->btime,       &buffer->stx_btime      ) ||
-           __put_timestamp(&stat->ctime,       &buffer->stx_ctime      ) ||
-           __put_timestamp(&stat->mtime,       &buffer->stx_mtime      ) ||
-           __put_user(stat->ino,               &buffer->stx_ino        ) ||
-           __put_user(stat->size,              &buffer->stx_size       ) ||
-           __put_user(stat->blocks,            &buffer->stx_blocks     ) ||
-           __clear_user(&buffer->__spare1, sizeof(buffer->__spare1))     ||
-           __clear_user(&buffer->__spare2, sizeof(buffer->__spare2)))
-               return -EFAULT;
-
-       return 0;
+       struct statx tmp;
+
+       memset(&tmp, 0, sizeof(tmp));
+
+       tmp.stx_mask = stat->result_mask;
+       tmp.stx_blksize = stat->blksize;
+       tmp.stx_attributes = stat->attributes;
+       tmp.stx_nlink = stat->nlink;
+       tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
+       tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
+       tmp.stx_mode = stat->mode;
+       tmp.stx_ino = stat->ino;
+       tmp.stx_size = stat->size;
+       tmp.stx_blocks = stat->blocks;
+       tmp.stx_attributes_mask = stat->attributes_mask;
+       tmp.stx_atime.tv_sec = stat->atime.tv_sec;
+       tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
+       tmp.stx_btime.tv_sec = stat->btime.tv_sec;
+       tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
+       tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
+       tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
+       tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
+       tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
+       tmp.stx_rdev_major = MAJOR(stat->rdev);
+       tmp.stx_rdev_minor = MINOR(stat->rdev);
+       tmp.stx_dev_major = MAJOR(stat->dev);
+       tmp.stx_dev_minor = MINOR(stat->dev);
+
+       return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
 }
 
 /**
@@ -570,10 +563,10 @@ SYSCALL_DEFINE5(statx,
        struct kstat stat;
        int error;
 
+       if (mask & STATX__RESERVED)
+               return -EINVAL;
        if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
                return -EINVAL;
-       if (!access_ok(VERIFY_WRITE, buffer, sizeof(*buffer)))
-               return -EFAULT;
 
        if (filename)
                error = vfs_statx(dfd, filename, flags, &stat, mask);
@@ -581,7 +574,8 @@ SYSCALL_DEFINE5(statx,
                error = vfs_statx_fd(dfd, &stat, mask, flags);
        if (error)
                return error;
-       return statx_set_result(&stat, buffer);
+
+       return cp_statx(&stat, buffer);
 }
 
 /* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
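cp_statx() above swaps many ordered __put_user() calls for one zeroed stack copy, which both drops the access_ok() prevalidation and guarantees that padding and spare fields reach userspace as zeroes. The same pattern applies to any padded struct crossing a trust boundary; a small userspace analog (record layout hypothetical):

#include <string.h>
#include <unistd.h>

struct record {         /* hypothetical on-wire record with padding */
        char tag;       /* typically 7 bytes of padding follow */
        long value;
};

static ssize_t emit_record(int fd, char tag, long value)
{
        struct record tmp;

        memset(&tmp, 0, sizeof(tmp));   /* zero padding, as cp_statx does */
        tmp.tag = tag;
        tmp.value = value;
        return write(fd, &tmp, sizeof(tmp));
}

int main(void)
{
        return emit_record(1, 'A', 42) == sizeof(struct record) ? 0 : 1;
}
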
index b803213d1307e9137c3bfe5e04ee4ac5bd396cce..39c75a86c67f1d24dc14bc79ce0d0f9d7dccef2b 100644 (file)
@@ -108,7 +108,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
 {
        const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
        struct kobject *kobj = of->kn->parent->priv;
-       size_t len;
+       ssize_t len;
 
        /*
         * If buf != of->prealloc_buf, we don't know how
@@ -117,13 +117,15 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
        if (WARN_ON_ONCE(buf != of->prealloc_buf))
                return 0;
        len = ops->show(kobj, of->kn->priv, buf);
+       if (len < 0)
+               return len;
        if (pos) {
                if (len <= pos)
                        return 0;
                len -= pos;
                memmove(buf, buf + pos, len);
        }
-       return min(count, len);
+       return min_t(ssize_t, count, len);
 }
 
 /* kernfs write callback for regular sysfs files */
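The fix above works because a negative ->show() return stored in a size_t becomes a huge unsigned value, so min(count, len) silently picks count and the error is lost. A runnable demonstration of the trap:

#include <stdio.h>
#include <sys/types.h>

#define min_demo(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        ssize_t err = -5;               /* e.g. an errno from ->show() */
        size_t as_size_t = err;         /* wraps to 0xff...fb */
        size_t count = 4096;

        printf("%zu\n", min_demo(count, as_size_t));    /* 4096: error lost */
        printf("%zd\n", min_demo((ssize_t)count, err)); /* -5: propagated  */
        return 0;
}
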
index 1d227b0fcf49ff26b40bdd726b3839fb8f353f35..f7555fc25877435e13b65cbe597ae9bdb11c6528 100644 (file)
@@ -1756,7 +1756,7 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
         *      protocols: aa:... bb:...
         */
        seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
-                  pending, total, UFFD_API, UFFD_API_FEATURES,
+                  pending, total, UFFD_API, ctx->features,
                   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
 }
 #endif
index eb00bc133bca673c556eb85a18385bbc3748dfcf..39f8604f764e13147fd576e95f5ad30f98dc4d1e 100644 (file)
@@ -125,8 +125,7 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
 extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
 extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
 extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
-extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp,
-               int size);
+extern int xfs_dir2_sf_verify(struct xfs_inode *ip);
 
 /* xfs_dir2_readdir.c */
 extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx,
index 96b45cd6c63f0686d3c1cce5c41b232f0ab82080..e84af093b2ab99e5d7a75467bc6d7a490682611a 100644 (file)
@@ -632,36 +632,49 @@ xfs_dir2_sf_check(
 /* Verify the consistency of an inline directory. */
 int
 xfs_dir2_sf_verify(
-       struct xfs_mount                *mp,
-       struct xfs_dir2_sf_hdr          *sfp,
-       int                             size)
+       struct xfs_inode                *ip)
 {
+       struct xfs_mount                *mp = ip->i_mount;
+       struct xfs_dir2_sf_hdr          *sfp;
        struct xfs_dir2_sf_entry        *sfep;
        struct xfs_dir2_sf_entry        *next_sfep;
        char                            *endp;
        const struct xfs_dir_ops        *dops;
+       struct xfs_ifork                *ifp;
        xfs_ino_t                       ino;
        int                             i;
        int                             i8count;
        int                             offset;
+       int                             size;
+       int                             error;
        __uint8_t                       filetype;
 
+       ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
+       /*
+        * xfs_iread calls us before xfs_setup_inode sets up ip->d_ops,
+        * so we can only trust the mountpoint to have the right pointer.
+        */
        dops = xfs_dir_get_ops(mp, NULL);
 
+       ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+       sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
+       size = ifp->if_bytes;
+
        /*
         * Give up if the directory is way too short.
         */
-       XFS_WANT_CORRUPTED_RETURN(mp, size >
-                       offsetof(struct xfs_dir2_sf_hdr, parent));
-       XFS_WANT_CORRUPTED_RETURN(mp, size >=
-                       xfs_dir2_sf_hdr_size(sfp->i8count));
+       if (size <= offsetof(struct xfs_dir2_sf_hdr, parent) ||
+           size < xfs_dir2_sf_hdr_size(sfp->i8count))
+               return -EFSCORRUPTED;
 
        endp = (char *)sfp + size;
 
        /* Check .. entry */
        ino = dops->sf_get_parent_ino(sfp);
        i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
-       XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+       error = xfs_dir_ino_validate(mp, ino);
+       if (error)
+               return error;
        offset = dops->data_first_offset;
 
        /* Check all reported entries */
@@ -672,12 +685,12 @@ xfs_dir2_sf_verify(
                 * Check the fixed-offset parts of the structure are
                 * within the data buffer.
                 */
-               XFS_WANT_CORRUPTED_RETURN(mp,
-                               ((char *)sfep + sizeof(*sfep)) < endp);
+               if (((char *)sfep + sizeof(*sfep)) >= endp)
+                       return -EFSCORRUPTED;
 
                /* Don't allow names with known bad length. */
-               XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0);
-               XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN);
+               if (sfep->namelen == 0)
+                       return -EFSCORRUPTED;
 
                /*
                 * Check that the variable-length part of the structure is
@@ -685,33 +698,39 @@ xfs_dir2_sf_verify(
                 * name component, so nextentry is an acceptable test.
                 */
                next_sfep = dops->sf_nextentry(sfp, sfep);
-               XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep);
+               if (endp < (char *)next_sfep)
+                       return -EFSCORRUPTED;
 
                /* Check that the offsets always increase. */
-               XFS_WANT_CORRUPTED_RETURN(mp,
-                               xfs_dir2_sf_get_offset(sfep) >= offset);
+               if (xfs_dir2_sf_get_offset(sfep) < offset)
+                       return -EFSCORRUPTED;
 
                /* Check the inode number. */
                ino = dops->sf_get_ino(sfp, sfep);
                i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
-               XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino));
+               error = xfs_dir_ino_validate(mp, ino);
+               if (error)
+                       return error;
 
                /* Check the file type. */
                filetype = dops->sf_get_ftype(sfep);
-               XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX);
+               if (filetype >= XFS_DIR3_FT_MAX)
+                       return -EFSCORRUPTED;
 
                offset = xfs_dir2_sf_get_offset(sfep) +
                                dops->data_entsize(sfep->namelen);
 
                sfep = next_sfep;
        }
-       XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count);
-       XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp);
+       if (i8count != sfp->i8count)
+               return -EFSCORRUPTED;
+       if ((void *)sfep != (void *)endp)
+               return -EFSCORRUPTED;
 
        /* Make sure this whole thing ought to be in local format. */
-       XFS_WANT_CORRUPTED_RETURN(mp, offset +
-              (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
-              (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize);
+       if (offset + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+           (uint)sizeof(xfs_dir2_block_tail_t) > mp->m_dir_geo->blksize)
+               return -EFSCORRUPTED;
 
        return 0;
 }
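Recast this way, the verifier is the standard walk over variable-length records in an untrusted buffer: bound the fixed header, validate the length field, bound the computed next entry, and require the walk to land exactly on the end. A self-contained sketch of that pattern (record layout hypothetical, not the xfs_dir2 shortform format):

#include <stddef.h>
#include <stdint.h>

struct rec {                    /* hypothetical: length-prefixed name */
        uint8_t namelen;
        char    name[];
};

/* returns 0 if every record fits exactly in buf[0..size), -1 if corrupt */
static int verify_records(const uint8_t *buf, size_t size)
{
        const uint8_t *p = buf, *endp = buf + size;

        while (p < endp) {
                const struct rec *r = (const struct rec *)p;
                size_t left = (size_t)(endp - p);

                if (left < sizeof(*r) + 1)          /* header + >=1 name byte */
                        return -1;
                if (r->namelen == 0 ||              /* known-bad length */
                    r->namelen > left - sizeof(*r)) /* variable part in bounds */
                        return -1;
                p += sizeof(*r) + r->namelen;
        }
        return 0;
}

int main(void)
{
        uint8_t good[] = { 3, 'f', 'o', 'o', 1, 'x' };
        uint8_t bad[]  = { 9, 'f' };                /* namelen overruns */

        return verify_records(good, sizeof(good)) == 0 &&
               verify_records(bad, sizeof(bad)) == -1 ? 0 : 1;
}
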
index 9653e964eda4f99ca611bb2cb6449a470be45d48..8a37efe04de3235d72357914647e5f480cbbd512 100644 (file)
@@ -212,6 +212,16 @@ xfs_iformat_fork(
        if (error)
                return error;
 
+       /* Check inline dir contents. */
+       if (S_ISDIR(VFS_I(ip)->i_mode) &&
+           dip->di_format == XFS_DINODE_FMT_LOCAL) {
+               error = xfs_dir2_sf_verify(ip);
+               if (error) {
+                       xfs_idestroy_fork(ip, XFS_DATA_FORK);
+                       return error;
+               }
+       }
+
        if (xfs_is_reflink_inode(ip)) {
                ASSERT(ip->i_cowfp == NULL);
                xfs_ifork_init_cow(ip);
@@ -322,8 +332,6 @@ xfs_iformat_local(
        int             whichfork,
        int             size)
 {
-       int             error;
-
        /*
         * If the size is unreasonable, then something
         * is wrong and we just bail out rather than crash in
@@ -339,14 +347,6 @@ xfs_iformat_local(
                return -EFSCORRUPTED;
        }
 
-       if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
-               error = xfs_dir2_sf_verify(ip->i_mount,
-                               (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip),
-                               size);
-               if (error)
-                       return error;
-       }
-
        xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
        return 0;
 }
@@ -867,7 +867,7 @@ xfs_iextents_copy(
  * In these cases, the format always takes precedence, because the
  * format indicates the current state of the fork.
  */
-int
+void
 xfs_iflush_fork(
        xfs_inode_t             *ip,
        xfs_dinode_t            *dip,
@@ -877,7 +877,6 @@ xfs_iflush_fork(
        char                    *cp;
        xfs_ifork_t             *ifp;
        xfs_mount_t             *mp;
-       int                     error;
        static const short      brootflag[2] =
                { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
        static const short      dataflag[2] =
@@ -886,7 +885,7 @@ xfs_iflush_fork(
                { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
 
        if (!iip)
-               return 0;
+               return;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        /*
         * This can happen if we gave up in iformat in an error path,
@@ -894,19 +893,12 @@ xfs_iflush_fork(
         */
        if (!ifp) {
                ASSERT(whichfork == XFS_ATTR_FORK);
-               return 0;
+               return;
        }
        cp = XFS_DFORK_PTR(dip, whichfork);
        mp = ip->i_mount;
        switch (XFS_IFORK_FORMAT(ip, whichfork)) {
        case XFS_DINODE_FMT_LOCAL:
-               if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
-                       error = xfs_dir2_sf_verify(mp,
-                                       (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data,
-                                       ifp->if_bytes);
-                       if (error)
-                               return error;
-               }
                if ((iip->ili_fields & dataflag[whichfork]) &&
                    (ifp->if_bytes > 0)) {
                        ASSERT(ifp->if_u1.if_data != NULL);
@@ -959,7 +951,6 @@ xfs_iflush_fork(
                ASSERT(0);
                break;
        }
-       return 0;
 }
 
 /*
index 132dc59fdde6942cd22fca4ae11b8adbc193f051..7fb8365326d1a745583c4f133bc5a63668316b33 100644 (file)
@@ -140,7 +140,7 @@ typedef struct xfs_ifork {
 struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
 
 int            xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
-int            xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
+void           xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
                                struct xfs_inode_log_item *, int);
 void           xfs_idestroy_fork(struct xfs_inode *, int);
 void           xfs_idata_realloc(struct xfs_inode *, int, int);
index 8b75dcea596680f1332412870546181d2f0a579c..828532ce0adca80ddfa174540aae6aa06663302b 100644 (file)
@@ -1311,8 +1311,16 @@ xfs_free_file_space(
        /*
         * Now that we've unmapped all full blocks we'll have to zero out any
         * partial block at the beginning and/or end.  xfs_zero_range is
-        * smart enough to skip any holes, including those we just created.
+        * smart enough to skip any holes, including those we just created,
+        * but we must take care not to zero beyond EOF and enlarge i_size.
         */
+
+       if (offset >= XFS_ISIZE(ip))
+               return 0;
+
+       if (offset + len > XFS_ISIZE(ip))
+               len = XFS_ISIZE(ip) - offset;
+
        return xfs_zero_range(ip, offset, len, NULL);
 }
 
index c7fe2c2123ab8375caf0e0349a454ed8b2762095..7605d83965963566e1d6acef679591cf9787d161 100644 (file)
@@ -50,6 +50,7 @@
 #include "xfs_log.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_reflink.h"
+#include "xfs_dir2_priv.h"
 
 kmem_zone_t *xfs_inode_zone;
 
@@ -3475,7 +3476,6 @@ xfs_iflush_int(
        struct xfs_inode_log_item *iip = ip->i_itemp;
        struct xfs_dinode       *dip;
        struct xfs_mount        *mp = ip->i_mount;
-       int                     error;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        ASSERT(xfs_isiflocked(ip));
@@ -3547,6 +3547,12 @@ xfs_iflush_int(
        if (ip->i_d.di_version < 3)
                ip->i_d.di_flushiter++;
 
+       /* Check the inline directory data. */
+       if (S_ISDIR(VFS_I(ip)->i_mode) &&
+           ip->i_d.di_format == XFS_DINODE_FMT_LOCAL &&
+           xfs_dir2_sf_verify(ip))
+               goto corrupt_out;
+
        /*
         * Copy the dirty parts of the inode into the on-disk inode.  We always
         * copy out the core of the inode, because if the inode is dirty at all
@@ -3558,14 +3564,9 @@ xfs_iflush_int(
        if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
                ip->i_d.di_flushiter = 0;
 
-       error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
-       if (error)
-               return error;
-       if (XFS_IFORK_Q(ip)) {
-               error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
-               if (error)
-                       return error;
-       }
+       xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
+       if (XFS_IFORK_Q(ip))
+               xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
        xfs_inobp_check(mp, bp);
 
        /*
index 229cc6a6d8ef03dc599b5195c5052796e2e19ba7..ebfc13350f9ae8b43b0229948588192560b51683 100644 (file)
@@ -516,6 +516,20 @@ xfs_vn_getattr(
        stat->blocks =
                XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
 
+       if (ip->i_d.di_version == 3) {
+               if (request_mask & STATX_BTIME) {
+                       stat->result_mask |= STATX_BTIME;
+                       stat->btime.tv_sec = ip->i_d.di_crtime.t_sec;
+                       stat->btime.tv_nsec = ip->i_d.di_crtime.t_nsec;
+               }
+       }
+
+       if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
+               stat->attributes |= STATX_ATTR_IMMUTABLE;
+       if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+               stat->attributes |= STATX_ATTR_APPEND;
+       if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
+               stat->attributes |= STATX_ATTR_NODUMP;
 
        switch (inode->i_mode & S_IFMT) {
        case S_IFBLK:
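A statx() caller still has to check stx_mask on return, since STATX_BTIME is only a request and older XFS inodes (di_version < 3) cannot supply a birth time. A minimal consumer, assuming glibc >= 2.28 for the statx(2) wrapper:

#define _GNU_SOURCE
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>

int main(int argc, char **argv)
{
        struct statx stx;
        const char *path = argc > 1 ? argv[1] : ".";

        if (statx(AT_FDCWD, path, 0, STATX_BTIME, &stx) != 0)
                return 1;
        if (stx.stx_mask & STATX_BTIME)         /* filesystem filled it in */
                printf("btime: %lld\n", (long long)stx.stx_btime.tv_sec);
        else
                printf("no birth time for %s\n", path);
        return 0;
}
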
index 2a6d9b1558e00dca550a2d46f8a5a51b9661ec3a..26d67ce3c18d901a85e94836b9e823a5806ae807 100644 (file)
@@ -583,7 +583,7 @@ xfs_inumbers(
                return error;
 
        bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer)));
-       buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP);
+       buffer = kmem_zalloc(bcount * sizeof(*buffer), KM_SLEEP);
        do {
                struct xfs_inobt_rec_incore     r;
                int                             stat;
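kmem_zalloc() matters here because the buffer is copied out to userspace and records may be only partially filled; uninitialized heap bytes would otherwise leak. The userspace analog is reaching for calloc() whenever a buffer with holes or padding will leave the process's trust boundary:

#include <stdlib.h>

struct ino_rec {                /* hypothetical record with padding */
        unsigned long long ino;
        unsigned int count;
};

int main(void)
{
        size_t n = 64;
        struct ino_rec *buf = calloc(n, sizeof(*buf)); /* ~ kmem_zalloc */

        if (!buf)
                return 1;
        /* fill only some records; the whole buffer can then be copied
         * out without exposing stale heap contents */
        buf[0].ino = 128;
        free(buf);
        return 0;
}
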
index 7cdfe167074f873a71dd51e04da3304864d2f063..143db9c523e25f38488bd43302b82f316e3124a9 100644 (file)
  */
 #ifndef RO_AFTER_INIT_DATA
 #define RO_AFTER_INIT_DATA                                             \
-       __start_ro_after_init = .;                                      \
+       VMLINUX_SYMBOL(__start_ro_after_init) = .;                      \
        *(.data..ro_after_init)                                         \
-       __end_ro_after_init = .;
+       VMLINUX_SYMBOL(__end_ro_after_init) = .;
 #endif
 
 /*
index 1d4f365d8f03a439ab0e20caf27ae34ad6e13e74..f6d9af3efa45a6cc8eb448a71f5d43d72d8fcbd1 100644 (file)
@@ -166,6 +166,16 @@ static inline struct ahash_instance *ahash_alloc_instance(
        return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
 }
 
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+       req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+       return req->base.flags;
+}
+
 static inline struct crypto_ahash *crypto_spawn_ahash(
        struct crypto_ahash_spawn *spawn)
 {
index ed953f98f0e1446ce01630c005e406f941282347..1487011fe057ba4b35271bc85e52c6b90291c2c3 100644 (file)
@@ -229,6 +229,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  * @ref_type: The type of reference.
  * @existed: Upon completion, indicates that an identical reference object
  * already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
  *
  * Checks that the base object is shareable and adds a ref object to it.
  *
@@ -243,7 +245,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
  */
 extern int ttm_ref_object_add(struct ttm_object_file *tfile,
                              struct ttm_base_object *base,
-                             enum ttm_ref_type ref_type, bool *existed);
+                             enum ttm_ref_type ref_type, bool *existed,
+                             bool require_existed);
 
 extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
                                  struct ttm_base_object *base);
index b72dd2ad5f440cfcbf86407cad5aed8e98835510..c0b3d999c266f2271d61bd58b042352580faf5d2 100644 (file)
@@ -295,6 +295,7 @@ void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
 int kvm_vgic_map_resources(struct kvm *kvm);
 int kvm_vgic_hyp_init(void);
+void kvm_vgic_init_cpu_hardware(void);
 
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
                        bool level);
index b296a900611790e25639293fd51b0fa7a9beab41..9382c5da7a2edfbc7cbccca2ff1304cd042c3bd2 100644 (file)
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx {
 
        atomic_t                nr_active;
 
+       struct delayed_work     delayed_run_work;
        struct delayed_work     delay_work;
 
        struct hlist_node       cpuhp_dead;
@@ -238,6 +239,7 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
index 5a7da607ca045f81a46e7b73bb31a8f1b978452a..01a696b0a4d3ae0118a2742c96d68775ef9f637a 100644 (file)
@@ -610,7 +610,6 @@ struct request_queue {
 #define QUEUE_FLAG_FLUSH_NQ    25      /* flush not queueable */
 #define QUEUE_FLAG_DAX         26      /* device supports DAX */
 #define QUEUE_FLAG_STATS       27      /* track rq completion times */
-#define QUEUE_FLAG_RESTART     28      /* queue needs restart at completion */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -1673,12 +1672,36 @@ static inline bool bios_segs_mergeable(struct request_queue *q,
        return true;
 }
 
-static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
-                        struct bio *next)
+static inline bool bio_will_gap(struct request_queue *q,
+                               struct request *prev_rq,
+                               struct bio *prev,
+                               struct bio *next)
 {
        if (bio_has_data(prev) && queue_virt_boundary(q)) {
                struct bio_vec pb, nb;
 
+               /*
+                * don't merge if the 1st bio starts with non-zero
+                * offset, otherwise it is quite difficult to respect
+                * sg gap limit. We work hard to merge a huge number of small
+                * single bios in case of mkfs.
+                */
+               if (prev_rq)
+                       bio_get_first_bvec(prev_rq->bio, &pb);
+               else
+                       bio_get_first_bvec(prev, &pb);
+               if (pb.bv_offset)
+                       return true;
+
+               /*
+                * We don't need to worry about the situation that the
+                * merged segment ends in unaligned virt boundary:
+                *
+                * - if 'pb' ends aligned, the merged segment ends aligned
+                * - if 'pb' ends unaligned, the next bio must include
+                *   one single bvec of 'nb', otherwise the 'nb' can't
+                *   merge with 'pb'
+                */
                bio_get_last_bvec(prev, &pb);
                bio_get_first_bvec(next, &nb);
 
@@ -1691,12 +1714,12 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
 
 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
 {
-       return bio_will_gap(req->q, req->biotail, bio);
+       return bio_will_gap(req->q, req, req->biotail, bio);
 }
 
 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
 {
-       return bio_will_gap(req->q, bio, req->bio);
+       return bio_will_gap(req->q, NULL, bio, req->bio);
 }
 
 int kblockd_schedule_work(struct work_struct *work);
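Passing the request lets bio_will_gap() test the offset of the very first bvec of the merge candidate against the queue's virt boundary. A sketch of the underlying alignment rule, mirroring what the 4.11-era __bvec_gap_to_prev() helper computes (helper name and exact form assumed from that header):

#include <stdbool.h>
#include <stdio.h>

/* true if a segment ending at prev_off+prev_len cannot be followed by
 * one starting at next_off without a gap across the boundary mask */
static bool gap_to_prev(unsigned long prev_off, unsigned long prev_len,
                        unsigned long next_off, unsigned long mask)
{
        return next_off || ((prev_off + prev_len) & mask);
}

int main(void)
{
        unsigned long mask = 4095;      /* 4K virt boundary */

        printf("%d\n", gap_to_prev(0, 4096, 0, mask));    /* 0: mergeable */
        printf("%d\n", gap_to_prev(512, 1024, 0, mask));  /* 1: gap */
        printf("%d\n", gap_to_prev(0, 4096, 512, mask));  /* 1: gap */
        return 0;
}
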
index f6b43fbb141c9ad03c1e05880e8c2228743d076e..af9c86e958bdad3be90cfb5f19aad99ede457339 100644 (file)
@@ -570,6 +570,25 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
        pr_cont_kernfs_path(cgrp->kn);
 }
 
+static inline void cgroup_init_kthreadd(void)
+{
+       /*
+        * kthreadd is inherited by all kthreads, keep it in the root so
+        * that the new kthreads are guaranteed to stay in the root until
+        * initialization is finished.
+        */
+       current->no_cgroup_migration = 1;
+}
+
+static inline void cgroup_kthread_ready(void)
+{
+       /*
+        * This kthread finished initialization.  The creator should have
+        * set PF_NO_SETAFFINITY if this kthread should stay in the root.
+        */
+       current->no_cgroup_migration = 0;
+}
+
 #else /* !CONFIG_CGROUPS */
 
 struct cgroup_subsys_state;
@@ -590,6 +609,8 @@ static inline void cgroup_free(struct task_struct *p) {}
 
 static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
+static inline void cgroup_init_kthreadd(void) {}
+static inline void cgroup_kthread_ready(void) {}
 
 static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
                                               struct cgroup *ancestor)
index aebecc4ed088f45c189162e560095363ac0e6d2a..22d39e8d4de16be2b69762cf24c92799dfb445d6 100644 (file)
@@ -211,7 +211,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
 extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
 
 extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(struct elevator_queue *);
+extern void elevator_exit(struct request_queue *, struct elevator_queue *);
 extern int elevator_change(struct request_queue *, const char *);
 extern bool elv_bio_merge_ok(struct request *, struct bio *);
 extern struct elevator_queue *elevator_alloc(struct request_queue *,
index eafc965b3eb8487b67ae0db870e74f0471199fc2..dc30f3d057eb0801e9ae8d22fc3ce11943d188d1 100644 (file)
@@ -96,6 +96,9 @@
 #define GICH_MISR_EOI                  (1 << 0)
 #define GICH_MISR_U                    (1 << 1)
 
+#define GICV_PMR_PRIORITY_SHIFT                3
+#define GICV_PMR_PRIORITY_MASK         (0x1f << GICV_PMR_PRIORITY_SHIFT)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/irqdomain.h>
index 7a01c94496f14eac3c1ed13b37a9811cc1b6f853..3eef9fb9968ae730716a79bc9e3aef8be4e2e650 100644 (file)
  * Max bus-specific overhead incurred by request/responses.
  * I2C requires 1 additional byte for requests.
  * I2C requires 2 additional bytes for responses.
+ * SPI requires up to 32 additional bytes for responses.
  * */
 #define EC_PROTO_VERSION_UNKNOWN       0
 #define EC_MAX_REQUEST_OVERHEAD                1
-#define EC_MAX_RESPONSE_OVERHEAD       2
+#define EC_MAX_RESPONSE_OVERHEAD       32
 
 /*
  * Command interface between EC and AP, for LPC, I2C and SPI interfaces.
index 51891fb0d3ce075e9343495e54de6d8d03211ac9..c91b3bcd158f8fa8b2dbc3d4738ade5fdcc600cb 100644 (file)
@@ -394,18 +394,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
        ___pud;                                                         \
 })
 
-#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd)           \
-({                                                                     \
-       unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;              \
-       pmd_t ___pmd;                                                   \
-                                                                       \
-       ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd);         \
-       mmu_notifier_invalidate_range(__mm, ___haddr,                   \
-                                     ___haddr + HPAGE_PMD_SIZE);       \
-                                                                       \
-       ___pmd;                                                         \
-})
-
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -489,7 +477,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 #define        ptep_clear_flush_notify ptep_clear_flush
 #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
 #define pudp_huge_clear_flush_notify pudp_huge_clear_flush
-#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */
index c43d435d422552d029bd569157d9aa352348747f..9061780b141ff44c58a0ab5d5429dfbba8f1d2fb 100644 (file)
@@ -64,26 +64,26 @@ enum {
  * RDMA_QPTYPE field
  */
 enum {
-       NVMF_RDMA_QPTYPE_CONNECTED      = 0, /* Reliable Connected */
-       NVMF_RDMA_QPTYPE_DATAGRAM       = 1, /* Reliable Datagram */
+       NVMF_RDMA_QPTYPE_CONNECTED      = 1, /* Reliable Connected */
+       NVMF_RDMA_QPTYPE_DATAGRAM       = 2, /* Reliable Datagram */
 };
 
 /* RDMA QP Service Type codes for Discovery Log Page entry TSAS
  * RDMA_QPTYPE field
  */
 enum {
-       NVMF_RDMA_PRTYPE_NOT_SPECIFIED  = 0, /* No Provider Specified */
-       NVMF_RDMA_PRTYPE_IB             = 1, /* InfiniBand */
-       NVMF_RDMA_PRTYPE_ROCE           = 2, /* InfiniBand RoCE */
-       NVMF_RDMA_PRTYPE_ROCEV2         = 3, /* InfiniBand RoCEV2 */
-       NVMF_RDMA_PRTYPE_IWARP          = 4, /* IWARP */
+       NVMF_RDMA_PRTYPE_NOT_SPECIFIED  = 1, /* No Provider Specified */
+       NVMF_RDMA_PRTYPE_IB             = 2, /* InfiniBand */
+       NVMF_RDMA_PRTYPE_ROCE           = 3, /* InfiniBand RoCE */
+       NVMF_RDMA_PRTYPE_ROCEV2         = 4, /* InfiniBand RoCEV2 */
+       NVMF_RDMA_PRTYPE_IWARP          = 5, /* IWARP */
 };
 
 /* RDMA Connection Management Service Type codes for Discovery Log Page
  * entry TSAS RDMA_CMS field
  */
 enum {
-       NVMF_RDMA_CMS_RDMA_CM   = 0, /* Sockets based enpoint addressing */
+       NVMF_RDMA_CMS_RDMA_CM   = 1, /* Sockets based endpoint addressing */
 };
 
 #define NVMF_AQ_DEPTH          32
index 8ce2d87a238b84d432abca342f9920e42a8d0c42..5e45385c5bdc7af10cb9c75d53d933a16d6ba107 100644 (file)
@@ -145,8 +145,9 @@ struct pinctrl_desc {
 extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc,
                                     struct device *dev, void *driver_data,
                                     struct pinctrl_dev **pctldev);
+extern int pinctrl_enable(struct pinctrl_dev *pctldev);
 
-/* Please use pinctrl_register_and_init() instead */
+/* Please use pinctrl_register_and_init() and pinctrl_enable() instead */
 extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
                                struct device *dev, void *driver_data);
 
index 96fb139bdd08fdec3ad83e689ad2808375473126..13d8681210d545ab2dcedb472fa127b408446eae 100644 (file)
@@ -15,6 +15,9 @@ int reset_control_status(struct reset_control *rstc);
 struct reset_control *__of_reset_control_get(struct device_node *node,
                                     const char *id, int index, bool shared,
                                     bool optional);
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+                                         int index, bool shared,
+                                         bool optional);
 void reset_control_put(struct reset_control *rstc);
 struct reset_control *__devm_reset_control_get(struct device *dev,
                                     const char *id, int index, bool shared,
@@ -72,6 +75,13 @@ static inline struct reset_control *__of_reset_control_get(
        return optional ? NULL : ERR_PTR(-ENOTSUPP);
 }
 
+static inline struct reset_control *__reset_control_get(
+                                       struct device *dev, const char *id,
+                                       int index, bool shared, bool optional)
+{
+       return optional ? NULL : ERR_PTR(-ENOTSUPP);
+}
+
 static inline struct reset_control *__devm_reset_control_get(
                                        struct device *dev, const char *id,
                                        int index, bool shared, bool optional)
@@ -102,8 +112,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
 #ifndef CONFIG_RESET_CONTROLLER
        WARN_ON(1);
 #endif
-       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
-                                                                       false);
+       return __reset_control_get(dev, id, 0, false, false);
 }
 
 /**
@@ -131,22 +140,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
 static inline struct reset_control *reset_control_get_shared(
                                        struct device *dev, const char *id)
 {
-       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
-                                                                       false);
+       return __reset_control_get(dev, id, 0, true, false);
 }
 
 static inline struct reset_control *reset_control_get_optional_exclusive(
                                        struct device *dev, const char *id)
 {
-       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false,
-                                                                       true);
+       return __reset_control_get(dev, id, 0, false, true);
 }
 
 static inline struct reset_control *reset_control_get_optional_shared(
                                        struct device *dev, const char *id)
 {
-       return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true,
-                                                                       true);
+       return __reset_control_get(dev, id, 0, true, true);
 }
 
 /**
index d67eee84fd430f3c44b77d4ba007ec5d2dcabb2b..4cf9a59a4d08ed181f30d7cefa56db92f48408d2 100644 (file)
@@ -604,6 +604,10 @@ struct task_struct {
 #ifdef CONFIG_COMPAT_BRK
        unsigned                        brk_randomized:1;
 #endif
+#ifdef CONFIG_CGROUPS
+       /* disallow userland-initiated cgroup migration */
+       unsigned                        no_cgroup_migration:1;
+#endif
 
        unsigned long                   atomic_flags; /* Flags requiring atomic access. */
 
index c76e524fb34b6af362c82103662454750826ac9e..64b6b3aece21aee52a2b7c9246862afb41d606f1 100644 (file)
@@ -26,6 +26,7 @@ struct kstat {
        unsigned int    nlink;
        uint32_t        blksize;        /* Preferred I/O size */
        u64             attributes;
+       u64             attributes_mask;
 #define KSTAT_ATTR_FS_IOC_FLAGS                                \
        (STATX_ATTR_COMPRESSED |                        \
         STATX_ATTR_IMMUTABLE |                         \
index 804e34c6f981de7402ba6a39bd0375b3a3dd0efe..f2d36a3d30052db827fab68e9278fc1b693068ca 100644 (file)
@@ -39,7 +39,10 @@ struct iov_iter {
        };
        union {
                unsigned long nr_segs;
-               int idx;
+               struct {
+                       int idx;
+                       int start_idx;
+               };
        };
 };
 
@@ -81,6 +84,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
 size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
+void iov_iter_revert(struct iov_iter *i, size_t bytes);
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
 size_t iov_iter_single_seg_count(const struct iov_iter *i);
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
index 04b0d3f95043c66856c6a6f4cab078c4791d35e8..7edfbdb55a995d436bf9e999ce202d0ca0bf2550 100644 (file)
@@ -167,6 +167,7 @@ struct virtio_driver {
        unsigned int feature_table_size;
        const unsigned int *feature_table_legacy;
        unsigned int feature_table_size_legacy;
+       int (*validate)(struct virtio_device *dev);
        int (*probe)(struct virtio_device *dev);
        void (*scan)(struct virtio_device *dev);
        void (*remove)(struct virtio_device *dev);
index 1f71ee5ab518410ecd2fe0cab6064681025b43b3..069582ee5d7fd5b0e92edea68cb2406fbbe6db00 100644 (file)
@@ -448,10 +448,9 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
        return frag;
 }
 
-static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc)
+static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
 {
-
-       sctp_assoc_sync_pmtu(sk, asoc);
+       sctp_assoc_sync_pmtu(asoc);
        asoc->pmtu_pending = 0;
 }
 
@@ -596,12 +595,23 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
  */
 static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
 {
-       if (t->dst && (!dst_check(t->dst, t->dst_cookie) ||
-                      t->pathmtu != max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
-                                          SCTP_DEFAULT_MINSEGMENT)))
+       if (t->dst && !dst_check(t->dst, t->dst_cookie))
                sctp_transport_dst_release(t);
 
        return t->dst;
 }
 
+static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+{
+       __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
+                          SCTP_DEFAULT_MINSEGMENT);
+
+       if (t->pathmtu == pmtu)
+               return true;
+
+       t->pathmtu = pmtu;
+
+       return false;
+}
+
 #endif /* __net_sctp_h__ */
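The new helper separates "is the cached route still valid" from "did the path MTU change", updating t->pathmtu in place and reporting whether it moved. The clamp itself is small; a sketch with SCTP_TRUNC4 and the 512-byte minimum taken as defined in the SCTP headers of this era:

#include <stdio.h>
#include <stdint.h>

#define SCTP_TRUNC4(len)        ((len) & ~3U)   /* round down to 4 bytes */
#define SCTP_DEFAULT_MINSEGMENT 512U

static uint32_t clamped_pmtu(uint32_t dst_mtu)
{
        uint32_t pmtu = SCTP_TRUNC4(dst_mtu);

        return pmtu < SCTP_DEFAULT_MINSEGMENT ? SCTP_DEFAULT_MINSEGMENT : pmtu;
}

int main(void)
{
        printf("%u\n", clamped_pmtu(1501));     /* 1500 */
        printf("%u\n", clamped_pmtu(100));      /* 512  */
        return 0;
}
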
index 592decebac752ffca16fddbc75cbf14f9283d125..138f8615acf0993d8015f6c1d2eee32966dccad0 100644 (file)
@@ -377,7 +377,8 @@ typedef struct sctp_sender_hb_info {
        __u64 hb_nonce;
 } sctp_sender_hb_info_t;
 
-struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp);
+int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp);
+int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp);
 void sctp_stream_free(struct sctp_stream *stream);
 void sctp_stream_clear(struct sctp_stream *stream);
 
@@ -499,7 +500,6 @@ struct sctp_datamsg {
        /* Did the message fail to send? */
        int send_error;
        u8 send_failed:1,
-          force_delay:1,
           can_delay;       /* should this message be Nagle delayed */
 };
 
@@ -952,8 +952,8 @@ void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t);
 void sctp_transport_burst_limited(struct sctp_transport *);
 void sctp_transport_burst_reset(struct sctp_transport *);
 unsigned long sctp_transport_timeout(struct sctp_transport *);
-void sctp_transport_reset(struct sctp_transport *);
-void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32);
+void sctp_transport_reset(struct sctp_transport *t);
+void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu);
 void sctp_transport_immediate_rtx(struct sctp_transport *);
 void sctp_transport_dst_release(struct sctp_transport *t);
 void sctp_transport_dst_confirm(struct sctp_transport *t);
@@ -1878,6 +1878,7 @@ struct sctp_association {
 
        __u8 need_ecne:1,       /* Need to send an ECNE Chunk? */
             temp:1,            /* Is it a temporary association? */
+            force_delay:1,
             prsctp_enable:1,
             reconf_enable:1;
 
@@ -1953,7 +1954,7 @@ void sctp_assoc_update(struct sctp_association *old,
 
 __u32 sctp_association_get_next_tsn(struct sctp_association *);
 
-void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
+void sctp_assoc_sync_pmtu(struct sctp_association *asoc);
 void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
 void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
 void sctp_assoc_set_primary(struct sctp_association *,
index 4b784b6e21c0d9cb533b31997883d7dd447343bf..ccfad0e9c2cdbd68f13c809c7ed6414b2c0c97c1 100644 (file)
@@ -117,6 +117,7 @@ enum transport_state_table {
        TRANSPORT_ISTATE_PROCESSING = 11,
        TRANSPORT_COMPLETE_QF_WP = 18,
        TRANSPORT_COMPLETE_QF_OK = 19,
+       TRANSPORT_COMPLETE_QF_ERR = 20,
 };
 
 /* Used for struct se_cmd->se_cmd_flags */
@@ -279,8 +280,6 @@ struct t10_alua_tg_pt_gp {
        u16     tg_pt_gp_id;
        int     tg_pt_gp_valid_id;
        int     tg_pt_gp_alua_supported_states;
-       int     tg_pt_gp_alua_pending_state;
-       int     tg_pt_gp_alua_previous_state;
        int     tg_pt_gp_alua_access_status;
        int     tg_pt_gp_alua_access_type;
        int     tg_pt_gp_nonop_delay_msecs;
@@ -289,18 +288,16 @@ struct t10_alua_tg_pt_gp {
        int     tg_pt_gp_pref;
        int     tg_pt_gp_write_metadata;
        u32     tg_pt_gp_members;
-       atomic_t tg_pt_gp_alua_access_state;
+       int     tg_pt_gp_alua_access_state;
        atomic_t tg_pt_gp_ref_cnt;
        spinlock_t tg_pt_gp_lock;
-       struct mutex tg_pt_gp_md_mutex;
+       struct mutex tg_pt_gp_transition_mutex;
        struct se_device *tg_pt_gp_dev;
        struct config_group tg_pt_gp_group;
        struct list_head tg_pt_gp_list;
        struct list_head tg_pt_gp_lun_list;
        struct se_lun *tg_pt_gp_alua_lun;
        struct se_node_acl *tg_pt_gp_alua_nacl;
-       struct work_struct tg_pt_gp_transition_work;
-       struct completion *tg_pt_gp_transition_complete;
 };
 
 struct t10_vpd {
@@ -705,6 +702,7 @@ struct se_lun {
        u64                     unpacked_lun;
 #define SE_LUN_LINK_MAGIC                      0xffff7771
        u32                     lun_link_magic;
+       bool                    lun_shutdown;
        bool                    lun_access_ro;
        u32                     lun_index;
 
index dd9820b1c7796b87986443124ad18907b8b719c3..f8d9fed17ba99418d858ceb57a864b31a00d078a 100644 (file)
@@ -445,6 +445,7 @@ header-y += unistd.h
 header-y += unix_diag.h
 header-y += usbdevice_fs.h
 header-y += usbip.h
+header-y += userio.h
 header-y += utime.h
 header-y += utsname.h
 header-y += uuid.h
index 51a6b86e370043f7b37f128c4896f5f588710643..d538897b8e08bb4358c56148c189fc7b8128a9da 100644 (file)
@@ -114,7 +114,7 @@ struct statx {
        __u64   stx_ino;        /* Inode number */
        __u64   stx_size;       /* File size */
        __u64   stx_blocks;     /* Number of 512-byte blocks allocated */
-       __u64   __spare1[1];
+       __u64   stx_attributes_mask; /* Mask to show what's supported in stx_attributes */
        /* 0x40 */
        struct statx_timestamp  stx_atime;      /* Last access time */
        struct statx_timestamp  stx_btime;      /* File creation time */
@@ -152,9 +152,10 @@ struct statx {
 #define STATX_BASIC_STATS      0x000007ffU     /* The stuff in the normal stat struct */
 #define STATX_BTIME            0x00000800U     /* Want/got stx_btime */
 #define STATX_ALL              0x00000fffU     /* All currently supported flags */
+#define STATX__RESERVED                0x80000000U     /* Reserved for future struct statx expansion */
 
 /*
- * Attributes to be found in stx_attributes
+ * Attributes to be found in stx_attributes and masked in stx_attributes_mask.
  *
  * These give information about the features or the state of a file that might
  * be of use to ordinary userspace programs such as GUIs or ls rather than
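Repurposing the spare word as stx_attributes_mask lets userspace distinguish an attribute bit that is genuinely zero from one the filesystem never reports. A short consumer sketch, assuming glibc >= 2.28 for the statx(2) wrapper:

#define _GNU_SOURCE
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        struct statx stx;

        if (statx(AT_FDCWD, ".", 0, STATX_BASIC_STATS, &stx) != 0)
                return 1;
        if (stx.stx_attributes_mask & STATX_ATTR_IMMUTABLE)
                printf("immutable: %s\n",
                       stx.stx_attributes & STATX_ATTR_IMMUTABLE ? "yes" : "no");
        else
                printf("filesystem does not report the immutable bit\n");
        return 0;
}
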
index 15b4385a2be169e9b99221f31a444820feadae3d..90007a1abcab144ac3d6ac7d6e6f4001d58abb14 100644 (file)
@@ -79,7 +79,7 @@
  * configuration space */
 #define VIRTIO_PCI_CONFIG_OFF(msix_enabled)    ((msix_enabled) ? 24 : 20)
 /* Deprecated: please use VIRTIO_PCI_CONFIG_OFF instead */
-#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->pci_dev->msix_enabled)
+#define VIRTIO_PCI_CONFIG(dev) VIRTIO_PCI_CONFIG_OFF((dev)->msix_enabled)
 
 /* Virtio ABI version, this must match exactly */
 #define VIRTIO_PCI_ABI_VERSION         0
index 2f4964cfde0b4f142778c199e606dc3d519ebbf0..a871bf80fde1adc79040936475b7045971e72d76 100644 (file)
@@ -160,7 +160,6 @@ static LIST_HEAD(audit_freelist);
 
 /* queue msgs to send via kauditd_task */
 static struct sk_buff_head audit_queue;
-static void kauditd_hold_skb(struct sk_buff *skb);
 /* queue msgs due to temporary unicast send problems */
 static struct sk_buff_head audit_retry_queue;
 /* queue msgs waiting for new auditd connection */
@@ -453,30 +452,6 @@ static void auditd_set(int pid, u32 portid, struct net *net)
        spin_unlock_irqrestore(&auditd_conn.lock, flags);
 }
 
-/**
- * auditd_reset - Disconnect the auditd connection
- *
- * Description:
- * Break the auditd/kauditd connection and move all the queued records into the
- * hold queue in case auditd reconnects.
- */
-static void auditd_reset(void)
-{
-       struct sk_buff *skb;
-
-       /* if it isn't already broken, break the connection */
-       rcu_read_lock();
-       if (auditd_conn.pid)
-               auditd_set(0, 0, NULL);
-       rcu_read_unlock();
-
-       /* flush all of the main and retry queues to the hold queue */
-       while ((skb = skb_dequeue(&audit_retry_queue)))
-               kauditd_hold_skb(skb);
-       while ((skb = skb_dequeue(&audit_queue)))
-               kauditd_hold_skb(skb);
-}
-
 /**
  * kauditd_print_skb - Print the audit record to the ring buffer
  * @skb: audit record
@@ -505,9 +480,6 @@ static void kauditd_rehold_skb(struct sk_buff *skb)
 {
        /* put the record back in the queue at the same place */
        skb_queue_head(&audit_hold_queue, skb);
-
-       /* fail the auditd connection */
-       auditd_reset();
 }
 
 /**
@@ -544,9 +516,6 @@ static void kauditd_hold_skb(struct sk_buff *skb)
        /* we have no other options - drop the message */
        audit_log_lost("kauditd hold queue overflow");
        kfree_skb(skb);
-
-       /* fail the auditd connection */
-       auditd_reset();
 }
 
 /**
@@ -566,6 +535,30 @@ static void kauditd_retry_skb(struct sk_buff *skb)
        skb_queue_tail(&audit_retry_queue, skb);
 }
 
+/**
+ * auditd_reset - Disconnect the auditd connection
+ *
+ * Description:
+ * Break the auditd/kauditd connection and move all the queued records into the
+ * hold queue in case auditd reconnects.
+ */
+static void auditd_reset(void)
+{
+       struct sk_buff *skb;
+
+       /* if it isn't already broken, break the connection */
+       rcu_read_lock();
+       if (auditd_conn.pid)
+               auditd_set(0, 0, NULL);
+       rcu_read_unlock();
+
+       /* flush all of the main and retry queues to the hold queue */
+       while ((skb = skb_dequeue(&audit_retry_queue)))
+               kauditd_hold_skb(skb);
+       while ((skb = skb_dequeue(&audit_queue)))
+               kauditd_hold_skb(skb);
+}
+
 /**
  * auditd_send_unicast_skb - Send a record via unicast to auditd
  * @skb: audit record
@@ -758,6 +751,7 @@ static int kauditd_thread(void *dummy)
                                        NULL, kauditd_rehold_skb);
                if (rc < 0) {
                        sk = NULL;
+                       auditd_reset();
                        goto main_queue;
                }
 
@@ -767,6 +761,7 @@ static int kauditd_thread(void *dummy)
                                        NULL, kauditd_hold_skb);
                if (rc < 0) {
                        sk = NULL;
+                       auditd_reset();
                        goto main_queue;
                }
 
@@ -775,16 +770,18 @@ main_queue:
                 * unicast, dump failed record sends to the retry queue; if
                 * sk == NULL due to previous failures we will just do the
                 * multicast send and move the record to the retry queue */
-               kauditd_send_queue(sk, portid, &audit_queue, 1,
-                                  kauditd_send_multicast_skb,
-                                  kauditd_retry_skb);
+               rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
+                                       kauditd_send_multicast_skb,
+                                       kauditd_retry_skb);
+               if (sk == NULL || rc < 0)
+                       auditd_reset();
+               sk = NULL;
 
                /* drop our netns reference, no auditd sends past this line */
                if (net) {
                        put_net(net);
                        net = NULL;
                }
-               sk = NULL;
 
                /* we have processed all the queues so wake everyone */
                wake_up(&audit_backlog_wait);
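The audit.c hunks above invert the reset responsibility: kauditd_hold_skb() and kauditd_rehold_skb() used to call auditd_reset(), but auditd_reset() itself flushes the retry and main queues through kauditd_hold_skb(), so a failure during the flush could re-enter the reset path. auditd_reset() now only moves records, and kauditd_thread() decides to reset once per failed send pass. A minimal user-space sketch of the reworked flow, with illustrative names and plain ints standing in for skbs:

    /* Error callback only requeues; the caller resets the connection
     * once per pass, so the flush path can never recurse into reset. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool conn_up = false;            /* pretend auditd went away */

    static int send_record(int rec)
    {
            return conn_up ? 0 : -1;
    }

    static void hold_record(int rec)        /* callback: requeue only */
    {
            printf("held record %d\n", rec);
    }

    static void reset_connection(void)      /* caller-side decision */
    {
            printf("connection reset, queues moved to hold queue\n");
    }

    int main(void)
    {
            int rc = 0;

            for (int rec = 1; rec <= 3 && rc == 0; rec++) {
                    rc = send_record(rec);
                    if (rc < 0)
                            hold_record(rec);
            }
            if (rc < 0)
                    reset_connection();     /* as kauditd_thread() now does */
            return 0;
    }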
index 0f1cf6d1878ab34a65d72644175ed50055df9a09..0d87f8ab8778579dde21754e4ed647286f34e3ef 100644 (file)
@@ -333,13 +333,7 @@ extern u32 audit_sig_sid;
 extern int audit_filter(int msgtype, unsigned int listtype);
 
 #ifdef CONFIG_AUDITSYSCALL
-extern int __audit_signal_info(int sig, struct task_struct *t);
-static inline int audit_signal_info(int sig, struct task_struct *t)
-{
-       if (auditd_test_task(t) || (audit_signals && !audit_dummy_context()))
-               return __audit_signal_info(sig, t);
-       return 0;
-}
+extern int audit_signal_info(int sig, struct task_struct *t);
 extern void audit_filter_inodes(struct task_struct *, struct audit_context *);
 extern struct list_head *audit_killed_trees(void);
 #else
index e59ffc7fc522ad9d057c34339807dfe35845d45e..1c2333155893fac54138a8b3f7311c2ca520992c 100644 (file)
@@ -2249,26 +2249,27 @@ void __audit_ptrace(struct task_struct *t)
  * If the audit subsystem is being terminated, record the task (pid)
  * and uid that is doing that.
  */
-int __audit_signal_info(int sig, struct task_struct *t)
+int audit_signal_info(int sig, struct task_struct *t)
 {
        struct audit_aux_data_pids *axp;
        struct task_struct *tsk = current;
        struct audit_context *ctx = tsk->audit_context;
        kuid_t uid = current_uid(), t_uid = task_uid(t);
 
-       if (auditd_test_task(t)) {
-               if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
-                       audit_sig_pid = task_tgid_nr(tsk);
-                       if (uid_valid(tsk->loginuid))
-                               audit_sig_uid = tsk->loginuid;
-                       else
-                               audit_sig_uid = uid;
-                       security_task_getsecid(tsk, &audit_sig_sid);
-               }
-               if (!audit_signals || audit_dummy_context())
-                       return 0;
+       if (auditd_test_task(t) &&
+           (sig == SIGTERM || sig == SIGHUP ||
+            sig == SIGUSR1 || sig == SIGUSR2)) {
+               audit_sig_pid = task_tgid_nr(tsk);
+               if (uid_valid(tsk->loginuid))
+                       audit_sig_uid = tsk->loginuid;
+               else
+                       audit_sig_uid = uid;
+               security_task_getsecid(tsk, &audit_sig_sid);
        }
 
+       if (!audit_signals || audit_dummy_context())
+               return 0;
+
        /* optimize the common case by putting first signal recipient directly
         * in audit_context */
        if (!ctx->target_pid) {
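The audit.h change above and this auditsc.c hunk are one refactor: the __audit_signal_info() fast-path wrapper is folded into audit_signal_info(), and the checks are flattened so the record-who-signalled-auditd block runs first, with the audit_signals/audit_dummy_context() early return following unconditionally instead of being nested in the auditd branch. A compile-clean sketch of the flattened ordering, where every parameter stands in for kernel task/context state and the signal numbers are the usual x86 Linux values:

    #include <stdbool.h>

    static int sig_pid;     /* stands in for audit_sig_pid et al. */

    static int audit_signal_info_sketch(int sig, bool target_is_auditd,
                                        bool audit_signals, bool dummy_ctx,
                                        int sender_pid)
    {
            if (target_is_auditd && (sig == 15 /* TERM */ || sig == 1 /* HUP */ ||
                                     sig == 10 /* USR1 */ || sig == 12 /* USR2 */))
                    sig_pid = sender_pid;   /* record who signalled auditd */

            if (!audit_signals || dummy_ctx)
                    return 0;               /* common case: nothing to log */

            /* ... attach the target to the sender's audit context ... */
            return 0;
    }

    int main(void)
    {
            return audit_signal_info_sketch(15, true, false, false, 1234);
    }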
index f45827e205d3f491a818a024dca5122c68924697..b4f1cb0c5ac7104c3f12f9ed5c1e3fe159c57824 100644 (file)
@@ -1162,12 +1162,12 @@ out:
        LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
                off = IMM;
 load_word:
-               /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
-                * only appearing in the programs where ctx ==
-                * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
-                * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
-                * internal BPF verifier will check that BPF_R6 ==
-                * ctx.
+                /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns are only
+                * appearing in the programs where ctx == skb
+                * (see may_access_skb() in the verifier). All programs
+                * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
+                * bpf_convert_filter() saves it in BPF_R6, internal BPF
+                * verifier will check that BPF_R6 == ctx.
                 *
                 * BPF_ABS and BPF_IND are wrappers of function calls,
                 * so they scratch BPF_R1-BPF_R5 registers, preserve
index 796b68d001198a39186cba850fe8161476a17bfa..a834068a400e279f963097489ed165c6ad1301b2 100644 (file)
@@ -765,38 +765,56 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
        }
 }
 
-static int check_ptr_alignment(struct bpf_verifier_env *env,
-                              struct bpf_reg_state *reg, int off, int size)
+static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
+                                  int off, int size)
 {
-       if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_MAP_VALUE_ADJ) {
-               if (off % size != 0) {
-                       verbose("misaligned access off %d size %d\n",
-                               off, size);
-                       return -EACCES;
-               } else {
-                       return 0;
-               }
-       }
-
-       if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
-               /* misaligned access to packet is ok on x86,arm,arm64 */
-               return 0;
-
        if (reg->id && size != 1) {
-               verbose("Unknown packet alignment. Only byte-sized access allowed\n");
+               verbose("Unknown alignment. Only byte-sized access allowed in packet access.\n");
                return -EACCES;
        }
 
        /* skb->data is NET_IP_ALIGN-ed */
-       if (reg->type == PTR_TO_PACKET &&
-           (NET_IP_ALIGN + reg->off + off) % size != 0) {
+       if ((NET_IP_ALIGN + reg->off + off) % size != 0) {
                verbose("misaligned packet access off %d+%d+%d size %d\n",
                        NET_IP_ALIGN, reg->off, off, size);
                return -EACCES;
        }
+
        return 0;
 }
 
+static int check_val_ptr_alignment(const struct bpf_reg_state *reg,
+                                  int size)
+{
+       if (size != 1) {
+               verbose("Unknown alignment. Only byte-sized access allowed in value access.\n");
+               return -EACCES;
+       }
+
+       return 0;
+}
+
+static int check_ptr_alignment(const struct bpf_reg_state *reg,
+                              int off, int size)
+{
+       switch (reg->type) {
+       case PTR_TO_PACKET:
+               return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
+                      check_pkt_ptr_alignment(reg, off, size);
+       case PTR_TO_MAP_VALUE_ADJ:
+               return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ? 0 :
+                      check_val_ptr_alignment(reg, size);
+       default:
+               if (off % size != 0) {
+                       verbose("misaligned access off %d size %d\n",
+                               off, size);
+                       return -EACCES;
+               }
+
+               return 0;
+       }
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -818,7 +836,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
        if (size < 0)
                return size;
 
-       err = check_ptr_alignment(env, reg, off, size);
+       err = check_ptr_alignment(reg, off, size);
        if (err)
                return err;
 
@@ -1925,6 +1943,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                 * register as unknown.
                 */
                if (env->allow_ptr_leaks &&
+                   BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
                    (dst_reg->type == PTR_TO_MAP_VALUE ||
                     dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
                        dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
@@ -1973,14 +1992,15 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
 
        for (i = 0; i < MAX_BPF_REG; i++)
                if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
-                       regs[i].range = dst_reg->off;
+                       /* keep the maximum range already checked */
+                       regs[i].range = max(regs[i].range, dst_reg->off);
 
        for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
                if (state->stack_slot_type[i] != STACK_SPILL)
                        continue;
                reg = &state->spilled_regs[i / BPF_REG_SIZE];
                if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
-                       reg->range = dst_reg->off;
+                       reg->range = max(reg->range, dst_reg->off);
        }
 }
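Two independent verifier fixes land above. check_ptr_alignment() is split per pointer type so PTR_TO_MAP_VALUE_ADJ gets the same byte-only restriction as variable-offset packet pointers on strict-alignment architectures, and the ALU path now marks a register PTR_TO_MAP_VALUE_ADJ only for BPF_ALU64 BPF_ADD. Separately, find_good_pkt_pointers() keeps the maximum range already proven instead of overwriting it. A toy demonstration of the range rule:

    /* A later, weaker length check must not erase a stronger one the
     * verifier already proved for the same packet pointer id. */
    #include <stdio.h>

    #define max(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            int range = 64;         /* one branch proved 64 bytes readable */
            int later_off = 14;     /* another branch proves only 14 */

            /* old: range = later_off;   -- would forget the 64 */
            range = max(range, later_off);
            printf("range kept at %d\n", range);
            return 0;
    }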
 
index 48851327a15e18e8ba151a3a45c5126c5023ddb8..687f5e0194efccbadce199c0570cd08b8c96181a 100644 (file)
@@ -2425,11 +2425,12 @@ ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
                tsk = tsk->group_leader;
 
        /*
-        * Workqueue threads may acquire PF_NO_SETAFFINITY and become
-        * trapped in a cpuset, or RT worker may be born in a cgroup
-        * with no rt_runtime allocated.  Just say no.
+        * kthreads may acquire PF_NO_SETAFFINITY during initialization.
+        * If userland migrates such a kthread to a non-root cgroup, it can
+        * become trapped in a cpuset, or RT kthread may be born in a
+        * cgroup with no rt_runtime allocated.  Just say no.
         */
-       if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
+       if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
                ret = -EINVAL;
                goto out_unlock_rcu;
        }
index 4544b115f5eb85d4b01ec966f933f191cd2cae1e..d052947fe78507ac80bbe58a09cc612bca7bab73 100644 (file)
@@ -59,7 +59,7 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
 struct cpumask *
 irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
-       int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
+       int n, nodes, cpus_per_vec, extra_vecs, curvec;
        int affv = nvecs - affd->pre_vectors - affd->post_vectors;
        int last_affv = affv + affd->pre_vectors;
        nodemask_t nodemsk = NODE_MASK_NONE;
@@ -94,19 +94,21 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
                goto done;
        }
 
-       /* Spread the vectors per node */
-       vecs_per_node = affv / nodes;
-       /* Account for rounding errors */
-       extra_vecs = affv - (nodes * vecs_per_node);
-
        for_each_node_mask(n, nodemsk) {
-               int ncpus, v, vecs_to_assign = vecs_per_node;
+               int ncpus, v, vecs_to_assign, vecs_per_node;
+
+               /* Spread the vectors per node */
+               vecs_per_node = (affv - curvec) / nodes;
 
                /* Get the cpus on this node which are in the mask */
                cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
 
                /* Calculate the number of cpus per vector */
                ncpus = cpumask_weight(nmsk);
+               vecs_to_assign = min(vecs_per_node, ncpus);
+
+               /* Account for rounding errors */
+               extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);
 
                for (v = 0; curvec < last_affv && v < vecs_to_assign;
                     curvec++, v++) {
@@ -115,14 +117,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
                        /* Account for extra vectors to compensate rounding errors */
                        if (extra_vecs) {
                                cpus_per_vec++;
-                               if (!--extra_vecs)
-                                       vecs_per_node++;
+                               --extra_vecs;
                        }
                        irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
                }
 
                if (curvec >= last_affv)
                        break;
+               --nodes;
        }
 
 done:
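The affinity-spreading rewrite above fixes vector loss when nodes are uneven: vecs_per_node was computed once up front, so a node with fewer CPUs than its share stranded the surplus. It is now recomputed each iteration from what is still unassigned, capped by the node's CPU count, and the remaining-node count is decremented so later nodes absorb the slack. A worked example of the new arithmetic (node sizes made up):

    /* 8 vectors over two nodes with 2 and 6 CPUs: the old up-front
     * split gave each node 4 regardless of CPU count; the new code
     * caps node0 at its 2 CPUs and hands the other 6 to node1. */
    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            int affv = 8, curvec = 0, nodes = 2;
            int node_cpus[] = { 2, 6 };

            for (int n = 0; n < 2; n++, nodes--) {
                    int vecs_per_node = (affv - curvec) / nodes;
                    int vecs_to_assign = min(vecs_per_node, node_cpus[n]);

                    printf("node %d: %d vectors\n", n, vecs_to_assign);
                    curvec += vecs_to_assign;
            }
            printf("assigned %d of %d\n", curvec, affv);
            return 0;
    }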
index 2f26adea0f84d21f4dae6d1ffdbbf94a73f40c67..26db528c1d881bf371ea5b53b7ade0815c990bf1 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/freezer.h>
 #include <linux/ptrace.h>
 #include <linux/uaccess.h>
+#include <linux/cgroup.h>
 #include <trace/events/sched.h>
 
 static DEFINE_SPINLOCK(kthread_create_lock);
@@ -225,6 +226,7 @@ static int kthread(void *_create)
 
        ret = -EINTR;
        if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
+               cgroup_kthread_ready();
                __kthread_parkme(self);
                ret = threadfn(data);
        }
@@ -538,6 +540,7 @@ int kthreadd(void *unused)
        set_mems_allowed(node_states[N_MEMORY]);
 
        current->flags |= PF_NOFREEZE;
+       cgroup_init_kthreadd();
 
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
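These kthread.c hooks pair with the cgroup.c hunk above: cgroup_init_kthreadd() flags kthreadd itself, children inherit the flag across fork, and cgroup_kthread_ready() clears it only once the kthread is past initialization, closing the window in which userland could migrate a kthread that had not yet acquired PF_NO_SETAFFINITY. A sketch of the gate, assuming a simplified task struct:

    #include <stdbool.h>
    #include <stdio.h>

    struct task_sketch {
            bool no_cgroup_migration;       /* set while initializing */
            bool no_setaffinity;
    };

    static int cgroup_attach_sketch(const struct task_sketch *t)
    {
            if (t->no_cgroup_migration || t->no_setaffinity)
                    return -22;             /* -EINVAL: just say no */
            return 0;
    }

    int main(void)
    {
            struct task_sketch t = { .no_cgroup_migration = true };

            printf("during init: %d\n", cgroup_attach_sketch(&t));
            t.no_cgroup_migration = false;  /* cgroup_kthread_ready() */
            printf("after ready: %d\n", cgroup_attach_sketch(&t));
            return 0;
    }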
index 0af9287121746d1b198429d52a99e862c4f1a8f0..266ddcc1d8bbbc6af7bceda3657618beef2a9c59 100644 (file)
@@ -184,11 +184,17 @@ static void ptrace_unfreeze_traced(struct task_struct *task)
 
        WARN_ON(!task->ptrace || task->parent != current);
 
+       /*
+        * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
+        * Recheck state under the lock to close this race.
+        */
        spin_lock_irq(&task->sighand->siglock);
-       if (__fatal_signal_pending(task))
-               wake_up_state(task, __TASK_TRACED);
-       else
-               task->state = TASK_TRACED;
+       if (task->state == __TASK_TRACED) {
+               if (__fatal_signal_pending(task))
+                       wake_up_state(task, __TASK_TRACED);
+               else
+                       task->state = TASK_TRACED;
+       }
        spin_unlock_irq(&task->sighand->siglock);
 }
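The ptrace hunk turns an unconditional state write into a guarded one: with PTRACE_LISTEN, ptrace_trap_notify() may wake the tracee out of __TASK_TRACED remotely, and writing TASK_TRACED back blindly would erase that wakeup. A compressed pthread analogue of the recheck-under-lock pattern, with illustrative state names:

    #include <pthread.h>
    #include <stdio.h>

    enum state { TRACED_FROZEN, TRACED, RUNNING };

    static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
    static enum state task_state = TRACED_FROZEN;

    static void unfreeze(void)
    {
            pthread_mutex_lock(&siglock);
            if (task_state == TRACED_FROZEN)   /* the added recheck */
                    task_state = TRACED;       /* RUNNING stays RUNNING */
            pthread_mutex_unlock(&siglock);
    }

    int main(void)
    {
            task_state = RUNNING;   /* a remote wakeup beat us to it */
            unfreeze();
            printf("state = %d (still RUNNING)\n", (int)task_state);
            return 0;
    }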
 
index acf0a5a06da7c0c47003982e83bb74fcd4ebba5f..8c8714fcb53c35a390becf14f2fa8e1bfc20142c 100644 (file)
@@ -2133,9 +2133,12 @@ static int do_proc_douintvec_conv(bool *negp, unsigned long *lvalp,
        if (write) {
                if (*negp)
                        return -EINVAL;
+               if (*lvalp > UINT_MAX)
+                       return -EINVAL;
                *valp = *lvalp;
        } else {
                unsigned int val = *valp;
+               *negp = false;
                *lvalp = (unsigned long)val;
        }
        return 0;
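do_proc_douintvec_conv() gains two guards above: a write larger than UINT_MAX now fails with -EINVAL instead of truncating on 64-bit, and the read path clears *negp so a stale negative flag cannot mislabel an unsigned value. A user-space rendering of the tightened helper:

    #include <stdbool.h>
    #include <limits.h>

    static int douintvec_conv_sketch(bool *negp, unsigned long *lvalp,
                                     unsigned int *valp, bool write)
    {
            if (write) {
                    if (*negp || *lvalp > UINT_MAX)
                            return -22;     /* -EINVAL, no truncation */
                    *valp = (unsigned int)*lvalp;
            } else {
                    *negp = false;          /* unsigned, never negative */
                    *lvalp = *valp;
            }
            return 0;
    }

    int main(void)
    {
            bool neg = false;
            unsigned long lval = 4294967296UL;  /* UINT_MAX + 1 on 64-bit */
            unsigned int val = 0;

            return douintvec_conv_sketch(&neg, &lval, &val, true) ? 0 : 1;
    }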
index e68604ae3cedf41ce98bc06de2142629fa115cbd..60abc44385b795de7449b3810e9e294b7d2c48fc 100644 (file)
@@ -786,6 +786,68 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
 }
 EXPORT_SYMBOL(iov_iter_advance);
 
+void iov_iter_revert(struct iov_iter *i, size_t unroll)
+{
+       if (!unroll)
+               return;
+       i->count += unroll;
+       if (unlikely(i->type & ITER_PIPE)) {
+               struct pipe_inode_info *pipe = i->pipe;
+               int idx = i->idx;
+               size_t off = i->iov_offset;
+               while (1) {
+                       size_t n = off - pipe->bufs[idx].offset;
+                       if (unroll < n) {
+                               off -= (n - unroll);
+                               break;
+                       }
+                       unroll -= n;
+                       if (!unroll && idx == i->start_idx) {
+                               off = 0;
+                               break;
+                       }
+                       if (!idx--)
+                               idx = pipe->buffers - 1;
+                       off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
+               }
+               i->iov_offset = off;
+               i->idx = idx;
+               pipe_truncate(i);
+               return;
+       }
+       if (unroll <= i->iov_offset) {
+               i->iov_offset -= unroll;
+               return;
+       }
+       unroll -= i->iov_offset;
+       if (i->type & ITER_BVEC) {
+               const struct bio_vec *bvec = i->bvec;
+               while (1) {
+                       size_t n = (--bvec)->bv_len;
+                       i->nr_segs++;
+                       if (unroll <= n) {
+                               i->bvec = bvec;
+                               i->iov_offset = n - unroll;
+                               return;
+                       }
+                       unroll -= n;
+               }
+       } else { /* same logics for iovec and kvec */
+               const struct iovec *iov = i->iov;
+               while (1) {
+                       size_t n = (--iov)->iov_len;
+                       i->nr_segs++;
+                       if (unroll <= n) {
+                               i->iov = iov;
+                               i->iov_offset = n - unroll;
+                               return;
+                       }
+                       unroll -= n;
+               }
+       }
+}
+EXPORT_SYMBOL(iov_iter_revert);
+
 /*
  * Return the count of just the current iov_iter segment.
  */
@@ -839,6 +901,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
        i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
        i->iov_offset = 0;
        i->count = count;
+       i->start_idx = i->idx;
 }
 EXPORT_SYMBOL(iov_iter_pipe);
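iov_iter_revert() is the primitive the net/core/datagram.c hunks further down rely on: it walks an iterator backwards by unroll bytes across the iovec, kvec, bvec, and pipe flavours, with pipes additionally remembering start_idx so the rewind cannot back out of the region this iterator produced. A self-contained analogue of the consume/revert contract, using a flat cursor where the real code must also walk segment boundaries:

    #include <stdio.h>
    #include <string.h>

    struct cursor { char *base; size_t off, len; };

    static size_t cur_copy(struct cursor *c, const char *src, size_t n)
    {
            size_t avail = c->len - c->off;

            if (n > avail)
                    n = avail;              /* short copy, like copy_to_iter() */
            memcpy(c->base + c->off, src, n);
            c->off += n;
            return n;
    }

    static void cur_revert(struct cursor *c, size_t unroll)
    {
            c->off -= unroll;               /* what iov_iter_revert() offers */
    }

    int main(void)
    {
            char dst[8];
            struct cursor c = { dst, 0, sizeof(dst) };
            size_t done = cur_copy(&c, "0123456789", 10);   /* short: 8 */

            if (done != 10) {
                    cur_revert(&c, done);   /* caller sees no partial data */
                    printf("reverted %zu, off back to %zu\n", done, c.off);
            }
            return 0;
    }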
 
index 1ebc93e179f3eab40cf469fd67a361ea43a11368..f3c4f9d22821f889104340332eee93c5e124df4d 100644 (file)
@@ -240,18 +240,18 @@ static ssize_t defrag_store(struct kobject *kobj,
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
                set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
-       } else if (!memcmp("defer", buf,
-                   min(sizeof("defer")-1, count))) {
-               clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
-               clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
-               clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
-               set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
        } else if (!memcmp("defer+madvise", buf,
                    min(sizeof("defer+madvise")-1, count))) {
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
                set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+       } else if (!memcmp("defer", buf,
+                   min(sizeof("defer")-1, count))) {
+               clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
+               clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
+               clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
+               set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
        } else if (!memcmp("madvise", buf,
                           min(sizeof("madvise")-1, count))) {
                clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
@@ -1568,8 +1568,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                deactivate_page(page);
 
        if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) {
-               orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
-                       tlb->fullmm);
+               pmdp_invalidate(vma, addr, pmd);
                orig_pmd = pmd_mkold(orig_pmd);
                orig_pmd = pmd_mkclean(orig_pmd);
 
@@ -1724,37 +1723,69 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
        struct mm_struct *mm = vma->vm_mm;
        spinlock_t *ptl;
-       int ret = 0;
+       pmd_t entry;
+       bool preserve_write;
+       int ret;
 
        ptl = __pmd_trans_huge_lock(pmd, vma);
-       if (ptl) {
-               pmd_t entry;
-               bool preserve_write = prot_numa && pmd_write(*pmd);
-               ret = 1;
+       if (!ptl)
+               return 0;
 
-               /*
-                * Avoid trapping faults against the zero page. The read-only
-                * data is likely to be read-cached on the local CPU and
-                * local/remote hits to the zero page are not interesting.
-                */
-               if (prot_numa && is_huge_zero_pmd(*pmd)) {
-                       spin_unlock(ptl);
-                       return ret;
-               }
+       preserve_write = prot_numa && pmd_write(*pmd);
+       ret = 1;
 
-               if (!prot_numa || !pmd_protnone(*pmd)) {
-                       entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
-                       entry = pmd_modify(entry, newprot);
-                       if (preserve_write)
-                               entry = pmd_mk_savedwrite(entry);
-                       ret = HPAGE_PMD_NR;
-                       set_pmd_at(mm, addr, pmd, entry);
-                       BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
-                                       pmd_write(entry));
-               }
-               spin_unlock(ptl);
-       }
+       /*
+        * Avoid trapping faults against the zero page. The read-only
+        * data is likely to be read-cached on the local CPU and
+        * local/remote hits to the zero page are not interesting.
+        */
+       if (prot_numa && is_huge_zero_pmd(*pmd))
+               goto unlock;
+
+       if (prot_numa && pmd_protnone(*pmd))
+               goto unlock;
+
+       /*
+        * In case prot_numa, we are under down_read(mmap_sem). It's critical
+        * to not clear pmd intermittently to avoid race with MADV_DONTNEED
+        * which is also under down_read(mmap_sem):
+        *
+        *      CPU0:                           CPU1:
+        *                              change_huge_pmd(prot_numa=1)
+        *                               pmdp_huge_get_and_clear_notify()
+        * madvise_dontneed()
+        *  zap_pmd_range()
+        *   pmd_trans_huge(*pmd) == 0 (without ptl)
+        *   // skip the pmd
+        *                               set_pmd_at();
+        *                               // pmd is re-established
+        *
+        * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
+        * which may break userspace.
+        *
+        * pmdp_invalidate() is required to make sure we don't miss
+        * dirty/young flags set by hardware.
+        */
+       entry = *pmd;
+       pmdp_invalidate(vma, addr, pmd);
 
+       /*
+        * Recover dirty/young flags.  It relies on pmdp_invalidate to not
+        * corrupt them.
+        */
+       if (pmd_dirty(*pmd))
+               entry = pmd_mkdirty(entry);
+       if (pmd_young(*pmd))
+               entry = pmd_mkyoung(entry);
+
+       entry = pmd_modify(entry, newprot);
+       if (preserve_write)
+               entry = pmd_mk_savedwrite(entry);
+       ret = HPAGE_PMD_NR;
+       set_pmd_at(mm, addr, pmd, entry);
+       BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
+unlock:
+       spin_unlock(ptl);
        return ret;
 }
 
index ccfc2a2969f4402bdbfb27e0b48df151f4da68b7..266efaeaa370a46debcc5b6b614a72e33833ac4d 100644 (file)
@@ -481,6 +481,13 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 enum ttu_flags;
 struct tlbflush_unmap_batch;
 
+
+/*
+ * only for MM internal work items which do not depend on
+ * any allocations or locks which might depend on allocations
+ */
+extern struct workqueue_struct *mm_percpu_wq;
+
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 void try_to_unmap_flush(void);
 void try_to_unmap_flush_dirty(void);
index 75b2745bac4145933a5b969e7b8b2a36fc73d45d..37d0b334bfe9f09222a9626a99b91f4bfca9d784 100644 (file)
@@ -1529,7 +1529,6 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
                       compat_ulong_t, maxnode)
 {
-       long err = 0;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        DECLARE_BITMAP(bm, MAX_NUMNODES);
@@ -1538,14 +1537,13 @@ COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
        if (nmask) {
-               err = compat_get_bitmap(bm, nmask, nr_bits);
+               if (compat_get_bitmap(bm, nmask, nr_bits))
+                       return -EFAULT;
                nm = compat_alloc_user_space(alloc_size);
-               err |= copy_to_user(nm, bm, alloc_size);
+               if (copy_to_user(nm, bm, alloc_size))
+                       return -EFAULT;
        }
 
-       if (err)
-               return -EFAULT;
-
        return sys_set_mempolicy(mode, nm, nr_bits+1);
 }
 
@@ -1553,7 +1551,6 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
                       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
                       compat_ulong_t, maxnode, compat_ulong_t, flags)
 {
-       long err = 0;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        nodemask_t bm;
@@ -1562,14 +1559,13 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
        if (nmask) {
-               err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
+               if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
+                       return -EFAULT;
                nm = compat_alloc_user_space(alloc_size);
-               err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
+               if (copy_to_user(nm, nodes_addr(bm), alloc_size))
+                       return -EFAULT;
        }
 
-       if (err)
-               return -EFAULT;
-
        return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
 }
 
index 6cbde310abed8df22f9cd6ed80fcc252f4c80f43..f3d603cef2c0c0e5aef09540dd2f8d50da5a808c 100644 (file)
@@ -2373,6 +2373,13 @@ void drain_all_pages(struct zone *zone)
         */
        static cpumask_t cpus_with_pcps;
 
+       /*
+        * Make sure nobody triggers this path before mm_percpu_wq is fully
+        * initialized.
+        */
+       if (WARN_ON_ONCE(!mm_percpu_wq))
+               return;
+
        /* Workqueues cannot recurse */
        if (current->flags & PF_WQ_WORKER)
                return;
@@ -2422,7 +2429,7 @@ void drain_all_pages(struct zone *zone)
        for_each_cpu(cpu, &cpus_with_pcps) {
                struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
                INIT_WORK(work, drain_local_pages_wq);
-               schedule_work_on(cpu, work);
+               queue_work_on(cpu, mm_percpu_wq, work);
        }
        for_each_cpu(cpu, &cpus_with_pcps)
                flush_work(per_cpu_ptr(&pcpu_drain, cpu));
@@ -4519,13 +4526,13 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
                        K(node_page_state(pgdat, NR_FILE_MAPPED)),
                        K(node_page_state(pgdat, NR_FILE_DIRTY)),
                        K(node_page_state(pgdat, NR_WRITEBACK)),
+                       K(node_page_state(pgdat, NR_SHMEM)),
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
                        K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
                        K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
                                        * HPAGE_PMD_NR),
                        K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
 #endif
-                       K(node_page_state(pgdat, NR_SHMEM)),
                        K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
                        K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
                        node_page_state(pgdat, NR_PAGES_SCANNED),
index c4c9def8ffea47b4838fc3095221ee90e0c0fae3..de9c40d7304aa0e714bdd32abe79517ec3d73038 100644 (file)
@@ -111,12 +111,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);
 
-       /* Only for THP, seek to next pte entry makes sense */
-       if (pvmw->pte) {
-               if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
-                       return not_found(pvmw);
+       if (pvmw->pte)
                goto next_pte;
-       }
 
        if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
@@ -165,9 +161,14 @@ restart:
        while (1) {
                if (check_pte(pvmw))
                        return true;
-next_pte:      do {
+next_pte:
+               /* Seek to next pte only makes sense for THP */
+               if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
+                       return not_found(pvmw);
+               do {
                        pvmw->address += PAGE_SIZE;
-                       if (pvmw->address >=
+                       if (pvmw->address >= pvmw->vma->vm_end ||
+                           pvmw->address >=
                                        __vma_address(pvmw->page, pvmw->vma) +
                                        hpage_nr_pages(pvmw->page) * PAGE_SIZE)
                                return not_found(pvmw);
index c4910f14f9579ef1d8b165355f9294715968bf2d..5dabf444d724db98595567b0f7daed7d53fc877e 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -670,30 +670,19 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
-/*
- * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
- * workqueue, aiding in getting memory freed.
- */
-static struct workqueue_struct *lru_add_drain_wq;
-
-static int __init lru_init(void)
-{
-       lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0);
-
-       if (WARN(!lru_add_drain_wq,
-               "Failed to create workqueue lru_add_drain_wq"))
-               return -ENOMEM;
-
-       return 0;
-}
-early_initcall(lru_init);
-
 void lru_add_drain_all(void)
 {
        static DEFINE_MUTEX(lock);
        static struct cpumask has_work;
        int cpu;
 
+       /*
+        * Make sure nobody triggers this path before mm_percpu_wq is fully
+        * initialized.
+        */
+       if (WARN_ON(!mm_percpu_wq))
+               return;
+
        mutex_lock(&lock);
        get_online_cpus();
        cpumask_clear(&has_work);
@@ -707,7 +696,7 @@ void lru_add_drain_all(void)
                    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
                    need_activate_page_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
-                       queue_work_on(cpu, lru_add_drain_wq, work);
+                       queue_work_on(cpu, mm_percpu_wq, work);
                        cpumask_set_cpu(cpu, &has_work);
                }
        }
index 310ac0b8f9746c53eff9306be52585fb9094fc00..ac6318a064d35e6dcc5385d1dc8062ff6e46554c 100644 (file)
@@ -201,6 +201,8 @@ void swap_cgroup_swapoff(int type)
                        struct page *page = map[i];
                        if (page)
                                __free_page(page);
+                       if (!(i % SWAP_CLUSTER_MAX))
+                               cond_resched();
                }
                vfree(map);
        }
index 89f95396ec46be64055f1a658c9c0f7bdad90d5c..809025ed97ea0eee97573a32ba2764c63ee2dffd 100644 (file)
@@ -1552,7 +1552,6 @@ static const struct file_operations proc_vmstat_file_operations = {
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_SMP
-static struct workqueue_struct *vmstat_wq;
 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
 int sysctl_stat_interval __read_mostly = HZ;
 
@@ -1623,7 +1622,7 @@ static void vmstat_update(struct work_struct *w)
                 * to occur in the future. Keep on running the
                 * update worker thread.
                 */
-               queue_delayed_work_on(smp_processor_id(), vmstat_wq,
+               queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
                                this_cpu_ptr(&vmstat_work),
                                round_jiffies_relative(sysctl_stat_interval));
        }
@@ -1702,7 +1701,7 @@ static void vmstat_shepherd(struct work_struct *w)
                struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
 
                if (!delayed_work_pending(dw) && need_update(cpu))
-                       queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
+                       queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
        }
        put_online_cpus();
 
@@ -1718,7 +1717,6 @@ static void __init start_shepherd_timer(void)
                INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
                        vmstat_update);
 
-       vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
        schedule_delayed_work(&shepherd,
                round_jiffies_relative(sysctl_stat_interval));
 }
@@ -1764,11 +1762,16 @@ static int vmstat_cpu_dead(unsigned int cpu)
 
 #endif
 
+struct workqueue_struct *mm_percpu_wq;
+
 void __init init_mm_internals(void)
 {
-#ifdef CONFIG_SMP
-       int ret;
+       int ret __maybe_unused;
 
+       mm_percpu_wq = alloc_workqueue("mm_percpu_wq",
+                                      WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
+
+#ifdef CONFIG_SMP
        ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
                                        NULL, vmstat_cpu_dead);
        if (ret < 0)
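The mm/internal.h, page_alloc.c, swap.c, and vmstat.c hunks are one consolidation: the private lru-add-drain and vmstat workqueues, and drain_all_pages()'s use of the system workqueue, are replaced by a single WQ_MEM_RECLAIM mm_percpu_wq allocated in init_mm_internals(), with WARN_ON guards refusing work before it exists. The guard idiom in miniature, a plain pointer standing in for the workqueue:

    #include <stdio.h>

    struct wq_sketch { const char *name; };

    static struct wq_sketch *mm_percpu_wq_sketch;   /* NULL until init */

    static void drain_sketch(void)
    {
            if (!mm_percpu_wq_sketch) {     /* WARN_ON_ONCE analogue */
                    fprintf(stderr, "drain before init_mm_internals()\n");
                    return;
            }
            printf("queued on %s\n", mm_percpu_wq_sketch->name);
    }

    int main(void)
    {
            static struct wq_sketch wq = { "mm_percpu_wq" };

            drain_sketch();                 /* too early: warns, no crash */
            mm_percpu_wq_sketch = &wq;      /* init_mm_internals() analogue */
            drain_sketch();
            return 0;
    }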
index f9492bccfd794a1983eabbc4bff32df35b31cea8..54f63c4a809ae123248200ee84642629c6db8ffc 100644 (file)
@@ -185,6 +185,12 @@ static inline void z3fold_page_lock(struct z3fold_header *zhdr)
        spin_lock(&zhdr->page_lock);
 }
 
+/* Try to lock a z3fold page */
+static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
+{
+       return spin_trylock(&zhdr->page_lock);
+}
+
 /* Unlock a z3fold page */
 static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
 {
@@ -385,7 +391,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        spin_lock(&pool->lock);
                        zhdr = list_first_entry_or_null(&pool->unbuddied[i],
                                                struct z3fold_header, buddy);
-                       if (!zhdr) {
+                       if (!zhdr || !z3fold_page_trylock(zhdr)) {
                                spin_unlock(&pool->lock);
                                continue;
                        }
@@ -394,7 +400,6 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
                        spin_unlock(&pool->lock);
 
                        page = virt_to_page(zhdr);
-                       z3fold_page_lock(zhdr);
                        if (zhdr->first_chunks == 0) {
                                if (zhdr->middle_chunks != 0 &&
                                    chunks >= zhdr->start_middle)
index b7ee9c34dbd678fc984db3295a70c117ee2fc2f2..d41edd28298b68ff335e6324df7e4e793a481d93 100644 (file)
@@ -276,7 +276,7 @@ struct zs_pool {
 struct zspage {
        struct {
                unsigned int fullness:FULLNESS_BITS;
-               unsigned int class:CLASS_BITS;
+               unsigned int class:CLASS_BITS + 1;
                unsigned int isolated:ISOLATED_BITS;
                unsigned int magic:MAGIC_VAL_BITS;
        };
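The zsmalloc one-liner widens the class bitfield: the +1 implies the largest size-class index needed one more bit than CLASS_BITS provided, so high class numbers were truncated on store. The failure mode is ordinary C bitfield truncation; widths below are illustrative, not zsmalloc's real constants:

    #include <stdio.h>

    struct zspage_sketch {
            unsigned int class : 8;         /* one bit too narrow for 257 */
    };

    int main(void)
    {
            struct zspage_sketch z;

            z.class = 257;                  /* 0x101: top bit dropped */
            printf("stored class = %u\n", z.class);   /* prints 1 */
            return 0;
    }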
index ea71513fca21a0aea0dd569b482717a1b1dbe673..90f49a194249ff5fecb3d81a05837ea8b75cc586 100644 (file)
@@ -119,6 +119,15 @@ static int br_dev_init(struct net_device *dev)
        return err;
 }
 
+static void br_dev_uninit(struct net_device *dev)
+{
+       struct net_bridge *br = netdev_priv(dev);
+
+       br_multicast_uninit_stats(br);
+       br_vlan_flush(br);
+       free_percpu(br->stats);
+}
+
 static int br_dev_open(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
@@ -332,6 +341,7 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_open                = br_dev_open,
        .ndo_stop                = br_dev_stop,
        .ndo_init                = br_dev_init,
+       .ndo_uninit              = br_dev_uninit,
        .ndo_start_xmit          = br_dev_xmit,
        .ndo_get_stats64         = br_get_stats64,
        .ndo_set_mac_address     = br_set_mac_address,
@@ -356,14 +366,6 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_features_check      = passthru_features_check,
 };
 
-static void br_dev_free(struct net_device *dev)
-{
-       struct net_bridge *br = netdev_priv(dev);
-
-       free_percpu(br->stats);
-       free_netdev(dev);
-}
-
 static struct device_type br_type = {
        .name   = "bridge",
 };
@@ -376,7 +378,7 @@ void br_dev_setup(struct net_device *dev)
        ether_setup(dev);
 
        dev->netdev_ops = &br_netdev_ops;
-       dev->destructor = br_dev_free;
+       dev->destructor = free_netdev;
        dev->ethtool_ops = &br_ethtool_ops;
        SET_NETDEV_DEVTYPE(dev, &br_type);
        dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
index 8ac1770aa222f21f89027d303a218c49be9dc650..56a2a72e7738c9869e27e77ca13140aed4bd530a 100644 (file)
@@ -311,7 +311,6 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
 
        br_fdb_delete_by_port(br, NULL, 0, 1);
 
-       br_vlan_flush(br);
        br_multicast_dev_del(br);
        cancel_delayed_work_sync(&br->gc_work);
 
index b760f2620abf320307a65c3f5baf86ff91221545..faa7261a992fa6df54afd4656b0e810102d59c30 100644 (file)
@@ -2031,8 +2031,6 @@ void br_multicast_dev_del(struct net_bridge *br)
 
 out:
        spin_unlock_bh(&br->multicast_lock);
-
-       free_percpu(br->mcast_stats);
 }
 
 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
@@ -2531,6 +2529,11 @@ int br_multicast_init_stats(struct net_bridge *br)
        return 0;
 }
 
+void br_multicast_uninit_stats(struct net_bridge *br)
+{
+       free_percpu(br->mcast_stats);
+}
+
 static void mcast_stats_add_dir(u64 *dst, u64 *src)
 {
        dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
index a8f6acd23e309dcf51e6825076d9b3ba00996a9c..225ef7d5370166baff69080996cf64ff68b055f6 100644 (file)
@@ -1165,11 +1165,14 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
                spin_unlock_bh(&br->lock);
        }
 
-       err = br_changelink(dev, tb, data);
+       err = register_netdevice(dev);
        if (err)
                return err;
 
-       return register_netdevice(dev);
+       err = br_changelink(dev, tb, data);
+       if (err)
+               unregister_netdevice(dev);
+       return err;
 }
 
 static size_t br_get_size(const struct net_device *brdev)
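The bridge hunks in this series move teardown into ndo_uninit: br_dev_uninit() now frees per-cpu stats, vlans, and multicast stats, the destructor shrinks to plain free_netdev, and br_dev_newlink() registers the device before applying changelink attributes so a failing attribute is rolled back with unregister_netdevice(), which runs ->ndo_uninit exactly once on every path. The register-then-configure shape, with illustrative stand-ins:

    #include <stdio.h>

    struct netdev_sketch { int registered; };

    static int register_sketch(struct netdev_sketch *d)
    {
            d->registered = 1;
            return 0;
    }

    static void unregister_sketch(struct netdev_sketch *d)
    {
            d->registered = 0;      /* runs the uninit/teardown path */
    }

    static int apply_attrs_sketch(struct netdev_sketch *d)
    {
            (void)d;
            return -22;             /* pretend changelink failed */
    }

    static int newlink_sketch(struct netdev_sketch *d)
    {
            int err = register_sketch(d);

            if (err)
                    return err;
            err = apply_attrs_sketch(d);
            if (err)
                    unregister_sketch(d);   /* rollback, uninit once */
            return err;
    }

    int main(void)
    {
            struct netdev_sketch d = { 0 };

            printf("newlink: %d, registered: %d\n",
                   newlink_sketch(&d), d.registered);
            return 0;
    }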
index 61368186edea53841b1f00b37ddaa0d26461aee3..0d177280aa849bf1bc3ba3de79d3a3f3c748d5eb 100644 (file)
@@ -620,6 +620,7 @@ void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
                        const struct sk_buff *skb, u8 type, u8 dir);
 int br_multicast_init_stats(struct net_bridge *br);
+void br_multicast_uninit_stats(struct net_bridge *br);
 void br_multicast_get_stats(const struct net_bridge *br,
                            const struct net_bridge_port *p,
                            struct br_mcast_stats *dest);
@@ -760,6 +761,10 @@ static inline int br_multicast_init_stats(struct net_bridge *br)
        return 0;
 }
 
+static inline void br_multicast_uninit_stats(struct net_bridge *br)
+{
+}
+
 static inline int br_multicast_igmp_type(const struct sk_buff *skb)
 {
        return 0;
index ea633342ab0d046cbc49e55b679440ef9e015c2d..f4947e737f34a0d5dafb2e8232260f46a43fbc27 100644 (file)
@@ -398,7 +398,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
                           struct iov_iter *to, int len)
 {
        int start = skb_headlen(skb);
-       int i, copy = start - offset;
+       int i, copy = start - offset, start_off = offset, n;
        struct sk_buff *frag_iter;
 
        trace_skb_copy_datagram_iovec(skb, len);
@@ -407,11 +407,12 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
        if (copy > 0) {
                if (copy > len)
                        copy = len;
-               if (copy_to_iter(skb->data + offset, copy, to) != copy)
+               n = copy_to_iter(skb->data + offset, copy, to);
+               offset += n;
+               if (n != copy)
                        goto short_copy;
                if ((len -= copy) == 0)
                        return 0;
-               offset += copy;
        }
 
        /* Copy paged appendix. Hmm... why does this look so complicated? */
@@ -425,13 +426,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
-                       if (copy_page_to_iter(skb_frag_page(frag),
+                       n = copy_page_to_iter(skb_frag_page(frag),
                                              frag->page_offset + offset -
-                                             start, copy, to) != copy)
+                                             start, copy, to);
+                       offset += n;
+                       if (n != copy)
                                goto short_copy;
                        if (!(len -= copy))
                                return 0;
-                       offset += copy;
                }
                start = end;
        }
@@ -463,6 +465,7 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
         */
 
 fault:
+       iov_iter_revert(to, offset - start_off);
        return -EFAULT;
 
 short_copy:
@@ -613,7 +616,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                      __wsum *csump)
 {
        int start = skb_headlen(skb);
-       int i, copy = start - offset;
+       int i, copy = start - offset, start_off = offset;
        struct sk_buff *frag_iter;
        int pos = 0;
        int n;
@@ -623,11 +626,11 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                if (copy > len)
                        copy = len;
                n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
+               offset += n;
                if (n != copy)
                        goto fault;
                if ((len -= copy) == 0)
                        return 0;
-               offset += copy;
                pos = copy;
        }
 
@@ -649,12 +652,12 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                                                  offset - start, copy,
                                                  &csum2, to);
                        kunmap(page);
+                       offset += n;
                        if (n != copy)
                                goto fault;
                        *csump = csum_block_add(*csump, csum2, pos);
                        if (!(len -= copy))
                                return 0;
-                       offset += copy;
                        pos += copy;
                }
                start = end;
@@ -687,6 +690,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
                return 0;
 
 fault:
+       iov_iter_revert(to, offset - start_off);
        return -EFAULT;
 }
 
@@ -771,6 +775,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
        }
        return 0;
 csum_error:
+       iov_iter_revert(&msg->msg_iter, chunk);
        return -EINVAL;
 fault:
        return -EFAULT;
index 7869ae3837ca741e344b1731dc50d8408d8bcb6c..533a6d6f60920a01e8a9cef8fdb408950b914b05 100644 (file)
@@ -6757,7 +6757,6 @@ int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags)
 
        return err;
 }
-EXPORT_SYMBOL(dev_change_xdp_fd);
 
 /**
  *     dev_new_index   -       allocate an ifindex
index c35aae13c8d22680cb07222cbd9f1ee976f0bd64..d98d4998213da6103665d62d5a85613631236f19 100644 (file)
@@ -390,7 +390,7 @@ mpls:
                        unsigned char ar_tip[4];
                } *arp_eth, _arp_eth;
                const struct arphdr *arp;
-               struct arphdr *_arp;
+               struct arphdr _arp;
 
                arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
                                           hlen, &_arp);
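The one-character flow_dissector fix is the classic header-pointer pitfall: declared as a pointer, _arp made sizeof(_arp) measure the pointer and &_arp hand over the pointer variable itself as the scratch buffer, rather than stack space sized for a struct arphdr. A reduced illustration, with hdr_pointer() mimicking the copy-into-caller-storage behaviour of __skb_header_pointer():

    #include <stdio.h>
    #include <string.h>

    struct hdr { unsigned char op[4]; };

    static void *hdr_pointer(const void *pkt, size_t len, void *buf)
    {
            memcpy(buf, pkt, len);  /* caller supplies the storage */
            return buf;
    }

    int main(void)
    {
            const unsigned char pkt[16] = { 1, 2, 3, 4 };
            struct hdr _h;          /* right: real scratch space */
            /* struct hdr *_h;         wrong: sizeof(_h) is pointer-sized
             * and &_h points at the pointer variable, not header room */
            struct hdr *h = hdr_pointer(pkt, sizeof(_h), &_h);

            printf("op[0] = %d\n", h->op[0]);
            return 0;
    }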
index e7c12caa20c88acc9a5dd86f07d11644fb58341d..4526cbd7e28a1fcdecfc06a41985fd4d19634457 100644 (file)
@@ -860,7 +860,8 @@ static void neigh_probe(struct neighbour *neigh)
        if (skb)
                skb = skb_clone(skb, GFP_ATOMIC);
        write_unlock(&neigh->lock);
-       neigh->ops->solicit(neigh, skb);
+       if (neigh->ops->solicit)
+               neigh->ops->solicit(neigh, skb);
        atomic_inc(&neigh->probes);
        kfree_skb(skb);
 }
index 758f140b6bedc51669fed973b39ee317c2bf1570..d28da7d363f170f35d88623e2b864f04a67c3de5 100644 (file)
 #include <net/tcp.h>
 
 static siphash_key_t net_secret __read_mostly;
+static siphash_key_t ts_secret __read_mostly;
 
 static __always_inline void net_secret_init(void)
 {
+       net_get_random_once(&ts_secret, sizeof(ts_secret));
        net_get_random_once(&net_secret, sizeof(net_secret));
 }
 #endif
@@ -45,6 +47,23 @@ static u32 seq_scale(u32 seq)
 #endif
 
 #if IS_ENABLED(CONFIG_IPV6)
+static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
+{
+       const struct {
+               struct in6_addr saddr;
+               struct in6_addr daddr;
+       } __aligned(SIPHASH_ALIGNMENT) combined = {
+               .saddr = *(struct in6_addr *)saddr,
+               .daddr = *(struct in6_addr *)daddr,
+       };
+
+       if (sysctl_tcp_timestamps != 1)
+               return 0;
+
+       return siphash(&combined, offsetofend(typeof(combined), daddr),
+                      &ts_secret);
+}
+
 u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
                                 __be16 sport, __be16 dport, u32 *tsoff)
 {
@@ -63,7 +82,7 @@ u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
        net_secret_init();
        hash = siphash(&combined, offsetofend(typeof(combined), dport),
                       &net_secret);
-       *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
+       *tsoff = secure_tcpv6_ts_off(saddr, daddr);
        return seq_scale(hash);
 }
 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
@@ -88,6 +107,14 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
 #endif
 
 #ifdef CONFIG_INET
+static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
+{
+       if (sysctl_tcp_timestamps != 1)
+               return 0;
+
+       return siphash_2u32((__force u32)saddr, (__force u32)daddr,
+                           &ts_secret);
+}
 
 /* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
  * but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
@@ -103,7 +130,7 @@ u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
        hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
                            (__force u32)sport << 16 | (__force u32)dport,
                            &net_secret);
-       *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
+       *tsoff = secure_tcp_ts_off(saddr, daddr);
        return seq_scale(hash);
 }
 
index 4ead336e14ea0b8fc5fdcf8e679da54dfca0716b..7f9cc400eca08c01c9014476aa4daf0852505b20 100644 (file)
@@ -408,14 +408,16 @@ static struct ctl_table net_core_table[] = {
                .data           = &sysctl_net_busy_poll,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
        },
        {
                .procname       = "busy_read",
                .data           = &sysctl_net_busy_read,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
        },
 #endif
 #ifdef CONFIG_NET_SCHED
index fd9f34bbd7408a0e9b0342ec6512c69cc30edc39..dfb2ab2dd3c84d93b8d77df41d1160d26f162fc3 100644 (file)
@@ -306,7 +306,7 @@ static void __init ic_close_devs(void)
        while ((d = next)) {
                next = d->next;
                dev = d->dev;
-               if ((!ic_dev || dev != ic_dev->dev) && !netdev_uses_dsa(dev)) {
+               if (d != ic_dev && !netdev_uses_dsa(dev)) {
                        pr_debug("IP-Config: Downing %s\n", dev->name);
                        dev_change_flags(dev, d->flags);
                }
index 52f26459efc345a8a0c00d356306fb5fd398547e..9b8841316e7b94e375cc52d0dfd7f9fe89205195 100644 (file)
@@ -461,7 +461,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
 
        clusterip_config_put(cipinfo->config);
 
-       nf_ct_netns_get(par->net, par->family);
+       nf_ct_netns_put(par->net, par->family);
 }
 
 #ifdef CONFIG_COMPAT
index c9b52c361da2e6acc746c2de86d8c7f3af0a9b39..53e49f5011d3ce482c5180edc47da7928db0f224 100644 (file)
@@ -1260,16 +1260,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = {
        .timeout        = 180,
 };
 
-static struct nf_conntrack_helper snmp_helper __read_mostly = {
-       .me                     = THIS_MODULE,
-       .help                   = help,
-       .expect_policy          = &snmp_exp_policy,
-       .name                   = "snmp",
-       .tuple.src.l3num        = AF_INET,
-       .tuple.src.u.udp.port   = cpu_to_be16(SNMP_PORT),
-       .tuple.dst.protonum     = IPPROTO_UDP,
-};
-
 static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
        .me                     = THIS_MODULE,
        .help                   = help,
@@ -1288,22 +1278,16 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
 
 static int __init nf_nat_snmp_basic_init(void)
 {
-       int ret = 0;
-
        BUG_ON(nf_nat_snmp_hook != NULL);
        RCU_INIT_POINTER(nf_nat_snmp_hook, help);
 
-       ret = nf_conntrack_helper_register(&snmp_trap_helper);
-       if (ret < 0) {
-               nf_conntrack_helper_unregister(&snmp_helper);
-               return ret;
-       }
-       return ret;
+       return nf_conntrack_helper_register(&snmp_trap_helper);
 }
 
 static void __exit nf_nat_snmp_basic_fini(void)
 {
        RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
+       synchronize_rcu();
        nf_conntrack_helper_unregister(&snmp_trap_helper);
 }
 
index 2af6244b83e27ae384e96cf071c10c5a89674804..ccfbce13a6333a65dab64e4847dd510dfafb1b43 100644 (file)
@@ -156,17 +156,18 @@ int ping_hash(struct sock *sk)
 void ping_unhash(struct sock *sk)
 {
        struct inet_sock *isk = inet_sk(sk);
+
        pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+       write_lock_bh(&ping_table.lock);
        if (sk_hashed(sk)) {
-               write_lock_bh(&ping_table.lock);
                hlist_nulls_del(&sk->sk_nulls_node);
                sk_nulls_node_init(&sk->sk_nulls_node);
                sock_put(sk);
                isk->inet_num = 0;
                isk->inet_sport = 0;
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-               write_unlock_bh(&ping_table.lock);
        }
+       write_unlock_bh(&ping_table.lock);
 }
 EXPORT_SYMBOL_GPL(ping_unhash);
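ping_unhash() tested sk_hashed() before taking ping_table.lock, so two racing unhash paths could both see the socket as hashed and both unlink it and drop a reference. Widening the lock to cover the test serializes the check with the unlink, so only one caller ever acts. In miniature:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static int hashed = 1, refs = 2;

    static void unhash(void)
    {
            pthread_mutex_lock(&table_lock);    /* now covers the check */
            if (hashed) {
                    hashed = 0;
                    refs--;                     /* sock_put(), exactly once */
            }
            pthread_mutex_unlock(&table_lock);
    }

    int main(void)
    {
            unhash();
            unhash();                           /* second call: no-op */
            printf("refs = %d\n", refs);        /* 1, not 0 */
            return 0;
    }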
 
index 8471dd116771462d149e1da2807e446b69b74bcc..acd69cfe2951ed89213403b3e9556da6ddd50ba8 100644 (file)
@@ -2620,7 +2620,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
        skb_reset_network_header(skb);
 
        /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
-       ip_hdr(skb)->protocol = IPPROTO_ICMP;
+       ip_hdr(skb)->protocol = IPPROTO_UDP;
        skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
 
        src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
index 1e319a525d51b0b603a5ccc5143381c752b9f2c7..40ba4249a58677671b68bf495f32f15b7c5f62d7 100644 (file)
@@ -2322,6 +2322,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        tcp_init_send_head(sk);
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);
+       tcp_saved_syn_free(tp);
 
        /* Clean up fastopen related fields */
        tcp_free_fastopen_req(tp);
index c43119726a62e494063fd940001b483215d0fe26..659d1baefb2bba36d96e412eb7ca5a02996fb6dd 100644 (file)
@@ -126,7 +126,8 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define REXMIT_LOST    1 /* retransmit packets marked lost */
 #define REXMIT_NEW     2 /* FRTO-style transmit of unsent/new packets */
 
-static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
+static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
+                            unsigned int len)
 {
        static bool __once __read_mostly;
 
@@ -137,8 +138,9 @@ static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb)
 
                rcu_read_lock();
                dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
-               pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
-                       dev ? dev->name : "Unknown driver");
+               if (!dev || len >= dev->mtu)
+                       pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
+                               dev ? dev->name : "Unknown driver");
                rcu_read_unlock();
        }
 }
@@ -161,8 +163,10 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
        if (len >= icsk->icsk_ack.rcv_mss) {
                icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
                                               tcp_sk(sk)->advmss);
-               if (unlikely(icsk->icsk_ack.rcv_mss != len))
-                       tcp_gro_dev_warn(sk, skb);
+               /* Account for possibly-removed options */
+               if (unlikely(len > icsk->icsk_ack.rcv_mss +
+                                  MAX_TCP_OPTION_SPACE))
+                       tcp_gro_dev_warn(sk, skb, len);
        } else {
                /* Otherwise, we make more careful check taking into account,
                 * that SACKs block is variable.
@@ -874,22 +878,11 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
                                  const int ts)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       if (metric > tp->reordering) {
-               int mib_idx;
+       int mib_idx;
 
+       if (metric > tp->reordering) {
                tp->reordering = min(sysctl_tcp_max_reordering, metric);
 
-               /* This exciting event is worth to be remembered. 8) */
-               if (ts)
-                       mib_idx = LINUX_MIB_TCPTSREORDER;
-               else if (tcp_is_reno(tp))
-                       mib_idx = LINUX_MIB_TCPRENOREORDER;
-               else if (tcp_is_fack(tp))
-                       mib_idx = LINUX_MIB_TCPFACKREORDER;
-               else
-                       mib_idx = LINUX_MIB_TCPSACKREORDER;
-
-               NET_INC_STATS(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
                pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
                         tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -902,6 +895,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
        }
 
        tp->rack.reord = 1;
+
+       /* This exciting event is worth to be remembered. 8) */
+       if (ts)
+               mib_idx = LINUX_MIB_TCPTSREORDER;
+       else if (tcp_is_reno(tp))
+               mib_idx = LINUX_MIB_TCPRENOREORDER;
+       else if (tcp_is_fack(tp))
+               mib_idx = LINUX_MIB_TCPFACKREORDER;
+       else
+               mib_idx = LINUX_MIB_TCPSACKREORDER;
+
+       NET_INC_STATS(sock_net(sk), mib_idx);
 }
 
 /* This must be called before lost_out is incremented */
@@ -1930,6 +1935,7 @@ void tcp_enter_loss(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct sk_buff *skb;
+       bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
        bool is_reneg;                  /* is receiver reneging on SACKs? */
        bool mark_lost;
 
@@ -1989,15 +1995,18 @@ void tcp_enter_loss(struct sock *sk)
        tp->high_seq = tp->snd_nxt;
        tcp_ecn_queue_cwr(tp);
 
-       /* F-RTO RFC5682 sec 3.1 step 1 mandates to disable F-RTO
-        * if a previous recovery is underway, otherwise it may incorrectly
-        * call a timeout spurious if some previously retransmitted packets
-        * are s/acked (sec 3.2). We do not apply that retriction since
-        * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS
-        * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO
-        * on PTMU discovery to avoid sending new data.
+       /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
+        * loss recovery is underway except recurring timeout(s) on
+        * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing.
+        *
+        * In theory F-RTO can be used repeatedly during loss recovery.
+        * In practice this interacts badly with broken middle-boxes that
+        * falsely raise the receive window, which results in repeated
+        * timeouts and stop-and-go behavior.
         */
-       tp->frto = sysctl_tcp_frto && !inet_csk(sk)->icsk_mtup.probe_size;
+       tp->frto = sysctl_tcp_frto &&
+                  (new_recovery || icsk->icsk_retransmits) &&
+                  !inet_csk(sk)->icsk_mtup.probe_size;
 }
 
 /* If ACK arrived pointing to a remembered SACK, it means that our
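The new condition arms F-RTO only when the timeout opens a fresh recovery or recurs on the same SND.UNA, and never while a path-MTU probe is in flight. A minimal sketch of the predicate, with hypothetical field names standing in for the tcp_sock/inet_connection_sock state:

    #include <stdbool.h>

    struct loss_state {
        bool sysctl_frto;    /* net.ipv4.tcp_frto enabled */
        bool new_recovery;   /* ca_state was below TCP_CA_Recovery */
        int  retransmits;    /* icsk_retransmits: recurring RTOs */
        int  mtu_probe_size; /* nonzero while PMTU probing */
    };

    static bool frto_enabled(const struct loss_state *s)
    {
        return s->sysctl_frto &&
               (s->new_recovery || s->retransmits) &&
               !s->mtu_probe_size;
    }

    int main(void)
    {
        struct loss_state s = { true, true, 0, 0 };
        return frto_enabled(&s) ? 0 : 1; /* 0: F-RTO armed */
    }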
index 22548b5f05cbe5a655e0c53df2d31c5cc2e8a702..c3c082ed38796f65948e7a1042e37813b71d5abf 100644 (file)
@@ -2999,6 +2999,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 {
        struct sk_buff *skb;
 
+       TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
+
        /* NOTE: No TCP options attached and we never retransmit this. */
        skb = alloc_skb(MAX_TCP_HEADER, priority);
        if (!skb) {
@@ -3014,8 +3016,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
        /* Send it off. */
        if (tcp_transmit_skb(sk, skb, 0, priority))
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
-
-       TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
 }
 
 /* Send a crossed SYN-ACK during socket establishment.
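Hoisting the TCP_MIB_OUTRSTS increment ahead of alloc_skb() means a reset that fails allocation is still counted as attempted. A userspace analog of the count-first pattern (malloc stands in for alloc_skb; not the kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long out_rsts; /* stand-in for the OutRsts counter */

    static int send_reset(size_t len)
    {
        out_rsts++;            /* attempt counted before allocation */
        void *skb = malloc(len);
        if (!skb)
            return -1;         /* failure remains visible in out_rsts */
        /* ... build and transmit the RST ... */
        free(skb);
        return 0;
    }

    int main(void)
    {
        send_reset(64);
        printf("OutRsts=%lu\n", out_rsts); /* 1, success or not */
        return 0;
    }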
index 4ecb38ae85042db7fa59e1aa6c74c9c3da0b1099..d8acbd9f477a2ac6b0f8eee1bf59f3ab43abff07 100644 (file)
@@ -12,7 +12,8 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
                /* Account for retransmits that are lost again */
                TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                tp->retrans_out -= tcp_skb_pcount(skb);
-               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
+               NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
+                             tcp_skb_pcount(skb));
        }
 }
 
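A single TSO/GSO skb can carry several MSS-sized segments, so NET_ADD_STATS with tcp_skb_pcount() charges LINUX_MIB_TCPLOSTRETRANSMIT once per lost segment instead of once per buffer. A sketch with a hypothetical pcount field:

    #include <stdio.h>

    struct fake_skb { unsigned int pcount; }; /* tcp_skb_pcount() stand-in */

    static unsigned long lost_retransmit;

    static void mark_lost_again(const struct fake_skb *skb)
    {
        lost_retransmit += skb->pcount; /* was: lost_retransmit++ */
    }

    int main(void)
    {
        struct fake_skb big = { .pcount = 4 }; /* four MSS in one skb */
        mark_lost_again(&big);
        printf("TCPLostRetransmit=%lu\n", lost_retransmit); /* 4 */
        return 0;
    }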
index 363172527e433e321cfa9fe8e96cfe32e4a78043..80ce478c4851318f6eef9320bbdb8548641f920c 100644 (file)
@@ -3626,14 +3626,19 @@ restart:
        INIT_LIST_HEAD(&del_list);
        list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
                struct rt6_info *rt = NULL;
+               bool keep;
 
                addrconf_del_dad_work(ifa);
 
+               keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
+                       !addr_is_local(&ifa->addr);
+               if (!keep)
+                       list_move(&ifa->if_list, &del_list);
+
                write_unlock_bh(&idev->lock);
                spin_lock_bh(&ifa->lock);
 
-               if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
-                   !addr_is_local(&ifa->addr)) {
+               if (keep) {
                        /* set state to skip the notifier below */
                        state = INET6_IFADDR_STATE_DEAD;
                        ifa->state = 0;
@@ -3645,8 +3650,6 @@ restart:
                } else {
                        state = ifa->state;
                        ifa->state = INET6_IFADDR_STATE_DEAD;
-
-                       list_move(&ifa->if_list, &del_list);
                }
 
                spin_unlock_bh(&ifa->lock);
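The keep decision and the list_move() now both happen before idev->lock is released, so no other walker of addr_list can observe an address that has been judged deletable but not yet unlinked. A simplified sketch of the decide-and-unlink-under-one-lock pattern (pthread rwlock standing in for the kernel lock; the entry is assumed to be the list head to keep it short):

    #include <pthread.h>
    #include <stdbool.h>

    struct entry { struct entry *next; bool permanent; };

    static pthread_rwlock_t idev_lock = PTHREAD_RWLOCK_INITIALIZER;

    static void filter_entry(struct entry **shared, struct entry **del_list,
                             struct entry *e, bool keep_addr)
    {
        pthread_rwlock_wrlock(&idev_lock);
        bool keep = keep_addr && e->permanent;
        if (!keep) {             /* unlink while still holding the lock */
            *shared = e->next;
            e->next = *del_list; /* onto the private del_list */
            *del_list = e;
        }
        pthread_rwlock_unlock(&idev_lock); /* only now drop the lock */
    }

    int main(void)
    {
        struct entry a = { 0, false };
        struct entry *shared = &a, *del = 0;

        filter_entry(&shared, &del, &a, true); /* not permanent: moved */
        return del == &a ? 0 : 1;
    }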
index 309062f3debe298c1cf7666f77505f8d353d76d8..31762f76cdb5f2a3ec322135068402be532218ed 100644 (file)
@@ -1687,7 +1687,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                struct kcm_attach info;
 
                if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-                       err = -EFAULT;
+                       return -EFAULT;
 
                err = kcm_attach_ioctl(sock, &info);
 
@@ -1697,7 +1697,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                struct kcm_unattach info;
 
                if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-                       err = -EFAULT;
+                       return -EFAULT;
 
                err = kcm_unattach_ioctl(sock, &info);
 
@@ -1708,7 +1708,7 @@ static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                struct socket *newsock = NULL;
 
                if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
-                       err = -EFAULT;
+                       return -EFAULT;
 
                err = kcm_clone(sock, &info, &newsock);
 
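Previously a failed copy_from_user() only set err and fell through, so the handler ran on a partially initialized struct. Returning -EFAULT immediately is the fix; a self-contained userspace analog (copy_in() is a hypothetical stand-in for copy_from_user()):

    #include <errno.h>
    #include <string.h>

    struct kcm_attach { int fd; int bpf_fd; };

    static int copy_in(void *dst, const void *src, size_t len)
    {
        if (!src)
            return -1; /* simulate a faulting user pointer */
        memcpy(dst, src, len);
        return 0;
    }

    static int kcm_attach_ioctl_demo(const void *user_arg)
    {
        struct kcm_attach info;

        if (copy_in(&info, user_arg, sizeof(info)))
            return -EFAULT; /* was: err = -EFAULT; then fell through */

        /* ... info is fully initialized from here on ... */
        return 0;
    }

    int main(void)
    {
        struct kcm_attach a = { 3, 4 };
        return kcm_attach_ioctl_demo(&a) == 0 &&
               kcm_attach_ioctl_demo(0) == -EFAULT ? 0 : 1;
    }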
index 8adab6335ced9f1018318094be20c132a70f8475..e37d9554da7b47df0571c41478e6e758b8a8e6f9 100644 (file)
@@ -278,7 +278,57 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
 }
 EXPORT_SYMBOL_GPL(l2tp_session_find);
 
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+/* Like l2tp_session_find() but takes a reference on the returned session.
+ * Optionally calls session->ref() too if do_ref is true.
+ */
+struct l2tp_session *l2tp_session_get(struct net *net,
+                                     struct l2tp_tunnel *tunnel,
+                                     u32 session_id, bool do_ref)
+{
+       struct hlist_head *session_list;
+       struct l2tp_session *session;
+
+       if (!tunnel) {
+               struct l2tp_net *pn = l2tp_pernet(net);
+
+               session_list = l2tp_session_id_hash_2(pn, session_id);
+
+               rcu_read_lock_bh();
+               hlist_for_each_entry_rcu(session, session_list, global_hlist) {
+                       if (session->session_id == session_id) {
+                               l2tp_session_inc_refcount(session);
+                               if (do_ref && session->ref)
+                                       session->ref(session);
+                               rcu_read_unlock_bh();
+
+                               return session;
+                       }
+               }
+               rcu_read_unlock_bh();
+
+               return NULL;
+       }
+
+       session_list = l2tp_session_id_hash(tunnel, session_id);
+       read_lock_bh(&tunnel->hlist_lock);
+       hlist_for_each_entry(session, session_list, hlist) {
+               if (session->session_id == session_id) {
+                       l2tp_session_inc_refcount(session);
+                       if (do_ref && session->ref)
+                               session->ref(session);
+                       read_unlock_bh(&tunnel->hlist_lock);
+
+                       return session;
+               }
+       }
+       read_unlock_bh(&tunnel->hlist_lock);
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_get);
+
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+                                         bool do_ref)
 {
        int hash;
        struct l2tp_session *session;
@@ -288,6 +338,9 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
        for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
                hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
                        if (++count > nth) {
+                               l2tp_session_inc_refcount(session);
+                               if (do_ref && session->ref)
+                                       session->ref(session);
                                read_unlock_bh(&tunnel->hlist_lock);
                                return session;
                        }
@@ -298,12 +351,13 @@ struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
 
        return NULL;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
 
 /* Lookup a session by interface name.
  * This is very inefficient but is only used by management interfaces.
  */
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+                                               bool do_ref)
 {
        struct l2tp_net *pn = l2tp_pernet(net);
        int hash;
@@ -313,7 +367,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
        for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
                hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
                        if (!strcmp(session->ifname, ifname)) {
+                               l2tp_session_inc_refcount(session);
+                               if (do_ref && session->ref)
+                                       session->ref(session);
                                rcu_read_unlock_bh();
+
                                return session;
                        }
                }
@@ -323,7 +381,49 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
 
        return NULL;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
+EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
+
+static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
+                                     struct l2tp_session *session)
+{
+       struct l2tp_session *session_walk;
+       struct hlist_head *g_head;
+       struct hlist_head *head;
+       struct l2tp_net *pn;
+
+       head = l2tp_session_id_hash(tunnel, session->session_id);
+
+       write_lock_bh(&tunnel->hlist_lock);
+       hlist_for_each_entry(session_walk, head, hlist)
+               if (session_walk->session_id == session->session_id)
+                       goto exist;
+
+       if (tunnel->version == L2TP_HDR_VER_3) {
+               pn = l2tp_pernet(tunnel->l2tp_net);
+               g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
+                                               session->session_id);
+
+               spin_lock_bh(&pn->l2tp_session_hlist_lock);
+               hlist_for_each_entry(session_walk, g_head, global_hlist)
+                       if (session_walk->session_id == session->session_id)
+                               goto exist_glob;
+
+               hlist_add_head_rcu(&session->global_hlist, g_head);
+               spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+       }
+
+       hlist_add_head(&session->hlist, head);
+       write_unlock_bh(&tunnel->hlist_lock);
+
+       return 0;
+
+exist_glob:
+       spin_unlock_bh(&pn->l2tp_session_hlist_lock);
+exist:
+       write_unlock_bh(&tunnel->hlist_lock);
+
+       return -EEXIST;
+}
 
 /* Lookup a tunnel by id
  */
@@ -633,6 +733,9 @@ discard:
  * a data (not control) frame before coming here. Fields up to the
  * session-id have already been parsed and ptr points to the data
  * after the session-id.
+ *
+ * session->ref() must have been called prior to l2tp_recv_common().
+ * session->deref() will be called automatically after skb is processed.
  */
 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
@@ -642,14 +745,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
        int offset;
        u32 ns, nr;
 
-       /* The ref count is increased since we now hold a pointer to
-        * the session. Take care to decrement the refcnt when exiting
-        * this function from now on...
-        */
-       l2tp_session_inc_refcount(session);
-       if (session->ref)
-               (*session->ref)(session);
-
        /* Parse and check optional cookie */
        if (session->peer_cookie_len > 0) {
                if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
@@ -802,8 +897,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
        /* Try to dequeue as many skbs from reorder_q as we can. */
        l2tp_recv_dequeue(session);
 
-       l2tp_session_dec_refcount(session);
-
        return;
 
 discard:
@@ -812,8 +905,6 @@ discard:
 
        if (session->deref)
                (*session->deref)(session);
-
-       l2tp_session_dec_refcount(session);
 }
 EXPORT_SYMBOL(l2tp_recv_common);
 
@@ -920,8 +1011,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
        }
 
        /* Find the session context */
-       session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
+       session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
        if (!session || !session->recv_skb) {
+               if (session) {
+                       if (session->deref)
+                               session->deref(session);
+                       l2tp_session_dec_refcount(session);
+               }
+
                /* Not found? Pass to userspace to deal with */
                l2tp_info(tunnel, L2TP_MSG_DATA,
                          "%s: no session found (%u/%u). Passing up.\n",
@@ -930,6 +1027,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
        }
 
        l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
+       l2tp_session_dec_refcount(session);
 
        return 0;
 
@@ -1738,6 +1836,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
 {
        struct l2tp_session *session;
+       int err;
 
        session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
        if (session != NULL) {
@@ -1793,6 +1892,13 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 
                l2tp_session_set_header_len(session, tunnel->version);
 
+               err = l2tp_session_add_to_tunnel(tunnel, session);
+               if (err) {
+                       kfree(session);
+
+                       return ERR_PTR(err);
+               }
+
                /* Bump the reference count. The session context is deleted
                 * only when this drops to zero.
                 */
@@ -1802,28 +1908,14 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
                /* Ensure tunnel socket isn't deleted */
                sock_hold(tunnel->sock);
 
-               /* Add session to the tunnel's hash list */
-               write_lock_bh(&tunnel->hlist_lock);
-               hlist_add_head(&session->hlist,
-                              l2tp_session_id_hash(tunnel, session_id));
-               write_unlock_bh(&tunnel->hlist_lock);
-
-               /* And to the global session list if L2TPv3 */
-               if (tunnel->version != L2TP_HDR_VER_2) {
-                       struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
-                       spin_lock_bh(&pn->l2tp_session_hlist_lock);
-                       hlist_add_head_rcu(&session->global_hlist,
-                                          l2tp_session_id_hash_2(pn, session_id));
-                       spin_unlock_bh(&pn->l2tp_session_hlist_lock);
-               }
-
                /* Ignore management session in session count value */
                if (session->session_id != 0)
                        atomic_inc(&l2tp_session_count);
+
+               return session;
        }
 
-       return session;
+       return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL_GPL(l2tp_session_create);
 
index aebf281d09eeb31c531eb624bd2ddd78cab8da9b..8ce7818c7a9d0578b79e70eff1916d3248e7604a 100644 (file)
@@ -230,11 +230,16 @@ out:
        return tunnel;
 }
 
+struct l2tp_session *l2tp_session_get(struct net *net,
+                                     struct l2tp_tunnel *tunnel,
+                                     u32 session_id, bool do_ref);
 struct l2tp_session *l2tp_session_find(struct net *net,
                                       struct l2tp_tunnel *tunnel,
                                       u32 session_id);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
-struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+                                         bool do_ref);
+struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
+                                               bool do_ref);
 struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
 struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
 
index 2d6760a2ae347b96d465e30192ab8a7957258d32..d100aed3d06fb63b8851a00c55350f1728b18599 100644 (file)
@@ -53,7 +53,7 @@ static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
 
 static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
 {
-       pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+       pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
        pd->session_idx++;
 
        if (pd->session == NULL) {
@@ -238,10 +238,14 @@ static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
        }
 
        /* Show the tunnel or session context */
-       if (pd->session == NULL)
+       if (!pd->session) {
                l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
-       else
+       } else {
                l2tp_dfs_seq_session_show(m, pd->session);
+               if (pd->session->deref)
+                       pd->session->deref(pd->session);
+               l2tp_session_dec_refcount(pd->session);
+       }
 
 out:
        return 0;
index 8bf18a5f66e0c465ef3640ae4168c875c4c9e1ed..6fd41d7afe1ef27592970de333c75928264dc275 100644 (file)
@@ -221,12 +221,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
                goto out;
        }
 
-       session = l2tp_session_find(net, tunnel, session_id);
-       if (session) {
-               rc = -EEXIST;
-               goto out;
-       }
-
        if (cfg->ifname) {
                dev = dev_get_by_name(net, cfg->ifname);
                if (dev) {
@@ -240,8 +234,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
 
        session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
                                      peer_session_id, cfg);
-       if (!session) {
-               rc = -ENOMEM;
+       if (IS_ERR(session)) {
+               rc = PTR_ERR(session);
                goto out;
        }
 
index d25038cfd64e1ae5d5819fe1e7049529f4b5a2e4..4d322c1b7233e5b546ff75a585a3603503e076bc 100644 (file)
@@ -143,19 +143,19 @@ static int l2tp_ip_recv(struct sk_buff *skb)
        }
 
        /* Ok, this is a data packet. Lookup the session. */
-       session = l2tp_session_find(net, NULL, session_id);
-       if (session == NULL)
+       session = l2tp_session_get(net, NULL, session_id, true);
+       if (!session)
                goto discard;
 
        tunnel = session->tunnel;
-       if (tunnel == NULL)
-               goto discard;
+       if (!tunnel)
+               goto discard_sess;
 
        /* Trace packet contents, if enabled */
        if (tunnel->debug & L2TP_MSG_DATA) {
                length = min(32u, skb->len);
                if (!pskb_may_pull(skb, length))
-                       goto discard;
+                       goto discard_sess;
 
                /* Point to L2TP header */
                optr = ptr = skb->data;
@@ -165,6 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
        }
 
        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
+       l2tp_session_dec_refcount(session);
 
        return 0;
 
@@ -178,9 +179,10 @@ pass_up:
 
        tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
        tunnel = l2tp_tunnel_find(net, tunnel_id);
-       if (tunnel != NULL)
+       if (tunnel) {
                sk = tunnel->sock;
-       else {
+               sock_hold(sk);
+       } else {
                struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
 
                read_lock_bh(&l2tp_ip_lock);
@@ -202,6 +204,12 @@ pass_up:
 
        return sk_receive_skb(sk, skb, 1);
 
+discard_sess:
+       if (session->deref)
+               session->deref(session);
+       l2tp_session_dec_refcount(session);
+       goto discard;
+
 discard_put:
        sock_put(sk);
 
index a4abcbc4c09ae65424a701a1200b7535fa3635ac..88b397c30d86af8d6a22daeb466cedac36aac57e 100644 (file)
@@ -156,19 +156,19 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
        }
 
        /* Ok, this is a data packet. Lookup the session. */
-       session = l2tp_session_find(net, NULL, session_id);
-       if (session == NULL)
+       session = l2tp_session_get(net, NULL, session_id, true);
+       if (!session)
                goto discard;
 
        tunnel = session->tunnel;
-       if (tunnel == NULL)
-               goto discard;
+       if (!tunnel)
+               goto discard_sess;
 
        /* Trace packet contents, if enabled */
        if (tunnel->debug & L2TP_MSG_DATA) {
                length = min(32u, skb->len);
                if (!pskb_may_pull(skb, length))
-                       goto discard;
+                       goto discard_sess;
 
                /* Point to L2TP header */
                optr = ptr = skb->data;
@@ -179,6 +179,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
 
        l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
                         tunnel->recv_payload_hook);
+       l2tp_session_dec_refcount(session);
+
        return 0;
 
 pass_up:
@@ -191,9 +193,10 @@ pass_up:
 
        tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
        tunnel = l2tp_tunnel_find(net, tunnel_id);
-       if (tunnel != NULL)
+       if (tunnel) {
                sk = tunnel->sock;
-       else {
+               sock_hold(sk);
+       } else {
                struct ipv6hdr *iph = ipv6_hdr(skb);
 
                read_lock_bh(&l2tp_ip6_lock);
@@ -215,6 +218,12 @@ pass_up:
 
        return sk_receive_skb(sk, skb, 1);
 
+discard_sess:
+       if (session->deref)
+               session->deref(session);
+       l2tp_session_dec_refcount(session);
+       goto discard;
+
 discard_put:
        sock_put(sk);
 
index 3620fba317863dc59c93c1089faf63451e831aa5..7e3e669baac42df9d0be2864b927ba4f390258e9 100644 (file)
@@ -48,7 +48,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
 /* Accessed under genl lock */
 static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
 
-static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
+static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
+                                               bool do_ref)
 {
        u32 tunnel_id;
        u32 session_id;
@@ -59,14 +60,15 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
 
        if (info->attrs[L2TP_ATTR_IFNAME]) {
                ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
-               session = l2tp_session_find_by_ifname(net, ifname);
+               session = l2tp_session_get_by_ifname(net, ifname, do_ref);
        } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
                   (info->attrs[L2TP_ATTR_CONN_ID])) {
                tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
                session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
                tunnel = l2tp_tunnel_find(net, tunnel_id);
                if (tunnel)
-                       session = l2tp_session_find(net, tunnel, session_id);
+                       session = l2tp_session_get(net, tunnel, session_id,
+                                                  do_ref);
        }
 
        return session;
@@ -642,10 +644,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
                        session_id, peer_session_id, &cfg);
 
        if (ret >= 0) {
-               session = l2tp_session_find(net, tunnel, session_id);
-               if (session)
+               session = l2tp_session_get(net, tunnel, session_id, false);
+               if (session) {
                        ret = l2tp_session_notify(&l2tp_nl_family, info, session,
                                                  L2TP_CMD_SESSION_CREATE);
+                       l2tp_session_dec_refcount(session);
+               }
        }
 
 out:
@@ -658,7 +662,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
        struct l2tp_session *session;
        u16 pw_type;
 
-       session = l2tp_nl_session_find(info);
+       session = l2tp_nl_session_get(info, true);
        if (session == NULL) {
                ret = -ENODEV;
                goto out;
@@ -672,6 +676,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
                if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
                        ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
 
+       if (session->deref)
+               session->deref(session);
+       l2tp_session_dec_refcount(session);
+
 out:
        return ret;
 }
@@ -681,7 +689,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
        int ret = 0;
        struct l2tp_session *session;
 
-       session = l2tp_nl_session_find(info);
+       session = l2tp_nl_session_get(info, false);
        if (session == NULL) {
                ret = -ENODEV;
                goto out;
@@ -716,6 +724,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
        ret = l2tp_session_notify(&l2tp_nl_family, info,
                                  session, L2TP_CMD_SESSION_MODIFY);
 
+       l2tp_session_dec_refcount(session);
+
 out:
        return ret;
 }
@@ -811,29 +821,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
        struct sk_buff *msg;
        int ret;
 
-       session = l2tp_nl_session_find(info);
+       session = l2tp_nl_session_get(info, false);
        if (session == NULL) {
                ret = -ENODEV;
-               goto out;
+               goto err;
        }
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
-               goto out;
+               goto err_ref;
        }
 
        ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
                                   0, session, L2TP_CMD_SESSION_GET);
        if (ret < 0)
-               goto err_out;
+               goto err_ref_msg;
 
-       return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
+       ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
 
-err_out:
-       nlmsg_free(msg);
+       l2tp_session_dec_refcount(session);
 
-out:
+       return ret;
+
+err_ref_msg:
+       nlmsg_free(msg);
+err_ref:
+       l2tp_session_dec_refcount(session);
+err:
        return ret;
 }
 
@@ -852,7 +867,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
                                goto out;
                }
 
-               session = l2tp_session_find_nth(tunnel, si);
+               session = l2tp_session_get_nth(tunnel, si, false);
                if (session == NULL) {
                        ti++;
                        tunnel = NULL;
@@ -862,8 +877,11 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
 
                if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
-                                        session, L2TP_CMD_SESSION_GET) < 0)
+                                        session, L2TP_CMD_SESSION_GET) < 0) {
+                       l2tp_session_dec_refcount(session);
                        break;
+               }
+               l2tp_session_dec_refcount(session);
 
                si++;
        }
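In the dump loop, the reference taken by l2tp_session_get_nth() is now dropped on the failed-send path as well as on the normal path, so no iteration can leak a session. A self-contained sketch of the loop shape (stub lookup and send functions, hypothetical names):

    #include <stdio.h>

    struct session { int id; };

    static struct session pool[3] = { {0}, {1}, {2} };
    static int refs[3];

    static struct session *session_get_nth(int n)
    {
        if (n >= 3)
            return NULL;
        refs[n]++; /* reference taken by the lookup */
        return &pool[n];
    }

    static void session_put(struct session *s) { refs[s->id]--; }

    static int send_one(struct session *s) { return s->id == 2 ? -1 : 0; }

    int main(void)
    {
        int si = 0;
        struct session *s;

        while ((s = session_get_nth(si)) != NULL) {
            if (send_one(s) < 0) {
                session_put(s); /* drop the ref before breaking */
                break;
            }
            session_put(s);     /* matched put on the normal path */
            si++;
        }
        printf("%d %d %d\n", refs[0], refs[1], refs[2]); /* 0 0 0 */
        return 0;
    }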
index 36cc56fd041871c73796cc0a52241ef3e38483c9..32ea0f3d868c6459c4194732ffcc14d8a20ec8bc 100644 (file)
@@ -450,6 +450,10 @@ static void pppol2tp_session_close(struct l2tp_session *session)
 static void pppol2tp_session_destruct(struct sock *sk)
 {
        struct l2tp_session *session = sk->sk_user_data;
+
+       skb_queue_purge(&sk->sk_receive_queue);
+       skb_queue_purge(&sk->sk_write_queue);
+
        if (session) {
                sk->sk_user_data = NULL;
                BUG_ON(session->magic != L2TP_SESSION_MAGIC);
@@ -488,9 +492,6 @@ static int pppol2tp_release(struct socket *sock)
                l2tp_session_queue_purge(session);
                sock_put(sk);
        }
-       skb_queue_purge(&sk->sk_receive_queue);
-       skb_queue_purge(&sk->sk_write_queue);
-
        release_sock(sk);
 
        /* This will delete the session context via
@@ -582,6 +583,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
        int error = 0;
        u32 tunnel_id, peer_tunnel_id;
        u32 session_id, peer_session_id;
+       bool drop_refcnt = false;
        int ver = 2;
        int fd;
 
@@ -683,36 +685,36 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
        if (tunnel->peer_tunnel_id == 0)
                tunnel->peer_tunnel_id = peer_tunnel_id;
 
-       /* Create session if it doesn't already exist. We handle the
-        * case where a session was previously created by the netlink
-        * interface by checking that the session doesn't already have
-        * a socket and its tunnel socket are what we expect. If any
-        * of those checks fail, return EEXIST to the caller.
-        */
-       session = l2tp_session_find(sock_net(sk), tunnel, session_id);
-       if (session == NULL) {
-               /* Default MTU must allow space for UDP/L2TP/PPP
-                * headers.
+       session = l2tp_session_get(sock_net(sk), tunnel, session_id, false);
+       if (session) {
+               drop_refcnt = true;
+               ps = l2tp_session_priv(session);
+
+               /* Using a pre-existing session is fine as long as it hasn't
+                * been connected yet.
                 */
-               cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+               if (ps->sock) {
+                       error = -EEXIST;
+                       goto end;
+               }
 
-               /* Allocate and initialize a new session context. */
-               session = l2tp_session_create(sizeof(struct pppol2tp_session),
-                                             tunnel, session_id,
-                                             peer_session_id, &cfg);
-               if (session == NULL) {
-                       error = -ENOMEM;
+               /* consistency checks */
+               if (ps->tunnel_sock != tunnel->sock) {
+                       error = -EEXIST;
                        goto end;
                }
        } else {
-               ps = l2tp_session_priv(session);
-               error = -EEXIST;
-               if (ps->sock != NULL)
-                       goto end;
+               /* Default MTU must allow space for UDP/L2TP/PPP headers */
+               cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
+               cfg.mru = cfg.mtu;
 
-               /* consistency checks */
-               if (ps->tunnel_sock != tunnel->sock)
+               session = l2tp_session_create(sizeof(struct pppol2tp_session),
+                                             tunnel, session_id,
+                                             peer_session_id, &cfg);
+               if (IS_ERR(session)) {
+                       error = PTR_ERR(session);
                        goto end;
+               }
        }
 
        /* Associate session with its PPPoL2TP socket */
@@ -777,6 +779,8 @@ out_no_ppp:
                  session->name);
 
 end:
+       if (drop_refcnt)
+               l2tp_session_dec_refcount(session);
        release_sock(sk);
 
        return error;
@@ -804,12 +808,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
        if (tunnel->sock == NULL)
                goto out;
 
-       /* Check that this session doesn't already exist */
-       error = -EEXIST;
-       session = l2tp_session_find(net, tunnel, session_id);
-       if (session != NULL)
-               goto out;
-
        /* Default MTU values. */
        if (cfg->mtu == 0)
                cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
@@ -817,12 +815,13 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
                cfg->mru = cfg->mtu;
 
        /* Allocate and initialize a new session context. */
-       error = -ENOMEM;
        session = l2tp_session_create(sizeof(struct pppol2tp_session),
                                      tunnel, session_id,
                                      peer_session_id, cfg);
-       if (session == NULL)
+       if (IS_ERR(session)) {
+               error = PTR_ERR(session);
                goto out;
+       }
 
        ps = l2tp_session_priv(session);
        ps->tunnel_sock = tunnel->sock;
@@ -1140,11 +1139,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
                if (stats.session_id != 0) {
                        /* resend to session ioctl handler */
                        struct l2tp_session *session =
-                               l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
-                       if (session != NULL)
-                               err = pppol2tp_session_ioctl(session, cmd, arg);
-                       else
+                               l2tp_session_get(sock_net(sk), tunnel,
+                                                stats.session_id, true);
+
+                       if (session) {
+                               err = pppol2tp_session_ioctl(session, cmd,
+                                                            arg);
+                               if (session->deref)
+                                       session->deref(session);
+                               l2tp_session_dec_refcount(session);
+                       } else {
                                err = -EBADR;
+                       }
                        break;
                }
 #ifdef CONFIG_XFRM
@@ -1377,8 +1383,6 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
        } else
                err = pppol2tp_session_setsockopt(sk, session, optname, val);
 
-       err = 0;
-
 end_put_sess:
        sock_put(sk);
 end:
@@ -1501,8 +1505,13 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
 
                err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
                sock_put(ps->tunnel_sock);
-       } else
+               if (err)
+                       goto end_put_sess;
+       } else {
                err = pppol2tp_session_getsockopt(sk, session, optname, &val);
+               if (err)
+                       goto end_put_sess;
+       }
 
        err = -EFAULT;
        if (put_user(len, optlen))
@@ -1554,7 +1563,7 @@ static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
 
 static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
 {
-       pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+       pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
        pd->session_idx++;
 
        if (pd->session == NULL) {
@@ -1681,10 +1690,14 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
 
        /* Show the tunnel or session context.
         */
-       if (pd->session == NULL)
+       if (!pd->session) {
                pppol2tp_seq_tunnel_show(m, pd->tunnel);
-       else
+       } else {
                pppol2tp_seq_session_show(m, pd->session);
+               if (pd->session->deref)
+                       pd->session->deref(pd->session);
+               l2tp_session_dec_refcount(pd->session);
+       }
 
 out:
        return 0;
@@ -1843,4 +1856,4 @@ MODULE_DESCRIPTION("PPP over L2TP over UDP");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(PPPOL2TP_DRV_VERSION);
 MODULE_ALIAS_NET_PF_PROTO(PF_PPPOX, PX_PROTO_OL2TP);
-MODULE_ALIAS_L2TP_PWTYPE(11);
+MODULE_ALIAS_L2TP_PWTYPE(7);
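The alias now names pseudowire type 7, the PPP pseudowire in the L2TPv3 numbering, rather than 11, the IP pseudowire served by l2tp_ip. For reference, a sketch of the relevant constants as they appear in enum l2tp_pwtype in include/uapi/linux/l2tp.h (reproduced here for illustration):

    #include <stdio.h>

    enum l2tp_pwtype {
        L2TP_PWTYPE_NONE     = 0x0000,
        L2TP_PWTYPE_ETH_VLAN = 0x0004,
        L2TP_PWTYPE_ETH      = 0x0005,
        L2TP_PWTYPE_PPP      = 0x0007, /* what pppol2tp provides */
        L2TP_PWTYPE_PPP_AC   = 0x0008,
        L2TP_PWTYPE_IP       = 0x000B, /* what l2tp_ip provides */
    };

    int main(void)
    {
        /* the module alias should therefore mention type 7, not 11 */
        printf("pppol2tp pwtype = %d\n", L2TP_PWTYPE_PPP);
        return 0;
    }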
index 40813dd3301c600978374e259953ca5d661022ce..5bb0c501281954dfe656c5e886c9032b958061be 100644 (file)
@@ -718,7 +718,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
        ieee80211_recalc_ps(local);
 
        if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
-           sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+           sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+           local->ops->wake_tx_queue) {
                /* XXX: for AP_VLAN, actually track AP queues */
                netif_tx_start_all_queues(dev);
        } else if (dev) {
index da9df2d56e669ed33d8126a484788e600dcaaabd..22fc32143e9c4ae17bc48edc68eb0decf11d931c 100644 (file)
@@ -290,6 +290,7 @@ void nf_conntrack_unregister_notifier(struct net *net,
        BUG_ON(notify != new);
        RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
+       /* synchronize_rcu() is called from ctnetlink_exit. */
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
 
@@ -326,6 +327,7 @@ void nf_ct_expect_unregister_notifier(struct net *net,
        BUG_ON(notify != new);
        RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
+       /* synchronize_rcu() is called from ctnetlink_exit. */
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
 
index 4b2e1fb28bb438d695715fc492f52bf7809ade5d..d80073037856db9081e57b7c88e482a61b94a45f 100644 (file)
@@ -57,7 +57,7 @@ void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;
 
-       hlist_del(&exp->lnode);
+       hlist_del_rcu(&exp->lnode);
        master_help->expecting[exp->class]--;
 
        nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
@@ -363,7 +363,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
        /* two references : one for hash insert, one for the timer */
        atomic_add(2, &exp->use);
 
-       hlist_add_head(&exp->lnode, &master_help->expectations);
+       hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
        master_help->expecting[exp->class]++;
 
        hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
index 02bcf00c24920b332401cd7c82ab6ced951659f5..008299b7f78fe3754946cf0a58029090234ad905 100644 (file)
@@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
 
        rcu_read_lock();
        t = rcu_dereference(nf_ct_ext_types[id]);
-       BUG_ON(t == NULL);
+       if (!t) {
+               rcu_read_unlock();
+               return NULL;
+       }
+
        off = ALIGN(sizeof(struct nf_ct_ext), t->align);
        len = off + t->len + var_alloc_len;
        alloc_size = t->alloc_size + var_alloc_len;
@@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
 
        rcu_read_lock();
        t = rcu_dereference(nf_ct_ext_types[id]);
-       BUG_ON(t == NULL);
+       if (!t) {
+               rcu_read_unlock();
+               return NULL;
+       }
 
        newoff = ALIGN(old->len, t->align);
        newlen = newoff + t->len + var_alloc_len;
@@ -175,6 +182,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
        RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
        update_alloc_size(type);
        mutex_unlock(&nf_ct_ext_type_mutex);
-       rcu_barrier(); /* Wait for completion of call_rcu()'s */
+       synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
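With extension types now torn down via synchronize_rcu(), a lookup can race with unregistration and legitimately find an empty slot, so the BUG_ON() becomes a soft failure returned to the caller. A plain-C sketch of the fail-soft shape (stand-in structures, not the nf_ct_ext API):

    #include <stdio.h>

    struct ext_type { unsigned int len; };

    static struct ext_type *ext_types[8]; /* a slot may be NULL at any time */

    static void *ext_create(int id)
    {
        struct ext_type *t = ext_types[id];

        if (!t)
            return NULL; /* was: BUG_ON(t == NULL) */
        /* ... allocate t->len bytes and return them ... */
        return t;
    }

    int main(void)
    {
        printf("%p\n", ext_create(3)); /* (nil): caller handles failure */
        return 0;
    }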
index 6dc44d9b41900bea12f487e5a044259e92a47f7e..4eeb3418366ad5473945395346b6e4e5fc213fbc 100644 (file)
@@ -158,16 +158,25 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
 {
        struct nf_conntrack_helper *h;
 
+       rcu_read_lock();
+
        h = __nf_conntrack_helper_find(name, l3num, protonum);
 #ifdef CONFIG_MODULES
        if (h == NULL) {
-               if (request_module("nfct-helper-%s", name) == 0)
+               rcu_read_unlock();
+               if (request_module("nfct-helper-%s", name) == 0) {
+                       rcu_read_lock();
                        h = __nf_conntrack_helper_find(name, l3num, protonum);
+               } else {
+                       return h;
+               }
        }
 #endif
        if (h != NULL && !try_module_get(h->me))
                h = NULL;
 
+       rcu_read_unlock();
+
        return h;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
@@ -311,38 +320,36 @@ void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
 
+/* Caller should hold the rcu lock */
 struct nf_ct_helper_expectfn *
 nf_ct_helper_expectfn_find_by_name(const char *name)
 {
        struct nf_ct_helper_expectfn *cur;
        bool found = false;
 
-       rcu_read_lock();
        list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
                if (!strcmp(cur->name, name)) {
                        found = true;
                        break;
                }
        }
-       rcu_read_unlock();
        return found ? cur : NULL;
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
 
+/* Caller should hold the rcu lock */
 struct nf_ct_helper_expectfn *
 nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
 {
        struct nf_ct_helper_expectfn *cur;
        bool found = false;
 
-       rcu_read_lock();
        list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
                if (cur->expectfn == symbol) {
                        found = true;
                        break;
                }
        }
-       rcu_read_unlock();
        return found ? cur : NULL;
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
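The rcu_read_lock()/unlock() pair moves out of the expectfn lookups and into their callers, since a pointer returned from an RCU-protected list is only safe to dereference inside the caller's own read-side section; the comment added to each function records that contract. A sketch of the caller-holds-the-lock convention (pthread rwlock standing in for RCU):

    #include <pthread.h>
    #include <string.h>

    static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;

    struct helper { const char *name; };
    static struct helper table[] = { { "ftp" }, { "sip" }, { NULL } };

    /* Caller should hold tbl_lock for reading. */
    static struct helper *helper_find(const char *name)
    {
        for (struct helper *h = table; h->name; h++)
            if (!strcmp(h->name, name))
                return h;
        return NULL;
    }

    int main(void)
    {
        pthread_rwlock_rdlock(&tbl_lock); /* the caller's read section */
        struct helper *h = helper_find("sip");
        /* use h only while the lock is held */
        pthread_rwlock_unlock(&tbl_lock);
        return h ? 0 : 1;
    }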
index 6806b5e73567bb0220b248682abed3e5e34f780e..dc7dfd68fafe5d8db341488ac7d9ad5bf8f80b46 100644 (file)
@@ -1488,11 +1488,16 @@ static int ctnetlink_change_helper(struct nf_conn *ct,
                 * treat the second attempt as a no-op instead of returning
                 * an error.
                 */
-               if (help && help->helper &&
-                   !strcmp(help->helper->name, helpname))
-                       return 0;
-               else
-                       return -EBUSY;
+               err = -EBUSY;
+               if (help) {
+                       rcu_read_lock();
+                       helper = rcu_dereference(help->helper);
+                       if (helper && !strcmp(helper->name, helpname))
+                               err = 0;
+                       rcu_read_unlock();
+               }
+
+               return err;
        }
 
        if (!strcmp(helpname, "")) {
@@ -1929,9 +1934,9 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl,
 
                        err = 0;
                        if (test_bit(IPS_EXPECTED_BIT, &ct->status))
-                               events = IPCT_RELATED;
+                               events = 1 << IPCT_RELATED;
                        else
-                               events = IPCT_NEW;
+                               events = 1 << IPCT_NEW;
 
                        if (cda[CTA_LABELS] &&
                            ctnetlink_attach_labels(ct, cda) == 0)
@@ -2675,8 +2680,8 @@ ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        last = (struct nf_conntrack_expect *)cb->args[1];
        for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
 restart:
-               hlist_for_each_entry(exp, &nf_ct_expect_hash[cb->args[0]],
-                                    hnode) {
+               hlist_for_each_entry_rcu(exp, &nf_ct_expect_hash[cb->args[0]],
+                                        hnode) {
                        if (l3proto && exp->tuple.src.l3num != l3proto)
                                continue;
 
@@ -2727,7 +2732,7 @@ ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        rcu_read_lock();
        last = (struct nf_conntrack_expect *)cb->args[1];
 restart:
-       hlist_for_each_entry(exp, &help->expectations, lnode) {
+       hlist_for_each_entry_rcu(exp, &help->expectations, lnode) {
                if (l3proto && exp->tuple.src.l3num != l3proto)
                        continue;
                if (cb->args[1]) {
@@ -2789,6 +2794,12 @@ static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
                return -ENOENT;
 
        ct = nf_ct_tuplehash_to_ctrack(h);
+       /* No expectation linked to this connection tracking. */
+       if (!nfct_help(ct)) {
+               nf_ct_put(ct);
+               return 0;
+       }
+
        c.data = ct;
 
        err = netlink_dump_start(ctnl, skb, nlh, &c);
@@ -3133,23 +3144,27 @@ ctnetlink_create_expect(struct net *net,
                return -ENOENT;
        ct = nf_ct_tuplehash_to_ctrack(h);
 
+       rcu_read_lock();
        if (cda[CTA_EXPECT_HELP_NAME]) {
                const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
 
                helper = __nf_conntrack_helper_find(helpname, u3,
                                                    nf_ct_protonum(ct));
                if (helper == NULL) {
+                       rcu_read_unlock();
 #ifdef CONFIG_MODULES
                        if (request_module("nfct-helper-%s", helpname) < 0) {
                                err = -EOPNOTSUPP;
                                goto err_ct;
                        }
+                       rcu_read_lock();
                        helper = __nf_conntrack_helper_find(helpname, u3,
                                                            nf_ct_protonum(ct));
                        if (helper) {
                                err = -EAGAIN;
-                               goto err_ct;
+                               goto err_rcu;
                        }
+                       rcu_read_unlock();
 #endif
                        err = -EOPNOTSUPP;
                        goto err_ct;
@@ -3159,11 +3174,13 @@ ctnetlink_create_expect(struct net *net,
        exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
        if (IS_ERR(exp)) {
                err = PTR_ERR(exp);
-               goto err_ct;
+               goto err_rcu;
        }
 
        err = nf_ct_expect_related_report(exp, portid, report);
        nf_ct_expect_put(exp);
+err_rcu:
+       rcu_read_unlock();
 err_ct:
        nf_ct_put(ct);
        return err;
@@ -3442,6 +3459,7 @@ static void __exit ctnetlink_exit(void)
 #ifdef CONFIG_NETFILTER_NETLINK_GLUE_CT
        RCU_INIT_POINTER(nfnl_ct_hook, NULL);
 #endif
+       synchronize_rcu();
 }
 
 module_init(ctnetlink_init);
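The events variable feeds an event bitmask, and the IPCT_* enumerators are bit positions (IPCT_NEW is position 0 upstream), so assigning the position itself produced an empty or wrong mask; shifting builds the intended flag. A minimal illustration:

    #include <stdio.h>

    enum { IPCT_NEW = 0, IPCT_RELATED = 1 }; /* positions, not masks */

    int main(void)
    {
        unsigned int wrong = IPCT_NEW;      /* 0: no bits set, no event */
        unsigned int right = 1 << IPCT_NEW; /* 0x1: the NEW bit */

        printf("wrong=%#x right=%#x related=%#x\n",
               wrong, right, 1u << IPCT_RELATED);
        return 0;
    }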
index 94b14c5a8b177277e218790da32eafebff3be963..82802e4a6640817e64eb3f3a6ffcb875ad14a747 100644 (file)
@@ -903,6 +903,8 @@ static void __exit nf_nat_cleanup(void)
 #ifdef CONFIG_XFRM
        RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
 #endif
+       synchronize_rcu();
+
        for (i = 0; i < NFPROTO_NUMPROTO; i++)
                kfree(nf_nat_l4protos[i]);
 
index d43869879fcfcea6ca23e3a579bd21f8aa855b10..86067560a3184f26c521e35b8f63e4952c95955e 100644 (file)
@@ -101,11 +101,13 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
                rcu_read_lock();
                idev = __in6_dev_get(skb->dev);
                if (idev != NULL) {
+                       read_lock_bh(&idev->lock);
                        list_for_each_entry(ifa, &idev->addr_list, if_list) {
                                newdst = ifa->addr;
                                addr = true;
                                break;
                        }
+                       read_unlock_bh(&idev->lock);
                }
                rcu_read_unlock();
 
index de8782345c863777c8cedf95a5ccf60504e9586f..d45558178da5b62a8ad7c896e096c1862c512091 100644 (file)
@@ -32,6 +32,13 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
 
+struct nfnl_cthelper {
+       struct list_head                list;
+       struct nf_conntrack_helper      helper;
+};
+
+static LIST_HEAD(nfnl_cthelper_list);
+
 static int
 nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
                        struct nf_conn *ct, enum ip_conntrack_info ctinfo)
@@ -161,6 +168,7 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
        int i, ret;
        struct nf_conntrack_expect_policy *expect_policy;
        struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
+       unsigned int class_max;
 
        ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
                               nfnl_cthelper_expect_policy_set);
@@ -170,19 +178,18 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
        if (!tb[NFCTH_POLICY_SET_NUM])
                return -EINVAL;
 
-       helper->expect_class_max =
-               ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
-
-       if (helper->expect_class_max != 0 &&
-           helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
+       class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+       if (class_max == 0)
+               return -EINVAL;
+       if (class_max > NF_CT_MAX_EXPECT_CLASSES)
                return -EOVERFLOW;
 
        expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
-                               helper->expect_class_max, GFP_KERNEL);
+                               class_max, GFP_KERNEL);
        if (expect_policy == NULL)
                return -ENOMEM;
 
-       for (i=0; i<helper->expect_class_max; i++) {
+       for (i = 0; i < class_max; i++) {
                if (!tb[NFCTH_POLICY_SET+i])
                        goto err;
 
@@ -191,6 +198,8 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
                if (ret < 0)
                        goto err;
        }
+
+       helper->expect_class_max = class_max - 1;
        helper->expect_policy = expect_policy;
        return 0;
 err:
@@ -203,18 +212,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
                     struct nf_conntrack_tuple *tuple)
 {
        struct nf_conntrack_helper *helper;
+       struct nfnl_cthelper *nfcth;
        int ret;
 
        if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
                return -EINVAL;
 
-       helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
-       if (helper == NULL)
+       nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
+       if (nfcth == NULL)
                return -ENOMEM;
+       helper = &nfcth->helper;
 
        ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
        if (ret < 0)
-               goto err;
+               goto err1;
 
        strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
        helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
@@ -245,14 +256,100 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
 
        ret = nf_conntrack_helper_register(helper);
        if (ret < 0)
-               goto err;
+               goto err2;
 
+       list_add_tail(&nfcth->list, &nfnl_cthelper_list);
        return 0;
-err:
-       kfree(helper);
+err2:
+       kfree(helper->expect_policy);
+err1:
+       kfree(nfcth);
        return ret;
 }
 
+static int
+nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
+                               struct nf_conntrack_expect_policy *new_policy,
+                               const struct nlattr *attr)
+{
+       struct nlattr *tb[NFCTH_POLICY_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
+                              nfnl_cthelper_expect_pol);
+       if (err < 0)
+               return err;
+
+       if (!tb[NFCTH_POLICY_NAME] ||
+           !tb[NFCTH_POLICY_EXPECT_MAX] ||
+           !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
+               return -EINVAL;
+
+       if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
+               return -EBUSY;
+
+       new_policy->max_expected =
+               ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
+       new_policy->timeout =
+               ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
+
+       return 0;
+}
+
+static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
+                                          struct nf_conntrack_helper *helper)
+{
+       struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
+       struct nf_conntrack_expect_policy *policy;
+       int i, err;
+
+       /* Check first that all policy attributes are well-formed, so we don't
+        * leave things in an inconsistent state on errors.
+        */
+       for (i = 0; i < helper->expect_class_max + 1; i++) {
+
+               if (!tb[NFCTH_POLICY_SET + i])
+                       return -EINVAL;
+
+               err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
+                                                     &new_policy[i],
+                                                     tb[NFCTH_POLICY_SET + i]);
+               if (err < 0)
+                       return err;
+       }
+       /* Now we can safely update them. */
+       for (i = 0; i < helper->expect_class_max + 1; i++) {
+               policy = (struct nf_conntrack_expect_policy *)
+                               &helper->expect_policy[i];
+               policy->max_expected = new_policy[i].max_expected;
+               policy->timeout = new_policy[i].timeout;
+       }
+
+       return 0;
+}
+
+static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
+                                      const struct nlattr *attr)
+{
+       struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
+       unsigned int class_max;
+       int err;
+
+       err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
+                              nfnl_cthelper_expect_policy_set);
+       if (err < 0)
+               return err;
+
+       if (!tb[NFCTH_POLICY_SET_NUM])
+               return -EINVAL;
+
+       class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+       if (helper->expect_class_max + 1 != class_max)
+               return -EBUSY;
+
+       return nfnl_cthelper_update_policy_all(tb, helper);
+}
+
 static int
 nfnl_cthelper_update(const struct nlattr * const tb[],
                     struct nf_conntrack_helper *helper)
@@ -263,8 +360,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[],
                return -EBUSY;
 
        if (tb[NFCTH_POLICY]) {
-               ret = nfnl_cthelper_parse_expect_policy(helper,
-                                                       tb[NFCTH_POLICY]);
+               ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
                if (ret < 0)
                        return ret;
        }
@@ -293,7 +389,8 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
        const char *helper_name;
        struct nf_conntrack_helper *cur, *helper = NULL;
        struct nf_conntrack_tuple tuple;
-       int ret = 0, i;
+       struct nfnl_cthelper *nlcth;
+       int ret = 0;
 
        if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
                return -EINVAL;
@@ -304,31 +401,22 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
        if (ret < 0)
                return ret;
 
-       rcu_read_lock();
-       for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
-               hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+       list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+               cur = &nlcth->helper;
 
-                       /* skip non-userspace conntrack helpers. */
-                       if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-                               continue;
+               if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+                       continue;
 
-                       if (strncmp(cur->name, helper_name,
-                                       NF_CT_HELPER_NAME_LEN) != 0)
-                               continue;
+               if ((tuple.src.l3num != cur->tuple.src.l3num ||
+                    tuple.dst.protonum != cur->tuple.dst.protonum))
+                       continue;
 
-                       if ((tuple.src.l3num != cur->tuple.src.l3num ||
-                            tuple.dst.protonum != cur->tuple.dst.protonum))
-                               continue;
+               if (nlh->nlmsg_flags & NLM_F_EXCL)
+                       return -EEXIST;
 
-                       if (nlh->nlmsg_flags & NLM_F_EXCL) {
-                               ret = -EEXIST;
-                               goto err;
-                       }
-                       helper = cur;
-                       break;
-               }
+               helper = cur;
+               break;
        }
-       rcu_read_unlock();
 
        if (helper == NULL)
                ret = nfnl_cthelper_create(tb, &tuple);
@@ -336,9 +424,6 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
                ret = nfnl_cthelper_update(tb, helper);
 
        return ret;
-err:
-       rcu_read_unlock();
-       return ret;
 }
 
 static int
@@ -377,10 +462,10 @@ nfnl_cthelper_dump_policy(struct sk_buff *skb,
                goto nla_put_failure;
 
        if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
-                        htonl(helper->expect_class_max)))
+                        htonl(helper->expect_class_max + 1)))
                goto nla_put_failure;
 
-       for (i=0; i<helper->expect_class_max; i++) {
+       for (i = 0; i < helper->expect_class_max + 1; i++) {
                nest_parms2 = nla_nest_start(skb,
                                (NFCTH_POLICY_SET+i) | NLA_F_NESTED);
                if (nest_parms2 == NULL)
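
Both ends of this patch now agree that expect_class_max stores the highest valid class index: the update path compares it against the class_max count received from userspace, and the dump path here reports expect_class_max + 1 as the on-wire count and loop bound. A toy illustration of the index-versus-count convention:

	#include <assert.h>

	int main(void)
	{
		unsigned int expect_class_max = 3;		/* highest index */
		unsigned int count = expect_class_max + 1;	/* classes 0..3 */
		unsigned int seen = 0;

		for (unsigned int i = 0; i < expect_class_max + 1; i++)
			seen++;
		assert(seen == count);
		return 0;
	}
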
@@ -502,11 +587,12 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
                             struct sk_buff *skb, const struct nlmsghdr *nlh,
                             const struct nlattr * const tb[])
 {
-       int ret = -ENOENT, i;
+       int ret = -ENOENT;
        struct nf_conntrack_helper *cur;
        struct sk_buff *skb2;
        char *helper_name = NULL;
        struct nf_conntrack_tuple tuple;
+       struct nfnl_cthelper *nlcth;
        bool tuple_set = false;
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -527,45 +613,39 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
                tuple_set = true;
        }
 
-       for (i = 0; i < nf_ct_helper_hsize; i++) {
-               hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+       list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
+               cur = &nlcth->helper;
+               if (helper_name &&
+                   strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+                       continue;
 
-                       /* skip non-userspace conntrack helpers. */
-                       if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-                               continue;
+               if (tuple_set &&
+                   (tuple.src.l3num != cur->tuple.src.l3num ||
+                    tuple.dst.protonum != cur->tuple.dst.protonum))
+                       continue;
 
-                       if (helper_name && strncmp(cur->name, helper_name,
-                                               NF_CT_HELPER_NAME_LEN) != 0) {
-                               continue;
-                       }
-                       if (tuple_set &&
-                           (tuple.src.l3num != cur->tuple.src.l3num ||
-                            tuple.dst.protonum != cur->tuple.dst.protonum))
-                               continue;
-
-                       skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-                       if (skb2 == NULL) {
-                               ret = -ENOMEM;
-                               break;
-                       }
+               skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+               if (skb2 == NULL) {
+                       ret = -ENOMEM;
+                       break;
+               }
 
-                       ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
-                                               nlh->nlmsg_seq,
-                                               NFNL_MSG_TYPE(nlh->nlmsg_type),
-                                               NFNL_MSG_CTHELPER_NEW, cur);
-                       if (ret <= 0) {
-                               kfree_skb(skb2);
-                               break;
-                       }
+               ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
+                                             nlh->nlmsg_seq,
+                                             NFNL_MSG_TYPE(nlh->nlmsg_type),
+                                             NFNL_MSG_CTHELPER_NEW, cur);
+               if (ret <= 0) {
+                       kfree_skb(skb2);
+                       break;
+               }
 
-                       ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
-                                               MSG_DONTWAIT);
-                       if (ret > 0)
-                               ret = 0;
+               ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+                                     MSG_DONTWAIT);
+               if (ret > 0)
+                       ret = 0;
 
-                       /* this avoids a loop in nfnetlink. */
-                       return ret == -EAGAIN ? -ENOBUFS : ret;
-               }
+               /* this avoids a loop in nfnetlink. */
+               return ret == -EAGAIN ? -ENOBUFS : ret;
        }
        return ret;
 }
@@ -576,10 +656,10 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
 {
        char *helper_name = NULL;
        struct nf_conntrack_helper *cur;
-       struct hlist_node *tmp;
        struct nf_conntrack_tuple tuple;
        bool tuple_set = false, found = false;
-       int i, j = 0, ret;
+       struct nfnl_cthelper *nlcth, *n;
+       int j = 0, ret;
 
        if (tb[NFCTH_NAME])
                helper_name = nla_data(tb[NFCTH_NAME]);
@@ -592,28 +672,27 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
                tuple_set = true;
        }
 
-       for (i = 0; i < nf_ct_helper_hsize; i++) {
-               hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
-                                                               hnode) {
-                       /* skip non-userspace conntrack helpers. */
-                       if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-                               continue;
+       list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+               cur = &nlcth->helper;
+               j++;
 
-                       j++;
+               if (helper_name &&
+                   strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
+                       continue;
 
-                       if (helper_name && strncmp(cur->name, helper_name,
-                                               NF_CT_HELPER_NAME_LEN) != 0) {
-                               continue;
-                       }
-                       if (tuple_set &&
-                           (tuple.src.l3num != cur->tuple.src.l3num ||
-                            tuple.dst.protonum != cur->tuple.dst.protonum))
-                               continue;
+               if (tuple_set &&
+                   (tuple.src.l3num != cur->tuple.src.l3num ||
+                    tuple.dst.protonum != cur->tuple.dst.protonum))
+                       continue;
 
-                       found = true;
-                       nf_conntrack_helper_unregister(cur);
-               }
+               found = true;
+               nf_conntrack_helper_unregister(cur);
+               kfree(cur->expect_policy);
+
+               list_del(&nlcth->list);
+               kfree(nlcth);
        }
+
        /* Make sure we return success if we flush and there are no helpers */
        return (found || j == 0) ? 0 : -ENOENT;
 }
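
The return expression above encodes the flush semantics spelled out in the comment: a delete with no selector over an empty list succeeds, while a selector that matches nothing is -ENOENT. As a tiny truth-table sketch of the same rule:

	#include <errno.h>
	#include <stdbool.h>

	/* found: at least one helper matched and was removed.
	 * walked: how many helpers were examined at all. */
	static int del_result(bool found, int walked)
	{
		return (found || walked == 0) ? 0 : -ENOENT;
	}
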
@@ -662,20 +741,16 @@ err_out:
 static void __exit nfnl_cthelper_exit(void)
 {
        struct nf_conntrack_helper *cur;
-       struct hlist_node *tmp;
-       int i;
+       struct nfnl_cthelper *nlcth, *n;
 
        nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
 
-       for (i=0; i<nf_ct_helper_hsize; i++) {
-               hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
-                                                                       hnode) {
-                       /* skip non-userspace conntrack helpers. */
-                       if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
-                               continue;
+       list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
+               cur = &nlcth->helper;
 
-                       nf_conntrack_helper_unregister(cur);
-               }
+               nf_conntrack_helper_unregister(cur);
+               kfree(cur->expect_policy);
+               kfree(nlcth);
        }
 }
 
index 139e0867e56e9e606942c98e75148eb17b2ec7eb..47d6656c9119fd5c75fb1fb5f0bfd4bb49f1c38b 100644 (file)
@@ -646,8 +646,8 @@ static void __exit cttimeout_exit(void)
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
        RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
+       synchronize_rcu();
 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
-       rcu_barrier();
 }
 
 module_init(cttimeout_init);
index 3ee0b8a000a41ec901faeb239e752a126428dc4d..933509ebf3d3e2e84aecd55fd7f19f21f69e46d4 100644 (file)
@@ -443,7 +443,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb) {
                skb_tx_error(entskb);
-               return NULL;
+               goto nlmsg_failure;
        }
 
        nlh = nlmsg_put(skb, 0, 0,
@@ -452,7 +452,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        if (!nlh) {
                skb_tx_error(entskb);
                kfree_skb(skb);
-               return NULL;
+               goto nlmsg_failure;
        }
        nfmsg = nlmsg_data(nlh);
        nfmsg->nfgen_family = entry->state.pf;
@@ -598,12 +598,17 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
        }
 
        nlh->nlmsg_len = skb->len;
+       if (seclen)
+               security_release_secctx(secdata, seclen);
        return skb;
 
 nla_put_failure:
        skb_tx_error(entskb);
        kfree_skb(skb);
        net_err_ratelimited("nf_queue: error creating packet message\n");
+nlmsg_failure:
+       if (seclen)
+               security_release_secctx(secdata, seclen);
        return NULL;
 }
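
This hunk plugs a leak: once the security context has been fetched earlier in nfqnl_build_packet_message(), it must be released on the success path and on every failure path, which is why both early returns now jump to the shared nlmsg_failure label. A hedged userspace sketch of the release-on-all-paths shape (acquire_ctx/release_ctx are illustrative placeholders, not kernel API):

	#include <stdlib.h>

	static char *acquire_ctx(int *len) { *len = 4; return malloc(*len); }
	static void release_ctx(char *ctx) { free(ctx); }

	/* Every exit taken after a successful acquire releases exactly once. */
	static void *build_message(void)
	{
		int seclen = 0;
		char *secdata = acquire_ctx(&seclen);
		void *msg = malloc(64);

		if (!msg)
			goto fail;
		/* ... fill msg ... */
		if (seclen)
			release_ctx(secdata);	/* success path */
		return msg;
	fail:
		if (seclen)
			release_ctx(secdata);	/* error path */
		return NULL;
	}

	int main(void)
	{
		free(build_message());
		return 0;
	}
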
 
index eb2721af898dbb54ab099f4878d7b2673cbb9522..c4dad1254ead01818fb5b2c0474b26f74762fee2 100644 (file)
@@ -21,6 +21,7 @@ struct nft_hash {
        enum nft_registers      sreg:8;
        enum nft_registers      dreg:8;
        u8                      len;
+       bool                    autogen_seed:1;
        u32                     modulus;
        u32                     seed;
        u32                     offset;
@@ -82,10 +83,12 @@ static int nft_hash_init(const struct nft_ctx *ctx,
        if (priv->offset + priv->modulus - 1 < priv->offset)
                return -EOVERFLOW;
 
-       if (tb[NFTA_HASH_SEED])
+       if (tb[NFTA_HASH_SEED]) {
                priv->seed = ntohl(nla_get_be32(tb[NFTA_HASH_SEED]));
-       else
+       } else {
+               priv->autogen_seed = true;
                get_random_bytes(&priv->seed, sizeof(priv->seed));
+       }
 
        return nft_validate_register_load(priv->sreg, len) &&
               nft_validate_register_store(ctx, priv->dreg, NULL,
@@ -105,7 +108,8 @@ static int nft_hash_dump(struct sk_buff *skb,
                goto nla_put_failure;
        if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus)))
                goto nla_put_failure;
-       if (nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
+       if (!priv->autogen_seed &&
+           nla_put_be32(skb, NFTA_HASH_SEED, htonl(priv->seed)))
                goto nla_put_failure;
        if (priv->offset != 0)
                if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset)))
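
The new autogen_seed bit records whether the seed came from userspace or was generated randomly in the kernel; dumping a kernel-generated seed would make a rule round-trip (dump and re-add) silently pin a value that was meant to stay random. A minimal sketch of the dump-only-what-the-user-set convention, with illustrative names:

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct hash_cfg {
		bool autogen_seed;
		unsigned int seed;
	};

	static void cfg_init(struct hash_cfg *c, const unsigned int *user_seed)
	{
		if (user_seed) {
			c->seed = *user_seed;
			c->autogen_seed = false;
		} else {
			c->seed = rand();	/* stand-in for get_random_bytes() */
			c->autogen_seed = true;
		}
	}

	static void cfg_dump(const struct hash_cfg *c)
	{
		/* Only echo back what the user explicitly configured. */
		if (!c->autogen_seed)
			printf("seed %u\n", c->seed);
	}

	int main(void)
	{
		struct hash_cfg a, b;
		unsigned int s = 7;

		cfg_init(&a, &s);
		cfg_init(&b, NULL);
		cfg_dump(&a);	/* prints the user-supplied seed */
		cfg_dump(&b);	/* prints nothing */
		return 0;
	}
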
index 27241a767f17b4b27d24095a31e5e9a2d3e29ce4..c64aca611ac5c5f81ad7c925652bbb90554763ac 100644 (file)
@@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
        tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
        tcp_hdrlen = tcph->doff * 4;
 
-       if (len < tcp_hdrlen)
+       if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
                return -1;
 
        if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -152,6 +152,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
        if (len > tcp_hdrlen)
                return 0;
 
+       /* tcph->doff is a 4-bit field; do not let it wrap to 0 */
+       if (tcp_hdrlen >= 15 * 4)
+               return 0;
+
        /*
         * MSS Option not found ?! add it..
         */
index 80cb7babeb6427d5768f9e636d5d9633d46f8413..df7f1df0033090c0cd76f3afda43e5159a509791 100644 (file)
@@ -393,7 +393,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
 
        rcu_read_lock();
        indev = __in6_dev_get(skb->dev);
-       if (indev)
+       if (indev) {
+               read_lock_bh(&indev->lock);
                list_for_each_entry(ifa, &indev->addr_list, if_list) {
                        if (ifa->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
                                continue;
@@ -401,6 +402,8 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
                        laddr = &ifa->addr;
                        break;
                }
+               read_unlock_bh(&indev->lock);
+       }
        rcu_read_unlock();
 
        return laddr ? laddr : daddr;
index e0a87776a010a3be352c0b2b71859e56c75a6b6f..7b2c2fce408a02d4251f03a2e3f0b4d9e7fccb80 100644 (file)
@@ -643,8 +643,8 @@ static bool skb_nfct_cached(struct net *net,
                 */
                if (nf_ct_is_confirmed(ct))
                        nf_ct_delete(ct, 0, 0);
-               else
-                       nf_conntrack_put(&ct->ct_general);
+
+               nf_conntrack_put(&ct->ct_general);
                nf_ct_set(skb, NULL, 0);
                return false;
        }
index 9d4bb8eb63f25c2e9e9e5f4190e6c943a32be547..3f76cb765e5bb71d18c3e9a4c220ed9fa3906186 100644 (file)
@@ -527,7 +527,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 
        /* Link layer. */
        clear_vlan(key);
-       if (key->mac_proto == MAC_PROTO_NONE) {
+       if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
                if (unlikely(eth_type_vlan(skb->protocol)))
                        return -EINVAL;
 
@@ -745,7 +745,13 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
 
 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
 {
-       return key_extract(skb, key);
+       int res;
+
+       res = key_extract(skb, key);
+       if (!res)
+               key->mac_proto &= ~SW_FLOW_KEY_INVALID;
+
+       return res;
 }
 
 static int key_extract_mac_proto(struct sk_buff *skb)
index a0dbe7ca8f724cd33b675ea15fb263d82041994c..8489beff5c25c971067f38833ed4a790a98dd86e 100644 (file)
@@ -3665,6 +3665,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
                        return -EBUSY;
                if (copy_from_user(&val, optval, sizeof(val)))
                        return -EFAULT;
+               if (val > INT_MAX)
+                       return -EINVAL;
                po->tp_reserve = val;
                return 0;
        }
@@ -4193,8 +4195,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
                        goto out;
                if (po->tp_version >= TPACKET_V3 &&
-                   (int)(req->tp_block_size -
-                         BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
+                   req->tp_block_size <=
+                         BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
                        goto out;
                if (unlikely(req->tp_frame_size < po->tp_hdrlen +
                                        po->tp_reserve))
@@ -4205,6 +4207,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
                if (unlikely(rb->frames_per_block == 0))
                        goto out;
+               if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+                       goto out;
                if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
                                        req->tp_frame_nr))
                        goto out;
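
All three additions in this file guard arithmetic on user-controlled ring parameters: tp_reserve is capped at INT_MAX, the block-size comparison is done against a u64 so BLK_PLUS_PRIV cannot wrap, and tp_block_size * tp_block_nr is rejected if the product would overflow. A compact sketch of the check-before-multiply idiom (names are illustrative):

	#include <limits.h>
	#include <stdio.h>

	/* Reject block_size * block_nr when the product would not fit in
	 * unsigned int, without ever computing the overflowing product. */
	static int ring_bytes_ok(unsigned int block_size, unsigned int block_nr)
	{
		if (block_nr && block_size > UINT_MAX / block_nr)
			return 0;	/* would overflow */
		return 1;
	}

	int main(void)
	{
		printf("%d\n", ring_bytes_ok(4096, 1024));	/* 1: 4 MiB fits */
		printf("%d\n", ring_bytes_ok(1u << 20, 1u << 12)); /* 0: 4 GiB wraps */
		return 0;
	}
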
index b052b27a984e39c244c94132f1162a7033e5cc63..1a2f9e964330a5cd0c0b9a5cac91807221b0ffd9 100644 (file)
@@ -794,7 +794,7 @@ static void attach_default_qdiscs(struct net_device *dev)
                }
        }
 #ifdef CONFIG_NET_SCHED
-       if (dev->qdisc)
+       if (dev->qdisc != &noop_qdisc)
                qdisc_hash_add(dev->qdisc);
 #endif
 }
index 0439a1a6836784fd5096d85a4217980cb5c49690..a9708da28eb53ff2987264c6c7d7ca6ec2ff09e9 100644 (file)
@@ -246,6 +246,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        if (!sctp_ulpq_init(&asoc->ulpq, asoc))
                goto fail_init;
 
+       if (sctp_stream_new(asoc, gfp))
+               goto fail_init;
+
        /* Assume that peer would support both address types unless we are
         * told otherwise.
         */
@@ -264,7 +267,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        /* AUTH related initializations */
        INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
        if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp))
-               goto fail_init;
+               goto stream_free;
 
        asoc->active_key_id = ep->active_key_id;
        asoc->prsctp_enable = ep->prsctp_enable;
@@ -287,6 +290,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 
        return asoc;
 
+stream_free:
+       sctp_stream_free(asoc->stream);
 fail_init:
        sock_put(asoc->base.sk);
        sctp_endpoint_put(asoc->ep);
@@ -1407,7 +1412,7 @@ sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
 /* Update the association's pmtu and frag_point by going through all the
  * transports. This routine is called when a transport's PMTU has changed.
  */
-void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
+void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
 {
        struct sctp_transport *t;
        __u32 pmtu = 0;
@@ -1419,8 +1424,8 @@ void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
        list_for_each_entry(t, &asoc->peer.transport_addr_list,
                                transports) {
                if (t->pmtu_pending && t->dst) {
-                       sctp_transport_update_pmtu(sk, t,
-                                                  SCTP_TRUNC4(dst_mtu(t->dst)));
+                       sctp_transport_update_pmtu(
+                                       t, SCTP_TRUNC4(dst_mtu(t->dst)));
                        t->pmtu_pending = 0;
                }
                if (!pmtu || (t->pathmtu < pmtu))
index 2a28ab20487f03f61ed8d74cb511bce2973ce242..0e06a278d2a911e2360e75e983b623e453284b7b 100644 (file)
@@ -401,10 +401,10 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
 
        if (t->param_flags & SPP_PMTUD_ENABLE) {
                /* Update transports view of the MTU */
-               sctp_transport_update_pmtu(sk, t, pmtu);
+               sctp_transport_update_pmtu(t, pmtu);
 
                /* Update association pmtu. */
-               sctp_assoc_sync_pmtu(sk, asoc);
+               sctp_assoc_sync_pmtu(asoc);
        }
 
        /* Retransmit with the new pmtu setting.
index 1224421036b3e59c4dba1dd5d672923b55c7923c..1409a875ad8e22172a4b6ec08ce339da3c8b80ab 100644 (file)
@@ -86,43 +86,53 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
 {
        struct sctp_transport *tp = packet->transport;
        struct sctp_association *asoc = tp->asoc;
+       struct sock *sk;
 
        pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
-
        packet->vtag = vtag;
 
-       if (asoc && tp->dst) {
-               struct sock *sk = asoc->base.sk;
-
-               rcu_read_lock();
-               if (__sk_dst_get(sk) != tp->dst) {
-                       dst_hold(tp->dst);
-                       sk_setup_caps(sk, tp->dst);
-               }
-
-               if (sk_can_gso(sk)) {
-                       struct net_device *dev = tp->dst->dev;
+       /* do the following jobs only once per flush schedule */
+       if (!sctp_packet_empty(packet))
+               return;
 
-                       packet->max_size = dev->gso_max_size;
-               } else {
-                       packet->max_size = asoc->pathmtu;
-               }
-               rcu_read_unlock();
+       /* set packet max_size from pathmtu */
+       packet->max_size = tp->pathmtu;
+       if (!asoc)
+               return;
 
-       } else {
-               packet->max_size = tp->pathmtu;
+       /* update dst or transport pathmtu if needed */
+       sk = asoc->base.sk;
+       if (!sctp_transport_dst_check(tp)) {
+               sctp_transport_route(tp, NULL, sctp_sk(sk));
+               if (asoc->param_flags & SPP_PMTUD_ENABLE)
+                       sctp_assoc_sync_pmtu(asoc);
+       } else if (!sctp_transport_pmtu_check(tp)) {
+               if (asoc->param_flags & SPP_PMTUD_ENABLE)
+                       sctp_assoc_sync_pmtu(asoc);
        }
 
-       if (ecn_capable && sctp_packet_empty(packet)) {
-               struct sctp_chunk *chunk;
+       /* If there is a prepend chunk, stick it on the list before
+        * any other chunks get appended.
+        */
+       if (ecn_capable) {
+               struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
 
-               /* If there a is a prepend chunk stick it on the list before
-                * any other chunks get appended.
-                */
-               chunk = sctp_get_ecne_prepend(asoc);
                if (chunk)
                        sctp_packet_append_chunk(packet, chunk);
        }
+
+       if (!tp->dst)
+               return;
+
+       /* set packet max_size to gso_max_size if GSO is enabled */
+       rcu_read_lock();
+       if (__sk_dst_get(sk) != tp->dst) {
+               dst_hold(tp->dst);
+               sk_setup_caps(sk, tp->dst);
+       }
+       packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
+                                         : asoc->pathmtu;
+       rcu_read_unlock();
 }
 
 /* Initialize the packet structure. */
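
The sctp_packet_config() rework above hoists an early return on a non-empty packet: route checking, PMTU syncing, and GSO sizing are per-flush jobs, so they run only when the first chunk of a flush is about to be appended. A reduced sketch of the guard (the types and the pathmtu stand-in are illustrative):

	struct pkt { int nchunks; unsigned int max_size; };

	/* Per-flush setup runs once; later calls on a non-empty packet
	 * return immediately, mirroring the !sctp_packet_empty() test. */
	static void pkt_config(struct pkt *p, unsigned int pathmtu)
	{
		if (p->nchunks)
			return;
		p->max_size = pathmtu;	/* dst/PMTU/GSO work would follow */
	}
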
@@ -582,12 +592,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
        sh->vtag = htonl(packet->vtag);
        sh->checksum = 0;
 
-       /* update dst if in need */
-       if (!sctp_transport_dst_check(tp)) {
-               sctp_transport_route(tp, NULL, sctp_sk(sk));
-               if (asoc && asoc->param_flags & SPP_PMTUD_ENABLE)
-                       sctp_assoc_sync_pmtu(sk, asoc);
-       }
+       /* drop packet if no dst */
        dst = dst_clone(tp->dst);
        if (!dst) {
                IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
@@ -704,7 +709,7 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
         */
 
        if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
-           !chunk->msg->force_delay)
+           !asoc->force_delay)
                /* Nothing unacked */
                return SCTP_XMIT_OK;
 
index 025ccff670720bd38b9cc9bd995b87e134c9f931..8081476ed313cca8ad8f9876ec5ef5dc8fd68207 100644 (file)
@@ -1026,8 +1026,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                        /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
                         * stream identifier.
                         */
-                       if (chunk->sinfo.sinfo_stream >=
-                           asoc->c.sinit_num_ostreams) {
+                       if (chunk->sinfo.sinfo_stream >= asoc->stream->outcnt) {
 
                                /* Mark as failed send. */
                                sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
index 206377fe91ec4db4a59b24bf45daa0e42be0015b..a0b29d43627f48425e83d7d3c9698d99315dd869 100644 (file)
@@ -361,8 +361,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        sctp_seq_dump_remote_addrs(seq, assoc);
        seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d "
                   "%8d %8d %8d %8d",
-               assoc->hbinterval, assoc->c.sinit_max_instreams,
-               assoc->c.sinit_num_ostreams, assoc->max_retrans,
+               assoc->hbinterval, assoc->stream->incnt,
+               assoc->stream->outcnt, assoc->max_retrans,
                assoc->init_retries, assoc->shutdown_retries,
                assoc->rtx_data_chunks,
                atomic_read(&sk->sk_wmem_alloc),
index 969a30c7bb5431530b293c8ed51f2fdea61dd8bf..118faff6a332ee24caf3d772b6f00641128ef104 100644 (file)
@@ -2460,15 +2460,10 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
         * association.
         */
        if (!asoc->temp) {
-               int error;
-
-               asoc->stream = sctp_stream_new(asoc->c.sinit_max_instreams,
-                                              asoc->c.sinit_num_ostreams, gfp);
-               if (!asoc->stream)
+               if (sctp_stream_init(asoc, gfp))
                        goto clean_up;
 
-               error = sctp_assoc_set_id(asoc, gfp);
-               if (error)
+               if (sctp_assoc_set_id(asoc, gfp))
                        goto clean_up;
        }
 
index e03bb1aab4d095b65259c33f4fba6990e90f586b..24c6ccce7539097728f3733ae5ecc037562cefcf 100644 (file)
@@ -3946,7 +3946,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
 
        /* Silently discard the chunk if stream-id is not valid */
        sctp_walk_fwdtsn(skip, chunk) {
-               if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+               if (ntohs(skip->stream) >= asoc->stream->incnt)
                        goto discard_noforce;
        }
 
@@ -4017,7 +4017,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
 
        /* Silently discard the chunk if stream-id is not valid */
        sctp_walk_fwdtsn(skip, chunk) {
-               if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+               if (ntohs(skip->stream) >= asoc->stream->incnt)
                        goto gen_shutdown;
        }
 
@@ -6353,7 +6353,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
         * and discard the DATA chunk.
         */
        sid = ntohs(data_hdr->stream);
-       if (sid >= asoc->c.sinit_max_instreams) {
+       if (sid >= asoc->stream->incnt) {
                /* Mark tsn as received even though we drop it */
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
 
index 0f378ea2ae38828d75dc215abdbc258e75cec431..d9d4c92e06b312e6c300afd8f3d5db33161fd9f7 100644 (file)
@@ -1907,7 +1907,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
        }
 
        if (asoc->pmtu_pending)
-               sctp_assoc_pending_pmtu(sk, asoc);
+               sctp_assoc_pending_pmtu(asoc);
 
        /* If fragmentation is disabled and the message length exceeds the
         * association fragmentation point, return EMSGSIZE.  The I-D
@@ -1920,7 +1920,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
        }
 
        /* Check for invalid stream. */
-       if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
+       if (sinfo->sinfo_stream >= asoc->stream->outcnt) {
                err = -EINVAL;
                goto out_free;
        }
@@ -1965,7 +1965,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
                err = PTR_ERR(datamsg);
                goto out_free;
        }
-       datamsg->force_delay = !!(msg->msg_flags & MSG_MORE);
+       asoc->force_delay = !!(msg->msg_flags & MSG_MORE);
 
        /* Now send the (possibly) fragmented message. */
        list_for_each_entry(chunk, &datamsg->chunks, frag_list) {
@@ -2435,7 +2435,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
        if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
                if (trans) {
                        trans->pathmtu = params->spp_pathmtu;
-                       sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
+                       sctp_assoc_sync_pmtu(asoc);
                } else if (asoc) {
                        asoc->pathmtu = params->spp_pathmtu;
                } else {
@@ -2451,7 +2451,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                                (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
                        if (update) {
                                sctp_transport_pmtu(trans, sctp_opt2sk(sp));
-                               sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
+                               sctp_assoc_sync_pmtu(asoc);
                        }
                } else if (asoc) {
                        asoc->param_flags =
@@ -4461,8 +4461,8 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
        info->sctpi_rwnd = asoc->a_rwnd;
        info->sctpi_unackdata = asoc->unack_data;
        info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
-       info->sctpi_instrms = asoc->c.sinit_max_instreams;
-       info->sctpi_outstrms = asoc->c.sinit_num_ostreams;
+       info->sctpi_instrms = asoc->stream->incnt;
+       info->sctpi_outstrms = asoc->stream->outcnt;
        list_for_each(pos, &asoc->base.inqueue.in_chunk_list)
                info->sctpi_inqueue++;
        list_for_each(pos, &asoc->outqueue.out_chunk_list)
@@ -4691,8 +4691,8 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
        status.sstat_unackdata = asoc->unack_data;
 
        status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
-       status.sstat_instrms = asoc->c.sinit_max_instreams;
-       status.sstat_outstrms = asoc->c.sinit_num_ostreams;
+       status.sstat_instrms = asoc->stream->incnt;
+       status.sstat_outstrms = asoc->stream->outcnt;
        status.sstat_fragmentation_point = asoc->frag_point;
        status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc);
        memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
@@ -7034,6 +7034,9 @@ int sctp_inet_listen(struct socket *sock, int backlog)
        if (sock->state != SS_UNCONNECTED)
                goto out;
 
+       if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
+               goto out;
+
        /* If backlog is zero, disable listening. */
        if (!backlog) {
                if (sctp_sstate(sk, CLOSED))
index 1c6cc04fa3a41f7266597f9cd80420c228094a2b..bbed997e1c5f01d4401adcd4664be4b6d16a93fa 100644 (file)
 #include <net/sctp/sctp.h>
 #include <net/sctp/sm.h>
 
-struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp)
+int sctp_stream_new(struct sctp_association *asoc, gfp_t gfp)
 {
        struct sctp_stream *stream;
        int i;
 
        stream = kzalloc(sizeof(*stream), gfp);
        if (!stream)
-               return NULL;
+               return -ENOMEM;
 
-       stream->outcnt = outcnt;
+       stream->outcnt = asoc->c.sinit_num_ostreams;
        stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
        if (!stream->out) {
                kfree(stream);
-               return NULL;
+               return -ENOMEM;
        }
        for (i = 0; i < stream->outcnt; i++)
                stream->out[i].state = SCTP_STREAM_OPEN;
 
-       stream->incnt = incnt;
+       asoc->stream = stream;
+
+       return 0;
+}
+
+int sctp_stream_init(struct sctp_association *asoc, gfp_t gfp)
+{
+       struct sctp_stream *stream = asoc->stream;
+       int i;
+
+       /* The initial stream->out array may be very big, so free it and
+        * allocate a new one sized to the negotiated outcnt to save memory.
+        */
+       kfree(stream->out);
+       stream->outcnt = asoc->c.sinit_num_ostreams;
+       stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
+       if (!stream->out)
+               goto nomem;
+
+       for (i = 0; i < stream->outcnt; i++)
+               stream->out[i].state = SCTP_STREAM_OPEN;
+
+       stream->incnt = asoc->c.sinit_max_instreams;
        stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp);
        if (!stream->in) {
                kfree(stream->out);
-               kfree(stream);
-               return NULL;
+               goto nomem;
        }
 
-       return stream;
+       return 0;
+
+nomem:
+       asoc->stream = NULL;
+       kfree(stream);
+
+       return -ENOMEM;
 }
 
 void sctp_stream_free(struct sctp_stream *stream)
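
sctp_stream_init() above deliberately throws away the out[] array sized from the local INIT request and reallocates it with the negotiated stream count, since the provisional count can be far larger than what the peer accepts. A hedged sketch of the shrink-on-negotiation step:

	#include <stdlib.h>

	struct stream_out { int state; };

	/* Replace a possibly huge provisional array with one sized to the
	 * negotiated count; returns 0 on success, -1 if allocation fails. */
	static int stream_resize(struct stream_out **out, size_t *cnt,
				 size_t negotiated)
	{
		struct stream_out *n = calloc(negotiated, sizeof(*n));

		if (!n)
			return -1;
		free(*out);		/* drop the oversized provisional array */
		*out = n;
		*cnt = negotiated;
		return 0;
	}
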
index 3379668af3686de2ec14db980b1ef527a6d1045f..721eeebfcd8a50609877db61ede41575e012606a 100644 (file)
@@ -251,14 +251,13 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
                transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
 }
 
-void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu)
+void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 {
-       struct dst_entry *dst;
+       struct dst_entry *dst = sctp_transport_dst_check(t);
 
        if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
                pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
-                       __func__, pmtu,
-                       SCTP_DEFAULT_MINSEGMENT);
+                       __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
                /* Use default minimum segment size and disable
                 * pmtu discovery on this transport.
                 */
@@ -267,17 +266,13 @@ void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 p
                t->pathmtu = pmtu;
        }
 
-       dst = sctp_transport_dst_check(t);
-       if (!dst)
-               t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
-
        if (dst) {
-               dst->ops->update_pmtu(dst, sk, NULL, pmtu);
-
+               dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
                dst = sctp_transport_dst_check(t);
-               if (!dst)
-                       t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
        }
+
+       if (!dst)
+               t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
 }
 
 /* Caches the dst entry and source address for a transport's destination
index 16b6b5988be969299c34a9881f258a300b366e2c..570a2b67ca1036796cc5021a0f0ce546811a4e6f 100644 (file)
@@ -132,12 +132,10 @@ static int wiphy_resume(struct device *dev)
        /* Age scan results with time spent in suspend */
        cfg80211_bss_age(rdev, get_seconds() - rdev->suspend_at);
 
-       if (rdev->ops->resume) {
-               rtnl_lock();
-               if (rdev->wiphy.registered)
-                       ret = rdev_resume(rdev);
-               rtnl_unlock();
-       }
+       rtnl_lock();
+       if (rdev->wiphy.registered && rdev->ops->resume)
+               ret = rdev_resume(rdev);
+       rtnl_unlock();
 
        return ret;
 }
index 8571d766331dd1a513a3f772f3a7ce050909598b..d4d77b09412c416fbe96e648e179a3789347d5ea 100644 (file)
@@ -141,8 +141,8 @@ static void dump_statx(struct statx *stx)
        if (stx->stx_mask & STATX_BTIME)
                print_time(" Birth: ", &stx->stx_btime);
 
-       if (stx->stx_attributes) {
-               unsigned char bits;
+       if (stx->stx_attributes_mask) {
+               unsigned char bits, mbits;
                int loop, byte;
 
                static char attr_representation[64 + 1] =
@@ -160,14 +160,18 @@ static void dump_statx(struct statx *stx)
                printf("Attributes: %016llx (", stx->stx_attributes);
                for (byte = 64 - 8; byte >= 0; byte -= 8) {
                        bits = stx->stx_attributes >> byte;
+                       mbits = stx->stx_attributes_mask >> byte;
                        for (loop = 7; loop >= 0; loop--) {
                                int bit = byte + loop;
 
-                               if (bits & 0x80)
+                               if (!(mbits & 0x80))
+                                       putchar('.');   /* Not supported */
+                               else if (bits & 0x80)
                                        putchar(attr_representation[63 - bit]);
                                else
-                                       putchar('-');
+                                       putchar('-');   /* Not set */
                                bits <<= 1;
+                               mbits <<= 1;
                        }
                        if (byte)
                                putchar(' ');
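
The fix scans stx_attributes and stx_attributes_mask in lockstep so each of the 64 positions renders as one of three states: '.' when the kernel does not support the attribute on this file, the attribute's letter when set, '-' when supported but clear. A small self-contained sketch of the same three-state rendering:

	#include <stdio.h>

	/* Render 64 attribute bits MSB-first: '.' when the mask says the
	 * attribute is unsupported, its letter when set, '-' when clear. */
	static void dump_bits(unsigned long long attrs, unsigned long long mask,
			      const char *letters)
	{
		for (int bit = 63; bit >= 0; bit--) {
			unsigned long long b = 1ULL << bit;

			if (!(mask & b))
				putchar('.');
			else
				putchar(attrs & b ? letters[63 - bit] : '-');
		}
		putchar('\n');
	}

	int main(void)
	{
		char letters[65];

		for (int i = 0; i < 64; i++)
			letters[i] = 'a' + i % 26;
		letters[64] = '\0';
		dump_bits(0x05, 0x0f, letters);	/* 60 dots, then "-j-l" */
		return 0;
	}
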
index 0a07f9014944ed92a8e2e42983ae43be60b3e471..7234e61e7ce370a775ec6981b391b6d102a01770 100644 (file)
@@ -155,7 +155,7 @@ else
 # $(call addtree,-I$(obj)) locates .h files in srctree, from generated .c files
 #   and locates generated .h files
 # FIXME: Replace both with specific CFLAGS* statements in the makefiles
-__c_flags      = $(if $(obj),-I$(srctree)/$(src) -I$(obj)) \
+__c_flags      = $(if $(obj),$(call addtree,-I$(src)) -I$(obj)) \
                  $(call flags,_c_flags)
 __a_flags      = $(call flags,_a_flags)
 __cpp_flags     = $(call flags,_cpp_flags)
index 26d208b435a0d347b8f11df13bcb79cf3618770c..cfddddb9c9d722b63c522450a5b277a4fd22d318 100644 (file)
@@ -914,7 +914,7 @@ on_treeview2_button_press_event(GtkWidget * widget,
                        current = menu;
                        display_tree_part();
                        gtk_widget_set_sensitive(back_btn, TRUE);
-               } else if ((col == COL_OPTION)) {
+               } else if (col == COL_OPTION) {
                        toggle_sym_value(menu);
                        gtk_tree_view_expand_row(view, path, TRUE);
                }
index 122153b16ea4eeba1e84bf30a71a3dda199ae534..390d7c9685fd6107c83be2296ead9cb198b571a3 100644 (file)
                .off   = OFF,                                   \
                .imm   = 0 })
 
+/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF)                      \
+       ((struct bpf_insn) {                                    \
+               .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,   \
+               .dst_reg = DST,                                 \
+               .src_reg = SRC,                                 \
+               .off   = OFF,                                   \
+               .imm   = 0 })
+
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 #define BPF_ST_MEM(SIZE, DST, OFF, IMM)                                \
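
BPF_STX_XADD follows the same builder shape as the neighbouring macros and encodes an atomic add into memory: *(size *)(dst_reg + off) += src_reg, with no result returned in a register. A fragment of a struct bpf_insn[] program using it (a sketch, not a complete test):

	/* Store 1 to the stack slot at R10-8, atomically add R5 into it,
	 * then load the result (2) back into R0. */
	BPF_MOV64_IMM(BPF_REG_5, 1),
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
	BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_5, -8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
	BPF_EXIT_INSN(),
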
index 273f21fa32b55999ab1271e6d3cc316c3a257a41..7aa57225cbf7971b08db04c27151494c071b163b 100644 (file)
@@ -130,6 +130,12 @@ static struct arch architectures[] = {
                .name = "powerpc",
                .init = powerpc__annotate_init,
        },
+       {
+               .name = "s390",
+               .objdump =  {
+                       .comment_char = '#',
+               },
+       },
 };
 
 static void ins__delete(struct ins_operands *ops)
index 93b0aa74ca03bada7db24ae827b902ce09591bef..39c2c7d067bba55cae5cf19983128369af1bb81c 100644 (file)
@@ -156,6 +156,7 @@ out:
                                         */
                        case 0x2C:      /* Westmere EP - Gulftown */
                                cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
+                               break;
                        case 0x2A:      /* SNB */
                        case 0x2D:      /* SNB Xeon */
                        case 0x3A:      /* IVB */
index fedca32853262152cb7e1028139728c4aa677b11..ccf2a69365ccbb2a7cd27fa03089be54ce506c1a 100644 (file)
@@ -100,6 +100,8 @@ The system configuration dump (if --quiet is not used) is followed by statistics
 \fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states.  These numbers are from hardware residency counters.
 \fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
 \fBPkgTtmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
+\fBGFX%rc6\fP The percentage of time the GPU is in the "render C6" state, rc6, during the measurement interval. From /sys/class/drm/card0/power/rc6_residency_ms.
+\fBGFXMHz\fP Instantaneous snapshot of what sysfs presents at the end of the measurement interval. From /sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz.
 \fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states.  These numbers are from hardware residency counters.
 \fBPkgWatt\fP Watts consumed by the whole package.
 \fBCorWatt\fP Watts consumed by the core part of the package.
index 828dccd3f01eaf324bf2d3d2c674a585417cd07a..b11294730771bed6766f77138460813f8abbfce8 100644 (file)
@@ -1142,7 +1142,7 @@ delta_thread(struct thread_data *new, struct thread_data *old,
                 * it is possible for mperf's non-halted cycles + idle states
                 * to exceed TSC's all cycles: show c1 = 0% in that case.
                 */
-               if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
+               if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > (old->tsc * tsc_tweak))
                        old->c1 = 0;
                else {
                        /* normal case, derive c1 */
@@ -2485,8 +2485,10 @@ int snapshot_gfx_mhz(void)
 
        if (fp == NULL)
                fp = fopen_or_die("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
-       else
+       else {
                rewind(fp);
+               fflush(fp);
+       }
 
        retval = fscanf(fp, "%d", &gfx_cur_mhz);
        if (retval != 1)
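
The else branch now pairs rewind() with fflush(): the stream stays open across samples, and the added fflush() asks stdio to discard buffered input (defined for seekable input streams in POSIX/glibc) so the next fscanf() re-reads the live sysfs value instead of stale bytes. A hedged sketch of the re-read pattern:

	#include <stdio.h>

	/* Re-read a changing sysfs value through a long-lived stream. */
	static int reread_int(FILE *fp, int *val)
	{
		rewind(fp);
		fflush(fp);	/* drop buffered input; hit the kernel again */
		return fscanf(fp, "%d", val) == 1 ? 0 : -1;
	}

	int main(void)
	{
		FILE *fp = fopen("/sys/class/graphics/fb0/device/drm/card0/gt_cur_freq_mhz", "r");
		int mhz;

		if (fp && reread_int(fp, &mhz) == 0)
			printf("%d MHz\n", mhz);
		return 0;
	}
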
@@ -3111,7 +3113,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return 0;
 
        fprintf(outf, "cpu%d: MSR_HWP_CAPABILITIES: 0x%08llx "
-                       "(high 0x%x guar 0x%x eff 0x%x low 0x%x)\n",
+                       "(high %d guar %d eff %d low %d)\n",
                        cpu, msr,
                        (unsigned int)HWP_HIGHEST_PERF(msr),
                        (unsigned int)HWP_GUARANTEED_PERF(msr),
@@ -3122,7 +3124,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return 0;
 
        fprintf(outf, "cpu%d: MSR_HWP_REQUEST: 0x%08llx "
-                       "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x pkg 0x%x)\n",
+                       "(min %d max %d des %d epp 0x%x window 0x%x pkg 0x%x)\n",
                        cpu, msr,
                        (unsigned int)(((msr) >> 0) & 0xff),
                        (unsigned int)(((msr) >> 8) & 0xff),
@@ -3136,7 +3138,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                        return 0;
 
                fprintf(outf, "cpu%d: MSR_HWP_REQUEST_PKG: 0x%08llx "
-                       "(min 0x%x max 0x%x des 0x%x epp 0x%x window 0x%x)\n",
+                       "(min %d max %d des %d epp 0x%x window 0x%x)\n",
                        cpu, msr,
                        (unsigned int)(((msr) >> 0) & 0xff),
                        (unsigned int)(((msr) >> 8) & 0xff),
@@ -3353,17 +3355,19 @@ void rapl_probe(unsigned int family, unsigned int model)
        case INTEL_FAM6_SKYLAKE_DESKTOP:        /* SKL */
        case INTEL_FAM6_KABYLAKE_MOBILE:        /* KBL */
        case INTEL_FAM6_KABYLAKE_DESKTOP:       /* KBL */
-               do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
+               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
                BIC_PRESENT(BIC_PKG__);
                BIC_PRESENT(BIC_RAM__);
                if (rapl_joules) {
                        BIC_PRESENT(BIC_Pkg_J);
                        BIC_PRESENT(BIC_Cor_J);
                        BIC_PRESENT(BIC_RAM_J);
+                       BIC_PRESENT(BIC_GFX_J);
                } else {
                        BIC_PRESENT(BIC_PkgWatt);
                        BIC_PRESENT(BIC_CorWatt);
                        BIC_PRESENT(BIC_RAMWatt);
+                       BIC_PRESENT(BIC_GFXWatt);
                }
                break;
        case INTEL_FAM6_HASWELL_X:      /* HSX */
@@ -3478,7 +3482,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
 int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 {
        unsigned long long msr;
-       unsigned int dts;
+       unsigned int dts, dts2;
        int cpu;
 
        if (!(do_dts || do_ptm))
@@ -3503,7 +3507,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
                fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
                        cpu, msr, tcc_activation_temp - dts);
 
-#ifdef THERM_DEBUG
                if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &msr))
                        return 0;
 
@@ -3511,11 +3514,10 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
                dts2 = (msr >> 8) & 0x7F;
                fprintf(outf, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
                        cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
-#endif
        }
 
 
-       if (do_dts) {
+       if (do_dts && debug) {
                unsigned int resolution;
 
                if (get_msr(cpu, MSR_IA32_THERM_STATUS, &msr))
@@ -3526,7 +3528,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
                fprintf(outf, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
                        cpu, msr, tcc_activation_temp - dts, resolution);
 
-#ifdef THERM_DEBUG
                if (get_msr(cpu, MSR_IA32_THERM_INTERRUPT, &msr))
                        return 0;
 
@@ -3534,7 +3535,6 @@ int print_thermal(struct thread_data *t, struct core_data *c, struct pkg_data *p
                dts2 = (msr >> 8) & 0x7F;
                fprintf(outf, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
                        cpu, msr, tcc_activation_temp - dts, tcc_activation_temp - dts2);
-#endif
        }
 
        return 0;
@@ -4578,7 +4578,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 17.02.24"
+       fprintf(outf, "turbostat version 17.04.12"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
index 6a1ad58cb66f4ae6faaf4837e29c5f8b487f000c..9af09e8099c0aae9fd6acd5d42cc5afb949871e9 100644 (file)
@@ -1,7 +1,14 @@
 LIBDIR := ../../../lib
 BPFDIR := $(LIBDIR)/bpf
+APIDIR := ../../../include/uapi
+GENDIR := ../../../../include/generated
+GENHDR := $(GENDIR)/autoconf.h
 
-CFLAGS += -Wall -O2 -I../../../include/uapi -I$(LIBDIR)
+ifneq ($(wildcard $(GENHDR)),)
+  GENFLAGS := -DHAVE_GENHDR
+endif
+
+CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS)
 LDLIBS += -lcap
 
 TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map
index d1555e4240c0fb3066c459e05f76a6f91f3b66af..c848e90b64213128a248c9669a2a2796692c5776 100644 (file)
 
 #include <bpf/bpf.h>
 
+#ifdef HAVE_GENHDR
+# include "autoconf.h"
+#else
+# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
+#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
+# endif
+#endif
+
 #include "../../../include/linux/filter.h"
 
 #ifndef ARRAY_SIZE
@@ -39,6 +47,8 @@
 #define MAX_INSNS      512
 #define MAX_FIXUPS     8
 
+#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS     (1 << 0)
+
 struct bpf_test {
        const char *descr;
        struct bpf_insn insns[MAX_INSNS];
@@ -53,6 +63,7 @@ struct bpf_test {
                REJECT
        } result, result_unpriv;
        enum bpf_prog_type prog_type;
+       uint8_t flags;
 };
 
 /* Note we want this to be 64 bit aligned so that the end of our array is
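
Together with the HAVE_GENHDR plumbing above, the new flags field lets individual tests declare that their verdict depends on CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS: on strict-alignment architectures the verifier may reject programs these tests would otherwise expect to load. A sketch of one plausible gating policy (the actual harness logic is outside this hunk):

	#include <stdbool.h>

	#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)

	#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	# define UNALIGNED_OK true
	#else
	# define UNALIGNED_OK false
	#endif

	/* A flagged test that expected ACCEPT may legitimately be rejected
	 * when unaligned access is not efficient on this architecture. */
	static bool rejection_is_ok(unsigned char flags)
	{
		return (flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
		       !UNALIGNED_OK;
	}
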
@@ -2431,6 +2442,30 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
+       {
+               "direct packet access: test15 (spill with xadd)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+                       BPF_MOV64_IMM(BPF_REG_5, 4096),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0),
+                       BPF_STX_XADD(BPF_DW, BPF_REG_4, BPF_REG_5, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0),
+                       BPF_STX_MEM(BPF_W, BPF_REG_2, BPF_REG_5, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "R2 invalid mem access 'inv'",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
        {
                "helper access to packet: test1, valid packet_ptr range",
                .insns = {
@@ -2934,6 +2969,7 @@ static struct bpf_test tests[] = {
                .errstr_unpriv = "R0 pointer arithmetic prohibited",
                .result_unpriv = REJECT,
                .result = ACCEPT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "valid map access into an array with a variable",
@@ -2957,6 +2993,7 @@ static struct bpf_test tests[] = {
                .errstr_unpriv = "R0 pointer arithmetic prohibited",
                .result_unpriv = REJECT,
                .result = ACCEPT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "valid map access into an array with a signed variable",
@@ -2984,6 +3021,7 @@ static struct bpf_test tests[] = {
                .errstr_unpriv = "R0 pointer arithmetic prohibited",
                .result_unpriv = REJECT,
                .result = ACCEPT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid map access into an array with a constant",
@@ -3025,6 +3063,7 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is outside of the array range",
                .result_unpriv = REJECT,
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid map access into an array with a variable",
@@ -3048,6 +3087,7 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result_unpriv = REJECT,
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid map access into an array with no floor check",
@@ -3074,6 +3114,7 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result_unpriv = REJECT,
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid map access into an array with a invalid max check",
@@ -3100,6 +3141,7 @@ static struct bpf_test tests[] = {
                .errstr = "invalid access to map value, value_size=48 off=44 size=8",
                .result_unpriv = REJECT,
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid map access into an array with a invalid max check",
@@ -3129,6 +3171,7 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result_unpriv = REJECT,
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "multiple registers share map_lookup_elem result",
@@ -3252,6 +3295,7 @@ static struct bpf_test tests[] = {
                .result = REJECT,
                .errstr_unpriv = "R0 pointer arithmetic prohibited",
                .result_unpriv = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "constant register |= constant should keep constant type",
@@ -3417,6 +3461,26 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_LWT_XMIT,
        },
+       {
+               "overlapping checks for direct packet access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+       },
        {
                "invalid access of tc_classid for LWT_IN",
                .insns = {
@@ -3961,7 +4025,208 @@ static struct bpf_test tests[] = {
                .result_unpriv = REJECT,
        },
        {
-               "map element value (adjusted) is preserved across register spilling",
+               "map element value or null is marked on register spilling",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -152),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 leaks addr",
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "map element value store of cleared call register",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R1 !read_ok",
+               .errstr = "R1 !read_ok",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "map element value with unaligned store",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 17),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 43),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, -2, 44),
+                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 32),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_8, 2, 33),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_8, -2, 34),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 5),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_8, 0, 22),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_8, 4, 23),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_8, -7, 24),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_8),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 3),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 22),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_7, 4, 23),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_7, -4, 24),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "map element value with unaligned load",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 9),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 2),
+                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_8, 2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 5),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 4),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+       },
+       {
+               "map element value illegal alu op, 1",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "invalid mem access 'inv'",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "map element value illegal alu op, 2",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "invalid mem access 'inv'",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "map element value illegal alu op, 3",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_ALU64_IMM(BPF_DIV, BPF_REG_0, 42),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "invalid mem access 'inv'",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "map element value illegal alu op, 4",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_ENDIAN(BPF_FROM_BE, BPF_REG_0, 64),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "invalid mem access 'inv'",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
+       {
+               "map element value illegal alu op, 5",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_MOV64_IMM(BPF_REG_3, 4096),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+                       BPF_STX_XADD(BPF_DW, BPF_REG_2, BPF_REG_3, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 22),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 invalid mem access 'inv'",
+               .errstr = "R0 invalid mem access 'inv'",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       },
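+       /*
+        * The five "illegal alu op" tests above share one pattern (rough C
+        * sketch, not code from this diff): any ALU op other than pointer
+        * add/sub -- AND, 32-bit add, divide, byte swap, or reading back an
+        * xadd'ed spill slot -- degrades the map value pointer to an unknown
+        * scalar, so the following store must be rejected with
+        * "invalid mem access 'inv'":
+        *
+        *      long *v = bpf_map_lookup_elem(map, &key);
+        *      if (v) {
+        *              v = (long *)((unsigned long)v & 8);     // illegal
+        *              *v = 22;                                // rejected
+        *      }
+        */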
+       {
+               "map element value is preserved across register spilling",
                .insns = {
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
@@ -3983,6 +4248,7 @@ static struct bpf_test tests[] = {
                .errstr_unpriv = "R0 pointer arithmetic prohibited",
                .result = ACCEPT,
                .result_unpriv = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
@@ -4421,6 +4687,7 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result = REJECT,
                .result_unpriv = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid range check",
@@ -4452,6 +4719,7 @@ static struct bpf_test tests[] = {
                .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
                .result = REJECT,
                .result_unpriv = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        }
 };
 
@@ -4530,11 +4798,11 @@ static void do_test_fixup(struct bpf_test *test, struct bpf_insn *prog,
 static void do_test_single(struct bpf_test *test, bool unpriv,
                           int *passes, int *errors)
 {
+       int fd_prog, expected_ret, reject_from_alignment;
        struct bpf_insn *prog = test->insns;
        int prog_len = probe_filter_length(prog);
        int prog_type = test->prog_type;
        int fd_f1 = -1, fd_f2 = -1, fd_f3 = -1;
-       int fd_prog, expected_ret;
        const char *expected_err;
 
        do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
@@ -4547,8 +4815,19 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                       test->result_unpriv : test->result;
        expected_err = unpriv && test->errstr_unpriv ?
                       test->errstr_unpriv : test->errstr;
+
+       reject_from_alignment = fd_prog < 0 &&
+                               (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
+                               strstr(bpf_vlog, "Unknown alignment.");
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       if (reject_from_alignment) {
+               printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
+                      strerror(errno));
+               goto fail_log;
+       }
+#endif
        if (expected_ret == ACCEPT) {
-               if (fd_prog < 0) {
+               if (fd_prog < 0 && !reject_from_alignment) {
                        printf("FAIL\nFailed to load prog '%s'!\n",
                               strerror(errno));
                        goto fail_log;
@@ -4558,14 +4837,15 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                        printf("FAIL\nUnexpected success to load!\n");
                        goto fail_log;
                }
-               if (!strstr(bpf_vlog, expected_err)) {
+               if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
                        printf("FAIL\nUnexpected error message!\n");
                        goto fail_log;
                }
        }
 
        (*passes)++;
-       printf("OK\n");
+       printf("OK%s\n", reject_from_alignment ?
+              " (NOTE: reject due to unknown alignment)" : "");
 close_fds:
        close(fd_prog);
        close(fd_f1);
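With the hunk above, a test flagged F_NEEDS_EFFICIENT_UNALIGNED_ACCESS that fails to load with "Unknown alignment." in the verifier log is still counted as a pass (printed as "OK (NOTE: reject due to unknown alignment)"), on the assumption that the host lacks CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS; where that config is set, the same rejection is a hard failure. A flagged entry follows the pattern below (a sketch with the description and insns elided, not an entry from this diff):

	{
		"some deliberately unaligned access",
		.insns = { /* ... */ },
		.result = ACCEPT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},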
index 1c5d0575802e47113b49b4b0facfd7bff47a6620..bf13fc2297aab48fad929415dbee9bc9f10532a2 100644 (file)
@@ -34,34 +34,34 @@ endif
 all: $(SUB_DIRS)
 
 $(SUB_DIRS):
-       BUILD_TARGET=$$OUTPUT/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all
+       BUILD_TARGET=$(OUTPUT)/$@; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $@ all
 
 include ../lib.mk
 
 override define RUN_TESTS
        @for TARGET in $(SUB_DIRS); do \
-               BUILD_TARGET=$$OUTPUT/$$TARGET; \
+               BUILD_TARGET=$(OUTPUT)/$$TARGET;        \
                $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests;\
        done;
 endef
 
 override define INSTALL_RULE
        @for TARGET in $(SUB_DIRS); do \
-               BUILD_TARGET=$$OUTPUT/$$TARGET; \
+               BUILD_TARGET=$(OUTPUT)/$$TARGET;        \
                $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET install;\
        done;
 endef
 
 override define EMIT_TESTS
        @for TARGET in $(SUB_DIRS); do \
-               BUILD_TARGET=$$OUTPUT/$$TARGET; \
+               BUILD_TARGET=$(OUTPUT)/$$TARGET;        \
                $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\
        done;
 endef
 
 clean:
        @for TARGET in $(SUB_DIRS); do \
-               BUILD_TARGET=$$OUTPUT/$$TARGET; \
+               BUILD_TARGET=$(OUTPUT)/$$TARGET;        \
                $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET clean; \
        done;
        rm -f tags
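The point of this makefile fix is that $$ escapes to a literal $ in a recipe, so $$OUTPUT referenced the shell environment variable OUTPUT, which is typically unset when the recipe runs, collapsing BUILD_TARGET to "/<subdir>"; $(OUTPUT) instead expands the make variable set by the selftests infrastructure (../lib.mk) or on the command line. $$BUILD_TARGET is left alone because BUILD_TARGET really is a shell variable defined earlier on the same recipe line.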
index 276139a24e6fd097f791537c07b7c182717e0693..702f8108608d053d43a6eff18ef8be84c3589994 100644 (file)
@@ -391,6 +391,25 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
        return IRQ_HANDLED;
 }
 
+/**
+ * kvm_vgic_init_cpu_hardware - initialize the GIC virtualization extensions
+ *
+ * Initialize the GIC VE hardware for the current CPU; the caller must not
+ * be preemptible.
+ */
+void kvm_vgic_init_cpu_hardware(void)
+{
+       BUG_ON(preemptible());
+
+       /*
+        * We want to make sure the list registers start out clear so that we
+        * only have to program the used registers.
+        */
+       if (kvm_vgic_global_state.type == VGIC_V2)
+               vgic_v2_init_lrs();
+       else
+               kvm_call_hyp(__vgic_v3_init_lrs);
+}
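+
+/*
+ * Usage sketch (illustrative caller, not taken from this diff): since the
+ * function asserts !preemptible(), it has to run on the CPU being set up,
+ * e.g. from a CPU bringup callback, roughly:
+ *
+ *      preempt_disable();
+ *      kvm_vgic_init_cpu_hardware();
+ *      preempt_enable();
+ */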
+
 /**
  * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
  * according to the host GIC model. Accordingly calls either
index a3ad7ff95c9b3ba95e80863d8e16a2f05fba8071..0a4283ed9aa735e55e476bdea0a19ed159952646 100644 (file)
@@ -229,7 +229,15 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
                val = vmcr.ctlr;
                break;
        case GIC_CPU_PRIMASK:
-               val = vmcr.pmr;
+               /*
+                * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
+                * PMR field as GICH_VMCR.VMPriMask rather than
+                * GICC_PMR.Priority, so we expose the upper five bits of
+                * priority mask to userspace using the lower bits in the
+                * unsigned long.
+                */
+               val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
+                       GICV_PMR_PRIORITY_SHIFT;
                break;
        case GIC_CPU_BINPOINT:
                val = vmcr.bpr;
@@ -262,7 +270,15 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
                vmcr.ctlr = val;
                break;
        case GIC_CPU_PRIMASK:
-               vmcr.pmr = val;
+               /*
+                * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the
+                * PMR field as GICH_VMCR.VMPriMask rather than
+                * GICC_PMR.Priority, so we expose the upper five bits of
+                * priority mask to userspace using the lower bits in the
+                * unsigned long.
+                */
+               vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
+                       GICV_PMR_PRIORITY_MASK;
                break;
        case GIC_CPU_BINPOINT:
                vmcr.bpr = val;
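Worked example of the PMR conversion above, assuming (an assumption about the GIC header constants, which are not shown in this diff) GICV_PMR_PRIORITY_SHIFT == 3 and GICV_PMR_PRIORITY_MASK == 0xf8, i.e. five implemented priority bits:

	u32 pmr  = 0xf8;                /* GICC_PMR.Priority, upper five bits set */
	u32 user = (pmr & 0xf8) >> 3;   /* exposed to userspace as 0x1f */
	u32 back = (user << 3) & 0xf8;  /* written back, round-trips to 0xf8 */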
index b834ecdf322503c09bffc58c7af5609cd4b71e43..b637d9c7afe3ff51b9e8dfdcd947ee37ef2df029 100644 (file)
@@ -36,6 +36,21 @@ static unsigned long *u64_to_bitmask(u64 *val)
        return (unsigned long *)val;
 }
 
+static inline void vgic_v2_write_lr(int lr, u32 val)
+{
+       void __iomem *base = kvm_vgic_global_state.vctrl_base;
+
+       writel_relaxed(val, base + GICH_LR0 + (lr * 4));
+}
+
+void vgic_v2_init_lrs(void)
+{
+       int i;
+
+       for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
+               vgic_v2_write_lr(i, 0);
+}
+
 void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
 {
        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -191,8 +206,8 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
                GICH_VMCR_ALIAS_BINPOINT_MASK;
        vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
                GICH_VMCR_BINPOINT_MASK;
-       vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) &
-               GICH_VMCR_PRIMASK_MASK;
+       vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
+                GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;
 
        vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
 }
@@ -207,8 +222,8 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
                        GICH_VMCR_ALIAS_BINPOINT_SHIFT;
        vmcrp->bpr  = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
                        GICH_VMCR_BINPOINT_SHIFT;
-       vmcrp->pmr  = (vmcr & GICH_VMCR_PRIMASK_MASK) >>
-                       GICH_VMCR_PRIMASK_SHIFT;
+       vmcrp->pmr  = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
+                       GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
 }
 
 void vgic_v2_enable(struct kvm_vcpu *vcpu)
index db28f7cadab28b5859ce58dcd35eb36b6632e60f..6cf557e9f71807f05017d10af8074bd4416c151c 100644 (file)
@@ -81,11 +81,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
                return irq->pending_latch || irq->line_level;
 }
 
+/*
+ * This struct provides an intermediate representation of the fields contained
+ * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
+ * state to userspace can generate either GICv2 or GICv3 CPU interface
+ * registers regardless of which GIC the hardware provides.
+ */
 struct vgic_vmcr {
        u32     ctlr;
        u32     abpr;
        u32     bpr;
-       u32     pmr;
+       u32     pmr;  /* Priority mask field in the GICC_PMR and
+                      * ICC_PMR_EL1 priority field format */
        /* The member variables below are valid only for GICv3 */
        u32     grpen0;
        u32     grpen1;
@@ -130,6 +137,8 @@ int vgic_v2_map_resources(struct kvm *kvm);
 int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                             enum vgic_type);
 
+void vgic_v2_init_lrs(void);
+
 static inline void vgic_get_irq_kref(struct vgic_irq *irq)
 {
        if (irq->intid < VGIC_MIN_LPI)