]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'uuid-types' of bombadil.infradead.org:public_git/uuid into nvme-base
authorChristoph Hellwig <hch@lst.de>
Tue, 13 Jun 2017 09:45:14 +0000 (11:45 +0200)
committerChristoph Hellwig <hch@lst.de>
Tue, 13 Jun 2017 09:45:14 +0000 (11:45 +0200)
595 files changed:
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/net/dsa/marvell.txt
Documentation/devicetree/bindings/usb/dwc2.txt
Documentation/networking/dpaa.txt [new file with mode: 0644]
Documentation/networking/tcp.txt
MAINTAINERS
Makefile
arch/arm/boot/compressed/efi-header.S
arch/arm/boot/compressed/head.S
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/imx6ul-14x14-evk.dts
arch/arm/boot/dts/keystone-k2l-netcp.dtsi
arch/arm/boot/dts/keystone-k2l.dtsi
arch/arm/boot/dts/versatile-pb.dts
arch/arm/common/mcpm_entry.c
arch/arm/include/asm/device.h
arch/arm/include/asm/pgtable-nommu.h
arch/arm/kvm/init.S
arch/arm/mach-at91/Kconfig
arch/arm/mach-davinci/pm.c
arch/arm/mm/dma-mapping.c
arch/arm64/Kconfig
arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
arch/arm64/configs/defconfig
arch/arm64/include/asm/sysreg.h
arch/arm64/kvm/hyp-init.S
arch/arm64/kvm/vgic-sys-reg-v3.c
arch/hexagon/mm/uaccess.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/64/hash-4k.h
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/topology.h
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/mm/mmu_context_book3s64.c
arch/powerpc/perf/power9-pmu.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/cell/spufs/coredump.c
arch/powerpc/platforms/powernv/subcore.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/sysdev/simple_gpio.c
arch/s390/Kconfig
arch/s390/include/asm/eadm.h
arch/s390/include/asm/kvm_host.h
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/sparc/Kconfig
arch/sparc/include/asm/mmu_64.h
arch/sparc/include/asm/mmu_context_64.h
arch/sparc/include/asm/pil.h
arch/sparc/include/asm/vio.h
arch/sparc/kernel/ds.c
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/kernel.h
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/tsb.S
arch/sparc/kernel/ttable_64.S
arch/sparc/kernel/vio.c
arch/sparc/lib/Makefile
arch/sparc/lib/multi3.S [new file with mode: 0644]
arch/sparc/mm/init_64.c
arch/sparc/mm/tsb.c
arch/sparc/mm/ultra.S
arch/um/drivers/ubd_kern.c
arch/x86/Kconfig
arch/x86/kernel/cpu/cyrix.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/kvm.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
block/bfq-cgroup.c
block/bfq-iosched.c
block/bfq-iosched.h
block/bio-integrity.c
block/bio.c
block/blk-core.c
block/blk-exec.c
block/blk-flush.c
block/blk-integrity.c
block/blk-mq-debugfs.c
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.c
block/blk-throttle.c
block/bounce.c
block/bsg-lib.c
block/bsg.c
block/cfq-iosched.c
block/t10-pi.c
crypto/asymmetric_keys/public_key.c
crypto/asymmetric_keys/verify_pefile.c
crypto/asymmetric_keys/x509_cert_parser.c
crypto/drbg.c
crypto/gcm.c
drivers/acpi/arm64/iort.c
drivers/acpi/battery.c
drivers/acpi/button.c
drivers/acpi/device_pm.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/ata/ahci.c
drivers/ata/libahci_platform.c
drivers/ata/libata-core.c
drivers/ata/sata_mv.c
drivers/ata/sata_rcar.c
drivers/base/power/main.c
drivers/base/power/wakeup.c
drivers/block/DAC960.c
drivers/block/amiflop.c
drivers/block/aoe/aoecmd.c
drivers/block/aoe/aoedev.c
drivers/block/ataflop.c
drivers/block/cciss.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_worker.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/loop.h
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/block/nbd.c
drivers/block/null_blk.c
drivers/block/paride/pcd.c
drivers/block/paride/pd.c
drivers/block/paride/pf.c
drivers/block/pktcdvd.c
drivers/block/ps3disk.c
drivers/block/ps3vram.c
drivers/block/rbd.c
drivers/block/rsxx/dev.c
drivers/block/rsxx/dma.c
drivers/block/rsxx/rsxx_priv.h
drivers/block/skd_main.c
drivers/block/sunvdc.c
drivers/block/swim.c
drivers/block/swim3.c
drivers/block/sx8.c
drivers/block/umem.c
drivers/block/virtio_blk.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkfront.c
drivers/block/xsysace.c
drivers/block/z2ram.c
drivers/cdrom/cdrom.c
drivers/cdrom/gdrom.c
drivers/char/mem.c
drivers/char/random.c
drivers/cpufreq/intel_pstate.c
drivers/dax/super.c
drivers/firmware/efi/efi-bgrt.c
drivers/firmware/google/vpd.c
drivers/gpio/gpio-aspeed.c
drivers/gpio/gpio-crystalcove.c
drivers/gpio/gpio-mvebu.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uc.h
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_vga.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
drivers/gpu/drm/rockchip/cdn-dp-core.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.h
drivers/gpu/drm/rockchip/rockchip_vop_reg.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/gpu/ipu-v3/ipu-pre.c
drivers/ide/ide-atapi.c
drivers/ide/ide-cd.c
drivers/ide/ide-dma.c
drivers/ide/ide-eh.c
drivers/ide/ide-floppy.c
drivers/ide/ide-io.c
drivers/ide/ide-pm.c
drivers/ide/ide-probe.c
drivers/ide/ide-tape.c
drivers/ide/ide-taskfile.c
drivers/ide/siimage.c
drivers/iio/adc/bcm_iproc_adc.c
drivers/iio/adc/max9611.c
drivers/iio/adc/sun4i-gpadc-iio.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/industrialio-trigger.c
drivers/iio/light/ltr501.c
drivers/iio/proximity/as3935.c
drivers/input/mouse/elantech.c
drivers/input/rmi4/rmi_f03.c
drivers/iommu/of_iommu.c
drivers/isdn/i4l/isdn_ppp.c
drivers/isdn/mISDN/stack.c
drivers/lightnvm/pblk-core.c
drivers/lightnvm/pblk-read.c
drivers/lightnvm/pblk-write.c
drivers/lightnvm/rrpc.c
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/io.c
drivers/md/bcache/journal.c
drivers/md/bcache/movinggc.c
drivers/md/bcache/request.c
drivers/md/bcache/request.h
drivers/md/bcache/super.c
drivers/md/bcache/writeback.c
drivers/md/dm-bio-prison-v1.c
drivers/md/dm-bio-prison-v1.h
drivers/md/dm-bufio.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-flakey.c
drivers/md/dm-integrity.c
drivers/md/dm-io.c
drivers/md/dm-log-writes.c
drivers/md/dm-mpath.c
drivers/md/dm-raid1.c
drivers/md/dm-rq.c
drivers/md/dm-rq.h
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-target.c
drivers/md/dm-thin.c
drivers/md/dm-verity-target.c
drivers/md/dm-zero.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/md.h
drivers/md/multipath.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5-cache.c
drivers/md/raid5-ppl.c
drivers/md/raid5.c
drivers/media/Kconfig
drivers/media/Makefile
drivers/media/cec/Kconfig
drivers/media/cec/Makefile
drivers/media/cec/cec-adap.c
drivers/media/cec/cec-core.c
drivers/media/i2c/Kconfig
drivers/media/platform/Kconfig
drivers/media/platform/vivid/Kconfig
drivers/media/rc/rc-ir-raw.c
drivers/media/usb/pulse8-cec/Kconfig
drivers/media/usb/rainshadow-cec/Kconfig
drivers/media/usb/rainshadow-cec/rainshadow-cec.c
drivers/memory/atmel-ebi.c
drivers/memstick/core/ms_block.c
drivers/memstick/core/mspro_block.c
drivers/misc/cxl/file.c
drivers/misc/cxl/native.c
drivers/misc/mei/bus.c
drivers/mmc/core/block.c
drivers/mmc/core/queue.c
drivers/mtd/mtd_blkdevs.c
drivers/mtd/ubi/block.c
drivers/net/dsa/mv88e6xxx/global2.h
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qualcomm/emac/emac-mac.c
drivers/net/ethernet/qualcomm/emac/emac-phy.c
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/geneve.c
drivers/net/hamradio/hdlcdrv.c
drivers/net/phy/marvell.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/intel/iwlwifi/iwl-7000.c
drivers/net/wireless/intel/iwlwifi/iwl-8000.c
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.h
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
drivers/nvdimm/blk.c
drivers/nvdimm/btt.c
drivers/nvdimm/pmem.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/lightnvm.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/io-cmd.c
drivers/nvme/target/loop.c
drivers/of/device.c
drivers/phy/phy-qcom-qmp.c
drivers/platform/goldfish/goldfish_pipe.c
drivers/reset/hisilicon/hi6220_reset.c
drivers/s390/block/dasd.c
drivers/s390/block/scm_blk.c
drivers/s390/block/scm_blk.h
drivers/s390/cio/eadm_sch.c
drivers/s390/cio/scm.c
drivers/sbus/char/jsflash.c
drivers/scsi/bnx2fc/bnx2fc.h
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_nvmet.c
drivers/scsi/osd/osd_initiator.c
drivers/scsi/osst.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_tmpl.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_sas.c
drivers/scsi/sg.c
drivers/scsi/st.c
drivers/staging/ccree/Kconfig
drivers/staging/ccree/ssi_buffer_mgr.c
drivers/staging/lustre/lustre/lov/lov_pack.c
drivers/staging/media/atomisp/i2c/Makefile
drivers/staging/media/atomisp/i2c/imx/Makefile
drivers/staging/media/atomisp/i2c/ov5693/Makefile
drivers/staging/media/atomisp/pci/atomisp2/Makefile
drivers/target/target_core_iblock.c
drivers/target/target_core_pscsi.c
drivers/usb/chipidea/core.c
drivers/usb/chipidea/debug.c
drivers/usb/chipidea/udc.c
drivers/usb/chipidea/usbmisc_imx.c
drivers/usb/dwc2/params.c
drivers/usb/gadget/function/f_mass_storage.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/musb/musb_dsps.c
drivers/xen/privcmd.c
fs/block_dev.c
fs/btrfs/btrfs_inode.h
fs/btrfs/check-integrity.c
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/ctree.h
fs/btrfs/dir-item.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file-item.c
fs/btrfs/inode.c
fs/btrfs/raid56.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/buffer.c
fs/crypto/bio.c
fs/direct-io.c
fs/ext4/acl.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/readpage.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/f2fs/data.c
fs/f2fs/segment.c
fs/gfs2/incore.h
fs/gfs2/lops.c
fs/gfs2/meta_io.c
fs/gfs2/ops_fstype.c
fs/iomap.c
fs/jbd2/transaction.c
fs/jfs/jfs_logmgr.c
fs/jfs/jfs_metapage.c
fs/mpage.c
fs/nfs/blocklayout/blocklayout.c
fs/nfsd/blocklayout.c
fs/nilfs2/segbuf.c
fs/ocfs2/cluster/heartbeat.c
fs/quota/dquot.c
fs/stat.c
fs/ufs/balloc.c
fs/ufs/inode.c
fs/ufs/super.c
fs/ufs/util.h
fs/xfs/xfs_aops.c
fs/xfs/xfs_buf.c
include/linux/bio.h
include/linux/blk-mq.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/compiler-clang.h
include/linux/device-mapper.h
include/linux/dma-iommu.h
include/linux/elevator.h
include/linux/fs.h
include/linux/ide.h
include/linux/irqchip/arm-gic-v3.h
include/linux/irqchip/arm-gic.h
include/linux/key.h
include/linux/mlx4/qp.h
include/linux/quotaops.h
include/linux/srcu.h
include/linux/suspend.h
include/media/cec-notifier.h
include/media/cec.h
include/net/ipv6.h
include/net/tcp.h
include/scsi/osd_initiator.h
include/uapi/linux/dm-ioctl.h
include/uapi/linux/keyctl.h
include/uapi/linux/loop.h
include/uapi/linux/nbd.h
kernel/cgroup/cgroup.c
kernel/cgroup/cpuset.c
kernel/cpu.c
kernel/events/core.c
kernel/power/process.c
kernel/power/suspend.c
kernel/power/swap.c
kernel/printk/printk.c
kernel/rcu/srcu.c
kernel/rcu/srcutiny.c
kernel/rcu/srcutree.c
kernel/trace/blktrace.c
mm/page_io.c
net/bridge/br_netlink.c
net/bridge/br_stp_if.c
net/core/devlink.c
net/core/skbuff.c
net/dsa/dsa.c
net/dsa/dsa2.c
net/dsa/legacy.c
net/ipv4/af_inet.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv6/calipso.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_tunnel.c
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/xfrm6_mode_ro.c
net/ipv6/xfrm6_mode_transport.c
net/mac80211/agg-tx.c
net/mac80211/ht.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mpls/af_mpls.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_nat_core.c
net/netfilter/nft_set_rbtree.c
net/netlink/af_netlink.c
security/keys/Kconfig
security/keys/dh.c
security/keys/encrypted-keys/encrypted.c
security/keys/gc.c
security/keys/key.c
security/keys/keyctl.c
security/keys/keyring.c
security/keys/process_keys.c
security/keys/trusted.c
security/keys/user_defined.c
sound/core/timer.c
sound/pci/hda/patch_realtek.c
sound/soc/atmel/atmel-classd.c
sound/soc/codecs/da7213.c
sound/soc/codecs/rt286.c
sound/soc/generic/simple-card.c
sound/soc/intel/skylake/skl-sst-ipc.c
sound/soc/intel/skylake/skl-topology.c
sound/soc/intel/skylake/skl.c
sound/soc/intel/skylake/skl.h
sound/soc/sh/rcar/adg.c
sound/soc/sh/rcar/cmd.c
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/gen.c
sound/soc/sh/rcar/rsnd.h
sound/soc/sh/rcar/src.c
sound/soc/sh/rcar/ssi.c
sound/soc/sh/rcar/ssiu.c
sound/soc/soc-core.c
tools/perf/Documentation/perf-probe.txt
tools/perf/Documentation/perf-script-perl.txt
tools/perf/Documentation/perf-script-python.txt
tools/perf/arch/common.c
tools/perf/builtin-stat.c
tools/perf/builtin-trace.c
tools/perf/tests/bp_signal.c
tools/perf/tests/builtin-test.c
tools/perf/tests/code-reading.c
tools/perf/tests/tests.h
tools/perf/util/annotate.c
tools/perf/util/build-id.c
tools/perf/util/build-id.h
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/header.c
tools/perf/util/machine.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/symbol-elf.c
tools/perf/util/symbol.c
tools/perf/util/unwind-libdw.c
virt/kvm/arm/hyp/vgic-v3-sr.c
virt/kvm/arm/mmu.c
virt/kvm/arm/vgic/vgic-mmio-v2.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic.h

index 15f79c27748df1611b1643b77ca68e2a5e7cfaab..0f5c3b4347c6f94a82385f193e76bc99dba19db5 100644 (file)
 
        dscc4.setup=    [NET]
 
+       dt_cpu_ftrs=    [PPC]
+                       Format: {"off" | "known"}
+                       Control how the dt_cpu_ftrs device-tree binding is
+                       used for CPU feature discovery and setup (if it
+                       exists).
+                       off: Do not use it, fall back to legacy cpu table.
+                       known: Do not pass through unknown features to guests
+                       or userspace, only those that the kernel is aware of.
+
        dump_apple_properties   [X86]
                        Dump name and content of EFI device properties on
                        x86 Macs.  Useful for driver authors to determine
index 7ef9dbb08957a593528a4d873d1f3af29892f5c2..1d4d0f49c9d06eb66d9957fb0661cec35ddc7af9 100644 (file)
@@ -26,6 +26,10 @@ Optional properties:
 - interrupt-controller : Indicates the switch is itself an interrupt
                          controller. This is used for the PHY interrupts.
 #interrupt-cells = <2> : Controller uses two cells, number and flag
+- eeprom-length                : Set to the length of an EEPROM connected to the
+                         switch. Must be set if the switch can not detect
+                         the presence and/or size of a connected EEPROM,
+                         otherwise optional.
 - mdio                 : Container of PHY and devices on the switches MDIO
                          bus.
 - mdio?                : Container of PHYs and devices on the external MDIO
index 00bea038639e4ea341735c875040ea55a12e8be3..fcf199b64d3d3f2b703c42001d7cf7dfff5ca7bf 100644 (file)
@@ -10,6 +10,7 @@ Required properties:
   - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc;
   - "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs;
   - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
+  - "amlogic,meson8-usb": The DWC2 USB controller instance in Amlogic Meson8 SoCs;
   - "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs;
   - "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
   - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
diff --git a/Documentation/networking/dpaa.txt b/Documentation/networking/dpaa.txt
new file mode 100644 (file)
index 0000000..76e016d
--- /dev/null
@@ -0,0 +1,194 @@
+The QorIQ DPAA Ethernet Driver
+==============================
+
+Authors:
+Madalin Bucur <madalin.bucur@nxp.com>
+Camelia Groza <camelia.groza@nxp.com>
+
+Contents
+========
+
+       - DPAA Ethernet Overview
+       - DPAA Ethernet Supported SoCs
+       - Configuring DPAA Ethernet in your kernel
+       - DPAA Ethernet Frame Processing
+       - DPAA Ethernet Features
+       - Debugging
+
+DPAA Ethernet Overview
+======================
+
+DPAA stands for Data Path Acceleration Architecture and it is a
+set of networking acceleration IPs that are available on several
+generations of SoCs, both on PowerPC and ARM64.
+
+The Freescale DPAA architecture consists of a series of hardware blocks
+that support Ethernet connectivity. The Ethernet driver depends upon the
+following drivers in the Linux kernel:
+
+ - Peripheral Access Memory Unit (PAMU) (* needed only for PPC platforms)
+    drivers/iommu/fsl_*
+ - Frame Manager (FMan)
+    drivers/net/ethernet/freescale/fman
+ - Queue Manager (QMan), Buffer Manager (BMan)
+    drivers/soc/fsl/qbman
+
+A simplified view of the dpaa_eth interfaces mapped to FMan MACs:
+
+  dpaa_eth       /eth0\     ...       /ethN\
+  driver        |      |             |      |
+  -------------   ----   -----------   ----   -------------
+       -Ports  / Tx  Rx \    ...    / Tx  Rx \
+  FMan        |          |         |          |
+       -MACs  |   MAC0   |         |   MACN   |
+             /   dtsec0   \  ...  /   dtsecN   \ (or tgec)
+            /              \     /              \(or memac)
+  ---------  --------------  ---  --------------  ---------
+      FMan, FMan Port, FMan SP, FMan MURAM drivers
+  ---------------------------------------------------------
+      FMan HW blocks: MURAM, MACs, Ports, SP
+  ---------------------------------------------------------
+
+The dpaa_eth relation to the QMan, BMan and FMan:
+              ________________________________
+  dpaa_eth   /            eth0                \
+  driver    /                                  \
+  ---------   -^-   -^-   -^-   ---    ---------
+  QMan driver / \   / \   / \  \   /  | BMan    |
+             |Rx | |Rx | |Tx | |Tx |  | driver  |
+  ---------  |Dfl| |Err| |Cnf| |FQs|  |         |
+  QMan HW    |FQ | |FQ | |FQs| |   |  |         |
+             /   \ /   \ /   \  \ /   |         |
+  ---------   ---   ---   ---   -v-    ---------
+            |        FMan QMI         |         |
+            | FMan HW       FMan BMI  | BMan HW |
+              -----------------------   --------
+
+where the acronyms used above (and in the code) are:
+DPAA = Data Path Acceleration Architecture
+FMan = DPAA Frame Manager
+QMan = DPAA Queue Manager
+BMan = DPAA Buffer Manager
+QMI = QMan interface in FMan
+BMI = BMan interface in FMan
+FMan SP = FMan Storage Profiles
+MURAM = Multi-user RAM in FMan
+FQ = QMan Frame Queue
+Rx Dfl FQ = default reception FQ
+Rx Err FQ = Rx error frames FQ
+Tx Cnf FQ = Tx confirmation FQs
+Tx FQs = transmission frame queues
+dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps)
+tgec = ten gigabit Ethernet controller (10 Gbps)
+memac = multirate Ethernet MAC (10/100/1000/10000)
+
+DPAA Ethernet Supported SoCs
+============================
+
+The DPAA drivers enable the Ethernet controllers present on the following SoCs:
+
+# PPC
+P1023
+P2041
+P3041
+P4080
+P5020
+P5040
+T1023
+T1024
+T1040
+T1042
+T2080
+T4240
+B4860
+
+# ARM
+LS1043A
+LS1046A
+
+Configuring DPAA Ethernet in your kernel
+========================================
+
+To enable the DPAA Ethernet driver, the following Kconfig options are required:
+
+# common for arch/arm64 and arch/powerpc platforms
+CONFIG_FSL_DPAA=y
+CONFIG_FSL_FMAN=y
+CONFIG_FSL_DPAA_ETH=y
+CONFIG_FSL_XGMAC_MDIO=y
+
+# for arch/powerpc only
+CONFIG_FSL_PAMU=y
+
+# common options needed for the PHYs used on the RDBs
+CONFIG_VITESSE_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_AQUANTIA_PHY=y
+
+DPAA Ethernet Frame Processing
+==============================
+
+On Rx, buffers for the incoming frames are retrieved from one of the three
+existing buffers pools. The driver initializes and seeds these, each with
+buffers of different sizes: 1KB, 2KB and 4KB.
+
+On Tx, all transmitted frames are returned to the driver through Tx
+confirmation frame queues. The driver is then responsible for freeing the
+buffers. In order to do this properly, a backpointer is added to the buffer
+before transmission that points to the skb. When the buffer returns to the
+driver on a confirmation FQ, the skb can be correctly consumed.
+
+DPAA Ethernet Features
+======================
+
+Currently the DPAA Ethernet driver enables the basic features required for
+a Linux Ethernet driver. The support for advanced features will be added
+gradually.
+
+The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx
+checksum offload feature is enabled by default and cannot be controlled through
+ethtool.
+
+The driver has support for multiple prioritized Tx traffic classes. Priorities
+range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with
+strict priority levels. Each traffic class contains NR_CPU TX queues. By
+default, only one traffic class is enabled and the lowest priority Tx queues
+are used. Higher priority traffic classes can be enabled with the mqprio
+qdisc. For example, all four traffic classes are enabled on an interface with
+the following command. Furthermore, skb priority levels are mapped to traffic
+classes as follows:
+
+       * priorities 0 to 3 - traffic class 0 (low priority)
+       * priorities 4 to 7 - traffic class 1 (medium-low priority)
+       * priorities 8 to 11 - traffic class 2 (medium-high priority)
+       * priorities 12 to 15 - traffic class 3 (high priority)
+
+tc qdisc add dev <int> root handle 1: \
+        mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1
+
+Debugging
+=========
+
+The following statistics are exported for each interface through ethtool:
+
+       - interrupt count per CPU
+       - Rx packets count per CPU
+       - Tx packets count per CPU
+       - Tx confirmed packets count per CPU
+       - Tx S/G frames count per CPU
+       - Tx error count per CPU
+       - Rx error count per CPU
+       - Rx error count per type
+       - congestion related statistics:
+               - congestion status
+               - time spent in congestion
+               - number of times the device entered congestion
+               - dropped packets count per cause
+
+The driver also exports the following information in sysfs:
+
+       - the FQ IDs for each FQ type
+       /sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids
+
+       - the IDs of the buffer pools in use
+       /sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
index bdc4c0db51e1078fb002907124fe7008ef4c0cd4..9c7139d57e5748508ce20400349cfdefd0284f74 100644 (file)
@@ -1,7 +1,7 @@
 TCP protocol
 ============
 
-Last updated: 9 February 2008
+Last updated: 3 June 2017
 
 Contents
 ========
@@ -29,18 +29,19 @@ As of 2.6.13, Linux supports pluggable congestion control algorithms.
 A congestion control mechanism can be registered through functions in
 tcp_cong.c. The functions used by the congestion control mechanism are
 registered via passing a tcp_congestion_ops struct to
-tcp_register_congestion_control. As a minimum name, ssthresh,
-cong_avoid must be valid.
+tcp_register_congestion_control. As a minimum, the congestion control
+mechanism must provide a valid name and must implement either ssthresh,
+cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook.
 
 Private data for a congestion control mechanism is stored in tp->ca_priv.
 tcp_ca(tp) returns a pointer to this space.  This is preallocated space - it
 is important to check the size of your private data will fit this space, or
-alternatively space could be allocated elsewhere and a pointer to it could
+alternatively, space could be allocated elsewhere and a pointer to it could
 be stored here.
 
 There are three kinds of congestion control algorithms currently: The
 simplest ones are derived from TCP reno (highspeed, scalable) and just
-provide an alternative the congestion window calculation. More complex
+provide an alternative congestion window calculation. More complex
 ones like BIC try to look at other events to provide better
 heuristics.  There are also round trip time based algorithms like
 Vegas and Westwood+.
@@ -49,21 +50,15 @@ Good TCP congestion control is a complex problem because the algorithm
 needs to maintain fairness and performance. Please review current
 research and RFC's before developing new modules.
 
-The method that is used to determine which congestion control mechanism is
-determined by the setting of the sysctl net.ipv4.tcp_congestion_control.
-The default congestion control will be the last one registered (LIFO);
-so if you built everything as modules, the default will be reno. If you
-build with the defaults from Kconfig, then CUBIC will be builtin (not a
-module) and it will end up the default.
+The default congestion control mechanism is chosen based on the
+DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default
+value then you can set it using sysctl net.ipv4.tcp_congestion_control. The
+module will be autoloaded if needed and you will get the expected protocol. If
+you ask for an unknown congestion method, then the sysctl attempt will fail.
 
-If you really want a particular default value then you will need
-to set it with the sysctl.  If you use a sysctl, the module will be autoloaded
-if needed and you will get the expected protocol. If you ask for an
-unknown congestion method, then the sysctl attempt will fail.
-
-If you remove a tcp congestion control module, then you will get the next
+If you remove a TCP congestion control module, then you will get the next
 available one. Since reno cannot be built as a module, and cannot be
-deleted, it will always be available.
+removed, it will always be available.
 
 How the new TCP output machine [nyi] works.
 ===========================================
index 714da939a8cc4cafd056539c1be26fdb2b89e48c..8b9b56d5806587b065fe4599e2ceec05bafe7004 100644 (file)
@@ -1172,7 +1172,7 @@ N:        clps711x
 
 ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
 M:     Hartley Sweeten <hsweeten@visionengravers.com>
-M:     Ryan Mallon <rmallon@gmail.com>
+M:     Alexander Sverdlin <alexander.sverdlin@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-ep93xx/
@@ -1489,13 +1489,15 @@ M:      Gregory Clement <gregory.clement@free-electrons.com>
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
-F:     arch/arm/mach-mvebu/
-F:     drivers/rtc/rtc-armada38x.c
 F:     arch/arm/boot/dts/armada*
 F:     arch/arm/boot/dts/kirkwood*
+F:     arch/arm/configs/mvebu_*_defconfig
+F:     arch/arm/mach-mvebu/
 F:     arch/arm64/boot/dts/marvell/armada*
 F:     drivers/cpufreq/mvebu-cpufreq.c
-F:     arch/arm/configs/mvebu_*_defconfig
+F:     drivers/irqchip/irq-armada-370-xp.c
+F:     drivers/irqchip/irq-mvebu-*
+F:     drivers/rtc/rtc-armada38x.c
 
 ARM/Marvell Berlin SoC support
 M:     Jisheng Zhang <jszhang@marvell.com>
@@ -1721,7 +1723,6 @@ N:        rockchip
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene@kernel.org>
 M:     Krzysztof Kozlowski <krzk@kernel.org>
-R:     Javier Martinez Canillas <javier@osg.samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 Q:     https://patchwork.kernel.org/project/linux-samsung-soc/list/
@@ -1829,7 +1830,6 @@ F:        drivers/edac/altera_edac.
 ARM/STI ARCHITECTURE
 M:     Patrice Chotard <patrice.chotard@st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L:     kernel@stlinux.com
 W:     http://www.stlinux.com
 S:     Maintained
 F:     arch/arm/mach-sti/
@@ -5622,7 +5622,7 @@ F:        scripts/get_maintainer.pl
 
 GENWQE (IBM Generic Workqueue Card)
 M:     Frank Haverkamp <haver@linux.vnet.ibm.com>
-M:     Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+M:     Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
 S:     Supported
 F:     drivers/misc/genwqe/
 
@@ -5667,7 +5667,6 @@ F:        tools/testing/selftests/gpio/
 
 GPIO SUBSYSTEM
 M:     Linus Walleij <linus.walleij@linaro.org>
-M:     Alexandre Courbot <gnurou@gmail.com>
 L:     linux-gpio@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
 S:     Maintained
@@ -7707,7 +7706,7 @@ F:        drivers/platform/x86/hp_accel.c
 
 LIVE PATCHING
 M:     Josh Poimboeuf <jpoimboe@redhat.com>
-M:     Jessica Yu <jeyu@redhat.com>
+M:     Jessica Yu <jeyu@kernel.org>
 M:     Jiri Kosina <jikos@kernel.org>
 M:     Miroslav Benes <mbenes@suse.cz>
 R:     Petr Mladek <pmladek@suse.com>
@@ -8508,7 +8507,7 @@ S:        Odd Fixes
 F:     drivers/media/radio/radio-miropcm20*
 
 MELLANOX MLX4 core VPI driver
-M:     Yishai Hadas <yishaih@mellanox.com>
+M:     Tariq Toukan <tariqt@mellanox.com>
 L:     netdev@vger.kernel.org
 L:     linux-rdma@vger.kernel.org
 W:     http://www.mellanox.com
@@ -8516,7 +8515,6 @@ Q:        http://patchwork.ozlabs.org/project/netdev/list/
 S:     Supported
 F:     drivers/net/ethernet/mellanox/mlx4/
 F:     include/linux/mlx4/
-F:     include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX4 IB driver
 M:     Yishai Hadas <yishaih@mellanox.com>
@@ -8526,6 +8524,7 @@ Q:        http://patchwork.kernel.org/project/linux-rdma/list/
 S:     Supported
 F:     drivers/infiniband/hw/mlx4/
 F:     include/linux/mlx4/
+F:     include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX5 core VPI driver
 M:     Saeed Mahameed <saeedm@mellanox.com>
@@ -8538,7 +8537,6 @@ Q:        http://patchwork.ozlabs.org/project/netdev/list/
 S:     Supported
 F:     drivers/net/ethernet/mellanox/mlx5/core/
 F:     include/linux/mlx5/
-F:     include/uapi/rdma/mlx5-abi.h
 
 MELLANOX MLX5 IB driver
 M:     Matan Barak <matanb@mellanox.com>
@@ -8549,6 +8547,7 @@ Q:        http://patchwork.kernel.org/project/linux-rdma/list/
 S:     Supported
 F:     drivers/infiniband/hw/mlx5/
 F:     include/linux/mlx5/
+F:     include/uapi/rdma/mlx5-abi.h
 
 MELEXIS MLX90614 DRIVER
 M:     Crt Mori <cmo@melexis.com>
@@ -8588,7 +8587,7 @@ S:        Maintained
 F:     drivers/media/dvb-frontends/mn88473*
 
 MODULE SUPPORT
-M:     Jessica Yu <jeyu@redhat.com>
+M:     Jessica Yu <jeyu@kernel.org>
 M:     Rusty Russell <rusty@rustcorp.com.au>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
 S:     Maintained
@@ -11268,7 +11267,6 @@ F:      drivers/media/rc/serial_ir.c
 
 STI CEC DRIVER
 M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
-L:     kernel@stlinux.com
 S:     Maintained
 F:     drivers/staging/media/st-cec/
 F:     Documentation/devicetree/bindings/media/stih-cec.txt
@@ -11778,6 +11776,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
 S:     Supported
 F:     arch/arm/mach-davinci/
 F:     drivers/i2c/busses/i2c-davinci.c
+F:     arch/arm/boot/dts/da850*
 
 TI DAVINCI SERIES MEDIA DRIVER
 M:     "Lad, Prabhakar" <prabhakar.csengg@gmail.com>
@@ -13872,7 +13871,7 @@ S:      Odd fixes
 F:     drivers/net/wireless/wl3501*
 
 WOLFSON MICROELECTRONICS DRIVERS
-L:     patches@opensource.wolfsonmicro.com
+L:     patches@opensource.cirrus.com
 T:     git https://github.com/CirrusLogic/linux-drivers.git
 W:     https://github.com/CirrusLogic/linux-drivers/wiki
 S:     Supported
index 853ae9179af93a0ca9751a7faf40372e7fdb15dd..83f6d9972cab0028038d95d0d34b728e754ca0ad 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
index 9d5dc4fda3c16710c0443b6a5043f212230f057d..3f7d1b74c5e02bd46730c58b0a66756c89b904ab 100644 (file)
                @ there.
                .inst   'M' | ('Z' << 8) | (0x1310 << 16)   @ tstne r0, #0x4d000
 #else
-               mov     r0, r0
+               W(mov)  r0, r0
 #endif
                .endm
 
                .macro  __EFI_HEADER
 #ifdef CONFIG_EFI_STUB
-               b       __efi_start
-
                .set    start_offset, __efi_start - start
                .org    start + 0x3c
                @
index 7c711ba614173d91d8c2fd6ff4ccb13980bb3109..8a756870c238435af684215c653f54a739f4f1a5 100644 (file)
@@ -130,19 +130,22 @@ start:
                .rept   7
                __nop
                .endr
-   ARM(                mov     r0, r0          )
-   ARM(                b       1f              )
- THUMB(                badr    r12, 1f         )
- THUMB(                bx      r12             )
+#ifndef CONFIG_THUMB2_KERNEL
+               mov     r0, r0
+#else
+ AR_CLASS(     sub     pc, pc, #3      )       @ A/R: switch to Thumb2 mode
+  M_CLASS(     nop.w                   )       @ M: already in Thumb2 mode
+               .thumb
+#endif
+               W(b)    1f
 
                .word   _magic_sig      @ Magic numbers to help the loader
                .word   _magic_start    @ absolute load/run zImage address
                .word   _magic_end      @ zImage end address
                .word   0x04030201      @ endianness flag
 
- THUMB(                .thumb                  )
-1:             __EFI_HEADER
-
+               __EFI_HEADER
+1:
  ARM_BE8(      setend  be              )       @ go BE8 if compiled for BE8
  AR_CLASS(     mrs     r9, cpsr        )
 #ifdef CONFIG_ARM_VIRT_EXT
index 561f27d8d92224fe8f4f8c3224a5441f2d41175a..9444a9a9ba1057e6b594dc8e2595ac1e5ec593fb 100644 (file)
@@ -3,6 +3,11 @@
 #include <dt-bindings/clock/bcm2835-aux.h>
 #include <dt-bindings/gpio/gpio.h>
 
+/* firmware-provided startup stubs live here, where the secondary CPUs are
+ * spinning.
+ */
+/memreserve/ 0x00000000 0x00001000;
+
 /* This include file covers the common peripherals and configuration between
  * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
  * bcm2835.dtsi and bcm2836.dtsi.
index f18e1f1d0ce2c6aad83c08b7ad8ac4cfc41e715e..d2be8aa3370b7840e014964c147a4742d5ca87e8 100644 (file)
 
                ethphy0: ethernet-phy@2 {
                        reg = <2>;
+                       micrel,led-mode = <1>;
+                       clocks = <&clks IMX6UL_CLK_ENET_REF>;
+                       clock-names = "rmii-ref";
                };
 
                ethphy1: ethernet-phy@1 {
                        reg = <1>;
+                       micrel,led-mode = <1>;
+                       clocks = <&clks IMX6UL_CLK_ENET2_REF>;
+                       clock-names = "rmii-ref";
                };
        };
 };
index b6f26824e83a96a88e34d33b99a0f4dace08e306..66f615a74118b9da1fe77088b63be8738c17f6c4 100644 (file)
@@ -137,8 +137,8 @@ netcp: netcp@26000000 {
        /* NetCP address range */
        ranges = <0 0x26000000 0x1000000>;
 
-       clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>;
-       clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk";
+       clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>;
+       clock-names = "pa_clk", "ethss_clk", "cpts";
        dma-coherent;
 
        ti,navigator-dmas = <&dma_gbe 0>,
index b58e7ebc091994645dd1adb35c7e7dc843fae7b0..148650406cf701cd7ffc5ac92d9054d606e97bfd 100644 (file)
                        };
                };
 
+               osr: sram@70000000 {
+                       compatible = "mmio-sram";
+                       reg = <0x70000000 0x10000>;
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       clocks = <&clkosr>;
+               };
+
                dspgpio0: keystone_dsp_gpio@02620240 {
                        compatible = "ti,keystone-dsp-gpio";
                        gpio-controller;
index 33a8eb28374eaa8d3b8aca95d8801227bedd87ca..06e2331f666d45fb2a2432ac1ef5401c3c50e37d 100644 (file)
@@ -1,4 +1,4 @@
-#include <versatile-ab.dts>
+#include "versatile-ab.dts"
 
 / {
        model = "ARM Versatile PB";
index cf062472e07bcb4be470bf35ab029df3438dbc7e..2b913f17d50f5d91f50d3aa30e3a8a26c97847b9 100644 (file)
@@ -235,7 +235,7 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
        return ret;
 }
 
-typedef void (*phys_reset_t)(unsigned long);
+typedef typeof(cpu_reset) phys_reset_t;
 
 void mcpm_cpu_power_down(void)
 {
@@ -300,7 +300,7 @@ void mcpm_cpu_power_down(void)
         * on the CPU.
         */
        phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-       phys_reset(__pa_symbol(mcpm_entry_point));
+       phys_reset(__pa_symbol(mcpm_entry_point), false);
 
        /* should never get here */
        BUG();
@@ -389,7 +389,7 @@ static int __init nocache_trampoline(unsigned long _arg)
        __mcpm_cpu_down(cpu, cluster);
 
        phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-       phys_reset(__pa_symbol(mcpm_entry_point));
+       phys_reset(__pa_symbol(mcpm_entry_point), false);
        BUG();
 }
 
index 36ec9c8f6e161d59d56caafbbe92fe0d2239d3f9..3234fe9bba6e76196d8a852db8c26d078150d989 100644 (file)
@@ -19,7 +19,8 @@ struct dev_archdata {
 #ifdef CONFIG_XEN
        const struct dma_map_ops *dev_dma_ops;
 #endif
-       bool dma_coherent;
+       unsigned int dma_coherent:1;
+       unsigned int dma_ops_setup:1;
 };
 
 struct omap_device;
index 302240c19a5aa688e7bdab1ece506dfbeaccea4e..a0d726a47c8a272b722b0d5623021058da50e113 100644 (file)
@@ -66,6 +66,7 @@ typedef pte_t *pte_addr_t;
 #define pgprot_noncached(prot) (prot)
 #define pgprot_writecombine(prot) (prot)
 #define pgprot_dmacoherent(prot) (prot)
+#define pgprot_device(prot)    (prot)
 
 
 /*
index 570ed4a9c2618ba7810f96af0b106c8cfbdfd2cc..5386528665b54fc191cdf0e456a133531c78b626 100644 (file)
@@ -104,7 +104,6 @@ __do_hyp_init:
        @  - Write permission implies XN: disabled
        @  - Instruction cache: enabled
        @  - Data/Unified cache: enabled
-       @  - Memory alignment checks: enabled
        @  - MMU: enabled (this code must be run from an identity mapping)
        mrc     p15, 4, r0, c1, c0, 0   @ HSCR
        ldr     r2, =HSCTLR_MASK
@@ -112,8 +111,8 @@ __do_hyp_init:
        mrc     p15, 0, r1, c1, c0, 0   @ SCTLR
        ldr     r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
        and     r1, r1, r2
- ARM(  ldr     r2, =(HSCTLR_M | HSCTLR_A)                      )
- THUMB(        ldr     r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)          )
+ ARM(  ldr     r2, =(HSCTLR_M)                                 )
+ THUMB(        ldr     r2, =(HSCTLR_M | HSCTLR_TE)                     )
        orr     r1, r1, r2
        orr     r0, r0, r1
        mcr     p15, 4, r0, c1, c0, 0   @ HSCR
index 841e924143f90e089bae9269acacdff65ab597f9..cbd959b73654c43deb72cbe084bdb99043a22b66 100644 (file)
@@ -1,6 +1,7 @@
 menuconfig ARCH_AT91
        bool "Atmel SoCs"
        depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
+       select ARM_CPU_SUSPEND if PM
        select COMMON_CLK_AT91
        select GPIOLIB
        select PINCTRL
index efb80354f3034d856ab259bb1568dab41173d3b3..b5cc05dc2cb27c9e20e5f8290b65fff2efeec8ec 100644 (file)
@@ -153,7 +153,8 @@ int __init davinci_pm_init(void)
        davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
        if (!davinci_sram_suspend) {
                pr_err("PM: cannot allocate SRAM memory\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto no_sram_mem;
        }
 
        davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
@@ -161,6 +162,10 @@ int __init davinci_pm_init(void)
 
        suspend_set_ops(&davinci_pm_ops);
 
+       return 0;
+
+no_sram_mem:
+       iounmap(pm_config.ddrpsc_reg_base);
 no_ddrpsc_mem:
        iounmap(pm_config.ddrpll_reg_base);
 no_ddrpll_mem:
index c742dfd2967bcae6057c3ea59b81979fcf60aa55..bd83c531828a7349eecf233b5c213a65388e9567 100644 (file)
@@ -2311,7 +2311,14 @@ int arm_iommu_attach_device(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 
-static void __arm_iommu_detach_device(struct device *dev)
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
 {
        struct dma_iommu_mapping *mapping;
 
@@ -2324,22 +2331,10 @@ static void __arm_iommu_detach_device(struct device *dev)
        iommu_detach_device(mapping->domain, dev);
        kref_put(&mapping->kref, release_iommu_mapping);
        to_dma_iommu_mapping(dev) = NULL;
+       set_dma_ops(dev, NULL);
 
        pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
-
-/**
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
- */
-void arm_iommu_detach_device(struct device *dev)
-{
-       __arm_iommu_detach_device(dev);
-       set_dma_ops(dev, NULL);
-}
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2379,7 +2374,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
        if (!mapping)
                return;
 
-       __arm_iommu_detach_device(dev);
+       arm_iommu_detach_device(dev);
        arm_iommu_release_mapping(mapping);
 }
 
@@ -2430,9 +2425,13 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                dev->dma_ops = xen_dma_ops;
        }
 #endif
+       dev->archdata.dma_ops_setup = true;
 }
 
 void arch_teardown_dma_ops(struct device *dev)
 {
+       if (!dev->archdata.dma_ops_setup)
+               return;
+
        arm_teardown_iommu_dma_ops(dev);
 }
index 3dcd7ec69bca8f939dba09e9087902fbd1561e8a..b2024db225a9dd116cf5d2ba49d62bc79c8e0bf8 100644 (file)
@@ -1084,10 +1084,6 @@ config SYSVIPC_COMPAT
        def_bool y
        depends on COMPAT && SYSVIPC
 
-config KEYS_COMPAT
-       def_bool y
-       depends on COMPAT && KEYS
-
 endmenu
 
 menu "Power management options"
index ac8df5201cd656d70073bc03cd13436435b79c66..b4bc42ece7541154431a5855c4bbe0f984094445 100644 (file)
                        cpm_crypto: crypto@800000 {
                                compatible = "inside-secure,safexcel-eip197";
                                reg = <0x800000 0x200000>;
-                               interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-                               | IRQ_TYPE_LEVEL_HIGH)>,
+                               interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
index 7740a75a823084d027ffab1c02d221f3083dea87..6e2058847ddcd59ca9fd0d472bfb94f5331b00cd 100644 (file)
                        cps_crypto: crypto@800000 {
                                compatible = "inside-secure,safexcel-eip197";
                                reg = <0x800000 0x200000>;
-                               interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-                               | IRQ_TYPE_LEVEL_HIGH)>,
+                               interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
                                             <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
index 65cdd878cfbd603b323a08006872f5de90e90aee..97c123e09e45bfd80173029de0da0161dd4be0c7 100644 (file)
@@ -68,6 +68,7 @@ CONFIG_PCIE_QCOM=y
 CONFIG_PCIE_ARMADA_8K=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=m
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_ARM64_VA_BITS_48=y
@@ -208,6 +209,8 @@ CONFIG_BRCMFMAC=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_PM8941_PWRKEY=y
@@ -263,6 +266,7 @@ CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
 CONFIG_SPI_S3C64XX=y
 CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
@@ -292,6 +296,7 @@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
 CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
 CONFIG_EXYNOS_THERMAL=y
+CONFIG_ROCKCHIP_THERMAL=m
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
 CONFIG_MESON_GXBB_WATCHDOG=m
@@ -300,12 +305,14 @@ CONFIG_RENESAS_WDT=y
 CONFIG_BCM2835_WDT=y
 CONFIG_MFD_CROS_EC=y
 CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
 CONFIG_MFD_EXYNOS_LPASS=m
 CONFIG_MFD_HI655X_PMIC=y
 CONFIG_MFD_MAX77620=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_RK808=y
 CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FAN53555=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_HI655X=y
@@ -473,8 +480,10 @@ CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
 CONFIG_PWM=y
 CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
 CONFIG_PWM_MESON=m
 CONFIG_PWM_ROCKCHIP=y
 CONFIG_PWM_SAMSUNG=y
@@ -484,6 +493,7 @@ CONFIG_PHY_HI6220_USB=y
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_ROCKCHIP_INNO_USB2=y
 CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
 CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_ARM_SCPI_PROTOCOL=y
index 15c142ce991cd9180172f0ecb8ddf399e8e31c60..b4d13d9267ff8b56c95aa981928bb3b7ed950a88 100644 (file)
 #define SCTLR_ELx_A    (1 << 1)
 #define SCTLR_ELx_M    1
 
+#define SCTLR_EL2_RES1 ((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
+                        (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
+                        (1 << 28) | (1 << 29))
+
 #define SCTLR_ELx_FLAGS        (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
                         SCTLR_ELx_SA | SCTLR_ELx_I)
 
index 839425c24b1c7078c7397470aa811286eb119f49..3f9615582377661a88fab8be6a12365d625d830a 100644 (file)
@@ -106,10 +106,13 @@ __do_hyp_init:
        tlbi    alle2
        dsb     sy
 
-       mrs     x4, sctlr_el2
-       and     x4, x4, #SCTLR_ELx_EE   // preserve endianness of EL2
-       ldr     x5, =SCTLR_ELx_FLAGS
-       orr     x4, x4, x5
+       /*
+        * Preserve all the RES1 bits while setting the default flags,
+        * as well as the EE bit on BE. Drop the A flag since the compiler
+        * is allowed to generate unaligned accesses.
+        */
+       ldr     x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(        orr     x4, x4, #SCTLR_ELx_EE)
        msr     sctlr_el2, x4
        isb
 
index 79f37e37d367c88b2bd76a9a93a91a587ee6347a..6260b69e5622930b87d100e0dfbdf8fda249deee 100644 (file)
@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
                 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
                 */
-               vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
-               vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+               vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+               vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
                vgic_set_vmcr(vcpu, &vmcr);
        } else {
                val = 0;
@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
                 * Extract it directly using ICC_CTLR_EL1 reg definitions.
                 */
-               val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
-               val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+               val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+               val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
 
                p->regval = val;
        }
@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                p->regval = 0;
 
        vgic_get_vmcr(vcpu, &vmcr);
-       if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+       if (!vmcr.cbpr) {
                if (p->is_write) {
                        vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
                                     ICC_BPR1_EL1_SHIFT;
index ec90afdb3ad084e999e6980077aff846564f7b7c..c599eb126c9e7be9a3324744297f562ad743e97a 100644 (file)
@@ -37,15 +37,14 @@ __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
        long uncleared;
 
        while (count > PAGE_SIZE) {
-               uncleared = __copy_to_user_hexagon(dest, &empty_zero_page,
-                                               PAGE_SIZE);
+               uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE);
                if (uncleared)
                        return count - (PAGE_SIZE - uncleared);
                count -= PAGE_SIZE;
                dest += PAGE_SIZE;
        }
        if (count)
-               count = __copy_to_user_hexagon(dest, &empty_zero_page, count);
+               count = raw_copy_to_user(dest, &empty_zero_page, count);
 
        return count;
 }
index f7c8f9972f618109209e4892512ed903f6b865f3..bf4391d189233847a7cc82a5623fa5fd3bf30a31 100644 (file)
@@ -380,22 +380,6 @@ source "arch/powerpc/platforms/Kconfig"
 
 menu "Kernel options"
 
-config PPC_DT_CPU_FTRS
-       bool "Device-tree based CPU feature discovery & setup"
-       depends on PPC_BOOK3S_64
-       default n
-       help
-         This enables code to use a new device tree binding for describing CPU
-         compatibility and features. Saying Y here will attempt to use the new
-         binding if the firmware provides it. Currently only the skiboot
-         firmware provides this binding.
-         If you're not sure say Y.
-
-config PPC_CPUFEATURES_ENABLE_UNKNOWN
-       bool "cpufeatures pass through unknown features to guest/userspace"
-       depends on PPC_DT_CPU_FTRS
-       default y
-
 config HIGHMEM
        bool "High memory support"
        depends on PPC32
@@ -1215,11 +1199,6 @@ source "arch/powerpc/Kconfig.debug"
 
 source "security/Kconfig"
 
-config KEYS_COMPAT
-       bool
-       depends on COMPAT && KEYS
-       default y
-
 source "crypto/Kconfig"
 
 config PPC_LIB_RHEAP
index b4b5e6b671ca4dedc27fc35d59b30d4ad488e1c3..0c4e470571ca0faa74d3e9fa38fa57a384cab4bf 100644 (file)
@@ -8,7 +8,7 @@
 #define H_PTE_INDEX_SIZE  9
 #define H_PMD_INDEX_SIZE  7
 #define H_PUD_INDEX_SIZE  9
-#define H_PGD_INDEX_SIZE  12
+#define H_PGD_INDEX_SIZE  9
 
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE       (sizeof(pte_t) << H_PTE_INDEX_SIZE)
index c2d509584a98070accd6be62519fa992e5bc1f3f..d02ad93bf70892f8d342b9d8890a4e1b8065eed6 100644 (file)
@@ -214,7 +214,6 @@ enum {
 #define CPU_FTR_DAWR                   LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX                  LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG               LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE                        LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1             LONG_ASM_CONST(0x4000000000000000)
 
 #ifndef __ASSEMBLY__
@@ -463,7 +462,7 @@ enum {
            CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
            CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
            CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
-           CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+           CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
 #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
index a2123f291ab0c5c8dc13cc9364c3a12848a4bb2c..bb99b651085aaf292e5f98ee23c7cdc53d443cd2 100644 (file)
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
 #define TASK_SIZE_128TB (0x0000800000000000UL)
 #define TASK_SIZE_512TB (0x0002000000000000UL)
 
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
 /*
  * Max value currently used:
  */
-#define TASK_SIZE_USER64       TASK_SIZE_512TB
+#define TASK_SIZE_USER64               TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64      TASK_SIZE_128TB
 #else
-#define TASK_SIZE_USER64       TASK_SIZE_64TB
+#define TASK_SIZE_USER64               TASK_SIZE_64TB
+#define DEFAULT_MAP_WINDOW_USER64      TASK_SIZE_64TB
 #endif
 
 /*
@@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
  * space during mmap's.
  */
 #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
-#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
+#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
 
 #define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
                TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
@@ -143,21 +148,15 @@ void release_thread(struct task_struct *);
  * with 128TB and conditionally enable upto 512TB
  */
 #ifdef CONFIG_PPC_BOOK3S_64
-#define DEFAULT_MAP_WINDOW     ((is_32bit_task()) ? \
-                                TASK_SIZE_USER32 : TASK_SIZE_128TB)
+#define DEFAULT_MAP_WINDOW     ((is_32bit_task()) ?                    \
+                                TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
 #else
 #define DEFAULT_MAP_WINDOW     TASK_SIZE
 #endif
 
 #ifdef __powerpc64__
 
-#ifdef CONFIG_PPC_BOOK3S_64
-/* Limit stack to 128TB */
-#define STACK_TOP_USER64 TASK_SIZE_128TB
-#else
-#define STACK_TOP_USER64 TASK_SIZE_USER64
-#endif
-
+#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
 #define STACK_TOP_USER32 TASK_SIZE_USER32
 
 #define STACK_TOP (is_32bit_task() ? \
index 8b3b46b7b0f2795b6195eb95ee649d3dece6dc9a..329771559cbbb16048d67d27450865703a248c90 100644 (file)
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 
+static inline int early_cpu_to_node(int cpu)
+{
+       int nid;
+
+       nid = numa_cpu_lookup_table[cpu];
+
+       /*
+        * Fall back to node 0 if nid is unset (it should be, except bugs).
+        * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+        */
+       return (nid < 0) ? 0 : nid;
+}
 #else
 
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
index fcc7588a96d694265899935eae34521eda29690d..4c7656dc4e04f09bed8b9bbc8d8f979876237202 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/jump_label.h>
+#include <linux/libfdt.h>
 #include <linux/memblock.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
@@ -642,7 +643,6 @@ static struct dt_cpu_feature_match __initdata
        {"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
        {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
        {"processor-utilization-of-resources-register", feat_enable_purr, 0},
-       {"subcore", feat_enable, CPU_FTR_SUBCORE},
        {"no-execute", feat_enable, 0},
        {"strong-access-ordering", feat_enable, CPU_FTR_SAO},
        {"cache-inhibited-large-page", feat_enable_large_ci, 0},
@@ -671,12 +671,24 @@ static struct dt_cpu_feature_match __initdata
        {"wait-v3", feat_enable, 0},
 };
 
-/* XXX: how to configure this? Default + boot time? */
-#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
-#define CPU_FEATURE_ENABLE_UNKNOWN 1
-#else
-#define CPU_FEATURE_ENABLE_UNKNOWN 0
-#endif
+static bool __initdata using_dt_cpu_ftrs;
+static bool __initdata enable_unknown = true;
+
+static int __init dt_cpu_ftrs_parse(char *str)
+{
+       if (!str)
+               return 0;
+
+       if (!strcmp(str, "off"))
+               using_dt_cpu_ftrs = false;
+       else if (!strcmp(str, "known"))
+               enable_unknown = false;
+       else
+               return 1;
+
+       return 0;
+}
+early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
 
 static void __init cpufeatures_setup_start(u32 isa)
 {
@@ -707,7 +719,7 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
                }
        }
 
-       if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
+       if (!known && enable_unknown) {
                if (!feat_try_enable_unknown(f)) {
                        pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
                                f->name);
@@ -756,6 +768,26 @@ static void __init cpufeatures_setup_finished(void)
                cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
 }
 
+static int __init disabled_on_cmdline(void)
+{
+       unsigned long root, chosen;
+       const char *p;
+
+       root = of_get_flat_dt_root();
+       chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
+       if (chosen == -FDT_ERR_NOTFOUND)
+               return false;
+
+       p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
+       if (!p)
+               return false;
+
+       if (strstr(p, "dt_cpu_ftrs=off"))
+               return true;
+
+       return false;
+}
+
 static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
                                        int depth, void *data)
 {
@@ -766,8 +798,6 @@ static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
        return 0;
 }
 
-static bool __initdata using_dt_cpu_ftrs = false;
-
 bool __init dt_cpu_ftrs_in_use(void)
 {
        return using_dt_cpu_ftrs;
@@ -775,6 +805,8 @@ bool __init dt_cpu_ftrs_in_use(void)
 
 bool __init dt_cpu_ftrs_init(void *fdt)
 {
+       using_dt_cpu_ftrs = false;
+
        /* Setup and verify the FDT, if it fails we just bail */
        if (!early_init_dt_verify(fdt))
                return false;
@@ -782,6 +814,9 @@ bool __init dt_cpu_ftrs_init(void *fdt)
        if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
                return false;
 
+       if (disabled_on_cmdline())
+               return false;
+
        cpufeatures_setup_cpu();
 
        using_dt_cpu_ftrs = true;
@@ -1027,5 +1062,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 
 void __init dt_cpu_ftrs_scan(void)
 {
+       if (!using_dt_cpu_ftrs)
+               return;
+
        of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
 }
index baae104b16c7ba9f7cdf4a305ab5227ebf002467..2ad725ef4368a3e525681b0ce4a56ef8e960bf48 100644 (file)
@@ -1666,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
 #endif
+       current->thread.load_fp = 0;
        memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
        current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1674,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
        current->thread.vr_save_area = NULL;
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
+       current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1685,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
        current->thread.tm_tfhar = 0;
        current->thread.tm_texasr = 0;
        current->thread.tm_tfiar = 0;
+       current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);
index 71dcda91755d51a2e5705f29308239e7e9c7e506..857129acf960a1bf93c0c5791a6de1d84585caa3 100644 (file)
@@ -928,7 +928,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_PPC_MM_SLICES
 #ifdef CONFIG_PPC64
-       init_mm.context.addr_limit = TASK_SIZE_128TB;
+       init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 #else
 #error "context.addr_limit not initialized."
 #endif
index f35ff9dea4fb4607459c10d42a29c47f2984e613..a8c1f99e96072530cb1f2d9ed702dffd78665720 100644 (file)
@@ -661,7 +661,7 @@ void __init emergency_stack_init(void)
 
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-       return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+       return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
                                    __pa(MAX_DMA_ADDRESS));
 }
 
@@ -672,7 +672,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-       if (cpu_to_node(from) == cpu_to_node(to))
+       if (early_cpu_to_node(from) == early_cpu_to_node(to))
                return LOCAL_DISTANCE;
        else
                return REMOTE_DISTANCE;
index c6dca2ae78ef9f1225dd6a13e0997034132315a4..a3edf813d4556c547e5b00155c5f9a0dc411872d 100644 (file)
@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
         * mm->context.addr_limit. Default to max task size so that we copy the
         * default values to paca which will help us to handle slb miss early.
         */
-       mm->context.addr_limit = TASK_SIZE_128TB;
+       mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 
        /*
         * The old code would re-promote on fork, we don't do that when using
index 018f8e90ac35fd19bf37bdbb99438f970621634a..bb28e1a412576ea15492be658a5bf78ee75950a5 100644 (file)
@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
        .name                   = "POWER9",
        .n_counter              = MAX_PMU_COUNTERS,
        .add_fields             = ISA207_ADD_FIELDS,
-       .test_adder             = ISA207_TEST_ADDER,
+       .test_adder             = P9_DD1_TEST_ADDER,
        .compute_mmcr           = isa207_compute_mmcr,
        .config_bhrb            = power9_config_bhrb,
        .bhrb_filter_map        = power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
        .name                   = "POWER9",
        .n_counter              = MAX_PMU_COUNTERS,
        .add_fields             = ISA207_ADD_FIELDS,
-       .test_adder             = P9_DD1_TEST_ADDER,
+       .test_adder             = ISA207_TEST_ADDER,
        .compute_mmcr           = isa207_compute_mmcr,
        .config_bhrb            = power9_config_bhrb,
        .bhrb_filter_map        = power9_bhrb_filter_map,
index 33244e3d9375eae3ccd1224b7dbac87b3822f92a..4fd64d3f5c4429206b8c838ca99387e6668aee11 100644 (file)
@@ -59,6 +59,17 @@ config PPC_OF_BOOT_TRAMPOLINE
 
          In case of doubt, say Y
 
+config PPC_DT_CPU_FTRS
+       bool "Device-tree based CPU feature discovery & setup"
+       depends on PPC_BOOK3S_64
+       default y
+       help
+         This enables code to use a new device tree binding for describing CPU
+         compatibility and features. Saying Y here will attempt to use the new
+         binding if the firmware provides it. Currently only the skiboot
+         firmware provides this binding.
+         If you're not sure say Y.
+
 config UDBG_RTAS_CONSOLE
        bool "RTAS based debug console"
        depends on PPC_RTAS
index e5a891ae80ee5e6881bd10be148c04519d3dffd0..84b7ac926ce65682e83781ec6ab4ac97b6d0727f 100644 (file)
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
        skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
        if (!dump_skip(cprm, skip))
                goto Eio;
+
+       rc = 0;
 out:
        free_page((unsigned long)buf);
        return rc;
index 0babef11136fc8daba7f2666fed3f3b649cc2bd8..8c6119280c1306afd399d2c86ce88381882ab5df 100644 (file)
@@ -407,7 +407,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,
 
 static int subcore_init(void)
 {
-       if (!cpu_has_feature(CPU_FTR_SUBCORE))
+       unsigned pvr_ver;
+
+       pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+       if (pvr_ver != PVR_POWER8 &&
+           pvr_ver != PVR_POWER8E &&
+           pvr_ver != PVR_POWER8NVL)
                return 0;
 
        /*
index e104c71ea44ab5bf715fa5720c0a1ed47c6221c7..1fb162ba9d1c6aaa730123d07258a3923b3d245d 100644 (file)
@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
        for (i = 0; i < num_lmbs; i++) {
                lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
                lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+               lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
                lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
        }
 
@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
        for (i = 0; i < num_lmbs; i++) {
                lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
                lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+               lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
                lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
        }
 
index ef470b470b04ae85488d1a9ffcbe27519884a4db..6afddae2fb4796dc61de3258f683bcc158b15a2e 100644 (file)
@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-       struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+       struct u8_gpio_chip *u8_gc =
+               container_of(mm_gc, struct u8_gpio_chip, mm_gc);
 
        u8_gc->data = in_8(mm_gc->regs);
 }
index e161fafb495b746f94d84a172b27708a1ce77816..6967addc6a8940630bf3038abdad18d37d8bda39 100644 (file)
@@ -363,9 +363,6 @@ config COMPAT
 config SYSVIPC_COMPAT
        def_bool y if COMPAT && SYSVIPC
 
-config KEYS_COMPAT
-       def_bool y if COMPAT && KEYS
-
 config SMP
        def_bool y
        prompt "Symmetric multi-processing support"
index 67026300c88e5565b1d52af8bbd3eb8fa387a62e..144809a3f4f69e1a669441cd592307f1e83ea1cb 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/blkdev.h>
 
 struct arqb {
        u64 data;
@@ -105,13 +106,14 @@ struct scm_driver {
        int (*probe) (struct scm_device *scmdev);
        int (*remove) (struct scm_device *scmdev);
        void (*notify) (struct scm_device *scmdev, enum scm_event event);
-       void (*handler) (struct scm_device *scmdev, void *data, int error);
+       void (*handler) (struct scm_device *scmdev, void *data,
+                       blk_status_t error);
 };
 
 int scm_driver_register(struct scm_driver *scmdrv);
 void scm_driver_unregister(struct scm_driver *scmdrv);
 
 int eadm_start_aob(struct aob *aob);
-void scm_irq_handler(struct aob *aob, int error);
+void scm_irq_handler(struct aob *aob, blk_status_t error);
 
 #endif /* _ASM_S390_EADM_H */
index 426614a882a9b12a71c96f06607f30e6b345cd0d..65d07ac34647001ad45052a18d97e49e151c4b20 100644 (file)
@@ -541,7 +541,6 @@ struct kvm_s390_float_interrupt {
        struct mutex ais_lock;
        u8 simm;
        u8 nimm;
-       int ais_enabled;
 };
 
 struct kvm_hw_wp_info_arch {
index caf15c8a8948e76c1bf7cfb5d5a109df6cee9b3e..2d120fef7d90d915e33d7f19cb0ba39e9a6a264e 100644 (file)
@@ -2160,7 +2160,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
        struct kvm_s390_ais_req req;
        int ret = 0;
 
-       if (!fi->ais_enabled)
+       if (!test_kvm_facility(kvm, 72))
                return -ENOTSUPP;
 
        if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
@@ -2204,7 +2204,7 @@ static int kvm_s390_inject_airq(struct kvm *kvm,
        };
        int ret = 0;
 
-       if (!fi->ais_enabled || !adapter->suppressible)
+       if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
                return kvm_s390_inject_vm(kvm, &s390int);
 
        mutex_lock(&fi->ais_lock);
index 689ac48361c697318ba6192962c7790d027a3199..f28e2e776931e7fdcddb2ad285ddf35b1e85500e 100644 (file)
@@ -558,7 +558,6 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
                } else {
                        set_kvm_facility(kvm->arch.model.fac_mask, 72);
                        set_kvm_facility(kvm->arch.model.fac_list, 72);
-                       kvm->arch.float_int.ais_enabled = 1;
                        r = 0;
                }
                mutex_unlock(&kvm->lock);
@@ -1533,7 +1532,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        mutex_init(&kvm->arch.float_int.ais_lock);
        kvm->arch.float_int.simm = 0;
        kvm->arch.float_int.nimm = 0;
-       kvm->arch.float_int.ais_enabled = 0;
        spin_lock_init(&kvm->arch.float_int.lock);
        for (i = 0; i < FIRQ_LIST_COUNT; i++)
                INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
index 58243b0d21c006cfea9c47723221a33b7673fae9..5639c9fe5b5522d6e7b7d05ac17a6f07d579cc0c 100644 (file)
@@ -192,9 +192,9 @@ config NR_CPUS
        int "Maximum number of CPUs"
        depends on SMP
        range 2 32 if SPARC32
-       range 2 1024 if SPARC64
+       range 2 4096 if SPARC64
        default 32 if SPARC32
-       default 64 if SPARC64
+       default 4096 if SPARC64
 
 source kernel/Kconfig.hz
 
@@ -295,9 +295,13 @@ config NUMA
        depends on SPARC64 && SMP
 
 config NODES_SHIFT
-       int
-       default "4"
+       int "Maximum NUMA Nodes (as a power of 2)"
+       range 4 5 if SPARC64
+       default "5"
        depends on NEED_MULTIPLE_NODES
+       help
+         Specify the maximum number of NUMA Nodes available on the target
+         system.  Increases memory reserved to accommodate various tables.
 
 # Some NUMA nodes have memory ranges that span
 # other nodes.  Even though a pfn is valid and
@@ -573,9 +577,6 @@ config SYSVIPC_COMPAT
        depends on COMPAT && SYSVIPC
        default y
 
-config KEYS_COMPAT
-       def_bool y if COMPAT && KEYS
-
 endmenu
 
 source "net/Kconfig"
index f7de0dbc38af2dd36c9f34df53e6e951f6729825..83b36a5371ffc62e80fa694e077867f4f8b1f49f 100644 (file)
@@ -52,7 +52,7 @@
 #define CTX_NR_MASK            TAG_CONTEXT_BITS
 #define CTX_HW_MASK            (CTX_NR_MASK | CTX_PGSZ_MASK)
 
-#define CTX_FIRST_VERSION      ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION      BIT(CTX_VERSION_SHIFT)
 #define CTX_VALID(__ctx)       \
         (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
 #define CTX_HWBITS(__ctx)      ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
index 22fede6eba116020cf7049e2f45a57545a6d55cb..2cddcda4f85f7555dced053b1fc82fd991e19943 100644 (file)
@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
        unsigned long ctx_valid, flags;
-       int cpu;
+       int cpu = smp_processor_id();
 
+       per_cpu(per_cpu_secondary_mm, cpu) = mm;
        if (unlikely(mm == &init_mm))
                return;
 
@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
         * for the first time, we must flush that context out of the
         * local TLB.
         */
-       cpu = smp_processor_id();
        if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
                cpumask_set_cpu(cpu, mm_cpumask(mm));
                __flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 }
 
 #define deactivate_mm(tsk,mm)  do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-       unsigned long flags;
-       int cpu;
-
-       spin_lock_irqsave(&mm->context.lock, flags);
-       if (!CTX_VALID(mm->context))
-               get_new_mmu_context(mm);
-       cpu = smp_processor_id();
-       if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
-               cpumask_set_cpu(cpu, mm_cpumask(mm));
-
-       load_secondary_context(mm);
-       __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-       tsb_context_switch(mm);
-       spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
index 2669370305465d7a4d1c2f9c5a7c9eae2f66474d..522b43db2ed336a5d34dee17b4e8c3b593e6ee5d 100644 (file)
@@ -20,7 +20,6 @@
 #define PIL_SMP_CALL_FUNC      1
 #define PIL_SMP_RECEIVE_SIGNAL 2
 #define PIL_SMP_CAPTURE                3
-#define PIL_SMP_CTX_NEW_VERSION        4
 #define PIL_DEVICE_IRQ         5
 #define PIL_SMP_CALL_FUNC_SNGL 6
 #define PIL_DEFERRED_PCR_WORK  7
index 8174f6cdbbbbd87af5bdbcb923352ddadb4e237e..9dca7a892978a49d234a2b9325228b2c900d1276 100644 (file)
@@ -327,6 +327,7 @@ struct vio_dev {
        int                     compat_len;
 
        u64                     dev_no;
+       u64                     id;
 
        unsigned long           channel_id;
 
index b542cc7c8d94d8fc75319091f91ba5dc25251fd9..f87265afb1759e16b735e95067044f827dff27e7 100644 (file)
@@ -909,7 +909,7 @@ static int register_services(struct ds_info *dp)
                pbuf.req.handle = cp->handle;
                pbuf.req.major = 1;
                pbuf.req.minor = 0;
-               strcpy(pbuf.req.svc_id, cp->service_id);
+               strcpy(pbuf.id_buf, cp->service_id);
 
                err = __ds_send(lp, &pbuf, msg_len);
                if (err > 0)
index 4d0248aa0928695597161d93f325a49311b43e2c..99dd133a029f06528f78b2b811d81c97fc7cf9f4 100644 (file)
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
        unsigned long page;
+       void *mondo, *p;
 
-       BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+       BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+       /* Make sure mondo block is 64byte aligned */
+       p = kzalloc(127, GFP_KERNEL);
+       if (!p) {
+               prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+               prom_halt();
+       }
+       mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+       tb->cpu_mondo_block_pa = __pa(mondo);
 
        page = get_zeroed_page(GFP_KERNEL);
        if (!page) {
-               prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+               prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
                prom_halt();
        }
 
-       tb->cpu_mondo_block_pa = __pa(page);
-       tb->cpu_list_pa = __pa(page + 64);
+       tb->cpu_list_pa = __pa(page);
 #endif
 }
 
index c9804551262cc2832ea35b0d1285dc3051319fd4..6ae1e77be0bfde27696595a1f33e939935d83321 100644 (file)
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 /* smp_64.c */
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
 
index b3bc0ac757cc11c0c77e106a447817b89d821cae..fdf31040a7dc5cc460de6d60c9724a60933b38dc 100644 (file)
@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
        preempt_enable();
 }
 
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
-       struct mm_struct *mm;
-       unsigned long flags;
-
-       clear_softint(1 << irq);
-
-       /* See if we need to allocate a new TLB context because
-        * the version of the one we are using is now out of date.
-        */
-       mm = current->active_mm;
-       if (unlikely(!mm || (mm == &init_mm)))
-               return;
-
-       spin_lock_irqsave(&mm->context.lock, flags);
-
-       if (unlikely(!CTX_VALID(mm->context)))
-               get_new_mmu_context(mm);
-
-       spin_unlock_irqrestore(&mm->context.lock, flags);
-
-       load_secondary_context(mm);
-       __flush_tlb_mm(CTX_HWBITS(mm->context),
-                      SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
-       smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
 #ifdef CONFIG_KGDB
 void kgdb_roundup_cpus(unsigned long flags)
 {
index 10689cfd0ad40e6b12ae6b148f99ac2f5c7deb64..07c0df92496034efd1262dd2b40e56ffd5486c0c 100644 (file)
@@ -455,13 +455,16 @@ __tsb_context_switch:
        .type   copy_tsb,#function
 copy_tsb:              /* %o0=old_tsb_base, %o1=old_tsb_size
                         * %o2=new_tsb_base, %o3=new_tsb_size
+                        * %o4=page_size_shift
                         */
        sethi           %uhi(TSB_PASS_BITS), %g7
        srlx            %o3, 4, %o3
-       add             %o0, %o1, %g1   /* end of old tsb */
+       add             %o0, %o1, %o1   /* end of old tsb */
        sllx            %g7, 32, %g7
        sub             %o3, 1, %o3     /* %o3 == new tsb hash mask */
 
+       mov             %o4, %g1        /* page_size_shift */
+
 661:   prefetcha       [%o0] ASI_N, #one_read
        .section        .tsb_phys_patch, "ax"
        .word           661b
@@ -486,9 +489,9 @@ copy_tsb:           /* %o0=old_tsb_base, %o1=old_tsb_size
        /* This can definitely be computed faster... */
        srlx            %o0, 4, %o5     /* Build index */
        and             %o5, 511, %o5   /* Mask index */
-       sllx            %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+       sllx            %o5, %g1, %o5   /* Put into vaddr position */
        or              %o4, %o5, %o4   /* Full VADDR. */
-       srlx            %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+       srlx            %o4, %g1, %o4   /* Shift down to create index */
        and             %o4, %o3, %o4   /* Mask with new_tsb_nents-1 */
        sllx            %o4, 4, %o4     /* Shift back up into tsb ent offset */
        TSB_STORE(%o2 + %o4, %g2)       /* Store TAG */
@@ -496,7 +499,7 @@ copy_tsb:           /* %o0=old_tsb_base, %o1=old_tsb_size
        TSB_STORE(%o2 + %o4, %g3)       /* Store TTE */
 
 80:    add             %o0, 16, %o0
-       cmp             %o0, %g1
+       cmp             %o0, %o1
        bne,pt          %xcc, 90b
         nop
 
index 7bd8f6556352d91cdc30e18d590e7421ffba6465..efe93ab4a9c0654143d52569c7157f9117e7ae37 100644 (file)
@@ -50,7 +50,7 @@ tl0_resv03e:  BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
 tl0_irq1:      TRAP_IRQ(smp_call_function_client, 1)
 tl0_irq2:      TRAP_IRQ(smp_receive_signal_client, 2)
 tl0_irq3:      TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4:      TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4:       BTRAP(0x44)
 #else
 tl0_irq1:      BTRAP(0x41)
 tl0_irq2:      BTRAP(0x42)
index f6bb857254fcfa170155d4cd8dc8cb717c5bfb97..075d38980dee394fdb32f86e130d0b3ec37ebfeb 100644 (file)
@@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
        if (!id) {
                dev_set_name(&vdev->dev, "%s", bus_id_name);
                vdev->dev_no = ~(u64)0;
+               vdev->id = ~(u64)0;
        } else if (!cfg_handle) {
                dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
                vdev->dev_no = *id;
+               vdev->id = ~(u64)0;
        } else {
                dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
                             *cfg_handle, *id);
                vdev->dev_no = *cfg_handle;
+               vdev->id = *id;
        }
 
        vdev->dev.parent = parent;
@@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
        (void) vio_create_one(hp, node, &root_vdev->dev);
 }
 
+struct vio_md_node_query {
+       const char *type;
+       u64 dev_no;
+       u64 id;
+};
+
 static int vio_md_node_match(struct device *dev, void *arg)
 {
+       struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
        struct vio_dev *vdev = to_vio_dev(dev);
 
-       if (vdev->mp == (u64) arg)
-               return 1;
+       if (vdev->dev_no != query->dev_no)
+               return 0;
+       if (vdev->id != query->id)
+               return 0;
+       if (strcmp(vdev->type, query->type))
+               return 0;
 
-       return 0;
+       return 1;
 }
 
 static void vio_remove(struct mdesc_handle *hp, u64 node)
 {
+       const char *type;
+       const u64 *id, *cfg_handle;
+       u64 a;
+       struct vio_md_node_query query;
        struct device *dev;
 
-       dev = device_find_child(&root_vdev->dev, (void *) node,
+       type = mdesc_get_property(hp, node, "device-type", NULL);
+       if (!type) {
+               type = mdesc_get_property(hp, node, "name", NULL);
+               if (!type)
+                       type = mdesc_node_name(hp, node);
+       }
+
+       query.type = type;
+
+       id = mdesc_get_property(hp, node, "id", NULL);
+       cfg_handle = NULL;
+       mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+               u64 target;
+
+               target = mdesc_arc_target(hp, a);
+               cfg_handle = mdesc_get_property(hp, target,
+                                               "cfg-handle", NULL);
+               if (cfg_handle)
+                       break;
+       }
+
+       if (!id) {
+               query.dev_no = ~(u64)0;
+               query.id = ~(u64)0;
+       } else if (!cfg_handle) {
+               query.dev_no = *id;
+               query.id = ~(u64)0;
+       } else {
+               query.dev_no = *cfg_handle;
+               query.id = *id;
+       }
+
+       dev = device_find_child(&root_vdev->dev, &query,
                                vio_md_node_match);
        if (dev) {
                printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
 
                device_unregister(dev);
                put_device(dev);
+       } else {
+               if (!id)
+                       printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+                              type);
+               else if (!cfg_handle)
+                       printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+                              type, *id);
+               else
+                       printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+                              type, *cfg_handle, *id);
        }
 }
 
index 69912d2f8b54e903ef040b346371cc27204b9d15..07c03e72d81248cebe9c3d48bbc93e1e2de455b7 100644 (file)
@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
 lib-$(CONFIG_SPARC64) += atomic_64.o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
+lib-$(CONFIG_SPARC64) += multi3.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
new file mode 100644 (file)
index 0000000..d6b6c97
--- /dev/null
@@ -0,0 +1,35 @@
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+       .text
+       .align  4
+ENTRY(__multi3) /* %o0 = u, %o1 = v */
+       mov     %o1, %g1
+       srl     %o3, 0, %g4
+       mulx    %g4, %g1, %o1
+       srlx    %g1, 0x20, %g3
+       mulx    %g3, %g4, %g5
+       sllx    %g5, 0x20, %o5
+       srl     %g1, 0, %g4
+       sub     %o1, %o5, %o5
+       srlx    %o5, 0x20, %o5
+       addcc   %g5, %o5, %g5
+       srlx    %o3, 0x20, %o5
+       mulx    %g4, %o5, %g4
+       mulx    %g3, %o5, %o5
+       sethi   %hi(0x80000000), %g3
+       addcc   %g5, %g4, %g5
+       srlx    %g5, 0x20, %g5
+       add     %g3, %g3, %g3
+       movcc   %xcc, %g0, %g3
+       addcc   %o5, %g5, %o5
+       sllx    %g4, 0x20, %g4
+       add     %o1, %g4, %o1
+       add     %o5, %g3, %g2
+       mulx    %g1, %o2, %g1
+       add     %g1, %g2, %g1
+       mulx    %o0, %o3, %o0
+       retl
+        add    %g1, %o0, %o0
+ENDPROC(__multi3)
+EXPORT_SYMBOL(__multi3)
index 0cda653ae007645fa01f05b4c40518332159a6ac..3c40ebd50f928cbbbfe69c65c35810a78b30c53d 100644 (file)
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
        }
 
        if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
-               pr_warn("hugepagesz=%llu not supported by MMU.\n",
+               hugetlb_bad_size();
+               pr_err("hugepagesz=%llu not supported by MMU.\n",
                        hugepage_size);
                goto out;
        }
@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
 
 /* get_new_mmu_context() uses "cache + 1".  */
 DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 #define MAX_CTX_NR     (1UL << CTX_NR_BITS)
 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+       unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+       unsigned long new_ver, new_ctx, old_ctx;
+       struct mm_struct *mm;
+       int cpu;
+
+       bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+       /* Reserve kernel context */
+       set_bit(0, mmu_context_bmap);
+
+       new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+       if (unlikely(new_ver == 0))
+               new_ver = CTX_FIRST_VERSION;
+       tlb_context_cache = new_ver;
+
+       /*
+        * Make sure that any new mm that are added into per_cpu_secondary_mm,
+        * are going to go through get_new_mmu_context() path.
+        */
+       mb();
+
+       /*
+        * Updated versions to current on those CPUs that had valid secondary
+        * contexts
+        */
+       for_each_online_cpu(cpu) {
+               /*
+                * If a new mm is stored after we took this mm from the array,
+                * it will go into get_new_mmu_context() path, because we
+                * already bumped the version in tlb_context_cache.
+                */
+               mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+               if (unlikely(!mm || mm == &init_mm))
+                       continue;
+
+               old_ctx = mm->context.sparc64_ctx_val;
+               if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+                       new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+                       set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+                       mm->context.sparc64_ctx_val = new_ctx;
+               }
+       }
+}
 
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
        unsigned long ctx, new_ctx;
        unsigned long orig_pgsz_bits;
-       int new_version;
 
        spin_lock(&ctx_alloc_lock);
+retry:
+       /* wrap might have happened, test again if our context became valid */
+       if (unlikely(CTX_VALID(mm->context)))
+               goto out;
        orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
        ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
        new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-       new_version = 0;
        if (new_ctx >= (1 << CTX_NR_BITS)) {
                new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
                if (new_ctx >= ctx) {
-                       int i;
-                       new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-                               CTX_FIRST_VERSION;
-                       if (new_ctx == 1)
-                               new_ctx = CTX_FIRST_VERSION;
-
-                       /* Don't call memset, for 16 entries that's just
-                        * plain silly...
-                        */
-                       mmu_context_bmap[0] = 3;
-                       mmu_context_bmap[1] = 0;
-                       mmu_context_bmap[2] = 0;
-                       mmu_context_bmap[3] = 0;
-                       for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-                               mmu_context_bmap[i + 0] = 0;
-                               mmu_context_bmap[i + 1] = 0;
-                               mmu_context_bmap[i + 2] = 0;
-                               mmu_context_bmap[i + 3] = 0;
-                       }
-                       new_version = 1;
-                       goto out;
+                       mmu_context_wrap();
+                       goto retry;
                }
        }
+       if (mm->context.sparc64_ctx_val)
+               cpumask_clear(mm_cpumask(mm));
        mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
        new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
        tlb_context_cache = new_ctx;
        mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
        spin_unlock(&ctx_alloc_lock);
-
-       if (unlikely(new_version))
-               smp_new_mmu_context_version();
 }
 
 static int numa_enabled = 1;
index bedf08b22a4773c5a104b56f7de8b44c461630c1..0d4b998c7d7b74a9e930f12d735591f14769befd 100644 (file)
@@ -496,7 +496,8 @@ retry_tsb_alloc:
                extern void copy_tsb(unsigned long old_tsb_base,
                                     unsigned long old_tsb_size,
                                     unsigned long new_tsb_base,
-                                    unsigned long new_tsb_size);
+                                    unsigned long new_tsb_size,
+                                    unsigned long page_size_shift);
                unsigned long old_tsb_base = (unsigned long) old_tsb;
                unsigned long new_tsb_base = (unsigned long) new_tsb;
 
@@ -504,7 +505,9 @@ retry_tsb_alloc:
                        old_tsb_base = __pa(old_tsb_base);
                        new_tsb_base = __pa(new_tsb_base);
                }
-               copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+               copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+                       tsb_index == MM_TSB_BASE ?
+                       PAGE_SHIFT : REAL_HPAGE_SHIFT);
        }
 
        mm->context.tsb_block[tsb_index].tsb = new_tsb;
index 5d2fd6cd31896b87a3373a59cbfc3130808c6908..fcf4d27a38fb47af30d026079022d07bb803e323 100644 (file)
@@ -971,11 +971,6 @@ xcall_capture:
        wr              %g0, (1 << PIL_SMP_CAPTURE), %set_softint
        retry
 
-       .globl          xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
-       wr              %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
-       retry
-
 #ifdef CONFIG_KGDB
        .globl          xcall_kgdb_capture
 xcall_kgdb_capture:
index 85410279beab63d611af485b27f729727aa5988d..b55fe9bf5d3e2859309cef0420fd9493a041000e 100644 (file)
@@ -534,7 +534,7 @@ static void ubd_handler(void)
                for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
                        blk_end_request(
                                (*irq_req_buffer)[count]->req,
-                               0,
+                               BLK_STS_OK,
                                (*irq_req_buffer)[count]->length
                        );
                        kfree((*irq_req_buffer)[count]);
index 4ccfacc7232ab1ace21b8466ae73e4c7d18d3fba..0efb4c9497bce7e491665688840c4ced8e2c66c6 100644 (file)
@@ -2776,10 +2776,6 @@ config COMPAT_FOR_U64_ALIGNMENT
 config SYSVIPC_COMPAT
        def_bool y
        depends on SYSVIPC
-
-config KEYS_COMPAT
-       def_bool y
-       depends on KEYS
 endif
 
 endmenu
index a70fd61095f8a73baa5eb7c486afd6ff19cd4fd1..6f077445647a6840b8318cba2584e1259e1a4db1 100644 (file)
@@ -255,6 +255,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
                break;
 
        case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
+       case 11: /* GX1 with inverted Device ID */
 #ifdef CONFIG_PCI
        {
                u32 vendor, device;
index afdfd237b59fd7722f4b595dbde17a940096ca81..f522415bf9e53690733250b31120d9d62d63a872 100644 (file)
@@ -619,6 +619,9 @@ int __init save_microcode_in_initrd_intel(void)
 
        show_saved_mc();
 
+       /* initrd is going away, clear patch ptr. */
+       intel_ucode_patch = NULL;
+
        return 0;
 }
 
index da5c0978998488c612b1495a3659742b57b55071..43e10d6fdbeda3002dcd9e2063a723ded4ca5972 100644 (file)
@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
                         */
                        rcu_irq_exit();
                        native_safe_halt();
-                       rcu_irq_enter();
                        local_irq_disable();
+                       rcu_irq_enter();
                }
        }
        if (!n.halted)
index a181ae76c71ce102f88f6a4ee9cc3233677c55e2..59ca2eea522c466937287c3e660b8f38e49c12aa 100644 (file)
@@ -780,18 +780,20 @@ out:
 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
 {
        struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
-       int j, nent = vcpu->arch.cpuid_nent;
+       struct kvm_cpuid_entry2 *ej;
+       int j = i;
+       int nent = vcpu->arch.cpuid_nent;
 
        e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
        /* when no next entry is found, the current entry[i] is reselected */
-       for (j = i + 1; ; j = (j + 1) % nent) {
-               struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
-               if (ej->function == e->function) {
-                       ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-                       return j;
-               }
-       }
-       return 0; /* silence gcc, even though control never reaches here */
+       do {
+               j = (j + 1) % nent;
+               ej = &vcpu->arch.cpuid_entries[j];
+       } while (ej->function != e->function);
+
+       ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+       return j;
 }
 
 /* find an entry with matching function, matching index (if needed), and that
index 5d3376f677949067f39c5e29fca2f3aceb7880bc..cb8225969255ec006fbff5eab76471f341d92b77 100644 (file)
@@ -3698,12 +3698,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
        return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 {
        if (unlikely(!lapic_in_kernel(vcpu) ||
                     kvm_event_needs_reinjection(vcpu)))
                return false;
 
+       if (is_guest_mode(vcpu))
+               return false;
+
        return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
@@ -3719,7 +3722,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
        if (!async)
                return false; /* *pfn has correct page already */
 
-       if (!prefault && can_do_async_pf(vcpu)) {
+       if (!prefault && kvm_can_do_async_pf(vcpu)) {
                trace_kvm_try_async_get_page(gva, gfn);
                if (kvm_find_async_pf_gfn(vcpu, gfn)) {
                        trace_kvm_async_pf_doublefault(gva, gfn);
index 27975807cc64fcae7bccbb53eecab35c3f0c64e2..330bf3a811fb07271de382b598be402c871f6496 100644 (file)
@@ -76,6 +76,7 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                             bool accessed_dirty);
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
index 9b4b5d6dcd34755acc0c09525ca93b3408ee4128..ca5d2b93385c66e3531caefae4c06614ce6b2d45 100644 (file)
@@ -2425,7 +2425,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
        if (!(vmcs12->exception_bitmap & (1u << nr)))
                return 0;
 
-       nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
+       nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
                          vmcs_read32(VM_EXIT_INTR_INFO),
                          vmcs_readl(EXIT_QUALIFICATION));
        return 1;
index a2cd0997343c485051e849551b9fc9d904177fe0..87d3cb901935f2b251857f54ed53ca73567f10ef 100644 (file)
@@ -8607,8 +8607,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
        if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
                return true;
        else
-               return !kvm_event_needs_reinjection(vcpu) &&
-                       kvm_x86_ops->interrupt_allowed(vcpu);
+               return kvm_can_do_async_pf(vcpu);
 }
 
 void kvm_arch_start_assignment(struct kvm *kvm)
index c8a32fb345cf5db7bac8d1a7b6a6c5e2b1d1a0fe..78b2e0db4fb2c0adba7f0b7ec089cf61f0f311c8 100644 (file)
@@ -52,7 +52,7 @@ BFQG_FLAG_FNS(idling)
 BFQG_FLAG_FNS(empty)
 #undef BFQG_FLAG_FNS
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
 {
        unsigned long long now;
@@ -67,7 +67,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
        bfqg_stats_clear_waiting(stats);
 }
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
                                                 struct bfq_group *curr_bfqg)
 {
@@ -81,7 +81,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
        bfqg_stats_mark_waiting(stats);
 }
 
-/* This should be called with the queue_lock held. */
+/* This should be called with the scheduler lock held. */
 static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
 {
        unsigned long long now;
@@ -203,12 +203,30 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
 
 static void bfqg_get(struct bfq_group *bfqg)
 {
-       return blkg_get(bfqg_to_blkg(bfqg));
+       bfqg->ref++;
 }
 
 void bfqg_put(struct bfq_group *bfqg)
 {
-       return blkg_put(bfqg_to_blkg(bfqg));
+       bfqg->ref--;
+
+       if (bfqg->ref == 0)
+               kfree(bfqg);
+}
+
+static void bfqg_and_blkg_get(struct bfq_group *bfqg)
+{
+       /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
+       bfqg_get(bfqg);
+
+       blkg_get(bfqg_to_blkg(bfqg));
+}
+
+void bfqg_and_blkg_put(struct bfq_group *bfqg)
+{
+       bfqg_put(bfqg);
+
+       blkg_put(bfqg_to_blkg(bfqg));
 }
 
 void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
@@ -312,7 +330,11 @@ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
-               bfqg_get(bfqg);
+               /*
+                * Make sure that bfqg and its associated blkg do not
+                * disappear before entity.
+                */
+               bfqg_and_blkg_get(bfqg);
        }
        entity->parent = bfqg->my_entity; /* NULL for root group */
        entity->sched_data = &bfqg->sched_data;
@@ -399,6 +421,8 @@ struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
                return NULL;
        }
 
+       /* see comments in bfq_bic_update_cgroup for why refcounting */
+       bfqg_get(bfqg);
        return &bfqg->pd;
 }
 
@@ -426,7 +450,7 @@ void bfq_pd_free(struct blkg_policy_data *pd)
        struct bfq_group *bfqg = pd_to_bfqg(pd);
 
        bfqg_stats_exit(&bfqg->stats);
-       return kfree(bfqg);
+       bfqg_put(bfqg);
 }
 
 void bfq_pd_reset_stats(struct blkg_policy_data *pd)
@@ -496,9 +520,10 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
  * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
  * it on the new one.  Avoid putting the entity on the old group idle tree.
  *
- * Must be called under the queue lock; the cgroup owning @bfqg must
- * not disappear (by now this just means that we are called under
- * rcu_read_lock()).
+ * Must be called under the scheduler lock, to make sure that the blkg
+ * owning @bfqg does not disappear (see comments in
+ * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
+ * objects).
  */
 void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg)
@@ -519,16 +544,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                bfq_deactivate_bfqq(bfqd, bfqq, false, false);
        else if (entity->on_st)
                bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
-       bfqg_put(bfqq_group(bfqq));
+       bfqg_and_blkg_put(bfqq_group(bfqq));
 
-       /*
-        * Here we use a reference to bfqg.  We don't need a refcounter
-        * as the cgroup reference will not be dropped, so that its
-        * destroy() callback will not be invoked.
-        */
        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;
-       bfqg_get(bfqg);
+       /* pin down bfqg and its associated blkg  */
+       bfqg_and_blkg_get(bfqg);
 
        if (bfq_bfqq_busy(bfqq)) {
                bfq_pos_tree_add_move(bfqd, bfqq);
@@ -545,8 +566,9 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  * @bic: the bic to move.
  * @blkcg: the blk-cgroup to move to.
  *
- * Move bic to blkcg, assuming that bfqd->queue is locked; the caller
- * has to make sure that the reference to cgroup is valid across the call.
+ * Move bic to blkcg, assuming that bfqd->lock is held; which makes
+ * sure that the reference to cgroup is valid across the call (see
+ * comments in bfq_bic_update_cgroup on this issue)
  *
  * NOTE: an alternative approach might have been to store the current
  * cgroup in bfqq and getting a reference to it, reducing the lookup
@@ -604,6 +626,57 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
                goto out;
 
        bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
+       /*
+        * Update blkg_path for bfq_log_* functions. We cache this
+        * path, and update it here, for the following
+        * reasons. Operations on blkg objects in blk-cgroup are
+        * protected with the request_queue lock, and not with the
+        * lock that protects the instances of this scheduler
+        * (bfqd->lock). This exposes BFQ to the following sort of
+        * race.
+        *
+        * The blkg_lookup performed in bfq_get_queue, protected
+        * through rcu, may happen to return the address of a copy of
+        * the original blkg. If this is the case, then the
+        * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
+        * the blkg, is useless: it does not prevent blk-cgroup code
+        * from destroying both the original blkg and all objects
+        * directly or indirectly referred by the copy of the
+        * blkg.
+        *
+        * On the bright side, destroy operations on a blkg invoke, as
+        * a first step, hooks of the scheduler associated with the
+        * blkg. And these hooks are executed with bfqd->lock held for
+        * BFQ. As a consequence, for any blkg associated with the
+        * request queue this instance of the scheduler is attached
+        * to, we are guaranteed that such a blkg is not destroyed, and
+        * that all the pointers it contains are consistent, while we
+        * are holding bfqd->lock. A blkg_lookup performed with
+        * bfqd->lock held then returns a fully consistent blkg, which
+        * remains consistent until this lock is held.
+        *
+        * Thanks to the last fact, and to the fact that: (1) bfqg has
+        * been obtained through a blkg_lookup in the above
+        * assignment, and (2) bfqd->lock is being held, here we can
+        * safely use the policy data for the involved blkg (i.e., the
+        * field bfqg->pd) to get to the blkg associated with bfqg,
+        * and then we can safely use any field of blkg. After we
+        * release bfqd->lock, even just getting blkg through this
+        * bfqg may cause dangling references to be traversed, as
+        * bfqg->pd may not exist any more.
+        *
+        * In view of the above facts, here we cache, in the bfqg, any
+        * blkg data we may need for this bic, and for its associated
+        * bfq_queue. As of now, we need to cache only the path of the
+        * blkg, which is used in the bfq_log_* functions.
+        *
+        * Finally, note that bfqg itself needs to be protected from
+        * destruction on the blkg_free of the original blkg (which
+        * invokes bfq_pd_free). We use an additional private
+        * refcounter for bfqg, to let it disappear only after no
+        * bfq_queue refers to it any longer.
+        */
+       blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
        bic->blkcg_serial_nr = serial_nr;
 out:
        rcu_read_unlock();
@@ -640,8 +713,6 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
  * @bfqd: the device data structure with the root group.
  * @bfqg: the group to move from.
  * @st: the service tree with the entities.
- *
- * Needs queue_lock to be taken and reference to be valid over the call.
  */
 static void bfq_reparent_active_entities(struct bfq_data *bfqd,
                                         struct bfq_group *bfqg,
@@ -692,8 +763,7 @@ void bfq_pd_offline(struct blkg_policy_data *pd)
                /*
                 * The idle tree may still contain bfq_queues belonging
                 * to exited task because they never migrated to a different
-                * cgroup from the one being destroyed now.  No one else
-                * can access them so it's safe to act without any lock.
+                * cgroup from the one being destroyed now.
                 */
                bfq_flush_idle_tree(st);
 
index 08ce45096350561896fb6c8959c5c04603e98555..ed93da2462abbc94ab75c10a0d9c7ce251f3f0fb 100644 (file)
@@ -3665,7 +3665,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
 
        kmem_cache_free(bfq_pool, bfqq);
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
-       bfqg_put(bfqg);
+       bfqg_and_blkg_put(bfqg);
 #endif
 }
 
index ae783c06dfd9ca73c9a3832e41e5617c510bdaf5..5c3bf986149215b3d98f753548cba9b4880f6e62 100644 (file)
@@ -759,6 +759,12 @@ struct bfq_group {
        /* must be the first member */
        struct blkg_policy_data pd;
 
+       /* cached path for this blkg (see comments in bfq_bic_update_cgroup) */
+       char blkg_path[128];
+
+       /* reference counter (see comments in bfq_bic_update_cgroup) */
+       int ref;
+
        struct bfq_entity entity;
        struct bfq_sched_data sched_data;
 
@@ -838,7 +844,7 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
-void bfqg_put(struct bfq_group *bfqg);
+void bfqg_and_blkg_put(struct bfq_group *bfqg);
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 extern struct cftype bfq_blkcg_legacy_files[];
@@ -910,20 +916,13 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
 
 #define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do {                    \
-       char __pbuf[128];                                               \
-                                                                       \
-       blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \
-       blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid, \
+       blk_add_trace_msg((bfqd)->queue, "bfq%d%c %s " fmt, (bfqq)->pid,\
                        bfq_bfqq_sync((bfqq)) ? 'S' : 'A',              \
-                         __pbuf, ##args);                              \
+                       bfqq_group(bfqq)->blkg_path, ##args);           \
 } while (0)
 
-#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {                    \
-       char __pbuf[128];                                               \
-                                                                       \
-       blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf));          \
-       blk_add_trace_msg((bfqd)->queue, "%s " fmt, __pbuf, ##args);    \
-} while (0)
+#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) \
+       blk_add_trace_msg((bfqd)->queue, "%s " fmt, (bfqg)->blkg_path, ##args)
 
 #else /* CONFIG_BFQ_GROUP_IOSCHED */
 
index 5384713d48bc9929e2a4dc8b1b9f22b2e1c5bcd3..b8a3a65f73641a591ab7918b56c3a003594bee89 100644 (file)
@@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio)
        if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
                return false;
 
+       if (!bio_sectors(bio))
+               return false;
+
        /* Already protected? */
        if (bio_integrity(bio))
                return false;
@@ -221,7 +224,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
  * @bio:       bio to generate/verify integrity metadata for
  * @proc_fn:   Pointer to the relevant processing function
  */
-static int bio_integrity_process(struct bio *bio,
+static blk_status_t bio_integrity_process(struct bio *bio,
                                 integrity_processing_fn *proc_fn)
 {
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
@@ -229,7 +232,7 @@ static int bio_integrity_process(struct bio *bio,
        struct bvec_iter bviter;
        struct bio_vec bv;
        struct bio_integrity_payload *bip = bio_integrity(bio);
-       unsigned int ret = 0;
+       blk_status_t ret = BLK_STS_OK;
        void *prot_buf = page_address(bip->bip_vec->bv_page) +
                bip->bip_vec->bv_offset;
 
@@ -366,7 +369,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
        struct bio *bio = bip->bip_bio;
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-       bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn);
+       bio->bi_status = bio_integrity_process(bio, bi->profile->verify_fn);
 
        /* Restore original bio completion handler */
        bio->bi_end_io = bip->bip_end_io;
@@ -395,7 +398,7 @@ void bio_integrity_endio(struct bio *bio)
         * integrity metadata.  Restore original bio end_io handler
         * and run it.
         */
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                bio->bi_end_io = bip->bip_end_io;
                bio_endio(bio);
 
index 888e7801c6381edd8d995503643917b2f452282e..7a5c8ed27f42e4ae50a5aae64a1c4a580513c114 100644 (file)
@@ -309,8 +309,8 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 {
        struct bio *parent = bio->bi_private;
 
-       if (!parent->bi_error)
-               parent->bi_error = bio->bi_error;
+       if (!parent->bi_status)
+               parent->bi_status = bio->bi_status;
        bio_put(bio);
        return parent;
 }
@@ -918,7 +918,7 @@ static void submit_bio_wait_endio(struct bio *bio)
 {
        struct submit_bio_ret *ret = bio->bi_private;
 
-       ret->error = bio->bi_error;
+       ret->error = blk_status_to_errno(bio->bi_status);
        complete(&ret->event);
 }
 
@@ -1818,7 +1818,7 @@ again:
 
        if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
                trace_block_bio_complete(bdev_get_queue(bio->bi_bdev),
-                                        bio, bio->bi_error);
+                                        bio, bio->bi_status);
                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
        }
 
index a7421b772d0e0e3f4b8372fbc11aefd83763d30a..8592409db2720a324fb256c05a84071188167912 100644 (file)
@@ -129,11 +129,69 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_rq_init);
 
+static const struct {
+       int             errno;
+       const char      *name;
+} blk_errors[] = {
+       [BLK_STS_OK]            = { 0,          "" },
+       [BLK_STS_NOTSUPP]       = { -EOPNOTSUPP, "operation not supported" },
+       [BLK_STS_TIMEOUT]       = { -ETIMEDOUT, "timeout" },
+       [BLK_STS_NOSPC]         = { -ENOSPC,    "critical space allocation" },
+       [BLK_STS_TRANSPORT]     = { -ENOLINK,   "recoverable transport" },
+       [BLK_STS_TARGET]        = { -EREMOTEIO, "critical target" },
+       [BLK_STS_NEXUS]         = { -EBADE,     "critical nexus" },
+       [BLK_STS_MEDIUM]        = { -ENODATA,   "critical medium" },
+       [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
+       [BLK_STS_RESOURCE]      = { -ENOMEM,    "kernel resource" },
+
+       /* device mapper special case, should not leak out: */
+       [BLK_STS_DM_REQUEUE]    = { -EREMCHG, "dm internal retry" },
+
+       /* everything else not covered above: */
+       [BLK_STS_IOERR]         = { -EIO,       "I/O" },
+};
+
+blk_status_t errno_to_blk_status(int errno)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
+               if (blk_errors[i].errno == errno)
+                       return (__force blk_status_t)i;
+       }
+
+       return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(errno_to_blk_status);
+
+int blk_status_to_errno(blk_status_t status)
+{
+       int idx = (__force int)status;
+
+       if (WARN_ON_ONCE(idx > ARRAY_SIZE(blk_errors)))
+               return -EIO;
+       return blk_errors[idx].errno;
+}
+EXPORT_SYMBOL_GPL(blk_status_to_errno);
+
+static void print_req_error(struct request *req, blk_status_t status)
+{
+       int idx = (__force int)status;
+
+       if (WARN_ON_ONCE(idx > ARRAY_SIZE(blk_errors)))
+               return;
+
+       printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
+                          __func__, blk_errors[idx].name, req->rq_disk ?
+                          req->rq_disk->disk_name : "?",
+                          (unsigned long long)blk_rq_pos(req));
+}
+
 static void req_bio_endio(struct request *rq, struct bio *bio,
-                         unsigned int nbytes, int error)
+                         unsigned int nbytes, blk_status_t error)
 {
        if (error)
-               bio->bi_error = error;
+               bio->bi_status = error;
 
        if (unlikely(rq->rq_flags & RQF_QUIET))
                bio_set_flag(bio, BIO_QUIET);
@@ -1668,7 +1726,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
        blk_queue_split(q, &bio, q->bio_split);
 
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return BLK_QC_T_NONE;
        }
@@ -1726,7 +1784,10 @@ get_rq:
        req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
        if (IS_ERR(req)) {
                __wbt_done(q->rq_wb, wb_acct);
-               bio->bi_error = PTR_ERR(req);
+               if (PTR_ERR(req) == -ENOMEM)
+                       bio->bi_status = BLK_STS_RESOURCE;
+               else
+                       bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                goto out_unlock;
        }
@@ -1881,7 +1942,7 @@ generic_make_request_checks(struct bio *bio)
 {
        struct request_queue *q;
        int nr_sectors = bio_sectors(bio);
-       int err = -EIO;
+       blk_status_t status = BLK_STS_IOERR;
        char b[BDEVNAME_SIZE];
        struct hd_struct *part;
 
@@ -1924,7 +1985,7 @@ generic_make_request_checks(struct bio *bio)
            !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
                bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
                if (!nr_sectors) {
-                       err = 0;
+                       status = BLK_STS_OK;
                        goto end_io;
                }
        }
@@ -1976,9 +2037,9 @@ generic_make_request_checks(struct bio *bio)
        return true;
 
 not_supported:
-       err = -EOPNOTSUPP;
+       status = BLK_STS_NOTSUPP;
 end_io:
-       bio->bi_error = err;
+       bio->bi_status = status;
        bio_endio(bio);
        return false;
 }
@@ -2183,29 +2244,29 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  * @q:  the queue to submit the request
  * @rq: the request being queued
  */
-int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
        unsigned long flags;
        int where = ELEVATOR_INSERT_BACK;
 
        if (blk_cloned_rq_check_limits(q, rq))
-               return -EIO;
+               return BLK_STS_IOERR;
 
        if (rq->rq_disk &&
            should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
-               return -EIO;
+               return BLK_STS_IOERR;
 
        if (q->mq_ops) {
                if (blk_queue_io_stat(q))
                        blk_account_io_start(rq, true);
                blk_mq_sched_insert_request(rq, false, true, false, false);
-               return 0;
+               return BLK_STS_OK;
        }
 
        spin_lock_irqsave(q->queue_lock, flags);
        if (unlikely(blk_queue_dying(q))) {
                spin_unlock_irqrestore(q->queue_lock, flags);
-               return -ENODEV;
+               return BLK_STS_IOERR;
        }
 
        /*
@@ -2222,7 +2283,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
                __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
-       return 0;
+       return BLK_STS_OK;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
@@ -2456,15 +2517,14 @@ struct request *blk_peek_request(struct request_queue *q)
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
-                       int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
-
                        rq->rq_flags |= RQF_QUIET;
                        /*
                         * Mark this request as started so we don't trigger
                         * any debug logic in the end I/O path.
                         */
                        blk_start_request(rq);
-                       __blk_end_request_all(rq, err);
+                       __blk_end_request_all(rq, ret == BLKPREP_INVALID ?
+                                       BLK_STS_TARGET : BLK_STS_IOERR);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                        break;
@@ -2553,7 +2613,7 @@ EXPORT_SYMBOL(blk_fetch_request);
 /**
  * blk_update_request - Special helper function for request stacking drivers
  * @req:      the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete @req
  *
  * Description:
@@ -2572,49 +2632,19 @@ EXPORT_SYMBOL(blk_fetch_request);
  *     %false - this request doesn't have any more data
  *     %true  - this request has more data
  **/
-bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
+bool blk_update_request(struct request *req, blk_status_t error,
+               unsigned int nr_bytes)
 {
        int total_bytes;
 
-       trace_block_rq_complete(req, error, nr_bytes);
+       trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
 
        if (!req->bio)
                return false;
 
-       if (error && !blk_rq_is_passthrough(req) &&
-           !(req->rq_flags & RQF_QUIET)) {
-               char *error_type;
-
-               switch (error) {
-               case -ENOLINK:
-                       error_type = "recoverable transport";
-                       break;
-               case -EREMOTEIO:
-                       error_type = "critical target";
-                       break;
-               case -EBADE:
-                       error_type = "critical nexus";
-                       break;
-               case -ETIMEDOUT:
-                       error_type = "timeout";
-                       break;
-               case -ENOSPC:
-                       error_type = "critical space allocation";
-                       break;
-               case -ENODATA:
-                       error_type = "critical medium";
-                       break;
-               case -EIO:
-               default:
-                       error_type = "I/O";
-                       break;
-               }
-               printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
-                                  __func__, error_type, req->rq_disk ?
-                                  req->rq_disk->disk_name : "?",
-                                  (unsigned long long)blk_rq_pos(req));
-
-       }
+       if (unlikely(error && !blk_rq_is_passthrough(req) &&
+                    !(req->rq_flags & RQF_QUIET)))
+               print_req_error(req, error);
 
        blk_account_io_completion(req, nr_bytes);
 
@@ -2680,7 +2710,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
 
-static bool blk_update_bidi_request(struct request *rq, int error,
+static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
                                    unsigned int nr_bytes,
                                    unsigned int bidi_bytes)
 {
@@ -2721,7 +2751,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
 /*
  * queue lock must be held
  */
-void blk_finish_request(struct request *req, int error)
+void blk_finish_request(struct request *req, blk_status_t error)
 {
        struct request_queue *q = req->q;
 
@@ -2758,7 +2788,7 @@ EXPORT_SYMBOL(blk_finish_request);
 /**
  * blk_end_bidi_request - Complete a bidi request
  * @rq:         the request to complete
- * @error:      %0 for success, < %0 for error
+ * @error:      block status code
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -2772,7 +2802,7 @@ EXPORT_SYMBOL(blk_finish_request);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-static bool blk_end_bidi_request(struct request *rq, int error,
+static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
                                 unsigned int nr_bytes, unsigned int bidi_bytes)
 {
        struct request_queue *q = rq->q;
@@ -2791,7 +2821,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
 /**
  * __blk_end_bidi_request - Complete a bidi request with queue lock held
  * @rq:         the request to complete
- * @error:      %0 for success, < %0 for error
+ * @error:      block status code
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -2803,7 +2833,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-static bool __blk_end_bidi_request(struct request *rq, int error,
+static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
                                   unsigned int nr_bytes, unsigned int bidi_bytes)
 {
        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
@@ -2817,7 +2847,7 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
 /**
  * blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -2828,7 +2858,8 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool blk_end_request(struct request *rq, blk_status_t error,
+               unsigned int nr_bytes)
 {
        return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
@@ -2837,12 +2868,12 @@ EXPORT_SYMBOL(blk_end_request);
 /**
  * blk_end_request_all - Helper function for drives to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  *
  * Description:
  *     Completely finish @rq.
  */
-void blk_end_request_all(struct request *rq, int error)
+void blk_end_request_all(struct request *rq, blk_status_t error)
 {
        bool pending;
        unsigned int bidi_bytes = 0;
@@ -2858,7 +2889,7 @@ EXPORT_SYMBOL(blk_end_request_all);
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -2868,7 +2899,8 @@ EXPORT_SYMBOL(blk_end_request_all);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool __blk_end_request(struct request *rq, blk_status_t error,
+               unsigned int nr_bytes)
 {
        return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
@@ -2877,12 +2909,12 @@ EXPORT_SYMBOL(__blk_end_request);
 /**
  * __blk_end_request_all - Helper function for drives to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error:    block status code
  *
  * Description:
  *     Completely finish @rq.  Must be called with queue lock held.
  */
-void __blk_end_request_all(struct request *rq, int error)
+void __blk_end_request_all(struct request *rq, blk_status_t error)
 {
        bool pending;
        unsigned int bidi_bytes = 0;
@@ -2898,7 +2930,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
 /**
  * __blk_end_request_cur - Helper function to finish the current request chunk.
  * @rq: the request to finish the current chunk for
- * @error: %0 for success, < %0 for error
+ * @error:    block status code
  *
  * Description:
  *     Complete the current consecutively mapped chunk from @rq.  Must
@@ -2908,7 +2940,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  */
-bool __blk_end_request_cur(struct request *rq, int error)
+bool __blk_end_request_cur(struct request *rq, blk_status_t error)
 {
        return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
@@ -3249,7 +3281,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                 * Short-circuit if @q is dead
                 */
                if (unlikely(blk_queue_dying(q))) {
-                       __blk_end_request_all(rq, -ENODEV);
+                       __blk_end_request_all(rq, BLK_STS_IOERR);
                        continue;
                }
 
index a9451e3b858715cb209a0b4e949ec591ca3d2d88..5c0f3dc446dc7caa1c0cef086740744c1eaafea9 100644 (file)
@@ -16,7 +16,7 @@
  * @rq: request to complete
  * @error: end I/O status of the request
  */
-static void blk_end_sync_rq(struct request *rq, int error)
+static void blk_end_sync_rq(struct request *rq, blk_status_t error)
 {
        struct completion *waiting = rq->end_io_data;
 
@@ -69,7 +69,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 
        if (unlikely(blk_queue_dying(q))) {
                rq->rq_flags |= RQF_QUIET;
-               __blk_end_request_all(rq, -ENXIO);
+               __blk_end_request_all(rq, BLK_STS_IOERR);
                spin_unlock_irq(q->queue_lock);
                return;
        }
index c4e0880b54bbf4fee779c8e724c1175221ef1830..a572b47fa05903f45ec907ae514cde412efb44f5 100644 (file)
@@ -164,7 +164,7 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
  */
 static bool blk_flush_complete_seq(struct request *rq,
                                   struct blk_flush_queue *fq,
-                                  unsigned int seq, int error)
+                                  unsigned int seq, blk_status_t error)
 {
        struct request_queue *q = rq->q;
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
@@ -216,7 +216,7 @@ static bool blk_flush_complete_seq(struct request *rq,
        return kicked | queued;
 }
 
-static void flush_end_io(struct request *flush_rq, int error)
+static void flush_end_io(struct request *flush_rq, blk_status_t error)
 {
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
@@ -341,7 +341,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
        return blk_flush_queue_rq(flush_rq, false);
 }
 
-static void flush_data_end_io(struct request *rq, int error)
+static void flush_data_end_io(struct request *rq, blk_status_t error)
 {
        struct request_queue *q = rq->q;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
@@ -382,7 +382,7 @@ static void flush_data_end_io(struct request *rq, int error)
                blk_run_queue_async(q);
 }
 
-static void mq_flush_data_end_io(struct request *rq, int error)
+static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 {
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx;
index 0f891a9aff4d67b67f005af2b43d9860891f206a..feb30570eaf561c90dd1b442d6cb785bb4a61a55 100644 (file)
@@ -384,9 +384,9 @@ static struct kobj_type integrity_ktype = {
        .sysfs_ops      = &integrity_ops,
 };
 
-static int blk_integrity_nop_fn(struct blk_integrity_iter *iter)
+static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
 {
-       return 0;
+       return BLK_STS_OK;
 }
 
 static const struct blk_integrity_profile nop_profile = {
index 803aed4d72216f5e23a585cb6d027115fa603ff9..9edebbdce0bdfc22ee9ba8319bb3cc58923bbfa7 100644 (file)
@@ -114,10 +114,12 @@ static ssize_t queue_state_write(void *data, const char __user *buf,
                blk_mq_run_hw_queues(q, true);
        } else if (strcmp(op, "start") == 0) {
                blk_mq_start_stopped_hw_queues(q, true);
+       } else if (strcmp(op, "kick") == 0) {
+               blk_mq_kick_requeue_list(q);
        } else {
                pr_err("%s: unsupported operation '%s'\n", __func__, op);
 inval:
-               pr_err("%s: use either 'run' or 'start'\n", __func__);
+               pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
                return -EINVAL;
        }
        return count;
@@ -267,6 +269,14 @@ static const char *const rqf_name[] = {
 };
 #undef RQF_NAME
 
+#define RQAF_NAME(name) [REQ_ATOM_##name] = #name
+static const char *const rqaf_name[] = {
+       RQAF_NAME(COMPLETE),
+       RQAF_NAME(STARTED),
+       RQAF_NAME(POLL_SLEPT),
+};
+#undef RQAF_NAME
+
 int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
 {
        const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
@@ -283,6 +293,8 @@ int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
        seq_puts(m, ", .rq_flags=");
        blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
                       ARRAY_SIZE(rqf_name));
+       seq_puts(m, ", .atomic_flags=");
+       blk_flags_show(m, rq->atomic_flags, rqaf_name, ARRAY_SIZE(rqaf_name));
        seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
                   rq->internal_tag);
        if (mq_ops->show_rq)
@@ -298,6 +310,37 @@ int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
 }
 EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);
 
+static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
+       __acquires(&q->requeue_lock)
+{
+       struct request_queue *q = m->private;
+
+       spin_lock_irq(&q->requeue_lock);
+       return seq_list_start(&q->requeue_list, *pos);
+}
+
+static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct request_queue *q = m->private;
+
+       return seq_list_next(v, &q->requeue_list, pos);
+}
+
+static void queue_requeue_list_stop(struct seq_file *m, void *v)
+       __releases(&q->requeue_lock)
+{
+       struct request_queue *q = m->private;
+
+       spin_unlock_irq(&q->requeue_lock);
+}
+
+static const struct seq_operations queue_requeue_list_seq_ops = {
+       .start  = queue_requeue_list_start,
+       .next   = queue_requeue_list_next,
+       .stop   = queue_requeue_list_stop,
+       .show   = blk_mq_debugfs_rq_show,
+};
+
 static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&hctx->lock)
 {
@@ -329,6 +372,36 @@ static const struct seq_operations hctx_dispatch_seq_ops = {
        .show   = blk_mq_debugfs_rq_show,
 };
 
+struct show_busy_params {
+       struct seq_file         *m;
+       struct blk_mq_hw_ctx    *hctx;
+};
+
+/*
+ * Note: the state of a request may change while this function is in progress,
+ * e.g. due to a concurrent blk_mq_finish_request() call.
+ */
+static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
+{
+       const struct show_busy_params *params = data;
+
+       if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
+           test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+               __blk_mq_debugfs_rq_show(params->m,
+                                        list_entry_rq(&rq->queuelist));
+}
+
+static int hctx_busy_show(void *data, struct seq_file *m)
+{
+       struct blk_mq_hw_ctx *hctx = data;
+       struct show_busy_params params = { .m = m, .hctx = hctx };
+
+       blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
+                               &params);
+
+       return 0;
+}
+
 static int hctx_ctx_map_show(void *data, struct seq_file *m)
 {
        struct blk_mq_hw_ctx *hctx = data;
@@ -655,6 +728,7 @@ const struct file_operations blk_mq_debugfs_fops = {
 
 static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
        {"poll_stat", 0400, queue_poll_stat_show},
+       {"requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops},
        {"state", 0600, queue_state_show, queue_state_write},
        {},
 };
@@ -663,6 +737,7 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
        {"state", 0400, hctx_state_show},
        {"flags", 0400, hctx_flags_show},
        {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
+       {"busy", 0400, hctx_busy_show},
        {"ctx_map", 0400, hctx_ctx_map_show},
        {"tags", 0400, hctx_tags_show},
        {"tags_bitmap", 0400, hctx_tags_bitmap_show},
index 1f5b692526ae1a7199ee9bbaef305c4b0a42e696..c4e2afb9d12db87eb2043862cbfb3929abb8d404 100644 (file)
@@ -221,19 +221,71 @@ bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
 
+/*
+ * Reverse check our software queue for entries that we could potentially
+ * merge with. Currently includes a hand-wavy stop count of 8, to not spend
+ * too much time checking for merges.
+ */
+static bool blk_mq_attempt_merge(struct request_queue *q,
+                                struct blk_mq_ctx *ctx, struct bio *bio)
+{
+       struct request *rq;
+       int checked = 8;
+
+       list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
+               bool merged = false;
+
+               if (!checked--)
+                       break;
+
+               if (!blk_rq_merge_ok(rq, bio))
+                       continue;
+
+               switch (blk_try_merge(rq, bio)) {
+               case ELEVATOR_BACK_MERGE:
+                       if (blk_mq_sched_allow_merge(q, rq, bio))
+                               merged = bio_attempt_back_merge(q, rq, bio);
+                       break;
+               case ELEVATOR_FRONT_MERGE:
+                       if (blk_mq_sched_allow_merge(q, rq, bio))
+                               merged = bio_attempt_front_merge(q, rq, bio);
+                       break;
+               case ELEVATOR_DISCARD_MERGE:
+                       merged = bio_attempt_discard_merge(q, rq, bio);
+                       break;
+               default:
+                       continue;
+               }
+
+               if (merged)
+                       ctx->rq_merged++;
+               return merged;
+       }
+
+       return false;
+}
+
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
        struct elevator_queue *e = q->elevator;
+       struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+       bool ret = false;
 
-       if (e->type->ops.mq.bio_merge) {
-               struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
-               struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-
+       if (e && e->type->ops.mq.bio_merge) {
                blk_mq_put_ctx(ctx);
                return e->type->ops.mq.bio_merge(hctx, bio);
        }
 
-       return false;
+       if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
+               /* default per sw-queue merge */
+               spin_lock(&ctx->lock);
+               ret = blk_mq_attempt_merge(q, ctx, bio);
+               spin_unlock(&ctx->lock);
+       }
+
+       blk_mq_put_ctx(ctx);
+       return ret;
 }
 
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
index edafb5383b7bbdedfd5365ed38f9a5c373ec96ab..b87e5be5db8cfb49e7154dcd68e53580b156c9c3 100644 (file)
@@ -38,9 +38,7 @@ int blk_mq_sched_init(struct request_queue *q);
 static inline bool
 blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 {
-       struct elevator_queue *e = q->elevator;
-
-       if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
+       if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return false;
 
        return __blk_mq_sched_bio_merge(q, bio);
index 1bcccedcc74f0b48f58363640acb1eae04704800..359d2dc0d4148a4cd15652d61fcc0ce156a98c36 100644 (file)
@@ -394,7 +394,7 @@ void blk_mq_free_request(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
-inline void __blk_mq_end_request(struct request *rq, int error)
+inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 {
        blk_account_io_done(rq);
 
@@ -409,7 +409,7 @@ inline void __blk_mq_end_request(struct request *rq, int error)
 }
 EXPORT_SYMBOL(__blk_mq_end_request);
 
-void blk_mq_end_request(struct request *rq, int error)
+void blk_mq_end_request(struct request *rq, blk_status_t error)
 {
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
                BUG();
@@ -753,50 +753,6 @@ static void blk_mq_timeout_work(struct work_struct *work)
        blk_queue_exit(q);
 }
 
-/*
- * Reverse check our software queue for entries that we could potentially
- * merge with. Currently includes a hand-wavy stop count of 8, to not spend
- * too much time checking for merges.
- */
-static bool blk_mq_attempt_merge(struct request_queue *q,
-                                struct blk_mq_ctx *ctx, struct bio *bio)
-{
-       struct request *rq;
-       int checked = 8;
-
-       list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
-               bool merged = false;
-
-               if (!checked--)
-                       break;
-
-               if (!blk_rq_merge_ok(rq, bio))
-                       continue;
-
-               switch (blk_try_merge(rq, bio)) {
-               case ELEVATOR_BACK_MERGE:
-                       if (blk_mq_sched_allow_merge(q, rq, bio))
-                               merged = bio_attempt_back_merge(q, rq, bio);
-                       break;
-               case ELEVATOR_FRONT_MERGE:
-                       if (blk_mq_sched_allow_merge(q, rq, bio))
-                               merged = bio_attempt_front_merge(q, rq, bio);
-                       break;
-               case ELEVATOR_DISCARD_MERGE:
-                       merged = bio_attempt_discard_merge(q, rq, bio);
-                       break;
-               default:
-                       continue;
-               }
-
-               if (merged)
-                       ctx->rq_merged++;
-               return merged;
-       }
-
-       return false;
-}
-
 struct flush_busy_ctx_data {
        struct blk_mq_hw_ctx *hctx;
        struct list_head *list;
@@ -968,7 +924,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 {
        struct blk_mq_hw_ctx *hctx;
        struct request *rq;
-       int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
+       int errors, queued;
 
        if (list_empty(list))
                return false;
@@ -979,6 +935,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
        errors = queued = 0;
        do {
                struct blk_mq_queue_data bd;
+               blk_status_t ret;
 
                rq = list_first_entry(list, struct request, queuelist);
                if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
@@ -1019,25 +976,20 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
                }
 
                ret = q->mq_ops->queue_rq(hctx, &bd);
-               switch (ret) {
-               case BLK_MQ_RQ_QUEUE_OK:
-                       queued++;
-                       break;
-               case BLK_MQ_RQ_QUEUE_BUSY:
+               if (ret == BLK_STS_RESOURCE) {
                        blk_mq_put_driver_tag_hctx(hctx, rq);
                        list_add(&rq->queuelist, list);
                        __blk_mq_requeue_request(rq);
                        break;
-               default:
-                       pr_err("blk-mq: bad return on queue: %d\n", ret);
-               case BLK_MQ_RQ_QUEUE_ERROR:
+               }
+
+               if (unlikely(ret != BLK_STS_OK)) {
                        errors++;
-                       blk_mq_end_request(rq, -EIO);
-                       break;
+                       blk_mq_end_request(rq, BLK_STS_IOERR);
+                       continue;
                }
 
-               if (ret == BLK_MQ_RQ_QUEUE_BUSY)
-                       break;
+               queued++;
        } while (!list_empty(list));
 
        hctx->dispatched[queued_to_index(queued)]++;
@@ -1075,7 +1027,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
                 * - blk_mq_run_hw_queue() checks whether or not a queue has
                 *   been stopped before rerunning a queue.
                 * - Some but not all block drivers stop a queue before
-                *   returning BLK_MQ_RQ_QUEUE_BUSY. Two exceptions are scsi-mq
+                *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
                 *   and dm-rq.
                 */
                if (!blk_mq_sched_needs_restart(hctx) &&
@@ -1427,30 +1379,13 @@ static inline bool hctx_allow_merges(struct blk_mq_hw_ctx *hctx)
                !blk_queue_nomerges(hctx->queue);
 }
 
-static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
-                                        struct blk_mq_ctx *ctx,
-                                        struct request *rq, struct bio *bio)
+static inline void blk_mq_queue_io(struct blk_mq_hw_ctx *hctx,
+                                  struct blk_mq_ctx *ctx,
+                                  struct request *rq)
 {
-       if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
-               blk_mq_bio_to_request(rq, bio);
-               spin_lock(&ctx->lock);
-insert_rq:
-               __blk_mq_insert_request(hctx, rq, false);
-               spin_unlock(&ctx->lock);
-               return false;
-       } else {
-               struct request_queue *q = hctx->queue;
-
-               spin_lock(&ctx->lock);
-               if (!blk_mq_attempt_merge(q, ctx, bio)) {
-                       blk_mq_bio_to_request(rq, bio);
-                       goto insert_rq;
-               }
-
-               spin_unlock(&ctx->lock);
-               __blk_mq_finish_request(hctx, ctx, rq);
-               return true;
-       }
+       spin_lock(&ctx->lock);
+       __blk_mq_insert_request(hctx, rq, false);
+       spin_unlock(&ctx->lock);
 }
 
 static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@ -1461,22 +1396,28 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
        return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
-                                     bool may_sleep)
+static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+                                       struct request *rq,
+                                       blk_qc_t *cookie, bool may_sleep)
 {
        struct request_queue *q = rq->q;
        struct blk_mq_queue_data bd = {
                .rq = rq,
                .last = true,
        };
-       struct blk_mq_hw_ctx *hctx;
        blk_qc_t new_cookie;
-       int ret;
+       blk_status_t ret;
+       bool run_queue = true;
+
+       if (blk_mq_hctx_stopped(hctx)) {
+               run_queue = false;
+               goto insert;
+       }
 
        if (q->elevator)
                goto insert;
 
-       if (!blk_mq_get_driver_tag(rq, &hctx, false))
+       if (!blk_mq_get_driver_tag(rq, NULL, false))
                goto insert;
 
        new_cookie = request_to_qc_t(hctx, rq);
@@ -1487,20 +1428,21 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
         * would have done
         */
        ret = q->mq_ops->queue_rq(hctx, &bd);
-       if (ret == BLK_MQ_RQ_QUEUE_OK) {
+       switch (ret) {
+       case BLK_STS_OK:
                *cookie = new_cookie;
                return;
-       }
-
-       if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+       case BLK_STS_RESOURCE:
+               __blk_mq_requeue_request(rq);
+               goto insert;
+       default:
                *cookie = BLK_QC_T_NONE;
-               blk_mq_end_request(rq, -EIO);
+               blk_mq_end_request(rq, ret);
                return;
        }
 
-       __blk_mq_requeue_request(rq);
 insert:
-       blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
+       blk_mq_sched_insert_request(rq, false, run_queue, false, may_sleep);
 }
 
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
@@ -1508,7 +1450,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 {
        if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
                rcu_read_lock();
-               __blk_mq_try_issue_directly(rq, cookie, false);
+               __blk_mq_try_issue_directly(hctx, rq, cookie, false);
                rcu_read_unlock();
        } else {
                unsigned int srcu_idx;
@@ -1516,7 +1458,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
                might_sleep();
 
                srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
-               __blk_mq_try_issue_directly(rq, cookie, true);
+               __blk_mq_try_issue_directly(hctx, rq, cookie, true);
                srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
        }
 }
@@ -1619,9 +1561,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
                blk_mq_put_ctx(data.ctx);
 
-               if (same_queue_rq)
+               if (same_queue_rq) {
+                       data.hctx = blk_mq_map_queue(q,
+                                       same_queue_rq->mq_ctx->cpu);
                        blk_mq_try_issue_directly(data.hctx, same_queue_rq,
                                        &cookie);
+               }
        } else if (q->nr_hw_queues > 1 && is_sync) {
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
@@ -1630,11 +1575,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_sched_insert_request(rq, false, true, true, true);
-       } else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+       } else {
                blk_mq_put_ctx(data.ctx);
+               blk_mq_bio_to_request(rq, bio);
+               blk_mq_queue_io(data.hctx, data.ctx, rq);
                blk_mq_run_hw_queue(data.hctx, true);
-       } else
-               blk_mq_put_ctx(data.ctx);
+       }
 
        return cookie;
 }
index fc13dd0c6e3956a84913d9e71132c0f321a67280..a7285bf2831c7bdbb89b753fccb198f8640e8780 100644 (file)
@@ -27,6 +27,13 @@ static int throtl_quantum = 32;
 #define MIN_THROTL_IOPS (10)
 #define DFL_LATENCY_TARGET (-1L)
 #define DFL_IDLE_THRESHOLD (0)
+#define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
+#define LATENCY_FILTERED_SSD (0)
+/*
+ * For HD, very small latency comes from sequential IO. Such IO is helpless to
+ * help determine if its IO is impacted by others, hence we ignore the IO
+ */
+#define LATENCY_FILTERED_HD (1000L) /* 1ms */
 
 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
 
@@ -212,6 +219,7 @@ struct throtl_data
        struct avg_latency_bucket avg_buckets[LATENCY_BUCKET_SIZE];
        struct latency_bucket __percpu *latency_buckets;
        unsigned long last_calculate_time;
+       unsigned long filtered_latency;
 
        bool track_bio_latency;
 };
@@ -698,7 +706,7 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
                                          unsigned long expires)
 {
-       unsigned long max_expire = jiffies + 8 * sq_to_tg(sq)->td->throtl_slice;
+       unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
 
        /*
         * Since we are adjusting the throttle limit dynamically, the sleep
@@ -2281,7 +2289,7 @@ void blk_throtl_bio_endio(struct bio *bio)
                throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
                        bio_op(bio), lat);
 
-       if (tg->latency_target) {
+       if (tg->latency_target && lat >= tg->td->filtered_latency) {
                int bucket;
                unsigned int threshold;
 
@@ -2417,14 +2425,20 @@ void blk_throtl_exit(struct request_queue *q)
 void blk_throtl_register_queue(struct request_queue *q)
 {
        struct throtl_data *td;
+       int i;
 
        td = q->td;
        BUG_ON(!td);
 
-       if (blk_queue_nonrot(q))
+       if (blk_queue_nonrot(q)) {
                td->throtl_slice = DFL_THROTL_SLICE_SSD;
-       else
+               td->filtered_latency = LATENCY_FILTERED_SSD;
+       } else {
                td->throtl_slice = DFL_THROTL_SLICE_HD;
+               td->filtered_latency = LATENCY_FILTERED_HD;
+               for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
+                       td->avg_buckets[i].latency = DFL_HD_BASELINE_LATENCY;
+       }
 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
        /* if no low limit, use previous default */
        td->throtl_slice = DFL_THROTL_SLICE_HD;
index 1cb5dd3a5da1e7bf834f229d2c8776bbcb65a3d4..e4703181d97fc809a3c71a48df3b15f5b63fdfa4 100644 (file)
@@ -143,7 +143,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool)
                mempool_free(bvec->bv_page, pool);
        }
 
-       bio_orig->bi_error = bio->bi_error;
+       bio_orig->bi_status = bio->bi_status;
        bio_endio(bio_orig);
        bio_put(bio);
 }
@@ -163,7 +163,7 @@ static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
 {
        struct bio *bio_orig = bio->bi_private;
 
-       if (!bio->bi_error)
+       if (!bio->bi_status)
                copy_to_high_bio_irq(bio_orig, bio);
 
        bounce_end_io(bio, pool);
index 0a23dbba2d3018edf10c49c774ebd5dd3ae79c87..c4513b23f57a6438af6ae38367c072931edf138c 100644 (file)
@@ -37,7 +37,7 @@ static void bsg_destroy_job(struct kref *kref)
        struct bsg_job *job = container_of(kref, struct bsg_job, kref);
        struct request *rq = job->req;
 
-       blk_end_request_all(rq, scsi_req(rq)->result);
+       blk_end_request_all(rq, BLK_STS_OK);
 
        put_device(job->dev);   /* release reference for the request */
 
@@ -202,7 +202,7 @@ static void bsg_request_fn(struct request_queue *q)
                ret = bsg_create_job(dev, req);
                if (ret) {
                        scsi_req(req)->result = ret;
-                       blk_end_request_all(req, ret);
+                       blk_end_request_all(req, BLK_STS_OK);
                        spin_lock_irq(q->queue_lock);
                        continue;
                }
@@ -246,6 +246,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
        q->bsg_job_size = dd_job_size;
        q->bsg_job_fn = job_fn;
        queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
        blk_queue_softirq_done(q, bsg_softirq_done);
        blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
index 6fd08544d77eaaf94d6eeda990bd142936186e57..59d02dd31b0cf29fa0f8b94fdc34614fc1335113 100644 (file)
@@ -294,14 +294,14 @@ out:
  * async completion call-back from the block layer, when scsi/ide/whatever
  * calls end_that_request_last() on a request
  */
-static void bsg_rq_end_io(struct request *rq, int uptodate)
+static void bsg_rq_end_io(struct request *rq, blk_status_t status)
 {
        struct bsg_command *bc = rq->end_io_data;
        struct bsg_device *bd = bc->bd;
        unsigned long flags;
 
-       dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
-               bd->name, rq, bc, bc->bio, uptodate);
+       dprintk("%s: finished rq %p bc %p, bio %p\n",
+               bd->name, rq, bc, bc->bio);
 
        bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
 
@@ -750,6 +750,12 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 #ifdef BSG_DEBUG
        unsigned char buf[32];
 #endif
+
+       if (!blk_queue_scsi_passthrough(rq)) {
+               WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
+               return ERR_PTR(-EINVAL);
+       }
+
        if (!blk_get_queue(rq))
                return ERR_PTR(-ENXIO);
 
index b7e9c7feeab2acbd1a846d0c31285460aba076ec..3d5c289457191ed8e1718ac3ebb5892a5343ff66 100644 (file)
@@ -982,15 +982,6 @@ static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
        return min_vdisktime;
 }
 
-static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
-{
-       s64 delta = (s64)(vdisktime - min_vdisktime);
-       if (delta < 0)
-               min_vdisktime = vdisktime;
-
-       return min_vdisktime;
-}
-
 static void update_min_vdisktime(struct cfq_rb_root *st)
 {
        struct cfq_group *cfqg;
index 680c6d63629831c8e8cc6687f6e8593924105463..350b3cbcf9e53fd3b6770236b449a7dcdb2d18e7 100644 (file)
@@ -46,8 +46,8 @@ static __be16 t10_pi_ip_fn(void *data, unsigned int len)
  * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
  * tag.
  */
-static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn,
-                          unsigned int type)
+static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
+               csum_fn *fn, unsigned int type)
 {
        unsigned int i;
 
@@ -67,11 +67,11 @@ static int t10_pi_generate(struct blk_integrity_iter *iter, csum_fn *fn,
                iter->seed++;
        }
 
-       return 0;
+       return BLK_STS_OK;
 }
 
-static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
-                               unsigned int type)
+static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
+               csum_fn *fn, unsigned int type)
 {
        unsigned int i;
 
@@ -108,7 +108,7 @@ static int t10_pi_verify(struct blk_integrity_iter *iter, csum_fn *fn,
                               "(rcvd %04x, want %04x)\n", iter->disk_name,
                               (unsigned long long)iter->seed,
                               be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
-                       return -EILSEQ;
+                       return BLK_STS_PROTECTION;
                }
 
 next:
@@ -117,45 +117,45 @@ next:
                iter->seed++;
        }
 
-       return 0;
+       return BLK_STS_OK;
 }
 
-static int t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
 {
        return t10_pi_generate(iter, t10_pi_crc_fn, 1);
 }
 
-static int t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
 {
        return t10_pi_generate(iter, t10_pi_ip_fn, 1);
 }
 
-static int t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
 {
        return t10_pi_verify(iter, t10_pi_crc_fn, 1);
 }
 
-static int t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
 {
        return t10_pi_verify(iter, t10_pi_ip_fn, 1);
 }
 
-static int t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
 {
        return t10_pi_generate(iter, t10_pi_crc_fn, 3);
 }
 
-static int t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
 {
        return t10_pi_generate(iter, t10_pi_ip_fn, 3);
 }
 
-static int t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
 {
        return t10_pi_verify(iter, t10_pi_crc_fn, 3);
 }
 
-static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
+static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
 {
        return t10_pi_verify(iter, t10_pi_ip_fn, 3);
 }
index d3a989e718f53518bafe93ddb9efea419e5d9b30..3cd6e12cfc467d27ccf2830fffde82e1aaf0f45e 100644 (file)
@@ -141,7 +141,7 @@ int public_key_verify_signature(const struct public_key *pkey,
         * signature and returns that to us.
         */
        ret = crypto_akcipher_verify(req);
-       if (ret == -EINPROGRESS) {
+       if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
                wait_for_completion(&compl.completion);
                ret = compl.err;
        }
index 672a94c2c3ffa3a8683dfb7f02134a27d92a641e..d178650fd524cfe8bc390c883505f4cbc31d60f2 100644 (file)
@@ -381,7 +381,7 @@ static int pefile_digest_pe(const void *pebuf, unsigned int pelen,
        }
 
 error:
-       kfree(desc);
+       kzfree(desc);
 error_no_desc:
        crypto_free_shash(tfm);
        kleave(" = %d", ret);
@@ -450,6 +450,6 @@ int verify_pefile_signature(const void *pebuf, unsigned pelen,
        ret = pefile_digest_pe(pebuf, pelen, &ctx);
 
 error:
-       kfree(ctx.digest);
+       kzfree(ctx.digest);
        return ret;
 }
index c80765b211cf0fae7a91c35494dc9ed45247eafe..dd03fead1ca358fc4f21cdb0c5c65b9896a39b38 100644 (file)
@@ -102,6 +102,7 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
                }
        }
 
+       ret = -ENOMEM;
        cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL);
        if (!cert->pub->key)
                goto error_decode;
index fa749f47013508d562366fb0484e7ea80535ba0a..cdb27ac4b2266eccff2ba89a5388ec2fbf6d18bc 100644 (file)
@@ -1767,9 +1767,8 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
                        break;
                case -EINPROGRESS:
                case -EBUSY:
-                       ret = wait_for_completion_interruptible(
-                               &drbg->ctr_completion);
-                       if (!ret && !drbg->ctr_async_err) {
+                       wait_for_completion(&drbg->ctr_completion);
+                       if (!drbg->ctr_async_err) {
                                reinit_completion(&drbg->ctr_completion);
                                break;
                        }
index b7ad808be3d4ec6c3822ce2cc5c0428d8f3b3dd0..3841b5eafa7ee244f605c28fd56c5a8c5dcaba9b 100644 (file)
@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 
        err = crypto_skcipher_encrypt(&data->req);
        if (err == -EINPROGRESS || err == -EBUSY) {
-               err = wait_for_completion_interruptible(
-                       &data->result.completion);
-               if (!err)
-                       err = data->result.err;
+               wait_for_completion(&data->result.completion);
+               err = data->result.err;
        }
 
        if (err)
index c5fecf97ee2f52bd11188a0cd2295bd82d5d02db..797b28dc7b3410cda1e775e55d2e1d2362254dd9 100644 (file)
@@ -666,14 +666,6 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
        int ret = -ENODEV;
        struct fwnode_handle *iort_fwnode;
 
-       /*
-        * If we already translated the fwspec there
-        * is nothing left to do, return the iommu_ops.
-        */
-       ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
-       if (ops)
-               return ops;
-
        if (node) {
                iort_fwnode = iort_get_fwnode(node);
                if (!iort_fwnode)
@@ -735,6 +727,14 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
        u32 streamid = 0;
        int err;
 
+       /*
+        * If we already translated the fwspec there
+        * is nothing left to do, return the iommu_ops.
+        */
+       ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
+       if (ops)
+               return ops;
+
        if (dev_is_pci(dev)) {
                struct pci_bus *bus = to_pci_dev(dev)->bus;
                u32 rid;
@@ -782,6 +782,12 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
        if (err)
                ops = ERR_PTR(err);
 
+       /* Ignore all other errors apart from EPROBE_DEFER */
+       if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
+               dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
+               ops = NULL;
+       }
+
        return ops;
 }
 
index a9a9ab3399d47ff8087e62d495f1d2ba930fc1d3..d42eeef9d9287815ce5f4c82d8d915ae5deabe51 100644 (file)
@@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
        if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
            (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
             (battery->capacity_now <= battery->alarm)))
-               pm_wakeup_hard_event(&battery->device->dev);
+               pm_wakeup_event(&battery->device->dev, 0);
 
        return result;
 }
index 9ad8cdb58743b765a6daa615729d78745d70af30..e19f530f1083a13732328516925e3bbeb6493e14 100644 (file)
@@ -217,7 +217,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
        }
 
        if (state)
-               pm_wakeup_hard_event(&device->dev);
+               pm_wakeup_event(&device->dev, 0);
 
        ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
        if (ret == NOTIFY_DONE)
@@ -402,7 +402,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
                } else {
                        int keycode;
 
-                       pm_wakeup_hard_event(&device->dev);
+                       pm_wakeup_event(&device->dev, 0);
                        if (button->suspended)
                                break;
 
@@ -534,7 +534,6 @@ static int acpi_button_add(struct acpi_device *device)
                lid_device = device;
        }
 
-       device_init_wakeup(&device->dev, true);
        printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
        return 0;
 
index 798d5003a039d876f275fc2d933be71cb7ebfbed..993fd31394c854c99e5ce0c2af824f36c50b7a22 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/pm_qos.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
-#include <linux/suspend.h>
 
 #include "internal.h"
 
@@ -400,7 +399,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
        mutex_lock(&acpi_pm_notifier_lock);
 
        if (adev->wakeup.flags.notifier_present) {
-               pm_wakeup_ws_event(adev->wakeup.ws, 0, true);
+               __pm_wakeup_event(adev->wakeup.ws, 0);
                if (adev->wakeup.context.work.func)
                        queue_pm_work(&adev->wakeup.context.work);
        }
index e39ec7b7cb674fbad3cab60f4e5139d7e06a1f09..3a10d7573477e7dea0139c5f885e9514a1886a7a 100644 (file)
@@ -1371,8 +1371,8 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
        iort_set_dma_mask(dev);
 
        iommu = iort_iommu_configure(dev);
-       if (IS_ERR(iommu))
-               return PTR_ERR(iommu);
+       if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
 
        size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
        /*
index a6574d62634031ac6e351418b935a333b556b665..097d630ab8867267326121f9f4db2525cf06ef4b 100644 (file)
@@ -663,40 +663,14 @@ static int acpi_freeze_prepare(void)
        acpi_os_wait_events_complete();
        if (acpi_sci_irq_valid())
                enable_irq_wake(acpi_sci_irq);
-
        return 0;
 }
 
-static void acpi_freeze_wake(void)
-{
-       /*
-        * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
-        * that the SCI has triggered while suspended, so cancel the wakeup in
-        * case it has not been a wakeup event (the GPEs will be checked later).
-        */
-       if (acpi_sci_irq_valid() &&
-           !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
-               pm_system_cancel_wakeup();
-}
-
-static void acpi_freeze_sync(void)
-{
-       /*
-        * Process all pending events in case there are any wakeup ones.
-        *
-        * The EC driver uses the system workqueue, so that one needs to be
-        * flushed too.
-        */
-       acpi_os_wait_events_complete();
-       flush_scheduled_work();
-}
-
 static void acpi_freeze_restore(void)
 {
        acpi_disable_wakeup_devices(ACPI_STATE_S0);
        if (acpi_sci_irq_valid())
                disable_irq_wake(acpi_sci_irq);
-
        acpi_enable_all_runtime_gpes();
 }
 
@@ -708,8 +682,6 @@ static void acpi_freeze_end(void)
 static const struct platform_freeze_ops acpi_freeze_ops = {
        .begin = acpi_freeze_begin,
        .prepare = acpi_freeze_prepare,
-       .wake = acpi_freeze_wake,
-       .sync = acpi_freeze_sync,
        .restore = acpi_freeze_restore,
        .end = acpi_freeze_end,
 };
index 2fc52407306c15c27b9fb0b11c2db4ef4641aeba..c69954023c2e7d8c235aace4b66a1d32298f36eb 100644 (file)
@@ -1364,6 +1364,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
 {}
 #endif
 
+/*
+ * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
+ * as DUMMY, or detected but eventually get a "link down" and never get up
+ * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
+ * port_map may hold a value of 0x00.
+ *
+ * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
+ * and can significantly reduce the occurrence of the problem.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189471
+ */
+static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
+                                   struct pci_dev *pdev)
+{
+       static const struct dmi_system_id sysids[] = {
+               {
+                       .ident = "Acer Switch Alpha 12",
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                               DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
+                       },
+               },
+               { }
+       };
+
+       if (dmi_check_system(sysids)) {
+               dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
+               if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
+                       hpriv->port_map = 0x7;
+                       hpriv->cap = 0xC734FF02;
+               }
+       }
+}
+
 #ifdef CONFIG_ARM64
 /*
  * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
@@ -1636,6 +1670,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                         "online status unreliable, applying workaround\n");
        }
 
+
+       /* Acer SA5-271 workaround modifies private_data */
+       acer_sa5_271_workaround(hpriv, pdev);
+
        /* CAP.NP sometimes indicate the index of the last enabled
         * port, at other times, that of the last possible port, so
         * determining the maximum port number requires looking at
index aaa761b9081cc02a75792302c741f7b54ebd9823..cd2eab6aa92ea245e1a3dab839be7fe8aa938cdb 100644 (file)
@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
 
        irq = platform_get_irq(pdev, 0);
        if (irq <= 0) {
-               dev_err(dev, "no irq\n");
-               return -EINVAL;
+               if (irq != -EPROBE_DEFER)
+                       dev_err(dev, "no irq\n");
+               return irq;
        }
 
        hpriv->irq = irq;
index 2d83b8c7596567a020300d8dd1aeefeb33a6a055..e157a0e4441916b77b53c402741b74e124012cc9 100644 (file)
@@ -6800,7 +6800,7 @@ static int __init ata_parse_force_one(char **cur,
        }
 
        force_ent->port = simple_strtoul(id, &endp, 10);
-       if (p == endp || *endp != '\0') {
+       if (id == endp || *endp != '\0') {
                *reason = "invalid port/link";
                return -EINVAL;
        }
index b66bcda88320fefa399ac9653eca64d3045a6a96..3b2246dded74fbeed89d53f913c725ab6e5c0082 100644 (file)
@@ -4067,7 +4067,6 @@ static int mv_platform_probe(struct platform_device *pdev)
        struct ata_host *host;
        struct mv_host_priv *hpriv;
        struct resource *res;
-       void __iomem *mmio;
        int n_ports = 0, irq = 0;
        int rc;
        int port;
@@ -4086,9 +4085,8 @@ static int mv_platform_probe(struct platform_device *pdev)
         * Get the register base first
         */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       mmio = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(mmio))
-               return PTR_ERR(mmio);
+       if (res == NULL)
+               return -EINVAL;
 
        /* allocate host */
        if (pdev->dev.of_node) {
@@ -4132,7 +4130,12 @@ static int mv_platform_probe(struct platform_device *pdev)
        hpriv->board_idx = chip_soc;
 
        host->iomap = NULL;
-       hpriv->base = mmio - SATAHC0_REG_BASE;
+       hpriv->base = devm_ioremap(&pdev->dev, res->start,
+                                  resource_size(res));
+       if (!hpriv->base)
+               return -ENOMEM;
+
+       hpriv->base -= SATAHC0_REG_BASE;
 
        hpriv->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(hpriv->clk))
index 5d38245a7a73a7cc4fa0d49255a52c7daead7886..b7939a2c1fab53ff2a94799a261d401c1c440f2e 100644 (file)
@@ -890,7 +890,10 @@ static int sata_rcar_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "failed to get access to sata clock\n");
                return PTR_ERR(priv->clk);
        }
-       clk_prepare_enable(priv->clk);
+
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
 
        host = ata_host_alloc(&pdev->dev, 1);
        if (!host) {
@@ -970,8 +973,11 @@ static int sata_rcar_resume(struct device *dev)
        struct ata_host *host = dev_get_drvdata(dev);
        struct sata_rcar_priv *priv = host->private_data;
        void __iomem *base = priv->base;
+       int ret;
 
-       clk_prepare_enable(priv->clk);
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
 
        /* ack and mask */
        iowrite32(0, base + SATAINTSTAT_REG);
@@ -988,8 +994,11 @@ static int sata_rcar_restore(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
        struct sata_rcar_priv *priv = host->private_data;
+       int ret;
 
-       clk_prepare_enable(priv->clk);
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
 
        sata_rcar_setup_port(host);
 
index e987a6f55d36747f79b470b0372a1abcff6390d7..9faee1c893e53c8dea6e14d472a73a8b7131bf96 100644 (file)
@@ -1091,6 +1091,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
        if (async_error)
                goto Complete;
 
+       if (pm_wakeup_pending()) {
+               async_error = -EBUSY;
+               goto Complete;
+       }
+
        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;
 
index 9c36b27996fc2b56a141bb388acf4947a45b104b..c313b600d356260fd9b98fe4848340f9cbdcf9ae 100644 (file)
@@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly;
 /* First wakeup IRQ seen by the kernel in the last cycle. */
 unsigned int pm_wakeup_irq __read_mostly;
 
-/* If greater than 0 and the system is suspending, terminate the suspend. */
-static atomic_t pm_abort_suspend __read_mostly;
+/* If set and the system is suspending, terminate the suspend. */
+static bool pm_abort_suspend __read_mostly;
 
 /*
  * Combined counters of registered wakeup events and wakeup events in progress.
@@ -855,26 +855,20 @@ bool pm_wakeup_pending(void)
                pm_print_active_wakeup_sources();
        }
 
-       return ret || atomic_read(&pm_abort_suspend) > 0;
+       return ret || pm_abort_suspend;
 }
 
 void pm_system_wakeup(void)
 {
-       atomic_inc(&pm_abort_suspend);
+       pm_abort_suspend = true;
        freeze_wake();
 }
 EXPORT_SYMBOL_GPL(pm_system_wakeup);
 
-void pm_system_cancel_wakeup(void)
-{
-       atomic_dec(&pm_abort_suspend);
-}
-
-void pm_wakeup_clear(bool reset)
+void pm_wakeup_clear(void)
 {
+       pm_abort_suspend = false;
        pm_wakeup_irq = 0;
-       if (reset)
-               atomic_set(&pm_abort_suspend, 0);
 }
 
 void pm_system_irq_wakeup(unsigned int irq_number)
index 26a51be7722768d1215d2e72df11158dd64c0e05..245a879b036e58e4fa50a5f9181a7719cac48445 100644 (file)
@@ -3464,7 +3464,7 @@ static inline bool DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
                                                 bool SuccessfulIO)
 {
        struct request *Request = Command->Request;
-       int Error = SuccessfulIO ? 0 : -EIO;
+       blk_status_t Error = SuccessfulIO ? BLK_STS_OK : BLK_STS_IOERR;
 
        pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
                Command->SegmentCount, Command->DmaDirection);
index a328f673adfe036fcc5a4ccfe5ac3e9824d8ca85..49908c74bfcb0fae9eb30f84d969a4c520192e39 100644 (file)
@@ -1378,7 +1378,7 @@ static void redo_fd_request(void)
        struct amiga_floppy_struct *floppy;
        char *data;
        unsigned long flags;
-       int err;
+       blk_status_t err;
 
 next_req:
        rq = set_next_request();
@@ -1392,7 +1392,7 @@ next_req:
 
 next_segment:
        /* Here someone could investigate to be more efficient */
-       for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
+       for (cnt = 0, err = BLK_STS_OK; cnt < blk_rq_cur_sectors(rq); cnt++) {
 #ifdef DEBUG
                printk("fd: sector %ld + %d requested for %s\n",
                       blk_rq_pos(rq), cnt,
@@ -1400,7 +1400,7 @@ next_segment:
 #endif
                block = blk_rq_pos(rq) + cnt;
                if ((int)block > floppy->blocks) {
-                       err = -EIO;
+                       err = BLK_STS_IOERR;
                        break;
                }
 
@@ -1413,7 +1413,7 @@ next_segment:
 #endif
 
                if (get_track(drive, track) == -1) {
-                       err = -EIO;
+                       err = BLK_STS_IOERR;
                        break;
                }
 
@@ -1424,7 +1424,7 @@ next_segment:
 
                        /* keep the drive spinning while writes are scheduled */
                        if (!fd_motor_on(drive)) {
-                               err = -EIO;
+                               err = BLK_STS_IOERR;
                                break;
                        }
                        /*
index 3c606c09fd5acbd2897c680c3249929f30b6a9a8..dc43254e05a4bd4030e5a7a0cbb34d749b3f8365 100644 (file)
@@ -1070,8 +1070,8 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
                d->ip.rq = NULL;
        do {
                bio = rq->bio;
-               bok = !fastfail && !bio->bi_error;
-       } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
+               bok = !fastfail && !bio->bi_status;
+       } while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
 
        /* cf. http://lkml.org/lkml/2006/10/31/28 */
        if (!fastfail)
@@ -1131,7 +1131,7 @@ ktiocomplete(struct frame *f)
                        ahout->cmdstat, ahin->cmdstat,
                        d->aoemajor, d->aoeminor);
 noskb:         if (buf)
-                       buf->bio->bi_error = -EIO;
+                       buf->bio->bi_status = BLK_STS_IOERR;
                goto out;
        }
 
@@ -1144,7 +1144,7 @@ noskb:            if (buf)
                                "aoe: runt data size in read from",
                                (long) d->aoemajor, d->aoeminor,
                               skb->len, n);
-                       buf->bio->bi_error = -EIO;
+                       buf->bio->bi_status = BLK_STS_IOERR;
                        break;
                }
                if (n > f->iter.bi_size) {
@@ -1152,7 +1152,7 @@ noskb:            if (buf)
                                "aoe: too-large data size in read from",
                                (long) d->aoemajor, d->aoeminor,
                                n, f->iter.bi_size);
-                       buf->bio->bi_error = -EIO;
+                       buf->bio->bi_status = BLK_STS_IOERR;
                        break;
                }
                bvcpy(skb, f->buf->bio, f->iter, n);
@@ -1654,7 +1654,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
        if (buf == NULL)
                return;
        buf->iter.bi_size = 0;
-       buf->bio->bi_error = -EIO;
+       buf->bio->bi_status = BLK_STS_IOERR;
        if (buf->nframesout == 0)
                aoe_end_buf(d, buf);
 }
index ffd1947500c6411b286a1250b6cab662917008cc..b28fefb90391cbf7900651e170c3a06c57f4a6ae 100644 (file)
@@ -170,7 +170,7 @@ aoe_failip(struct aoedev *d)
        if (rq == NULL)
                return;
        while ((bio = d->ip.nxbio)) {
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
                d->ip.nxbio = bio->bi_next;
                n = (unsigned long) rq->special;
                rq->special = (void *) --n;
index fa69ecd52cb57cb226e1f9f177ef0c464ee3155a..92da886180aa10a994e01139c53d94130672343c 100644 (file)
@@ -378,7 +378,7 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
 static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
 static DEFINE_TIMER(fd_timer, check_change, 0, 0);
        
-static void fd_end_request_cur(int err)
+static void fd_end_request_cur(blk_status_t err)
 {
        if (!__blk_end_request_cur(fd_request, err))
                fd_request = NULL;
@@ -620,7 +620,7 @@ static void fd_error( void )
        fd_request->error_count++;
        if (fd_request->error_count >= MAX_ERRORS) {
                printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
-               fd_end_request_cur(-EIO);
+               fd_end_request_cur(BLK_STS_IOERR);
        }
        else if (fd_request->error_count == RECALIBRATE_ERRORS) {
                printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
@@ -739,7 +739,7 @@ static void do_fd_action( int drive )
                    }
                    else {
                        /* all sectors finished */
-                       fd_end_request_cur(0);
+                       fd_end_request_cur(BLK_STS_OK);
                        redo_fd_request();
                        return;
                    }
@@ -1144,7 +1144,7 @@ static void fd_rwsec_done1(int status)
        }
        else {
                /* all sectors finished */
-               fd_end_request_cur(0);
+               fd_end_request_cur(BLK_STS_OK);
                redo_fd_request();
        }
        return;
@@ -1445,7 +1445,7 @@ repeat:
        if (!UD.connected) {
                /* drive not connected */
                printk(KERN_ERR "Unknown Device: fd%d\n", drive );
-               fd_end_request_cur(-EIO);
+               fd_end_request_cur(BLK_STS_IOERR);
                goto repeat;
        }
                
@@ -1461,12 +1461,12 @@ repeat:
                /* user supplied disk type */
                if (--type >= NUM_DISK_MINORS) {
                        printk(KERN_WARNING "fd%d: invalid disk format", drive );
-                       fd_end_request_cur(-EIO);
+                       fd_end_request_cur(BLK_STS_IOERR);
                        goto repeat;
                }
                if (minor2disktype[type].drive_types > DriveType)  {
                        printk(KERN_WARNING "fd%d: unsupported disk format", drive );
-                       fd_end_request_cur(-EIO);
+                       fd_end_request_cur(BLK_STS_IOERR);
                        goto repeat;
                }
                type = minor2disktype[type].index;
@@ -1476,7 +1476,7 @@ repeat:
        }
        
        if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
-               fd_end_request_cur(-EIO);
+               fd_end_request_cur(BLK_STS_IOERR);
                goto repeat;
        }
 
index cd375503f7b0d83558280e9865ee17c967b706a8..02a611993bb4b8d3673b2d92b4ebee512ca434ce 100644 (file)
@@ -1864,7 +1864,8 @@ static void cciss_softirq_done(struct request *rq)
        /* set the residual count for pc requests */
        if (blk_rq_is_passthrough(rq))
                scsi_req(rq)->resid_len = c->err_info->ResidualCnt;
-       blk_end_request_all(rq, scsi_req(rq)->result ? -EIO : 0);
+       blk_end_request_all(rq, scsi_req(rq)->result ?
+                       BLK_STS_IOERR : BLK_STS_OK);
 
        spin_lock_irqsave(&h->lock, flags);
        cmd_free(h, c);
@@ -1956,6 +1957,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
        disk->queue->cmd_size = sizeof(struct scsi_request);
        disk->queue->request_fn = do_cciss_request;
        disk->queue->queue_lock = &h->lock;
+       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, disk->queue);
        if (blk_init_allocated_queue(disk->queue) < 0)
                goto cleanup_queue;
 
index 8d7bcfa49c1223da19ea1d7093fdfe5845f8a7d7..e02c45cd3c5a7302054ff63ab95fb4247b368443 100644 (file)
@@ -178,7 +178,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
        else
                submit_bio(bio);
        wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
-       if (!bio->bi_error)
+       if (!bio->bi_status)
                err = device->md_io.error;
 
  out:
index a804a4107fbc132795ae67a000f2b517b1047d32..809fd245c3dc8b21240a91de649d253e4ab3f6cd 100644 (file)
@@ -959,16 +959,16 @@ static void drbd_bm_endio(struct bio *bio)
            !bm_test_page_unchanged(b->bm_pages[idx]))
                drbd_warn(device, "bitmap page idx %u changed during IO!\n", idx);
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                /* ctx error will hold the completed-last non-zero error code,
                 * in case error codes differ. */
-               ctx->error = bio->bi_error;
+               ctx->error = blk_status_to_errno(bio->bi_status);
                bm_set_page_io_err(b->bm_pages[idx]);
                /* Not identical to on disk version of it.
                 * Is BM_PAGE_IO_ERROR enough? */
                if (__ratelimit(&drbd_ratelimit_state))
                        drbd_err(device, "IO ERROR %d on bitmap page idx %u\n",
-                                       bio->bi_error, idx);
+                                       bio->bi_status, idx);
        } else {
                bm_clear_page_io_err(b->bm_pages[idx]);
                dynamic_drbd_dbg(device, "bitmap page idx %u completed\n", idx);
index d5da45bb03a663ef33f7bfb9cc6bb378a724393d..76761b4ca13ebd24845d144d72623498df58dc5e 100644 (file)
@@ -1627,7 +1627,7 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
        __release(local);
        if (!bio->bi_bdev) {
                drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
-               bio->bi_error = -ENODEV;
+               bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return;
        }
index 1b0a2be24f39edc8e597ed4348545e08cb025c9c..c7e95e6380fb46d1503c6a595f6cb716c4d0f6ef 100644 (file)
@@ -1229,9 +1229,9 @@ void one_flush_endio(struct bio *bio)
        struct drbd_device *device = octx->device;
        struct issue_flush_context *ctx = octx->ctx;
 
-       if (bio->bi_error) {
-               ctx->error = bio->bi_error;
-               drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_error);
+       if (bio->bi_status) {
+               ctx->error = blk_status_to_errno(bio->bi_status);
+               drbd_info(device, "local disk FLUSH FAILED with status %d\n", bio->bi_status);
        }
        kfree(octx);
        bio_put(bio);
index 656624314f0d68dc7385b3896d0de9ae1a1cc457..fca6b9914948b37ec6a28ab95e14204054692e6e 100644 (file)
@@ -203,7 +203,7 @@ void start_new_tl_epoch(struct drbd_connection *connection)
 void complete_master_bio(struct drbd_device *device,
                struct bio_and_error *m)
 {
-       m->bio->bi_error = m->error;
+       m->bio->bi_status = errno_to_blk_status(m->error);
        bio_endio(m->bio);
        dec_ap_bio(device);
 }
@@ -1157,7 +1157,7 @@ static void drbd_process_discard_req(struct drbd_request *req)
 
        if (blkdev_issue_zeroout(bdev, req->i.sector, req->i.size >> 9,
                        GFP_NOIO, 0))
-               req->private_bio->bi_error = -EIO;
+               req->private_bio->bi_status = BLK_STS_IOERR;
        bio_endio(req->private_bio);
 }
 
@@ -1225,7 +1225,7 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
                /* only pass the error to the upper layers.
                 * if user cannot handle io errors, that's not our business. */
                drbd_err(device, "could not kmalloc() req\n");
-               bio->bi_error = -ENOMEM;
+               bio->bi_status = BLK_STS_RESOURCE;
                bio_endio(bio);
                return ERR_PTR(-ENOMEM);
        }
index 1afcb4e02d8d98c0021dc38e2770306f30dc8883..1d8726a8df340513a6c9006aaebcc12c99d5b284 100644 (file)
@@ -63,7 +63,7 @@ void drbd_md_endio(struct bio *bio)
        struct drbd_device *device;
 
        device = bio->bi_private;
-       device->md_io.error = bio->bi_error;
+       device->md_io.error = blk_status_to_errno(bio->bi_status);
 
        /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
         * to timeout on the lower level device, and eventually detach from it.
@@ -177,13 +177,13 @@ void drbd_peer_request_endio(struct bio *bio)
        bool is_discard = bio_op(bio) == REQ_OP_WRITE_ZEROES ||
                          bio_op(bio) == REQ_OP_DISCARD;
 
-       if (bio->bi_error && __ratelimit(&drbd_ratelimit_state))
+       if (bio->bi_status && __ratelimit(&drbd_ratelimit_state))
                drbd_warn(device, "%s: error=%d s=%llus\n",
                                is_write ? (is_discard ? "discard" : "write")
-                                       : "read", bio->bi_error,
+                                       : "read", bio->bi_status,
                                (unsigned long long)peer_req->i.sector);
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                set_bit(__EE_WAS_ERROR, &peer_req->flags);
 
        bio_put(bio); /* no need for the bio anymore */
@@ -243,16 +243,16 @@ void drbd_request_endio(struct bio *bio)
                if (__ratelimit(&drbd_ratelimit_state))
                        drbd_emerg(device, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
 
-               if (!bio->bi_error)
+               if (!bio->bi_status)
                        drbd_panic_after_delayed_completion_of_aborted_request(device);
        }
 
        /* to avoid recursion in __req_mod */
-       if (unlikely(bio->bi_error)) {
+       if (unlikely(bio->bi_status)) {
                switch (bio_op(bio)) {
                case REQ_OP_WRITE_ZEROES:
                case REQ_OP_DISCARD:
-                       if (bio->bi_error == -EOPNOTSUPP)
+                       if (bio->bi_status == BLK_STS_NOTSUPP)
                                what = DISCARD_COMPLETED_NOTSUPP;
                        else
                                what = DISCARD_COMPLETED_WITH_ERROR;
@@ -272,7 +272,7 @@ void drbd_request_endio(struct bio *bio)
        }
 
        bio_put(req->private_bio);
-       req->private_bio = ERR_PTR(bio->bi_error);
+       req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
 
        /* not req_mod(), we need irqsave here! */
        spin_lock_irqsave(&device->resource->req_lock, flags);
index 60d4c765317833ec75ef6637104325b96084cc11..9e3cb32e365d909d6c642f8a2a4548a50b4e2cc4 100644 (file)
@@ -2202,7 +2202,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
  * =============================
  */
 
-static void floppy_end_request(struct request *req, int error)
+static void floppy_end_request(struct request *req, blk_status_t error)
 {
        unsigned int nr_sectors = current_count_sectors;
        unsigned int drive = (unsigned long)req->rq_disk->private_data;
@@ -2263,7 +2263,7 @@ static void request_done(int uptodate)
                        DRWE->last_error_generation = DRS->generation;
                }
                spin_lock_irqsave(q->queue_lock, flags);
-               floppy_end_request(req, -EIO);
+               floppy_end_request(req, BLK_STS_IOERR);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
 }
@@ -3780,9 +3780,9 @@ static void floppy_rb0_cb(struct bio *bio)
        struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
        int drive = cbdata->drive;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                pr_info("floppy: error %d while reading block 0\n",
-                       bio->bi_error);
+                       bio->bi_status);
                set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
        }
        complete(&cbdata->complete);
index 28d932906f24c19e7250e6c69ec903b4a4460761..9cdf771b66edca3138090ac5b8396321df91d1ce 100644 (file)
@@ -221,7 +221,8 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
 }
 
 static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
+                loff_t logical_blocksize)
 {
        loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
        sector_t x = (sector_t)size;
@@ -233,6 +234,12 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
                lo->lo_offset = offset;
        if (lo->lo_sizelimit != sizelimit)
                lo->lo_sizelimit = sizelimit;
+       if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
+               lo->lo_logical_blocksize = logical_blocksize;
+               blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
+               blk_queue_logical_block_size(lo->lo_queue,
+                                            lo->lo_logical_blocksize);
+       }
        set_capacity(lo->lo_disk, x);
        bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
        /* let user-space know about the new size */
@@ -457,7 +464,7 @@ static void lo_complete_rq(struct request *rq)
                zero_fill_bio(bio);
        }
 
-       blk_mq_end_request(rq, cmd->ret < 0 ? -EIO : 0);
+       blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
 }
 
 static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -608,6 +615,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
  */
 static int loop_flush(struct loop_device *lo)
 {
+       /* loop not yet configured, no running thread, nothing to flush */
+       if (lo->lo_state != Lo_bound)
+               return 0;
        return loop_switch(lo, NULL);
 }
 
@@ -810,6 +820,7 @@ static void loop_config_discard(struct loop_device *lo)
        struct file *file = lo->lo_backing_file;
        struct inode *inode = file->f_mapping->host;
        struct request_queue *q = lo->lo_queue;
+       int lo_bits = 9;
 
        /*
         * We use punch hole to reclaim the free space used by the
@@ -829,8 +840,11 @@ static void loop_config_discard(struct loop_device *lo)
 
        q->limits.discard_granularity = inode->i_sb->s_blocksize;
        q->limits.discard_alignment = 0;
-       blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
-       blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+       if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
+               lo_bits = blksize_bits(lo->lo_logical_blocksize);
+
+       blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
+       blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
 
@@ -918,6 +932,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 
        lo->use_dio = false;
        lo->lo_blocksize = lo_blocksize;
+       lo->lo_logical_blocksize = 512;
        lo->lo_device = bdev;
        lo->lo_flags = lo_flags;
        lo->lo_backing_file = file;
@@ -1083,6 +1098,7 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
        int err;
        struct loop_func_table *xfer;
        kuid_t uid = current_uid();
+       int lo_flags = lo->lo_flags;
 
        if (lo->lo_encrypt_key_size &&
            !uid_eq(lo->lo_key_owner, uid) &&
@@ -1115,12 +1131,30 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
        if (err)
                goto exit;
 
+       if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
+               if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
+                       lo->lo_logical_blocksize = 512;
+               lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
+               if (LO_INFO_BLOCKSIZE(info) != 512 &&
+                   LO_INFO_BLOCKSIZE(info) != 1024 &&
+                   LO_INFO_BLOCKSIZE(info) != 2048 &&
+                   LO_INFO_BLOCKSIZE(info) != 4096)
+                       return -EINVAL;
+               if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
+                       return -EINVAL;
+       }
+
        if (lo->lo_offset != info->lo_offset ||
-           lo->lo_sizelimit != info->lo_sizelimit)
-               if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
+           lo->lo_sizelimit != info->lo_sizelimit ||
+           lo->lo_flags != lo_flags ||
+           ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
+            lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
+               if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
+                                    LO_INFO_BLOCKSIZE(info))) {
                        err = -EFBIG;
                        goto exit;
                }
+       }
 
        loop_config_discard(lo);
 
@@ -1303,12 +1337,13 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
        return err;
 }
 
-static int loop_set_capacity(struct loop_device *lo, struct block_device *bdev)
+static int loop_set_capacity(struct loop_device *lo)
 {
        if (unlikely(lo->lo_state != Lo_bound))
                return -ENXIO;
 
-       return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
+       return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
+                               lo->lo_logical_blocksize);
 }
 
 static int loop_set_dio(struct loop_device *lo, unsigned long arg)
@@ -1366,7 +1401,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
        case LOOP_SET_CAPACITY:
                err = -EPERM;
                if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
-                       err = loop_set_capacity(lo, bdev);
+                       err = loop_set_capacity(lo);
                break;
        case LOOP_SET_DIRECT_IO:
                err = -EPERM;
@@ -1642,7 +1677,7 @@ int loop_unregister_transfer(int number)
 EXPORT_SYMBOL(loop_register_transfer);
 EXPORT_SYMBOL(loop_unregister_transfer);
 
-static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
        struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -1651,7 +1686,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_mq_start_request(bd->rq);
 
        if (lo->lo_state != Lo_bound)
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
 
        switch (req_op(cmd->rq)) {
        case REQ_OP_FLUSH:
@@ -1666,7 +1701,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        kthread_queue_work(&lo->worker, &cmd->work);
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 static void loop_handle_cmd(struct loop_cmd *cmd)
index fecd3f97ef8c7cd9f825e6c58777a1a2bc6f3461..2c096b9a17b8ccd756065d5102b293796a5833ad 100644 (file)
@@ -49,6 +49,7 @@ struct loop_device {
        struct file *   lo_backing_file;
        struct block_device *lo_device;
        unsigned        lo_blocksize;
+       unsigned        lo_logical_blocksize;
        void            *key_data; 
 
        gfp_t           old_gfp_mask;
index 3a779a4f565365c1d59a7b7f4c3e96fe7343c195..d8618a71da74cc6252aceaa25ffdaa87b819be4b 100644 (file)
@@ -532,7 +532,7 @@ static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
 static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
                                                struct smart_attr *attrib);
 
-static void mtip_complete_command(struct mtip_cmd *cmd, int status)
+static void mtip_complete_command(struct mtip_cmd *cmd, blk_status_t status)
 {
        struct request *req = blk_mq_rq_from_pdu(cmd);
 
@@ -568,7 +568,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
        if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
                cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
                dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
-               mtip_complete_command(cmd, -EIO);
+               mtip_complete_command(cmd, BLK_STS_IOERR);
                return;
        }
 
@@ -667,7 +667,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
                                        tag,
                                        fail_reason != NULL ?
                                                fail_reason : "unknown");
-                                       mtip_complete_command(cmd, -ENODATA);
+                                       mtip_complete_command(cmd, BLK_STS_MEDIUM);
                                        continue;
                                }
                        }
@@ -690,7 +690,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
                        dev_warn(&port->dd->pdev->dev,
                                "retiring tag %d\n", tag);
 
-                       mtip_complete_command(cmd, -EIO);
+                       mtip_complete_command(cmd, BLK_STS_IOERR);
                }
        }
        print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
@@ -2753,7 +2753,7 @@ static void mtip_abort_cmd(struct request *req, void *data,
        dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag);
 
        clear_bit(req->tag, dd->port->cmds_to_issue);
-       cmd->status = -EIO;
+       cmd->status = BLK_STS_IOERR;
        mtip_softirq_done_fn(req);
 }
 
@@ -3597,7 +3597,7 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
                int err;
 
                err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
-               blk_mq_end_request(rq, err);
+               blk_mq_end_request(rq, err ? BLK_STS_IOERR : BLK_STS_OK);
                return 0;
        }
 
@@ -3633,8 +3633,8 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
        return false;
 }
 
-static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
-                                  struct request *rq)
+static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
+               struct request *rq)
 {
        struct driver_data *dd = hctx->queue->queuedata;
        struct mtip_int_cmd *icmd = rq->special;
@@ -3642,7 +3642,7 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
        struct mtip_cmd_sg *command_sg;
 
        if (mtip_commands_active(dd->port))
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
 
        /* Populate the SG list */
        cmd->command_header->opts =
@@ -3666,10 +3666,10 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
 
        blk_mq_start_request(rq);
        mtip_issue_non_ncq_command(dd->port, rq->tag);
-       return BLK_MQ_RQ_QUEUE_OK;
+       return 0;
 }
 
-static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
 {
        struct request *rq = bd->rq;
@@ -3681,15 +3681,14 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
                return mtip_issue_reserved_cmd(hctx, rq);
 
        if (unlikely(mtip_check_unal_depth(hctx, rq)))
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
 
        blk_mq_start_request(rq);
 
        ret = mtip_submit_request(hctx, rq);
        if (likely(!ret))
-               return BLK_MQ_RQ_QUEUE_OK;
-
-       return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_OK;
+       return BLK_STS_IOERR;
 }
 
 static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
@@ -3730,7 +3729,7 @@ static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
        if (reserved) {
                struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
 
-               cmd->status = -ETIME;
+               cmd->status = BLK_STS_TIMEOUT;
                return BLK_EH_HANDLED;
        }
 
@@ -3961,7 +3960,7 @@ static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
 {
        struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-       cmd->status = -ENODEV;
+       cmd->status = BLK_STS_IOERR;
        blk_mq_complete_request(rq);
 }
 
index 37b8e3e0bb786b04798a25efa833be1fd7095c25..e8286af50e16b6b4c561e279dce2b3ed748d2a9a 100644 (file)
@@ -342,7 +342,7 @@ struct mtip_cmd {
        int retries; /* The number of retries left for this command. */
 
        int direction; /* Data transfer direction */
-       int status;
+       blk_status_t status;
 };
 
 /* Structure used to describe a port. */
index f3f191ba8ca4bbe6b7d87a7accc84bd648e4d718..977ec960dd2f974b0c09db746671a7dbd0130652 100644 (file)
@@ -116,7 +116,7 @@ struct nbd_cmd {
        int index;
        int cookie;
        struct completion send_complete;
-       int status;
+       blk_status_t status;
 };
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -286,7 +286,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        struct nbd_config *config;
 
        if (!refcount_inc_not_zero(&nbd->config_refs)) {
-               cmd->status = -EIO;
+               cmd->status = BLK_STS_TIMEOUT;
                return BLK_EH_HANDLED;
        }
 
@@ -331,7 +331,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
                                    "Connection timed out\n");
        }
        set_bit(NBD_TIMEDOUT, &config->runtime_flags);
-       cmd->status = -EIO;
+       cmd->status = BLK_STS_IOERR;
        sock_shutdown(nbd);
        nbd_config_put(nbd);
 
@@ -400,6 +400,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
        unsigned long size = blk_rq_bytes(req);
        struct bio *bio;
        u32 type;
+       u32 nbd_cmd_flags = 0;
        u32 tag = blk_mq_unique_tag(req);
        int sent = nsock->sent, skip = 0;
 
@@ -429,6 +430,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                return -EIO;
        }
 
+       if (req->cmd_flags & REQ_FUA)
+               nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
+
        /* We did a partial send previously, and we at least sent the whole
         * request struct, so just go and send the rest of the pages in the
         * request.
@@ -442,7 +446,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
        }
        cmd->index = index;
        cmd->cookie = nsock->cookie;
-       request.type = htonl(type);
+       request.type = htonl(type | nbd_cmd_flags);
        if (type != NBD_CMD_FLUSH) {
                request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
                request.len = htonl(size);
@@ -465,7 +469,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                                nsock->pending = req;
                                nsock->sent = sent;
                        }
-                       return BLK_MQ_RQ_QUEUE_BUSY;
+                       return BLK_STS_RESOURCE;
                }
                dev_err_ratelimited(disk_to_dev(nbd->disk),
                        "Send control failed (result %d)\n", result);
@@ -506,7 +510,7 @@ send_pages:
                                         */
                                        nsock->pending = req;
                                        nsock->sent = sent;
-                                       return BLK_MQ_RQ_QUEUE_BUSY;
+                                       return BLK_STS_RESOURCE;
                                }
                                dev_err(disk_to_dev(nbd->disk),
                                        "Send data failed (result %d)\n",
@@ -574,7 +578,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
        if (ntohl(reply.error)) {
                dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
                        ntohl(reply.error));
-               cmd->status = -EIO;
+               cmd->status = BLK_STS_IOERR;
                return cmd;
        }
 
@@ -599,7 +603,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                                 */
                                if (nbd_disconnected(config) ||
                                    config->num_connections <= 1) {
-                                       cmd->status = -EIO;
+                                       cmd->status = BLK_STS_IOERR;
                                        return cmd;
                                }
                                return ERR_PTR(-EIO);
@@ -651,7 +655,7 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved)
        if (!blk_mq_request_started(req))
                return;
        cmd = blk_mq_rq_to_pdu(req);
-       cmd->status = -EIO;
+       cmd->status = BLK_STS_IOERR;
        blk_mq_complete_request(req);
 }
 
@@ -740,7 +744,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
                nbd_config_put(nbd);
                return -EINVAL;
        }
-       cmd->status = 0;
+       cmd->status = BLK_STS_OK;
 again:
        nsock = config->socks[index];
        mutex_lock(&nsock->tx_lock);
@@ -794,7 +798,7 @@ out:
        return ret;
 }
 
-static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd)
 {
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -818,13 +822,9 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
         * appropriate.
         */
        ret = nbd_handle_cmd(cmd, hctx->queue_num);
-       if (ret < 0)
-               ret = BLK_MQ_RQ_QUEUE_ERROR;
-       if (!ret)
-               ret = BLK_MQ_RQ_QUEUE_OK;
        complete(&cmd->send_complete);
 
-       return ret;
+       return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
 }
 
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
@@ -910,6 +910,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
                        continue;
                }
                sk_set_memalloc(sock->sk);
+               sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
                atomic_inc(&config->recv_threads);
                refcount_inc(&nbd->config_refs);
                old = nsock->sock;
@@ -957,8 +958,12 @@ static void nbd_parse_flags(struct nbd_device *nbd)
                set_disk_ro(nbd->disk, false);
        if (config->flags & NBD_FLAG_SEND_TRIM)
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
-       if (config->flags & NBD_FLAG_SEND_FLUSH)
-               blk_queue_write_cache(nbd->disk->queue, true, false);
+       if (config->flags & NBD_FLAG_SEND_FLUSH) {
+               if (config->flags & NBD_FLAG_SEND_FUA)
+                       blk_queue_write_cache(nbd->disk->queue, true, true);
+               else
+                       blk_queue_write_cache(nbd->disk->queue, true, false);
+       }
        else
                blk_queue_write_cache(nbd->disk->queue, false, false);
 }
@@ -1071,6 +1076,7 @@ static int nbd_start_device(struct nbd_device *nbd)
                        return -ENOMEM;
                }
                sk_set_memalloc(config->socks[i]->sock->sk);
+               config->socks[i]->sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
                atomic_inc(&config->recv_threads);
                refcount_inc(&nbd->config_refs);
                INIT_WORK(&args->work, recv_work);
@@ -1305,6 +1311,8 @@ static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
                seq_puts(s, "NBD_FLAG_READ_ONLY\n");
        if (flags & NBD_FLAG_SEND_FLUSH)
                seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
+       if (flags & NBD_FLAG_SEND_FUA)
+               seq_puts(s, "NBD_FLAG_SEND_FUA\n");
        if (flags & NBD_FLAG_SEND_TRIM)
                seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
 
index d946e1eeac8ef0dafbf3510f3aaa57925ea5f07c..586dfff5d53f9f911bb67a9c2d6f11fe24fb91fb 100644 (file)
@@ -229,11 +229,11 @@ static void end_cmd(struct nullb_cmd *cmd)
 
        switch (queue_mode)  {
        case NULL_Q_MQ:
-               blk_mq_end_request(cmd->rq, 0);
+               blk_mq_end_request(cmd->rq, BLK_STS_OK);
                return;
        case NULL_Q_RQ:
                INIT_LIST_HEAD(&cmd->rq->queuelist);
-               blk_end_request_all(cmd->rq, 0);
+               blk_end_request_all(cmd->rq, BLK_STS_OK);
                break;
        case NULL_Q_BIO:
                bio_endio(cmd->bio);
@@ -356,7 +356,7 @@ static void null_request_fn(struct request_queue *q)
        }
 }
 
-static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
 {
        struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -373,7 +373,7 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_mq_start_request(bd->rq);
 
        null_handle_cmd(cmd);
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
@@ -422,11 +422,12 @@ static void cleanup_queues(struct nullb *nullb)
 
 #ifdef CONFIG_NVM
 
-static void null_lnvm_end_io(struct request *rq, int error)
+static void null_lnvm_end_io(struct request *rq, blk_status_t status)
 {
        struct nvm_rq *rqd = rq->end_io_data;
 
-       rqd->error = error;
+       /* XXX: lighnvm core seems to expect NVM_RSP_* values here.. */
+       rqd->error = status ? -EIO : 0;
        nvm_end_io(rqd);
 
        blk_put_request(rq);
index b1267ef34d5a7d918a5be1ff6d50194f96f78f33..cffe42d80ce9a6a47d564cecef1774c6f5f5e37e 100644 (file)
@@ -783,7 +783,7 @@ static void pcd_request(void)
                        ps_set_intr(do_pcd_read, NULL, 0, nice);
                        return;
                } else {
-                       __blk_end_request_all(pcd_req, -EIO);
+                       __blk_end_request_all(pcd_req, BLK_STS_IOERR);
                        pcd_req = NULL;
                }
        }
@@ -794,7 +794,7 @@ static void do_pcd_request(struct request_queue *q)
        pcd_request();
 }
 
-static inline void next_request(int err)
+static inline void next_request(blk_status_t err)
 {
        unsigned long saved_flags;
 
@@ -837,7 +837,7 @@ static void pcd_start(void)
 
        if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
                pcd_bufblk = -1;
-               next_request(-EIO);
+               next_request(BLK_STS_IOERR);
                return;
        }
 
@@ -871,7 +871,7 @@ static void do_pcd_read_drq(void)
                        return;
                }
                pcd_bufblk = -1;
-               next_request(-EIO);
+               next_request(BLK_STS_IOERR);
                return;
        }
 
index 7d2402f9097892332a43ba632c88084f52b53efa..c98983be4f9c088bcd3ebb2e98e1462a7dc6fbda 100644 (file)
@@ -438,7 +438,7 @@ static void run_fsm(void)
                                phase = NULL;
                                spin_lock_irqsave(&pd_lock, saved_flags);
                                if (!__blk_end_request_cur(pd_req,
-                                               res == Ok ? 0 : -EIO)) {
+                                               res == Ok ? 0 : BLK_STS_IOERR)) {
                                        if (!set_next_request())
                                                stop = 1;
                                }
index f24ca7315ddc91e24e2cfa6ca62c7f3a5578a296..5f46da8d05cd427e1351eb2358d6a712ac470831 100644 (file)
@@ -801,7 +801,7 @@ static int set_next_request(void)
        return pf_req != NULL;
 }
 
-static void pf_end_request(int err)
+static void pf_end_request(blk_status_t err)
 {
        if (pf_req && !__blk_end_request_cur(pf_req, err))
                pf_req = NULL;
@@ -821,7 +821,7 @@ repeat:
        pf_count = blk_rq_cur_sectors(pf_req);
 
        if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
-               pf_end_request(-EIO);
+               pf_end_request(BLK_STS_IOERR);
                goto repeat;
        }
 
@@ -836,7 +836,7 @@ repeat:
                pi_do_claimed(pf_current->pi, do_pf_write);
        else {
                pf_busy = 0;
-               pf_end_request(-EIO);
+               pf_end_request(BLK_STS_IOERR);
                goto repeat;
        }
 }
@@ -868,7 +868,7 @@ static int pf_next_buf(void)
        return 0;
 }
 
-static inline void next_request(int err)
+static inline void next_request(blk_status_t err)
 {
        unsigned long saved_flags;
 
@@ -896,7 +896,7 @@ static void do_pf_read_start(void)
                        pi_do_claimed(pf_current->pi, do_pf_read_start);
                        return;
                }
-               next_request(-EIO);
+               next_request(BLK_STS_IOERR);
                return;
        }
        pf_mask = STAT_DRQ;
@@ -915,7 +915,7 @@ static void do_pf_read_drq(void)
                                pi_do_claimed(pf_current->pi, do_pf_read_start);
                                return;
                        }
-                       next_request(-EIO);
+                       next_request(BLK_STS_IOERR);
                        return;
                }
                pi_read_block(pf_current->pi, pf_buf, 512);
@@ -942,7 +942,7 @@ static void do_pf_write_start(void)
                        pi_do_claimed(pf_current->pi, do_pf_write_start);
                        return;
                }
-               next_request(-EIO);
+               next_request(BLK_STS_IOERR);
                return;
        }
 
@@ -955,7 +955,7 @@ static void do_pf_write_start(void)
                                pi_do_claimed(pf_current->pi, do_pf_write_start);
                                return;
                        }
-                       next_request(-EIO);
+                       next_request(BLK_STS_IOERR);
                        return;
                }
                pi_write_block(pf_current->pi, pf_buf, 512);
@@ -975,7 +975,7 @@ static void do_pf_write_done(void)
                        pi_do_claimed(pf_current->pi, do_pf_write_start);
                        return;
                }
-               next_request(-EIO);
+               next_request(BLK_STS_IOERR);
                return;
        }
        pi_disconnect(pf_current->pi);
index 205b865ebeb9f123b12beb8d1a8c5179bbad7bd9..e8a381161db69593e238f4c6f7e8af81678919cc 100644 (file)
@@ -952,9 +952,9 @@ static void pkt_end_io_read(struct bio *bio)
 
        pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
                bio, (unsigned long long)pkt->sector,
-               (unsigned long long)bio->bi_iter.bi_sector, bio->bi_error);
+               (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                atomic_inc(&pkt->io_errors);
        if (atomic_dec_and_test(&pkt->io_wait)) {
                atomic_inc(&pkt->run_sm);
@@ -969,7 +969,7 @@ static void pkt_end_io_packet_write(struct bio *bio)
        struct pktcdvd_device *pd = pkt->pd;
        BUG_ON(!pd);
 
-       pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_error);
+       pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);
 
        pd->stats.pkt_ended++;
 
@@ -1305,16 +1305,16 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
        pkt_queue_bio(pd, pkt->w_bio);
 }
 
-static void pkt_finish_packet(struct packet_data *pkt, int error)
+static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
 {
        struct bio *bio;
 
-       if (error)
+       if (status)
                pkt->cache_valid = 0;
 
        /* Finish all bios corresponding to this packet */
        while ((bio = bio_list_pop(&pkt->orig_bios))) {
-               bio->bi_error = error;
+               bio->bi_status = status;
                bio_endio(bio);
        }
 }
@@ -1349,7 +1349,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
                        if (atomic_read(&pkt->io_wait) > 0)
                                return;
 
-                       if (!pkt->w_bio->bi_error) {
+                       if (!pkt->w_bio->bi_status) {
                                pkt_set_state(pkt, PACKET_FINISHED_STATE);
                        } else {
                                pkt_set_state(pkt, PACKET_RECOVERY_STATE);
@@ -1366,7 +1366,7 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
                        break;
 
                case PACKET_FINISHED_STATE:
-                       pkt_finish_packet(pkt, pkt->w_bio->bi_error);
+                       pkt_finish_packet(pkt, pkt->w_bio->bi_status);
                        return;
 
                default:
@@ -2301,7 +2301,7 @@ static void pkt_end_io_read_cloned(struct bio *bio)
        struct packet_stacked_data *psd = bio->bi_private;
        struct pktcdvd_device *pd = psd->pd;
 
-       psd->bio->bi_error = bio->bi_error;
+       psd->bio->bi_status = bio->bi_status;
        bio_put(bio);
        bio_endio(psd->bio);
        mempool_free(psd, psd_pool);
@@ -2583,6 +2583,11 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
        bdev = bdget(dev);
        if (!bdev)
                return -ENOMEM;
+       if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
+               WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
+               bdput(bdev);
+               return -EINVAL;
+       }
        ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
        if (ret)
                return ret;
index a809e3e9feb8b885af9cd439de909c7e2e27f220..075662f2cf46631c10fde3f8ca9c95271145ee61 100644 (file)
@@ -158,7 +158,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
        if (res) {
                dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
                        __LINE__, op, res);
-               __blk_end_request_all(req, -EIO);
+               __blk_end_request_all(req, BLK_STS_IOERR);
                return 0;
        }
 
@@ -180,7 +180,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
        if (res) {
                dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
                        __func__, __LINE__, res);
-               __blk_end_request_all(req, -EIO);
+               __blk_end_request_all(req, BLK_STS_IOERR);
                return 0;
        }
 
@@ -208,7 +208,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
                        break;
                default:
                        blk_dump_rq_flags(req, DEVICE_NAME " bad request");
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                }
        }
 }
@@ -231,7 +231,8 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
        struct ps3_storage_device *dev = data;
        struct ps3disk_private *priv;
        struct request *req;
-       int res, read, error;
+       int res, read;
+       blk_status_t error;
        u64 tag, status;
        const char *op;
 
@@ -269,7 +270,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
        if (status) {
                dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%llx\n", __func__,
                        __LINE__, op, status);
-               error = -EIO;
+               error = BLK_STS_IOERR;
        } else {
                dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__,
                        __LINE__, op);
index 456b4fe21559877c825b4e3da05b3c0de841a7e4..6fa2b8197013f9c8dfe8442f6c2ccd8d0d28d80b 100644 (file)
@@ -428,7 +428,7 @@ static void ps3vram_cache_cleanup(struct ps3_system_bus_device *dev)
        kfree(priv->cache.tags);
 }
 
-static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
+static blk_status_t ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
                        size_t len, size_t *retlen, u_char *buf)
 {
        struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -438,7 +438,7 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
                (unsigned int)from, len);
 
        if (from >= priv->size)
-               return -EIO;
+               return BLK_STS_IOERR;
 
        if (len > priv->size - from)
                len = priv->size - from;
@@ -472,14 +472,14 @@ static int ps3vram_read(struct ps3_system_bus_device *dev, loff_t from,
        return 0;
 }
 
-static int ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
+static blk_status_t ps3vram_write(struct ps3_system_bus_device *dev, loff_t to,
                         size_t len, size_t *retlen, const u_char *buf)
 {
        struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
        unsigned int cached, count;
 
        if (to >= priv->size)
-               return -EIO;
+               return BLK_STS_IOERR;
 
        if (len > priv->size - to)
                len = priv->size - to;
@@ -554,7 +554,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
        int write = bio_data_dir(bio) == WRITE;
        const char *op = write ? "write" : "read";
        loff_t offset = bio->bi_iter.bi_sector << 9;
-       int error = 0;
+       blk_status_t error = 0;
        struct bio_vec bvec;
        struct bvec_iter iter;
        struct bio *next;
@@ -578,7 +578,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 
                if (retlen != len) {
                        dev_err(&dev->core, "Short %s\n", op);
-                       error = -EIO;
+                       error = BLK_STS_IOERR;
                        goto out;
                }
 
@@ -593,7 +593,7 @@ out:
        next = bio_list_peek(&priv->list);
        spin_unlock_irq(&priv->lock);
 
-       bio->bi_error = error;
+       bio->bi_status = error;
        bio_endio(bio);
        return next;
 }
index c16f74547804ccb957275f6d59b705b0ba35eb6b..5420bc40c544f3fc5f1e7f020efce44d8086b6cd 100644 (file)
@@ -2293,11 +2293,13 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
                rbd_assert(img_request->obj_request != NULL);
                more = obj_request->which < img_request->obj_request_count - 1;
        } else {
+               blk_status_t status = errno_to_blk_status(result);
+
                rbd_assert(img_request->rq != NULL);
 
-               more = blk_update_request(img_request->rq, result, xferred);
+               more = blk_update_request(img_request->rq, status, xferred);
                if (!more)
-                       __blk_mq_end_request(img_request->rq, result);
+                       __blk_mq_end_request(img_request->rq, status);
        }
 
        return more;
@@ -4150,17 +4152,17 @@ err_rq:
                         obj_op_name(op_type), length, offset, result);
        ceph_put_snap_context(snapc);
 err:
-       blk_mq_end_request(rq, result);
+       blk_mq_end_request(rq, errno_to_blk_status(result));
 }
 
-static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
        struct request *rq = bd->rq;
        struct work_struct *work = blk_mq_rq_to_pdu(rq);
 
        queue_work(rbd_wq, work);
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 static void rbd_free_disk(struct rbd_device *rbd_dev)
index 9c566364ac9c3c5890d466a26c0d7daf72eeb38c..0b0a0a9023554ccecc9ccd0b9ece9abafec5828c 100644 (file)
@@ -149,7 +149,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 {
        struct rsxx_cardinfo *card = q->queuedata;
        struct rsxx_bio_meta *bio_meta;
-       int st = -EINVAL;
+       blk_status_t st = BLK_STS_IOERR;
 
        blk_queue_split(q, &bio, q->bio_split);
 
@@ -161,15 +161,11 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
        if (bio_end_sector(bio) > get_capacity(card->gendisk))
                goto req_err;
 
-       if (unlikely(card->halt)) {
-               st = -EFAULT;
+       if (unlikely(card->halt))
                goto req_err;
-       }
 
-       if (unlikely(card->dma_fault)) {
-               st = (-EFAULT);
+       if (unlikely(card->dma_fault))
                goto req_err;
-       }
 
        if (bio->bi_iter.bi_size == 0) {
                dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
@@ -178,7 +174,7 @@ static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 
        bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
        if (!bio_meta) {
-               st = -ENOMEM;
+               st = BLK_STS_RESOURCE;
                goto req_err;
        }
 
@@ -205,7 +201,7 @@ queue_err:
        kmem_cache_free(bio_meta_pool, bio_meta);
 req_err:
        if (st)
-               bio->bi_error = st;
+               bio->bi_status = st;
        bio_endio(bio);
        return BLK_QC_T_NONE;
 }
index 5a20385f87d045af1704205dea18b0aa9e7a1260..6a1b2177951c1521f50b9364739bd433a16a577e 100644 (file)
@@ -611,7 +611,7 @@ static void rsxx_schedule_done(struct work_struct *work)
        mutex_unlock(&ctrl->work_lock);
 }
 
-static int rsxx_queue_discard(struct rsxx_cardinfo *card,
+static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card,
                                  struct list_head *q,
                                  unsigned int laddr,
                                  rsxx_dma_cb cb,
@@ -621,7 +621,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card,
 
        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
        if (!dma)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        dma->cmd          = HW_CMD_BLK_DISCARD;
        dma->laddr        = laddr;
@@ -640,7 +640,7 @@ static int rsxx_queue_discard(struct rsxx_cardinfo *card,
        return 0;
 }
 
-static int rsxx_queue_dma(struct rsxx_cardinfo *card,
+static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card,
                              struct list_head *q,
                              int dir,
                              unsigned int dma_off,
@@ -655,7 +655,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
 
        dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
        if (!dma)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
        dma->laddr        = laddr;
@@ -677,7 +677,7 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
        return 0;
 }
 
-int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                           struct bio *bio,
                           atomic_t *n_dmas,
                           rsxx_dma_cb cb,
@@ -694,7 +694,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
        unsigned int dma_len;
        int dma_cnt[RSXX_MAX_TARGETS];
        int tgt;
-       int st;
+       blk_status_t st;
        int i;
 
        addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
@@ -769,7 +769,6 @@ bvec_err:
        for (i = 0; i < card->n_targets; i++)
                rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
                                        FREE_DMA);
-
        return st;
 }
 
index 6bbc64d0f69042033614e05f15f5d2dd2d878768..277f27e673a2ccc78ea683f2c555a7bdfcc810f1 100644 (file)
@@ -391,7 +391,7 @@ int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
 void rsxx_dma_cleanup(void);
 void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
 int rsxx_dma_configure(struct rsxx_cardinfo *card);
-int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
+blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                           struct bio *bio,
                           atomic_t *n_dmas,
                           rsxx_dma_cb cb,
index 27833e4dae2adffca5eec7714272459cd0a29bb7..e6c526861703bb8974aa796a2a13dede6b2a2939 100644 (file)
@@ -451,8 +451,8 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
                                    struct skd_special_context *skspcl);
 static void skd_request_fn(struct request_queue *rq);
 static void skd_end_request(struct skd_device *skdev,
-                           struct skd_request_context *skreq, int error);
-static int skd_preop_sg_list(struct skd_device *skdev,
+               struct skd_request_context *skreq, blk_status_t status);
+static bool skd_preop_sg_list(struct skd_device *skdev,
                             struct skd_request_context *skreq);
 static void skd_postop_sg_list(struct skd_device *skdev,
                               struct skd_request_context *skreq);
@@ -491,7 +491,7 @@ static void skd_fail_all_pending(struct skd_device *skdev)
                if (req == NULL)
                        break;
                blk_start_request(req);
-               __blk_end_request_all(req, -EIO);
+               __blk_end_request_all(req, BLK_STS_IOERR);
        }
 }
 
@@ -545,7 +545,6 @@ static void skd_request_fn(struct request_queue *q)
        struct request *req = NULL;
        struct skd_scsi_request *scsi_req;
        unsigned long io_flags;
-       int error;
        u32 lba;
        u32 count;
        int data_dir;
@@ -716,9 +715,7 @@ static void skd_request_fn(struct request_queue *q)
                if (!req->bio)
                        goto skip_sg;
 
-               error = skd_preop_sg_list(skdev, skreq);
-
-               if (error != 0) {
+               if (!skd_preop_sg_list(skdev, skreq)) {
                        /*
                         * Complete the native request with error.
                         * Note that the request context is still at the
@@ -730,7 +727,7 @@ static void skd_request_fn(struct request_queue *q)
                         */
                        pr_debug("%s:%s:%d error Out\n",
                                 skdev->name, __func__, __LINE__);
-                       skd_end_request(skdev, skreq, error);
+                       skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
                        continue;
                }
 
@@ -805,7 +802,7 @@ skip_sg:
 }
 
 static void skd_end_request(struct skd_device *skdev,
-                           struct skd_request_context *skreq, int error)
+               struct skd_request_context *skreq, blk_status_t error)
 {
        if (unlikely(error)) {
                struct request *req = skreq->req;
@@ -822,7 +819,7 @@ static void skd_end_request(struct skd_device *skdev,
        __blk_end_request_all(skreq->req, error);
 }
 
-static int skd_preop_sg_list(struct skd_device *skdev,
+static bool skd_preop_sg_list(struct skd_device *skdev,
                             struct skd_request_context *skreq)
 {
        struct request *req = skreq->req;
@@ -839,7 +836,7 @@ static int skd_preop_sg_list(struct skd_device *skdev,
 
        n_sg = blk_rq_map_sg(skdev->queue, req, sg);
        if (n_sg <= 0)
-               return -EINVAL;
+               return false;
 
        /*
         * Map scatterlist to PCI bus addresses.
@@ -847,7 +844,7 @@ static int skd_preop_sg_list(struct skd_device *skdev,
         */
        n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
        if (n_sg <= 0)
-               return -EINVAL;
+               return false;
 
        SKD_ASSERT(n_sg <= skdev->sgs_per_request);
 
@@ -882,7 +879,7 @@ static int skd_preop_sg_list(struct skd_device *skdev,
                }
        }
 
-       return 0;
+       return true;
 }
 
 static void skd_postop_sg_list(struct skd_device *skdev,
@@ -2333,7 +2330,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
        switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
        case SKD_CHECK_STATUS_REPORT_GOOD:
        case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
-               skd_end_request(skdev, skreq, 0);
+               skd_end_request(skdev, skreq, BLK_STS_OK);
                break;
 
        case SKD_CHECK_STATUS_BUSY_IMMINENT:
@@ -2355,7 +2352,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
 
        case SKD_CHECK_STATUS_REPORT_ERROR:
        default:
-               skd_end_request(skdev, skreq, -EIO);
+               skd_end_request(skdev, skreq, BLK_STS_IOERR);
                break;
        }
 }
@@ -2748,7 +2745,7 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
                         * native request.
                         */
                        if (likely(cmp_status == SAM_STAT_GOOD))
-                               skd_end_request(skdev, skreq, 0);
+                               skd_end_request(skdev, skreq, BLK_STS_OK);
                        else
                                skd_resolve_req_exception(skdev, skreq);
                }
@@ -3190,7 +3187,7 @@ static void skd_recover_requests(struct skd_device *skdev, int requeue)
                            SKD_MAX_RETRIES)
                                blk_requeue_request(skdev->queue, skreq->req);
                        else
-                               skd_end_request(skdev, skreq, -EIO);
+                               skd_end_request(skdev, skreq, BLK_STS_IOERR);
 
                        skreq->req = NULL;
 
index 3f3a3ab3d50ae02b418c27dc4a34d9ef8a44e9c4..6b16ead1da5871abcef5b2233733f281158596a8 100644 (file)
@@ -316,7 +316,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
 
        rqe->req = NULL;
 
-       __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
+       __blk_end_request(req, (desc->status ? BLK_STS_IOERR : 0), desc->size);
 
        vdc_blk_queue_start(port);
 }
@@ -1023,7 +1023,7 @@ static void vdc_queue_drain(struct vdc_port *port)
        struct request *req;
 
        while ((req = blk_fetch_request(port->disk->queue)) != NULL)
-               __blk_end_request_all(req, -EIO);
+               __blk_end_request_all(req, BLK_STS_IOERR);
 }
 
 static void vdc_ldc_reset_timer(unsigned long _arg)
index 3064be6cf3755a4017cbd2a30cda023977250cbf..1633aaf240600859302b7cf4902e48aaf0c9e78c 100644 (file)
@@ -493,7 +493,7 @@ static inline int swim_read_sector(struct floppy_state *fs,
        return ret;
 }
 
-static int floppy_read_sectors(struct floppy_state *fs,
+static blk_status_t floppy_read_sectors(struct floppy_state *fs,
                               int req_sector, int sectors_nb,
                               unsigned char *buffer)
 {
@@ -516,7 +516,7 @@ static int floppy_read_sectors(struct floppy_state *fs,
                        ret = swim_read_sector(fs, side, track, sector,
                                                buffer);
                        if (try-- == 0)
-                               return -EIO;
+                               return BLK_STS_IOERR;
                } while (ret != 512);
 
                buffer += ret;
@@ -553,7 +553,7 @@ static void do_fd_request(struct request_queue *q)
 
        req = swim_next_request(swd);
        while (req) {
-               int err = -EIO;
+               blk_status_t err = BLK_STS_IOERR;
 
                fs = req->rq_disk->private_data;
                if (blk_rq_pos(req) >= fs->total_secs)
index ba4809c9bdbadfccfb3bd28b7e3c9825ce0e39f1..c7953860ce9127d1ce086b2a7387267c4eaea3f4 100644 (file)
@@ -257,7 +257,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,
                                        unsigned int clearing);
 static int floppy_revalidate(struct gendisk *disk);
 
-static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)
 {
        struct request *req = fs->cur_req;
        int rc;
@@ -334,7 +334,7 @@ static void start_request(struct floppy_state *fs)
                if (fs->mdev->media_bay &&
                    check_media_bay(fs->mdev->media_bay) != MB_FD) {
                        swim3_dbg("%s", "  media bay absent, dropping req\n");
-                       swim3_end_request(fs, -ENODEV, 0);
+                       swim3_end_request(fs, BLK_STS_IOERR, 0);
                        continue;
                }
 
@@ -350,12 +350,12 @@ static void start_request(struct floppy_state *fs)
                if (blk_rq_pos(req) >= fs->total_secs) {
                        swim3_dbg("  pos out of bounds (%ld, max is %ld)\n",
                                  (long)blk_rq_pos(req), (long)fs->total_secs);
-                       swim3_end_request(fs, -EIO, 0);
+                       swim3_end_request(fs, BLK_STS_IOERR, 0);
                        continue;
                }
                if (fs->ejected) {
                        swim3_dbg("%s", "  disk ejected\n");
-                       swim3_end_request(fs, -EIO, 0);
+                       swim3_end_request(fs, BLK_STS_IOERR, 0);
                        continue;
                }
 
@@ -364,7 +364,7 @@ static void start_request(struct floppy_state *fs)
                                fs->write_prot = swim3_readbit(fs, WRITE_PROT);
                        if (fs->write_prot) {
                                swim3_dbg("%s", "  try to write, disk write protected\n");
-                               swim3_end_request(fs, -EIO, 0);
+                               swim3_end_request(fs, BLK_STS_IOERR, 0);
                                continue;
                        }
                }
@@ -548,7 +548,7 @@ static void act(struct floppy_state *fs)
                                if (fs->retries > 5) {
                                        swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
                                                  fs->req_cyl, fs->cur_cyl);
-                                       swim3_end_request(fs, -EIO, 0);
+                                       swim3_end_request(fs, BLK_STS_IOERR, 0);
                                        fs->state = idle;
                                        return;
                                }
@@ -584,7 +584,7 @@ static void scan_timeout(unsigned long data)
        out_8(&sw->intr_enable, 0);
        fs->cur_cyl = -1;
        if (fs->retries > 5) {
-               swim3_end_request(fs, -EIO, 0);
+               swim3_end_request(fs, BLK_STS_IOERR, 0);
                fs->state = idle;
                start_request(fs);
        } else {
@@ -608,7 +608,7 @@ static void seek_timeout(unsigned long data)
        out_8(&sw->select, RELAX);
        out_8(&sw->intr_enable, 0);
        swim3_err("%s", "Seek timeout\n");
-       swim3_end_request(fs, -EIO, 0);
+       swim3_end_request(fs, BLK_STS_IOERR, 0);
        fs->state = idle;
        start_request(fs);
        spin_unlock_irqrestore(&swim3_lock, flags);
@@ -637,7 +637,7 @@ static void settle_timeout(unsigned long data)
                goto unlock;
        }
        swim3_err("%s", "Seek settle timeout\n");
-       swim3_end_request(fs, -EIO, 0);
+       swim3_end_request(fs, BLK_STS_IOERR, 0);
        fs->state = idle;
        start_request(fs);
  unlock:
@@ -666,7 +666,7 @@ static void xfer_timeout(unsigned long data)
        swim3_err("Timeout %sing sector %ld\n",
               (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
               (long)blk_rq_pos(fs->cur_req));
-       swim3_end_request(fs, -EIO, 0);
+       swim3_end_request(fs, BLK_STS_IOERR, 0);
        fs->state = idle;
        start_request(fs);
        spin_unlock_irqrestore(&swim3_lock, flags);
@@ -703,7 +703,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                                swim3_err("%s", "Seen sector but cyl=ff?\n");
                                fs->cur_cyl = -1;
                                if (fs->retries > 5) {
-                                       swim3_end_request(fs, -EIO, 0);
+                                       swim3_end_request(fs, BLK_STS_IOERR, 0);
                                        fs->state = idle;
                                        start_request(fs);
                                } else {
@@ -786,7 +786,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                                swim3_err("Error %sing block %ld (err=%x)\n",
                                       rq_data_dir(req) == WRITE? "writ": "read",
                                       (long)blk_rq_pos(req), err);
-                               swim3_end_request(fs, -EIO, 0);
+                               swim3_end_request(fs, BLK_STS_IOERR, 0);
                                fs->state = idle;
                        }
                } else {
@@ -795,7 +795,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                                swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
                                swim3_err("  state=%d, dir=%x, intr=%x, err=%x\n",
                                          fs->state, rq_data_dir(req), intr, err);
-                               swim3_end_request(fs, -EIO, 0);
+                               swim3_end_request(fs, BLK_STS_IOERR, 0);
                                fs->state = idle;
                                start_request(fs);
                                break;
index c8e072caf56ffcd9678b850ffab9886f6ac9b5c6..08586dc14e853b8ed2c6cbc38566d66b21b35f90 100644 (file)
@@ -745,7 +745,7 @@ static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
 
 static inline void carm_end_request_queued(struct carm_host *host,
                                           struct carm_request *crq,
-                                          int error)
+                                          blk_status_t error)
 {
        struct request *req = crq->rq;
        int rc;
@@ -791,7 +791,7 @@ static inline void carm_round_robin(struct carm_host *host)
 }
 
 static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
-                              int error)
+                              blk_status_t error)
 {
        carm_end_request_queued(host, crq, error);
        if (max_queue == 1)
@@ -869,14 +869,14 @@ queue_one_request:
        sg = &crq->sg[0];
        n_elem = blk_rq_map_sg(q, rq, sg);
        if (n_elem <= 0) {
-               carm_end_rq(host, crq, -EIO);
+               carm_end_rq(host, crq, BLK_STS_IOERR);
                return;         /* request with no s/g entries? */
        }
 
        /* map scatterlist to PCI bus addresses */
        n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
        if (n_elem <= 0) {
-               carm_end_rq(host, crq, -EIO);
+               carm_end_rq(host, crq, BLK_STS_IOERR);
                return;         /* request with no s/g entries? */
        }
        crq->n_elem = n_elem;
@@ -937,7 +937,7 @@ queue_one_request:
 
 static void carm_handle_array_info(struct carm_host *host,
                                   struct carm_request *crq, u8 *mem,
-                                  int error)
+                                  blk_status_t error)
 {
        struct carm_port *port;
        u8 *msg_data = mem + sizeof(struct carm_array_info);
@@ -997,7 +997,7 @@ out:
 
 static void carm_handle_scan_chan(struct carm_host *host,
                                  struct carm_request *crq, u8 *mem,
-                                 int error)
+                                 blk_status_t error)
 {
        u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
        unsigned int i, dev_count = 0;
@@ -1029,7 +1029,7 @@ out:
 }
 
 static void carm_handle_generic(struct carm_host *host,
-                               struct carm_request *crq, int error,
+                               struct carm_request *crq, blk_status_t error,
                                int cur_state, int next_state)
 {
        DPRINTK("ENTER\n");
@@ -1045,7 +1045,7 @@ static void carm_handle_generic(struct carm_host *host,
 }
 
 static inline void carm_handle_rw(struct carm_host *host,
-                                 struct carm_request *crq, int error)
+                                 struct carm_request *crq, blk_status_t error)
 {
        int pci_dir;
 
@@ -1067,7 +1067,7 @@ static inline void carm_handle_resp(struct carm_host *host,
        u32 handle = le32_to_cpu(ret_handle_le);
        unsigned int msg_idx;
        struct carm_request *crq;
-       int error = (status == RMSG_OK) ? 0 : -EIO;
+       blk_status_t error = (status == RMSG_OK) ? 0 : BLK_STS_IOERR;
        u8 *mem;
 
        VPRINTK("ENTER, handle == 0x%x\n", handle);
@@ -1155,7 +1155,7 @@ static inline void carm_handle_resp(struct carm_host *host,
 err_out:
        printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
               pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
-       carm_end_rq(host, crq, -EIO);
+       carm_end_rq(host, crq, BLK_STS_IOERR);
 }
 
 static inline void carm_handle_responses(struct carm_host *host)
index c141cc3be22bddc12079e3195c93dea225aafb9b..4b3c947697b1f693aa0b389c2be413406eab2de8 100644 (file)
@@ -454,7 +454,7 @@ static void process_page(unsigned long data)
                                PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
                if (control & DMASCR_HARD_ERROR) {
                        /* error */
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
                        dev_printk(KERN_WARNING, &card->dev->dev,
                                "I/O error on sector %d/%d\n",
                                le32_to_cpu(desc->local_addr)>>9,
index 553cc4c542b4f13a5a04d4ca48af24198401c9f8..e59bd4549a8a713cfc8e0c2f0b7beec383739666 100644 (file)
@@ -64,15 +64,15 @@ struct virtblk_req {
        struct scatterlist sg[];
 };
 
-static inline int virtblk_result(struct virtblk_req *vbr)
+static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
 {
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
-               return 0;
+               return BLK_STS_OK;
        case VIRTIO_BLK_S_UNSUPP:
-               return -ENOTTY;
+               return BLK_STS_NOTSUPP;
        default:
-               return -EIO;
+               return BLK_STS_IOERR;
        }
 }
 
@@ -214,7 +214,7 @@ static void virtblk_done(struct virtqueue *vq)
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
 }
 
-static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
 {
        struct virtio_blk *vblk = hctx->queue->queuedata;
@@ -246,7 +246,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                break;
        default:
                WARN_ON_ONCE(1);
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
        }
 
        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
@@ -276,8 +276,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
-                       return BLK_MQ_RQ_QUEUE_BUSY;
-               return BLK_MQ_RQ_QUEUE_ERROR;
+                       return BLK_STS_RESOURCE;
+               return BLK_STS_IOERR;
        }
 
        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
@@ -286,7 +286,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 /* return id (s/n) string for *disk to *id_str
@@ -307,7 +307,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
                goto out;
 
        blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
-       err = virtblk_result(blk_mq_rq_to_pdu(req));
+       err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
 out:
        blk_put_request(req);
        return err;
index 726c32e35db9c542e6f050ff0a04e31e10fc2b7d..746bd8c8c09acc7a2fe4594648215219ba148ded 100644 (file)
@@ -1069,20 +1069,17 @@ static void xen_blk_drain_io(struct xen_blkif_ring *ring)
        atomic_set(&blkif->drain, 0);
 }
 
-/*
- * Completion callback on the bio's. Called as bh->b_end_io()
- */
-
-static void __end_block_io_op(struct pending_req *pending_req, int error)
+static void __end_block_io_op(struct pending_req *pending_req,
+               blk_status_t error)
 {
        /* An error fails the entire request. */
-       if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
-           (error == -EOPNOTSUPP)) {
+       if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
+           error == BLK_STS_NOTSUPP) {
                pr_debug("flush diskcache op failed, not supported\n");
                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
-       } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
-                   (error == -EOPNOTSUPP)) {
+       } else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
+                  error == BLK_STS_NOTSUPP) {
                pr_debug("write barrier op failed, not supported\n");
                xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
@@ -1106,7 +1103,7 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
  */
 static void end_block_io_op(struct bio *bio)
 {
-       __end_block_io_op(bio->bi_private, bio->bi_error);
+       __end_block_io_op(bio->bi_private, bio->bi_status);
        bio_put(bio);
 }
 
@@ -1423,7 +1420,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
        atomic_set(&pending_req->pendcnt, 1);
-       __end_block_io_op(pending_req, -EINVAL);
+       __end_block_io_op(pending_req, BLK_STS_RESOURCE);
        msleep(1); /* back off a bit */
        return -EIO;
 }
index 39459631667cc248a8d569bf13ed6e67273848ae..e3be666c2776afc89db9299d350e98ac60a4f774 100644 (file)
@@ -881,7 +881,7 @@ static inline bool blkif_request_flush_invalid(struct request *req,
                 !info->feature_fua));
 }
 
-static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *qd)
 {
        unsigned long flags;
@@ -904,16 +904,16 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        flush_requests(rinfo);
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 
 out_err:
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
-       return BLK_MQ_RQ_QUEUE_ERROR;
+       return BLK_STS_IOERR;
 
 out_busy:
        spin_unlock_irqrestore(&rinfo->ring_lock, flags);
        blk_mq_stop_hw_queue(hctx);
-       return BLK_MQ_RQ_QUEUE_BUSY;
+       return BLK_STS_RESOURCE;
 }
 
 static void blkif_complete_rq(struct request *rq)
@@ -1601,14 +1601,18 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                        continue;
                }
 
-               blkif_req(req)->error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+               if (bret->status == BLKIF_RSP_OKAY)
+                       blkif_req(req)->error = BLK_STS_OK;
+               else
+                       blkif_req(req)->error = BLK_STS_IOERR;
+
                switch (bret->operation) {
                case BLKIF_OP_DISCARD:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                struct request_queue *rq = info->rq;
                                printk(KERN_WARNING "blkfront: %s: %s op failed\n",
                                           info->gd->disk_name, op_name(bret->operation));
-                               blkif_req(req)->error = -EOPNOTSUPP;
+                               blkif_req(req)->error = BLK_STS_NOTSUPP;
                                info->feature_discard = 0;
                                info->feature_secdiscard = 0;
                                queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
@@ -1626,11 +1630,11 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                                     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
                                printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
                                       info->gd->disk_name, op_name(bret->operation));
-                               blkif_req(req)->error = -EOPNOTSUPP;
+                               blkif_req(req)->error = BLK_STS_NOTSUPP;
                        }
                        if (unlikely(blkif_req(req)->error)) {
-                               if (blkif_req(req)->error == -EOPNOTSUPP)
-                                       blkif_req(req)->error = 0;
+                               if (blkif_req(req)->error == BLK_STS_NOTSUPP)
+                                       blkif_req(req)->error = BLK_STS_OK;
                                info->feature_fua = 0;
                                info->feature_flush = 0;
                                xlvbd_flush(info);
@@ -2002,7 +2006,7 @@ static void split_bio_end(struct bio *bio)
 
        if (atomic_dec_and_test(&split_bio->pending)) {
                split_bio->bio->bi_phys_segments = 0;
-               split_bio->bio->bi_error = bio->bi_error;
+               split_bio->bio->bi_status = bio->bi_status;
                bio_endio(split_bio->bio);
                kfree(split_bio);
        }
@@ -2137,7 +2141,7 @@ static int blkfront_resume(struct xenbus_device *dev)
                        merge_bio.tail = shadow[j].request->biotail;
                        bio_list_merge(&info->bio_list, &merge_bio);
                        shadow[j].request->bio = NULL;
-                       blk_mq_end_request(shadow[j].request, 0);
+                       blk_mq_end_request(shadow[j].request, BLK_STS_OK);
                }
        }
 
index 757dce2147e005a4b2bcb16881a19e226ab75743..977fdf0660175ce4167bf85a4e0614aab6a13e48 100644 (file)
@@ -471,7 +471,7 @@ static struct request *ace_get_next_request(struct request_queue *q)
                if (!blk_rq_is_passthrough(req))
                        break;
                blk_start_request(req);
-               __blk_end_request_all(req, -EIO);
+               __blk_end_request_all(req, BLK_STS_IOERR);
        }
        return req;
 }
@@ -499,11 +499,11 @@ static void ace_fsm_dostate(struct ace_device *ace)
 
                /* Drop all in-flight and pending requests */
                if (ace->req) {
-                       __blk_end_request_all(ace->req, -EIO);
+                       __blk_end_request_all(ace->req, BLK_STS_IOERR);
                        ace->req = NULL;
                }
                while ((req = blk_fetch_request(ace->queue)) != NULL)
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
 
                /* Drop back to IDLE state and notify waiters */
                ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -728,7 +728,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
                }
 
                /* bio finished; is there another one? */
-               if (__blk_end_request_cur(ace->req, 0)) {
+               if (__blk_end_request_cur(ace->req, BLK_STS_OK)) {
                        /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
                         *      blk_rq_sectors(ace->req),
                         *      blk_rq_cur_sectors(ace->req));
index 968f9e52effa8c401a66e11b4de8bae9f23756ec..41c95c9b2ab436e5917eb6f83f055b91ee521044 100644 (file)
@@ -74,14 +74,14 @@ static void do_z2_request(struct request_queue *q)
        while (req) {
                unsigned long start = blk_rq_pos(req) << 9;
                unsigned long len  = blk_rq_cur_bytes(req);
-               int err = 0;
+               blk_status_t err = BLK_STS_OK;
 
                if (start + len > z2ram_size) {
                        pr_err(DEVICE_NAME ": bad access: block=%llu, "
                               "count=%u\n",
                               (unsigned long long)blk_rq_pos(req),
                               blk_rq_cur_sectors(req));
-                       err = -EIO;
+                       err = BLK_STS_IOERR;
                        goto done;
                }
                while (len) {
index 76c952fd9ab9056250341da04d08c6b012e343e2..ff19cfc587f04a99191d937b3adeb6f7a6d880cc 100644 (file)
@@ -2178,6 +2178,12 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
        if (!q)
                return -ENXIO;
 
+       if (!blk_queue_scsi_passthrough(q)) {
+               WARN_ONCE(true,
+                         "Attempt read CDDA info through a non-SCSI queue\n");
+               return -EINVAL;
+       }
+
        cdi->last_sense = 0;
 
        while (nframes) {
index 1372763a948f48e81af667748df01d850e6859af..53f8278e66f75c9235f171e4be9bbbaf55f4336c 100644 (file)
@@ -583,7 +583,8 @@ static int gdrom_set_interrupt_handlers(void)
  */
 static void gdrom_readdisk_dma(struct work_struct *work)
 {
-       int err, block, block_cnt;
+       int block, block_cnt;
+       blk_status_t err;
        struct packet_command *read_command;
        struct list_head *elem, *next;
        struct request *req;
@@ -641,7 +642,7 @@ static void gdrom_readdisk_dma(struct work_struct *work)
                __raw_writeb(1, GDROM_DMA_STATUS_REG);
                wait_event_interruptible_timeout(request_queue,
                        gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
-               err = gd.transfer ? -EIO : 0;
+               err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK;
                gd.transfer = 0;
                gd.pending = 0;
                /* now seek to take the request spinlock
@@ -670,11 +671,11 @@ static void gdrom_request(struct request_queue *rq)
                        break;
                case REQ_OP_WRITE:
                        pr_notice("Read only device - write request ignored\n");
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                        break;
                default:
                        printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                        break;
                }
        }
index 6e0cbe09222059f0d8e8ccf821edfee5c575703e..593a8818aca99e345e03d0fb56eabd25b5ffd05a 100644 (file)
@@ -343,7 +343,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 
        /* It's illegal to wrap around the end of the physical address space. */
-       if (offset + (phys_addr_t)size < offset)
+       if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;
 
        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
index a561f0c2f428df6cbd80e0fe5bfd3c479a578e18..e870f329db888c58e06bb854e7cf55d78a8bd313 100644 (file)
@@ -1,6 +1,9 @@
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
        int             i;
@@ -799,6 +804,7 @@ static int crng_fast_load(const char *cp, size_t len)
                cp++; crng_init_cnt++; len--;
        }
        if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+               invalidate_batched_entropy();
                crng_init = 1;
                wake_up_interruptible(&crng_init_wait);
                pr_notice("random: fast init done\n");
@@ -836,6 +842,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
        memzero_explicit(&buf, sizeof(buf));
        crng->init_time = jiffies;
        if (crng == &primary_crng && crng_init < 2) {
+               invalidate_batched_entropy();
                crng_init = 2;
                process_random_ready_list();
                wake_up_interruptible(&crng_init_wait);
@@ -1097,15 +1104,15 @@ static void add_interrupt_bench(cycles_t start)
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
        __u32 *ptr = (__u32 *) regs;
-       unsigned long flags;
+       unsigned int idx;
 
        if (regs == NULL)
                return 0;
-       local_irq_save(flags);
-       if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
-               f->reg_idx = 0;
-       ptr += f->reg_idx++;
-       local_irq_restore(flags);
+       idx = READ_ONCE(f->reg_idx);
+       if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+               idx = 0;
+       ptr += idx++;
+       WRITE_ONCE(f->reg_idx, idx);
        return *ptr;
 }
 
@@ -2023,6 +2030,7 @@ struct batched_entropy {
        };
        unsigned int position;
 };
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2033,6 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
        u64 ret;
+       bool use_lock = crng_init < 2;
+       unsigned long flags;
        struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2045,11 +2055,15 @@ u64 get_random_u64(void)
 #endif
 
        batch = &get_cpu_var(batched_entropy_u64);
+       if (use_lock)
+               read_lock_irqsave(&batched_entropy_reset_lock, flags);
        if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
                extract_crng((u8 *)batch->entropy_u64);
                batch->position = 0;
        }
        ret = batch->entropy_u64[batch->position++];
+       if (use_lock)
+               read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
        put_cpu_var(batched_entropy_u64);
        return ret;
 }
@@ -2059,22 +2073,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
        u32 ret;
+       bool use_lock = crng_init < 2;
+       unsigned long flags;
        struct batched_entropy *batch;
 
        if (arch_get_random_int(&ret))
                return ret;
 
        batch = &get_cpu_var(batched_entropy_u32);
+       if (use_lock)
+               read_lock_irqsave(&batched_entropy_reset_lock, flags);
        if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
                extract_crng((u8 *)batch->entropy_u32);
                batch->position = 0;
        }
        ret = batch->entropy_u32[batch->position++];
+       if (use_lock)
+               read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
        put_cpu_var(batched_entropy_u32);
        return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+       int cpu;
+       unsigned long flags;
+
+       write_lock_irqsave(&batched_entropy_reset_lock, flags);
+       for_each_possible_cpu (cpu) {
+               per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+               per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+       }
+       write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
 /**
  * randomize_page - Generate a random, page aligned address
  * @start:     The smallest acceptable address the caller will take.
index b7de5bd76a31743f52cc9095845495e596d0817d..eb1158532de31e7aee418162135a7495b10f9860 100644 (file)
@@ -571,9 +571,10 @@ static inline void update_turbo_state(void)
 static int min_perf_pct_min(void)
 {
        struct cpudata *cpu = all_cpu_data[0];
+       int turbo_pstate = cpu->pstate.turbo_pstate;
 
-       return DIV_ROUND_UP(cpu->pstate.min_pstate * 100,
-                           cpu->pstate.turbo_pstate);
+       return turbo_pstate ?
+               DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0;
 }
 
 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
index 6ed32aac8bbeec9f139d0ba622d4b5fb6d1aca4b..922d0823f8ec2b32e14ed6bef64bfe4bfdd268e1 100644 (file)
@@ -210,9 +210,12 @@ EXPORT_SYMBOL_GPL(kill_dax);
 static struct inode *dax_alloc_inode(struct super_block *sb)
 {
        struct dax_device *dax_dev;
+       struct inode *inode;
 
        dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
-       return &dax_dev->inode;
+       inode = &dax_dev->inode;
+       inode->i_rdev = 0;
+       return inode;
 }
 
 static struct dax_device *to_dax_dev(struct inode *inode)
@@ -227,7 +230,8 @@ static void dax_i_callback(struct rcu_head *head)
 
        kfree(dax_dev->host);
        dax_dev->host = NULL;
-       ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
+       if (inode->i_rdev)
+               ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
        kmem_cache_free(dax_cache, dax_dev);
 }
 
@@ -423,6 +427,7 @@ static void init_once(void *_dax_dev)
        struct dax_device *dax_dev = _dax_dev;
        struct inode *inode = &dax_dev->inode;
 
+       memset(dax_dev, 0, sizeof(*dax_dev));
        inode_init_once(inode);
 }
 
index 8bf27323f7a37c34591c45f8b39d2091ae096260..b58233e4ed71ac41709318e03ce06c1d1b2c20ce 100644 (file)
@@ -27,6 +27,26 @@ struct bmp_header {
        u32 size;
 } __packed;
 
+static bool efi_bgrt_addr_valid(u64 addr)
+{
+       efi_memory_desc_t *md;
+
+       for_each_efi_memory_desc(md) {
+               u64 size;
+               u64 end;
+
+               if (md->type != EFI_BOOT_SERVICES_DATA)
+                       continue;
+
+               size = md->num_pages << EFI_PAGE_SHIFT;
+               end = md->phys_addr + size;
+               if (addr >= md->phys_addr && addr < end)
+                       return true;
+       }
+
+       return false;
+}
+
 void __init efi_bgrt_init(struct acpi_table_header *table)
 {
        void *image;
@@ -36,7 +56,7 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
        if (acpi_disabled)
                return;
 
-       if (!efi_enabled(EFI_BOOT))
+       if (!efi_enabled(EFI_MEMMAP))
                return;
 
        if (table->length < sizeof(bgrt_tab)) {
@@ -65,6 +85,10 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
                goto out;
        }
 
+       if (!efi_bgrt_addr_valid(bgrt->image_address)) {
+               pr_notice("Ignoring BGRT: invalid image address\n");
+               goto out;
+       }
        image = early_memremap(bgrt->image_address, sizeof(bmp_header));
        if (!image) {
                pr_notice("Ignoring BGRT: failed to map image header memory\n");
index 1e7860f02f4fa99e821e6a71bec7758621bc9a7d..31058d400bda6f767c022ac179d839437cf0e64f 100644 (file)
@@ -136,12 +136,12 @@ static int vpd_section_attrib_add(const u8 *key, s32 key_len,
        info->value = value;
 
        INIT_LIST_HEAD(&info->list);
-       list_add_tail(&info->list, &sec->attribs);
 
        ret = sysfs_create_bin_file(sec->kobj, &info->bin_attr);
        if (ret)
                goto free_info_key;
 
+       list_add_tail(&info->list, &sec->attribs);
        return 0;
 
 free_info_key:
@@ -158,8 +158,8 @@ static void vpd_section_attrib_destroy(struct vpd_section *sec)
        struct vpd_attrib_info *temp;
 
        list_for_each_entry_safe(info, temp, &sec->attribs, list) {
-               kfree(info->key);
                sysfs_remove_bin_file(sec->kobj, &info->bin_attr);
+               kfree(info->key);
                kfree(info);
        }
 }
@@ -244,7 +244,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
 {
        if (sec->enabled) {
                vpd_section_attrib_destroy(sec);
-               kobject_del(sec->kobj);
+               kobject_put(sec->kobj);
                sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
                kfree(sec->raw_name);
                iounmap(sec->baseaddr);
@@ -331,7 +331,7 @@ static void __exit vpd_platform_exit(void)
 {
        vpd_section_destroy(&ro_vpd);
        vpd_section_destroy(&rw_vpd);
-       kobject_del(vpd_kobj);
+       kobject_put(vpd_kobj);
 }
 
 module_init(vpd_platform_init);
index ccea609676eebfe8bbcd6e01acaa8f6c4499ffe7..4ca436e66bdb24f674280dd51a459a3d90809457 100644 (file)
@@ -646,6 +646,9 @@ static int enable_debounce(struct gpio_chip *chip, unsigned int offset,
        int rc;
        int i;
 
+       if (!gpio->clk)
+               return -EINVAL;
+
        rc = usecs_to_cycles(gpio, usecs, &requested_cycles);
        if (rc < 0) {
                dev_warn(chip->parent, "Failed to convert %luus to cycles at %luHz: %d\n",
index 2197368cc899d06ae231cb14d0825f7c9df1382f..e60156ec0c1842865bd7120c38ac85f58274fefd 100644 (file)
@@ -90,8 +90,18 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
 {
        int reg;
 
-       if (gpio == 94)
-               return GPIOPANELCTL;
+       if (gpio >= CRYSTALCOVE_GPIO_NUM) {
+               /*
+                * Virtual GPIO called from ACPI, for now we only support
+                * the panel ctl.
+                */
+               switch (gpio) {
+               case 0x5e:
+                       return GPIOPANELCTL;
+               default:
+                       return -EOPNOTSUPP;
+               }
+       }
 
        if (reg_type == CTRL_IN) {
                if (gpio < 8)
@@ -130,36 +140,36 @@ static void crystalcove_update_irq_ctrl(struct crystalcove_gpio *cg, int gpio)
 static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio)
 {
        struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+       int reg = to_reg(gpio, CTRL_OUT);
 
-       if (gpio > CRYSTALCOVE_VGPIO_NUM)
+       if (reg < 0)
                return 0;
 
-       return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT),
-                           CTLO_INPUT_SET);
+       return regmap_write(cg->regmap, reg, CTLO_INPUT_SET);
 }
 
 static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio,
                                    int value)
 {
        struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+       int reg = to_reg(gpio, CTRL_OUT);
 
-       if (gpio > CRYSTALCOVE_VGPIO_NUM)
+       if (reg < 0)
                return 0;
 
-       return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT),
-                           CTLO_OUTPUT_SET | value);
+       return regmap_write(cg->regmap, reg, CTLO_OUTPUT_SET | value);
 }
 
 static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio)
 {
        struct crystalcove_gpio *cg = gpiochip_get_data(chip);
-       int ret;
        unsigned int val;
+       int ret, reg = to_reg(gpio, CTRL_IN);
 
-       if (gpio > CRYSTALCOVE_VGPIO_NUM)
+       if (reg < 0)
                return 0;
 
-       ret = regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &val);
+       ret = regmap_read(cg->regmap, reg, &val);
        if (ret)
                return ret;
 
@@ -170,14 +180,15 @@ static void crystalcove_gpio_set(struct gpio_chip *chip,
                                 unsigned gpio, int value)
 {
        struct crystalcove_gpio *cg = gpiochip_get_data(chip);
+       int reg = to_reg(gpio, CTRL_OUT);
 
-       if (gpio > CRYSTALCOVE_VGPIO_NUM)
+       if (reg < 0)
                return;
 
        if (value)
-               regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 1);
+               regmap_update_bits(cg->regmap, reg, 1, 1);
        else
-               regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 0);
+               regmap_update_bits(cg->regmap, reg, 1, 0);
 }
 
 static int crystalcove_irq_type(struct irq_data *data, unsigned type)
@@ -185,6 +196,9 @@ static int crystalcove_irq_type(struct irq_data *data, unsigned type)
        struct crystalcove_gpio *cg =
                gpiochip_get_data(irq_data_get_irq_chip_data(data));
 
+       if (data->hwirq >= CRYSTALCOVE_GPIO_NUM)
+               return 0;
+
        switch (type) {
        case IRQ_TYPE_NONE:
                cg->intcnt_value = CTLI_INTCNT_DIS;
@@ -235,8 +249,10 @@ static void crystalcove_irq_unmask(struct irq_data *data)
        struct crystalcove_gpio *cg =
                gpiochip_get_data(irq_data_get_irq_chip_data(data));
 
-       cg->set_irq_mask = false;
-       cg->update |= UPDATE_IRQ_MASK;
+       if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
+               cg->set_irq_mask = false;
+               cg->update |= UPDATE_IRQ_MASK;
+       }
 }
 
 static void crystalcove_irq_mask(struct irq_data *data)
@@ -244,8 +260,10 @@ static void crystalcove_irq_mask(struct irq_data *data)
        struct crystalcove_gpio *cg =
                gpiochip_get_data(irq_data_get_irq_chip_data(data));
 
-       cg->set_irq_mask = true;
-       cg->update |= UPDATE_IRQ_MASK;
+       if (data->hwirq < CRYSTALCOVE_GPIO_NUM) {
+               cg->set_irq_mask = true;
+               cg->update |= UPDATE_IRQ_MASK;
+       }
 }
 
 static struct irq_chip crystalcove_irqchip = {
index 19a92efabbef705b8826441c966cca4c55838bfd..5104b63981390adb878ed27f4ca2d0d758c65307 100644 (file)
@@ -747,7 +747,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
                set = U32_MAX;
        else
                return -EINVAL;
-       writel_relaxed(0, mvebu_gpioreg_blink_counter_select(mvchip));
+       writel_relaxed(set, mvebu_gpioreg_blink_counter_select(mvchip));
 
        mvpwm = devm_kzalloc(dev, sizeof(struct mvebu_pwm), GFP_KERNEL);
        if (!mvpwm)
@@ -768,6 +768,13 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
        mvpwm->chip.dev = dev;
        mvpwm->chip.ops = &mvebu_pwm_ops;
        mvpwm->chip.npwm = mvchip->chip.ngpio;
+       /*
+        * There may already be some PWM allocated, so we can't force
+        * mvpwm->chip.base to a fixed point like mvchip->chip.base.
+        * So, we let pwmchip_add() do the numbering and take the next free
+        * region.
+        */
+       mvpwm->chip.base = -1;
 
        spin_lock_init(&mvpwm->lock);
 
index 8be9719284b047f3d6046fd6a22c80bb617fa0f7..aa885a614e27c9882ea597456c4b2e16fb25b62b 100644 (file)
@@ -508,6 +508,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
                bool has_connectors =
                        !!new_crtc_state->connector_mask;
 
+               WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
                if (!drm_mode_equal(&old_crtc_state->mode, &new_crtc_state->mode)) {
                        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] mode changed\n",
                                         crtc->base.id, crtc->name);
@@ -551,6 +553,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
        for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
                const struct drm_connector_helper_funcs *funcs = connector->helper_private;
 
+               WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
                /*
                 * This only sets crtc->connectors_changed for routing changes,
                 * drivers must set crtc->connectors_changed themselves when
@@ -650,6 +654,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
        for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
                const struct drm_plane_helper_funcs *funcs;
 
+               WARN_ON(!drm_modeset_is_locked(&plane->mutex));
+
                funcs = plane->helper_private;
 
                drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
@@ -2663,7 +2669,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
 
        drm_modeset_acquire_init(&ctx, 0);
        while (1) {
+               err = drm_modeset_lock_all_ctx(dev, &ctx);
+               if (err)
+                       goto out;
+
                err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
+out:
                if (err != -EDEADLK)
                        break;
 
index b5c6bb46a4251bdac218832ba61e52109e5a1768..37b8ad3e30d80440aea9ea2654a7a99696b50a57 100644 (file)
@@ -358,7 +358,12 @@ EXPORT_SYMBOL(drm_put_dev);
 void drm_unplug_dev(struct drm_device *dev)
 {
        /* for a USB device */
-       drm_dev_unregister(dev);
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_modeset_unregister_all(dev);
+
+       drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
+       drm_minor_unregister(dev, DRM_MINOR_RENDER);
+       drm_minor_unregister(dev, DRM_MINOR_CONTROL);
 
        mutex_lock(&drm_global_mutex);
 
index 5abc69c9630fc28789a32c87b19e44397d1d58ba..f77dcfaade6c5dfb74d7d600d8181d4fbb006f76 100644 (file)
@@ -760,7 +760,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi)
         * Get the endpoint node. In our case, dsi has one output port1
         * to which the external HDMI bridge is connected.
         */
-       ret = drm_of_find_panel_or_bridge(np, 0, 0, NULL, &dsi->bridge);
+       ret = drm_of_find_panel_or_bridge(np, 1, 0, NULL, &dsi->bridge);
        if (ret)
                return ret;
 
index c994fe6e65b2eafe6a133fccb70f7c5db5019b00..48428672fc6ece0927416d17a8dde8c41f00f500 100644 (file)
@@ -1235,6 +1235,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_fini;
 
        pci_set_drvdata(pdev, &dev_priv->drm);
+       /*
+        * Disable the system suspend direct complete optimization, which can
+        * leave the device suspended skipping the driver's suspend handlers
+        * if the device was already runtime suspended. This is needed due to
+        * the difference in our runtime and system suspend sequence and
+        * becaue the HDA driver may require us to enable the audio power
+        * domain during system suspend.
+        */
+       pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
 
        ret = i915_driver_init_early(dev_priv, ent);
        if (ret < 0)
index 963f6d4481f76ec54b5aeab138b0cca3f4ff90e5..2c453a4e97d5ba28ca3a21da372f73eed0131268 100644 (file)
@@ -2991,6 +2991,16 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
        return false;
 }
 
+static inline bool
+intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
+{
+#ifdef CONFIG_INTEL_IOMMU
+       if (IS_BROXTON(dev_priv) && intel_iommu_gfx_mapped)
+               return true;
+#endif
+       return false;
+}
+
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                                int enable_ppgtt);
 
index b6ac3df18b582534b118ab44aae1dbfe9f75186e..462031cbd77f714b23a3b7645039c0d8dba71f40 100644 (file)
@@ -3298,6 +3298,10 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
 {
        int ret;
 
+       /* If the device is asleep, we have no requests outstanding */
+       if (!READ_ONCE(i915->gt.awake))
+               return 0;
+
        if (flags & I915_WAIT_LOCKED) {
                struct i915_gem_timeline *tl;
 
index 50b8f1139ff99d6dc8d3ec225abf251d6af4465d..f1989b8792dd6f21ba1a944113b424fb8dc3184d 100644 (file)
@@ -2191,6 +2191,101 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
                gen8_set_pte(&gtt_base[i], scratch_pte);
 }
 
+static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+{
+       struct drm_i915_private *dev_priv = vm->i915;
+
+       /*
+        * Make sure the internal GAM fifo has been cleared of all GTT
+        * writes before exiting stop_machine(). This guarantees that
+        * any aperture accesses waiting to start in another process
+        * cannot back up behind the GTT writes causing a hang.
+        * The register can be any arbitrary GAM register.
+        */
+       POSTING_READ(GFX_FLSH_CNTL_GEN6);
+}
+
+struct insert_page {
+       struct i915_address_space *vm;
+       dma_addr_t addr;
+       u64 offset;
+       enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+{
+       struct insert_page *arg = _arg;
+
+       gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+       bxt_vtd_ggtt_wa(arg->vm);
+
+       return 0;
+}
+
+static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+                                         dma_addr_t addr,
+                                         u64 offset,
+                                         enum i915_cache_level level,
+                                         u32 unused)
+{
+       struct insert_page arg = { vm, addr, offset, level };
+
+       stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+}
+
+struct insert_entries {
+       struct i915_address_space *vm;
+       struct sg_table *st;
+       u64 start;
+       enum i915_cache_level level;
+};
+
+static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+{
+       struct insert_entries *arg = _arg;
+
+       gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
+       bxt_vtd_ggtt_wa(arg->vm);
+
+       return 0;
+}
+
+static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+                                            struct sg_table *st,
+                                            u64 start,
+                                            enum i915_cache_level level,
+                                            u32 unused)
+{
+       struct insert_entries arg = { vm, st, start, level };
+
+       stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+}
+
+struct clear_range {
+       struct i915_address_space *vm;
+       u64 start;
+       u64 length;
+};
+
+static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+{
+       struct clear_range *arg = _arg;
+
+       gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+       bxt_vtd_ggtt_wa(arg->vm);
+
+       return 0;
+}
+
+static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+                                         u64 start,
+                                         u64 length)
+{
+       struct clear_range arg = { vm, start, length };
+
+       stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+}
+
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  u64 start, u64 length)
 {
@@ -2785,6 +2880,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 
        ggtt->base.insert_entries = gen8_ggtt_insert_entries;
 
+       /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+       if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
+               ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+               ggtt->base.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
+               if (ggtt->base.clear_range != nop_clear_range)
+                       ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+       }
+
        ggtt->invalidate = gen6_ggtt_invalidate;
 
        return ggtt_probe_common(ggtt, size);
@@ -2997,7 +3100,8 @@ void i915_ggtt_enable_guc(struct drm_i915_private *i915)
 
 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
 {
-       i915->ggtt.invalidate = gen6_ggtt_invalidate;
+       if (i915->ggtt.invalidate == guc_ggtt_invalidate)
+               i915->ggtt.invalidate = gen6_ggtt_invalidate;
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
index a0d6d4317a490bba6487891a4048ddef6b358fe4..fb5231f98c0d620f1ccf03a9872607b1373dc0e2 100644 (file)
@@ -278,7 +278,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                        obj->mm.quirked = false;
                }
                if (!i915_gem_object_is_tiled(obj)) {
-                       GEM_BUG_ON(!obj->mm.quirked);
+                       GEM_BUG_ON(obj->mm.quirked);
                        __i915_gem_object_pin_pages(obj);
                        obj->mm.quirked = true;
                }
index f87b0c4e564d8b85de91e93f7a8d9a6e6f219b61..1a78363c7f4a9e974edbc4e4f31ec7d64d26b6ad 100644 (file)
@@ -208,7 +208,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
 static const struct intel_device_info intel_ironlake_m_info = {
        GEN5_FEATURES,
        .platform = INTEL_IRONLAKE,
-       .is_mobile = 1,
+       .is_mobile = 1, .has_fbc = 1,
 };
 
 #define GEN6_FEATURES \
@@ -390,7 +390,6 @@ static const struct intel_device_info intel_skylake_gt3_info = {
        .has_hw_contexts = 1, \
        .has_logical_ring_contexts = 1, \
        .has_guc = 1, \
-       .has_decoupled_mmio = 1, \
        .has_aliasing_ppgtt = 1, \
        .has_full_ppgtt = 1, \
        .has_full_48bit_ppgtt = 1, \
index 3cabe52a4e3b168e176d1f55abdae65f67219ef7..569717a1272367a91cf682a9cae7640f9ae32777 100644 (file)
@@ -12203,6 +12203,15 @@ static void update_scanline_offset(struct intel_crtc *crtc)
         * type. For DP ports it behaves like most other platforms, but on HDMI
         * there's an extra 1 line difference. So we need to add two instead of
         * one to the value.
+        *
+        * On VLV/CHV DSI the scanline counter would appear to increment
+        * approx. 1/3 of a scanline before start of vblank. Unfortunately
+        * that means we can't tell whether we're in vblank or not while
+        * we're on that particular line. We must still set scanline_offset
+        * to 1 so that the vblank timestamps come out correct when we query
+        * the scanline counter from within the vblank interrupt handler.
+        * However if queried just before the start of vblank we'll get an
+        * answer that's slightly in the future.
         */
        if (IS_GEN2(dev_priv)) {
                const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
index 854e8e0c836bd2099c1cfcb72e12e3ec5ff21915..f94eacff196c5d0980690ae95cda45c42e3a4e9b 100644 (file)
@@ -1075,6 +1075,22 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
        return 0;
 }
 
+static bool ring_is_idle(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->i915;
+       bool idle = true;
+
+       intel_runtime_pm_get(dev_priv);
+
+       /* No bit for gen2, so assume the CS parser is idle */
+       if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+               idle = false;
+
+       intel_runtime_pm_put(dev_priv);
+
+       return idle;
+}
+
 /**
  * intel_engine_is_idle() - Report if the engine has finished process all work
  * @engine: the intel_engine_cs
@@ -1084,8 +1100,6 @@ int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
  */
 bool intel_engine_is_idle(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
-
        /* Any inflight/incomplete requests? */
        if (!i915_seqno_passed(intel_engine_get_seqno(engine),
                               intel_engine_last_submit(engine)))
@@ -1100,7 +1114,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
                return false;
 
        /* Ring stopped? */
-       if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+       if (!ring_is_idle(engine))
                return false;
 
        return true;
index ded2add18b26122d7f6395d0d5532da26dd21f34..d93c58410bffe9701d148e546db753dff84c4083 100644 (file)
@@ -82,20 +82,10 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
 static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
                                            int *width, int *height)
 {
-       int w, h;
-
-       if (drm_rotation_90_or_270(cache->plane.rotation)) {
-               w = cache->plane.src_h;
-               h = cache->plane.src_w;
-       } else {
-               w = cache->plane.src_w;
-               h = cache->plane.src_h;
-       }
-
        if (width)
-               *width = w;
+               *width = cache->plane.src_w;
        if (height)
-               *height = h;
+               *height = cache->plane.src_h;
 }
 
 static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
@@ -746,6 +736,11 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
 
        cache->plane.rotation = plane_state->base.rotation;
+       /*
+        * Src coordinates are already rotated by 270 degrees for
+        * the 90/270 degree plane rotation cases (to match the
+        * GTT mapping), hence no need to account for rotation here.
+        */
        cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
        cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
        cache->plane.visible = plane_state->base.visible;
index 570bd603f401d513ac3f08c67fc78d6d1523b762..2ca481b5aa691872d39263605ef67b9c7335cec6 100644 (file)
@@ -4335,10 +4335,18 @@ skl_compute_wm(struct drm_atomic_state *state)
        struct drm_crtc_state *cstate;
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct skl_wm_values *results = &intel_state->wm_results;
+       struct drm_device *dev = state->dev;
        struct skl_pipe_wm *pipe_wm;
        bool changed = false;
        int ret, i;
 
+       /*
+        * When we distrust bios wm we always need to recompute to set the
+        * expected DDB allocations for each CRTC.
+        */
+       if (to_i915(dev)->wm.distrust_bios_wm)
+               changed = true;
+
        /*
         * If this transaction isn't actually touching any CRTC's, don't
         * bother with watermark calculation.  Note that if we pass this
@@ -4349,6 +4357,7 @@ skl_compute_wm(struct drm_atomic_state *state)
         */
        for_each_new_crtc_in_state(state, crtc, cstate, i)
                changed = true;
+
        if (!changed)
                return 0;
 
index c3780d0d2baf752ce9d590b6f6c8db67674ec745..559f1ab42bfc23e005020d9bb3cb88e0f0d57943 100644 (file)
@@ -435,8 +435,9 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
        }
 
        /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
-       if (intel_crtc->config->pipe_src_w > 3200 ||
-                               intel_crtc->config->pipe_src_h > 2000) {
+       if (dev_priv->psr.psr2_support &&
+           (intel_crtc->config->pipe_src_w > 3200 ||
+            intel_crtc->config->pipe_src_h > 2000)) {
                dev_priv->psr.psr2_support = false;
                return false;
        }
index 8c87c717c7cda92c4256cf277828e594f96a0ad1..e6517edcd16b55608c125452b56904f2b48e90df 100644 (file)
@@ -83,10 +83,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
  */
 void intel_pipe_update_start(struct intel_crtc *crtc)
 {
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
        long timeout = msecs_to_jiffies_timeout(1);
        int scanline, min, max, vblank_start;
        wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
+       bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+               intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
        DEFINE_WAIT(wait);
 
        vblank_start = adjusted_mode->crtc_vblank_start;
@@ -139,6 +142,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
 
        drm_crtc_vblank_put(&crtc->base);
 
+       /*
+        * On VLV/CHV DSI the scanline counter would appear to
+        * increment approx. 1/3 of a scanline before start of vblank.
+        * The registers still get latched at start of vblank however.
+        * This means we must not write any registers on the first
+        * line of vblank (since not the whole line is actually in
+        * vblank). And unfortunately we can't use the interrupt to
+        * wait here since it will fire too soon. We could use the
+        * frame start interrupt instead since it will fire after the
+        * critical scanline, but that would require more changes
+        * in the interrupt code. So for now we'll just do the nasty
+        * thing and poll for the bad scanline to pass us by.
+        *
+        * FIXME figure out if BXT+ DSI suffers from this as well
+        */
+       while (need_vlv_dsi_wa && scanline == vblank_start)
+               scanline = intel_get_crtc_scanline(crtc);
+
        crtc->debug.scanline_start = scanline;
        crtc->debug.start_vbl_time = ktime_get();
        crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
index 4b7f73aeddac6475db31d184853f833c8ba3d510..f84115261ae78b02591a64cb77de96c3fda167bb 100644 (file)
@@ -59,8 +59,6 @@ struct drm_i915_gem_request;
  *                available in the work queue (note, the queue is shared,
  *                not per-engine). It is OK for this to be nonzero, but
  *                it should not be huge!
- *   q_fail: failed to enqueue a work item. This should never happen,
- *           because we check for space beforehand.
  *   b_fail: failed to ring the doorbell. This should never happen, unless
  *           somehow the hardware misbehaves, or maybe if the GuC firmware
  *           crashes? We probably need to reset the GPU to recover.
index 8fb801fab039b10225765b044a4e535cf7a4201d..8b05ecb8fdefccafeed07755d501e8902ccba0c3 100644 (file)
@@ -673,7 +673,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                ret = drm_of_find_panel_or_bridge(child,
                                                  imx_ldb->lvds_mux ? 4 : 2, 0,
                                                  &channel->panel, &channel->bridge);
-               if (ret)
+               if (ret && ret != -ENODEV)
                        return ret;
 
                /* panel ddc only if there is no bridge */
index 808b995a990f5529b303e23cb1085b4b7f478355..b5cc6e12334cf96e8faacc01a1a8fb5dcec48202 100644 (file)
@@ -19,6 +19,7 @@
 #include <drm/drm_of.h>
 #include <linux/clk.h>
 #include <linux/component.h>
+#include <linux/iopoll.h>
 #include <linux/irq.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -900,16 +901,12 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
 
 static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
 {
-       u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */
-
-       while (timeout_ms--) {
-               if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
-                       break;
-
-               usleep_range(2, 4);
-       }
+       int ret;
+       u32 val;
 
-       if (timeout_ms == 0) {
+       ret = readl_poll_timeout(dsi->regs + DSI_INTSTA, val, !(val & DSI_BUSY),
+                                4, 2000000);
+       if (ret) {
                DRM_WARN("polling dsi wait not busy timeout!\n");
 
                mtk_dsi_enable(dsi);
index 41a1c03b03476b620a511731518b8d0f7772417d..0a4ffd7241468dcbd064fa3a210f17094d10697b 100644 (file)
@@ -1062,7 +1062,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
        }
 
        err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
-       if (err) {
+       if (err < 0) {
                dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
                        err);
                return err;
index 75382f5f0fcec00a8749df932cfd7dba9eb19542..10b227d83e9ac7af98b8177188bb56d48823f2b1 100644 (file)
@@ -152,7 +152,7 @@ static struct regmap_config meson_regmap_config = {
        .max_register   = 0x1000,
 };
 
-static int meson_drv_bind(struct device *dev)
+static int meson_drv_bind_master(struct device *dev, bool has_components)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct meson_drm *priv;
@@ -233,10 +233,12 @@ static int meson_drv_bind(struct device *dev)
        if (ret)
                goto free_drm;
 
-       ret = component_bind_all(drm->dev, drm);
-       if (ret) {
-               dev_err(drm->dev, "Couldn't bind all components\n");
-               goto free_drm;
+       if (has_components) {
+               ret = component_bind_all(drm->dev, drm);
+               if (ret) {
+                       dev_err(drm->dev, "Couldn't bind all components\n");
+                       goto free_drm;
+               }
        }
 
        ret = meson_plane_create(priv);
@@ -276,6 +278,11 @@ free_drm:
        return ret;
 }
 
+static int meson_drv_bind(struct device *dev)
+{
+       return meson_drv_bind_master(dev, true);
+}
+
 static void meson_drv_unbind(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
@@ -357,6 +364,9 @@ static int meson_drv_probe(struct platform_device *pdev)
                count += meson_probe_remote(pdev, &match, np, remote);
        }
 
+       if (count && !match)
+               return meson_drv_bind_master(&pdev->dev, false);
+
        /* If some endpoints were found, initialize the nodes */
        if (count) {
                dev_info(&pdev->dev, "Queued %d outputs on vpu\n", count);
index 6a567fe347b369a2c01d5e89c67ac0a881a49420..820a4805916f1da8115b798cf3c93d5750ae8196 100644 (file)
@@ -4,6 +4,7 @@
 
 struct nvkm_alarm {
        struct list_head head;
+       struct list_head exec;
        u64 timestamp;
        void (*func)(struct nvkm_alarm *);
 };
index 36268e1802b5afcd65c6b3d623b273c4ac60af87..15a13d09d431c9a8d4822fb5f997bcf3225a2d4c 100644 (file)
@@ -80,7 +80,7 @@ int nouveau_modeset = -1;
 module_param_named(modeset, nouveau_modeset, int, 0400);
 
 MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
-int nouveau_runtime_pm = -1;
+static int nouveau_runtime_pm = -1;
 module_param_named(runpm, nouveau_runtime_pm, int, 0400);
 
 static struct drm_driver driver_stub;
@@ -495,7 +495,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
        nouveau_fbcon_init(dev);
        nouveau_led_init(dev);
 
-       if (nouveau_runtime_pm != 0) {
+       if (nouveau_pmops_runtime()) {
                pm_runtime_use_autosuspend(dev->dev);
                pm_runtime_set_autosuspend_delay(dev->dev, 5000);
                pm_runtime_set_active(dev->dev);
@@ -527,7 +527,7 @@ nouveau_drm_unload(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
 
-       if (nouveau_runtime_pm != 0) {
+       if (nouveau_pmops_runtime()) {
                pm_runtime_get_sync(dev->dev);
                pm_runtime_forbid(dev->dev);
        }
@@ -726,6 +726,14 @@ nouveau_pmops_thaw(struct device *dev)
        return nouveau_do_resume(drm_dev, false);
 }
 
+bool
+nouveau_pmops_runtime()
+{
+       if (nouveau_runtime_pm == -1)
+               return nouveau_is_optimus() || nouveau_is_v1_dsm();
+       return nouveau_runtime_pm == 1;
+}
+
 static int
 nouveau_pmops_runtime_suspend(struct device *dev)
 {
@@ -733,14 +741,7 @@ nouveau_pmops_runtime_suspend(struct device *dev)
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        int ret;
 
-       if (nouveau_runtime_pm == 0) {
-               pm_runtime_forbid(dev);
-               return -EBUSY;
-       }
-
-       /* are we optimus enabled? */
-       if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
-               DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+       if (!nouveau_pmops_runtime()) {
                pm_runtime_forbid(dev);
                return -EBUSY;
        }
@@ -765,8 +766,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
        struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
        int ret;
 
-       if (nouveau_runtime_pm == 0)
-               return -EINVAL;
+       if (!nouveau_pmops_runtime()) {
+               pm_runtime_forbid(dev);
+               return -EBUSY;
+       }
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
@@ -796,14 +799,7 @@ nouveau_pmops_runtime_idle(struct device *dev)
        struct nouveau_drm *drm = nouveau_drm(drm_dev);
        struct drm_crtc *crtc;
 
-       if (nouveau_runtime_pm == 0) {
-               pm_runtime_forbid(dev);
-               return -EBUSY;
-       }
-
-       /* are we optimus enabled? */
-       if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
-               DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+       if (!nouveau_pmops_runtime()) {
                pm_runtime_forbid(dev);
                return -EBUSY;
        }
index eadec2f49ad318cf44d3464ff39dbe201e7074cb..a11b6aaed325f17ddf6f82c8fd8ced531191574a 100644 (file)
@@ -108,8 +108,6 @@ nouveau_cli(struct drm_file *fpriv)
 #include <nvif/object.h>
 #include <nvif/device.h>
 
-extern int nouveau_runtime_pm;
-
 struct nouveau_drm {
        struct nouveau_cli client;
        struct drm_device *dev;
@@ -195,6 +193,7 @@ nouveau_drm(struct drm_device *dev)
 
 int nouveau_pmops_suspend(struct device *);
 int nouveau_pmops_resume(struct device *);
+bool nouveau_pmops_runtime(void);
 
 #include <nvkm/core/tegra.h>
 
index a4aacbc0cec8efe603d18152099aaecdeab93dce..02fe0efb9e1643f3a4802b947b3ab306bdd690bd 100644 (file)
@@ -87,7 +87,7 @@ void
 nouveau_vga_init(struct nouveau_drm *drm)
 {
        struct drm_device *dev = drm->dev;
-       bool runtime = false;
+       bool runtime = nouveau_pmops_runtime();
 
        /* only relevant for PCI devices */
        if (!dev->pdev)
@@ -99,10 +99,6 @@ nouveau_vga_init(struct nouveau_drm *drm)
        if (pci_is_thunderbolt_attached(dev->pdev))
                return;
 
-       if (nouveau_runtime_pm == 1)
-               runtime = true;
-       if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
-               runtime = true;
        vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
 
        if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
@@ -113,18 +109,13 @@ void
 nouveau_vga_fini(struct nouveau_drm *drm)
 {
        struct drm_device *dev = drm->dev;
-       bool runtime = false;
+       bool runtime = nouveau_pmops_runtime();
 
        vga_client_register(dev->pdev, NULL, NULL, NULL);
 
        if (pci_is_thunderbolt_attached(dev->pdev))
                return;
 
-       if (nouveau_runtime_pm == 1)
-               runtime = true;
-       if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
-               runtime = true;
-
        vga_switcheroo_unregister_client(dev->pdev);
        if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
                vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
index a7663249b3baf2df1c5c75d87d3b32109984ba97..06e564a9ccb253b3018b45d2f0a96cc53430ab2c 100644 (file)
@@ -2107,7 +2107,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
                                        asyc->set.dither = true;
                        }
                } else {
-                       asyc->set.mask = ~0;
+                       if (asyc)
+                               asyc->set.mask = ~0;
                        asyh->set.mask = ~0;
                }
 
index f2a86eae0a0d624b31cb8ee9a65e6487705a6c1a..2437f7d41ca20de616a7193f2d6fdec6d7daea00 100644 (file)
@@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
                /* Move to completed list.  We'll drop the lock before
                 * executing the callback so it can reschedule itself.
                 */
-               list_move_tail(&alarm->head, &exec);
+               list_del_init(&alarm->head);
+               list_add(&alarm->exec, &exec);
        }
 
        /* Shut down interrupt if no more pending alarms. */
@@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
        spin_unlock_irqrestore(&tmr->lock, flags);
 
        /* Execute completed callbacks. */
-       list_for_each_entry_safe(alarm, atemp, &exec, head) {
-               list_del_init(&alarm->head);
+       list_for_each_entry_safe(alarm, atemp, &exec, exec) {
+               list_del(&alarm->exec);
                alarm->func(alarm);
        }
 }
index d8fa7a9c9240bdf53f06d214f5952af7f85e5ee4..ce5f2d1f9994113b6322a708f47f1e23049ef3ba 100644 (file)
@@ -245,8 +245,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
                                      struct drm_connector_state *conn_state)
 {
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
-       struct rockchip_dp_device *dp = to_dp(encoder);
-       int ret;
 
        /*
         * The hardware IC designed that VOP must output the RGB10 video
@@ -258,16 +256,6 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
 
        s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
        s->output_type = DRM_MODE_CONNECTOR_eDP;
-       if (dp->data->chip_type == RK3399_EDP) {
-               /*
-                * For RK3399, VOP Lit must code the out mode to RGB888,
-                * VOP Big must code the out mode to RGB10.
-                */
-               ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
-                                                       encoder);
-               if (ret > 0)
-                       s->output_mode = ROCKCHIP_OUT_MODE_P888;
-       }
 
        return 0;
 }
index a2169dd3d26b915c851bd089f25373495c188174..14fa1f8351e8df22ab30560fbb6a1906841ba43d 100644 (file)
@@ -615,7 +615,6 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 {
        struct cdn_dp_device *dp = encoder_to_dp(encoder);
        int ret, val;
-       struct rockchip_crtc_state *state;
 
        ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
        if (ret < 0) {
@@ -625,14 +624,10 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
 
        DRM_DEV_DEBUG_KMS(dp->dev, "vop %s output to cdn-dp\n",
                          (ret) ? "LIT" : "BIG");
-       state = to_rockchip_crtc_state(encoder->crtc->state);
-       if (ret) {
+       if (ret)
                val = DP_SEL_VOP_LIT | (DP_SEL_VOP_LIT << 16);
-               state->output_mode = ROCKCHIP_OUT_MODE_P888;
-       } else {
+       else
                val = DP_SEL_VOP_LIT << 16;
-               state->output_mode = ROCKCHIP_OUT_MODE_AAAA;
-       }
 
        ret = cdn_dp_grf_write(dp, GRF_SOC_CON9, val);
        if (ret)
index 3f7a82d1e0956e6a37e1478412210955db38aa19..45589d6ce65ed0fd0a7e1be60f83dd03bd3d47b5 100644 (file)
@@ -875,6 +875,7 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
 static void vop_crtc_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
+       const struct vop_data *vop_data = vop->data;
        struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
        struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
        u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
@@ -967,6 +968,13 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
                DRM_DEV_ERROR(vop->dev, "unsupported connector_type [%d]\n",
                              s->output_type);
        }
+
+       /*
+        * if vop is not support RGB10 output, need force RGB10 to RGB888.
+        */
+       if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA &&
+           !(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
+               s->output_mode = ROCKCHIP_OUT_MODE_P888;
        VOP_CTRL_SET(vop, out_mode, s->output_mode);
 
        VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
index 5a4faa85dbd29d91af08ae51a8c2ac69012cb33c..9979fd0c22821d7efa3d7054468e0914619e0692 100644 (file)
@@ -142,6 +142,9 @@ struct vop_data {
        const struct vop_intr *intr;
        const struct vop_win_data *win;
        unsigned int win_size;
+
+#define VOP_FEATURE_OUTPUT_RGB10       BIT(0)
+       u64 feature;
 };
 
 /* interrupt define */
index 0da44442aab097b8f4b40d67c8995be625bccfcd..bafd698a28b1b491c01823d2be293a41e67c3722 100644 (file)
@@ -275,6 +275,7 @@ static const struct vop_intr rk3288_vop_intr = {
 static const struct vop_data rk3288_vop = {
        .init_table = rk3288_init_reg_table,
        .table_size = ARRAY_SIZE(rk3288_init_reg_table),
+       .feature = VOP_FEATURE_OUTPUT_RGB10,
        .intr = &rk3288_vop_intr,
        .ctrl = &rk3288_ctrl_data,
        .win = rk3288_vop_win_data,
@@ -343,6 +344,7 @@ static const struct vop_reg_data rk3399_init_reg_table[] = {
 static const struct vop_data rk3399_vop_big = {
        .init_table = rk3399_init_reg_table,
        .table_size = ARRAY_SIZE(rk3399_init_reg_table),
+       .feature = VOP_FEATURE_OUTPUT_RGB10,
        .intr = &rk3399_vop_intr,
        .ctrl = &rk3399_ctrl_data,
        /*
index 130d51c5ec6a2dab1211337d71313e6b1de15323..4b948fba9eec274b794a0546fec0179b3de1cadf 100644 (file)
@@ -41,9 +41,9 @@
 #include <drm/ttm/ttm_module.h>
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20170221"
+#define VMWGFX_DRIVER_DATE "20170607"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 12
+#define VMWGFX_DRIVER_MINOR 13
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
index b6a0806b06bffaf6da9178905f9b2f6bb037d384..a1c68e6a689e32fd0dd4d74c805ee4afd0836a99 100644 (file)
@@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
                                return fifo_state->static_buffer;
                        else {
                                fifo_state->dynamic_buffer = vmalloc(bytes);
+                               if (!fifo_state->dynamic_buffer)
+                                       goto out_err;
                                return fifo_state->dynamic_buffer;
                        }
                }
index ef9f3a2a40303290287b5259b7a71d2a8791ddb4..1d2db5d912b03c572b50f9b64b2f5d2a39de1365 100644 (file)
@@ -274,108 +274,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
 }
 
 
-
-/**
- * vmw_du_cursor_plane_update() - Update cursor image and location
- *
- * @plane: plane object to update
- * @crtc: owning CRTC of @plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of plane on crtc
- * @crtc_y: y offset of plane on crtc
- * @crtc_w: width of plane rectangle on crtc
- * @crtc_h: height of plane rectangle on crtc
- * @src_x: Not used
- * @src_y: Not used
- * @src_w: Not used
- * @src_h: Not used
- *
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
-                              struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb,
-                              int crtc_x, int crtc_y,
-                              unsigned int crtc_w,
-                              unsigned int crtc_h,
-                              uint32_t src_x, uint32_t src_y,
-                              uint32_t src_w, uint32_t src_h)
-{
-       struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-       struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
-       struct vmw_surface *surface = NULL;
-       struct vmw_dma_buffer *dmabuf = NULL;
-       s32 hotspot_x, hotspot_y;
-       int ret;
-
-       hotspot_x = du->hotspot_x + fb->hot_x;
-       hotspot_y = du->hotspot_y + fb->hot_y;
-
-       /* A lot of the code assumes this */
-       if (crtc_w != 64 || crtc_h != 64) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       if (vmw_framebuffer_to_vfb(fb)->dmabuf)
-               dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
-       else
-               surface = vmw_framebuffer_to_vfbs(fb)->surface;
-
-       if (surface && !surface->snooper.image) {
-               DRM_ERROR("surface not suitable for cursor\n");
-               ret = -EINVAL;
-               goto out;
-       }
-
-       /* setup new image */
-       ret = 0;
-       if (surface) {
-               /* vmw_user_surface_lookup takes one reference */
-               du->cursor_surface = surface;
-
-               du->cursor_age = du->cursor_surface->snooper.age;
-
-               ret = vmw_cursor_update_image(dev_priv, surface->snooper.image,
-                                             64, 64, hotspot_x, hotspot_y);
-       } else if (dmabuf) {
-               /* vmw_user_surface_lookup takes one reference */
-               du->cursor_dmabuf = dmabuf;
-
-               ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, crtc_w, crtc_h,
-                                              hotspot_x, hotspot_y);
-       } else {
-               vmw_cursor_update_position(dev_priv, false, 0, 0);
-               goto out;
-       }
-
-       if (!ret) {
-               du->cursor_x = crtc_x + du->set_gui_x;
-               du->cursor_y = crtc_y + du->set_gui_y;
-
-               vmw_cursor_update_position(dev_priv, true,
-                                          du->cursor_x + hotspot_x,
-                                          du->cursor_y + hotspot_y);
-       }
-
-out:
-       return ret;
-}
-
-
-int vmw_du_cursor_plane_disable(struct drm_plane *plane)
-{
-       if (plane->fb) {
-               drm_framebuffer_unreference(plane->fb);
-               plane->fb = NULL;
-       }
-
-       return -EINVAL;
-}
-
-
 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
 {
        vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
@@ -472,18 +370,6 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
 }
 
 
-void
-vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
-                                  struct drm_plane_state *old_state)
-{
-       struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
-       struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-
-       drm_atomic_set_fb_for_plane(plane->state, NULL);
-       vmw_cursor_update_position(dev_priv, false, 0, 0);
-}
-
-
 void
 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
                                  struct drm_plane_state *old_state)
@@ -1498,6 +1384,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
         */
        if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
            dmabuf && only_2d &&
+           mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
            dev_priv->active_display_unit == vmw_du_screen_target) {
                ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
                                              dmabuf, &surface);
index 13f2f1d2818a755012098df126938989ba1fb297..5f8d678ae675156178dc306efc2fc83338c390ba 100644 (file)
@@ -256,10 +256,6 @@ int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
                           u16 *r, u16 *g, u16 *b,
                           uint32_t size,
                           struct drm_modeset_acquire_ctx *ctx);
-int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
-                           uint32_t handle, uint32_t width, uint32_t height,
-                           int32_t hot_x, int32_t hot_y);
-int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
 int vmw_du_connector_set_property(struct drm_connector *connector,
                                  struct drm_property *property,
                                  uint64_t val);
@@ -339,15 +335,6 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
 /* Universal Plane Helpers */
 void vmw_du_primary_plane_destroy(struct drm_plane *plane);
 void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
-int vmw_du_cursor_plane_disable(struct drm_plane *plane);
-int vmw_du_cursor_plane_update(struct drm_plane *plane,
-                              struct drm_crtc *crtc,
-                              struct drm_framebuffer *fb,
-                              int crtc_x, int crtc_y,
-                              unsigned int crtc_w,
-                              unsigned int crtc_h,
-                              uint32_t src_x, uint32_t src_y,
-                              uint32_t src_w, uint32_t src_h);
 
 /* Atomic Helpers */
 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
@@ -356,8 +343,6 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
                                     struct drm_plane_state *state);
 void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
                                       struct drm_plane_state *old_state);
-void vmw_du_cursor_plane_atomic_disable(struct drm_plane *plane,
-                                       struct drm_plane_state *old_state);
 int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
                                   struct drm_plane_state *new_state);
 void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
index bad31bdf09b6c1d8bd31663c7973e1ebb912f340..50be1f034f9efa701f2c6feda57fe28d8cf6d596 100644 (file)
@@ -56,6 +56,8 @@ enum stdu_content_type {
  * @right: Right side of bounding box.
  * @top: Top side of bounding box.
  * @bottom: Bottom side of bounding box.
+ * @fb_left: Left side of the framebuffer/content bounding box
+ * @fb_top: Top of the framebuffer/content bounding box
  * @buf: DMA buffer when DMA-ing between buffer and screen targets.
  * @sid: Surface ID when copying between surface and screen targets.
  */
@@ -63,6 +65,7 @@ struct vmw_stdu_dirty {
        struct vmw_kms_dirty base;
        SVGA3dTransferType  transfer;
        s32 left, right, top, bottom;
+       s32 fb_left, fb_top;
        u32 pitch;
        union {
                struct vmw_dma_buffer *buf;
@@ -647,7 +650,7 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
  *
  * @dirty: The closure structure.
  *
- * This function calculates the bounding box for all the incoming clips
+ * This function calculates the bounding box for all the incoming clips.
  */
 static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 {
@@ -656,11 +659,19 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 
        dirty->num_hits = 1;
 
-       /* Calculate bounding box */
+       /* Calculate destination bounding box */
        ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
        ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
        ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
        ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
+
+       /*
+        * Calculate content bounding box.  We only need the top-left
+        * coordinate because width and height will be the same as the
+        * destination bounding box above
+        */
+       ddirty->fb_left = min_t(s32, ddirty->fb_left, dirty->fb_x);
+       ddirty->fb_top  = min_t(s32, ddirty->fb_top, dirty->fb_y);
 }
 
 
@@ -697,11 +708,11 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
        src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
        src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used);
-       src += dirty->unit_y1 * src_pitch + dirty->unit_x1 * stdu->cpp;
+       src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
 
        dst_pitch = ddirty->pitch;
        dst = ttm_kmap_obj_virtual(&stdu->guest_map, &not_used);
-       dst += dirty->fb_y * dst_pitch + dirty->fb_x * stdu->cpp;
+       dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
 
 
        /* Figure out the real direction */
@@ -760,7 +771,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        }
 
 out_cleanup:
-       ddirty->left = ddirty->top = S32_MAX;
+       ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
        ddirty->right = ddirty->bottom = S32_MIN;
 }
 
@@ -812,6 +823,7 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
                SVGA3D_READ_HOST_VRAM;
        ddirty.left = ddirty.top = S32_MAX;
        ddirty.right = ddirty.bottom = S32_MIN;
+       ddirty.fb_left = ddirty.fb_top = S32_MAX;
        ddirty.pitch = vfb->base.pitches[0];
        ddirty.buf = buf;
        ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
@@ -1355,6 +1367,11 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
                DRM_ERROR("Failed to bind surface to STDU.\n");
        else
                crtc->primary->fb = plane->state->fb;
+
+       ret = vmw_stdu_update_st(dev_priv, stdu);
+
+       if (ret)
+               DRM_ERROR("Failed to update STDU.\n");
 }
 
 
index 7681341fe32b8725840d70b137782f5f1f316bc0..6b70bd259953580204ccecd4ec4334c73e73eed7 100644 (file)
@@ -1274,11 +1274,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint32_t size;
-       uint32_t backup_handle;
+       uint32_t backup_handle = 0;
 
        if (req->multisample_count != 0)
                return -EINVAL;
 
+       if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+               return -EINVAL;
+
        if (unlikely(vmw_user_surface_size == 0))
                vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
                        128;
@@ -1314,12 +1317,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
                                             &res->backup,
                                             &user_srf->backup_base);
-               if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
-                   res->backup_size) {
-                       DRM_ERROR("Surface backup buffer is too small.\n");
-                       vmw_dmabuf_unreference(&res->backup);
-                       ret = -EINVAL;
-                       goto out_unlock;
+               if (ret == 0) {
+                       if (res->backup->base.num_pages * PAGE_SIZE <
+                           res->backup_size) {
+                               DRM_ERROR("Surface backup buffer is too small.\n");
+                               vmw_dmabuf_unreference(&res->backup);
+                               ret = -EINVAL;
+                               goto out_unlock;
+                       } else {
+                               backup_handle = req->buffer_handle;
+                       }
                }
        } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
                ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
@@ -1491,7 +1498,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
                                 dev_priv->stdu_max_height);
 
                if (size.width > max_width || size.height > max_height) {
-                       DRM_ERROR("%ux%u\n, exeeds max surface size %ux%u",
+                       DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
                                  size.width, size.height,
                                  max_width, max_height);
                        return -EINVAL;
index 16d556816b5fcaa62758549d9bceaa88bd4bc839..2fb5f432a54c1afd0f7c6104facb1860dbcb3f3a 100644 (file)
@@ -725,15 +725,16 @@ void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
        spin_lock_irqsave(&ipu->lock, flags);
 
        val = ipu_cm_read(ipu, IPU_CONF);
-       if (vdi) {
+       if (vdi)
                val |= IPU_CONF_IC_INPUT;
-       } else {
+       else
                val &= ~IPU_CONF_IC_INPUT;
-               if (csi_id == 1)
-                       val |= IPU_CONF_CSI_SEL;
-               else
-                       val &= ~IPU_CONF_CSI_SEL;
-       }
+
+       if (csi_id == 1)
+               val |= IPU_CONF_CSI_SEL;
+       else
+               val &= ~IPU_CONF_CSI_SEL;
+
        ipu_cm_write(ipu, val, IPU_CONF);
 
        spin_unlock_irqrestore(&ipu->lock, flags);
index c55563379e2e3ca2a1957ce777b2ac2c3586d9b7..c35f74c830657f26a3e29c34f7cef7e9f864f71a 100644 (file)
@@ -131,8 +131,6 @@ int ipu_pre_get(struct ipu_pre *pre)
        if (pre->in_use)
                return -EBUSY;
 
-       clk_prepare_enable(pre->clk_axi);
-
        /* first get the engine out of reset and remove clock gating */
        writel(0, pre->regs + IPU_PRE_CTRL);
 
@@ -149,12 +147,7 @@ int ipu_pre_get(struct ipu_pre *pre)
 
 void ipu_pre_put(struct ipu_pre *pre)
 {
-       u32 val;
-
-       val = IPU_PRE_CTRL_SFTRST | IPU_PRE_CTRL_CLKGATE;
-       writel(val, pre->regs + IPU_PRE_CTRL);
-
-       clk_disable_unprepare(pre->clk_axi);
+       writel(IPU_PRE_CTRL_SFTRST, pre->regs + IPU_PRE_CTRL);
 
        pre->in_use = false;
 }
@@ -249,6 +242,8 @@ static int ipu_pre_probe(struct platform_device *pdev)
        if (!pre->buffer_virt)
                return -ENOMEM;
 
+       clk_prepare_enable(pre->clk_axi);
+
        pre->dev = dev;
        platform_set_drvdata(pdev, pre);
        mutex_lock(&ipu_pre_list_mutex);
@@ -268,6 +263,8 @@ static int ipu_pre_remove(struct platform_device *pdev)
        available_pres--;
        mutex_unlock(&ipu_pre_list_mutex);
 
+       clk_disable_unprepare(pre->clk_axi);
+
        if (pre->buffer_virt)
                gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
                              IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
index 5901937284e70dcd8e67260feed755c537730e14..d7a49dcfa85eef64231e77213196916c15edcaa7 100644 (file)
@@ -273,7 +273,7 @@ void ide_retry_pc(ide_drive_t *drive)
        ide_requeue_and_plug(drive, failed_rq);
        if (ide_queue_sense_rq(drive, pc)) {
                blk_start_request(failed_rq);
-               ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
+               ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(failed_rq));
        }
 }
 EXPORT_SYMBOL_GPL(ide_retry_pc);
@@ -437,7 +437,8 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 
        /* No more interrupts */
        if ((stat & ATA_DRQ) == 0) {
-               int uptodate, error;
+               int uptodate;
+               blk_status_t error;
 
                debug_log("Packet command completed, %d bytes transferred\n",
                          blk_rq_bytes(rq));
@@ -490,7 +491,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
 
                if (ata_misc_request(rq)) {
                        scsi_req(rq)->result = 0;
-                       error = 0;
+                       error = BLK_STS_OK;
                } else {
 
                        if (blk_rq_is_passthrough(rq) && uptodate <= 0) {
@@ -498,7 +499,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
                                        scsi_req(rq)->result = -EIO;
                        }
 
-                       error = uptodate ? 0 : -EIO;
+                       error = uptodate ? BLK_STS_OK : BLK_STS_IOERR;
                }
 
                ide_complete_rq(drive, error, blk_rq_bytes(rq));
index 07e5ff3a64c330b7ef028cc3b85d4bc820393a28..d55e44ed82b549d67718d4302e7d3973b922e5fd 100644 (file)
@@ -228,7 +228,7 @@ static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
                scsi_req(failed)->sense_len = scsi_req(rq)->sense_len;
                cdrom_analyze_sense_data(drive, failed);
 
-               if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed)))
+               if (ide_end_rq(drive, failed, BLK_STS_IOERR, blk_rq_bytes(failed)))
                        BUG();
        } else
                cdrom_analyze_sense_data(drive, NULL);
@@ -508,7 +508,7 @@ static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
                nr_bytes -= cmd->last_xfer_len;
 
        if (nr_bytes > 0) {
-               ide_complete_rq(drive, 0, nr_bytes);
+               ide_complete_rq(drive, BLK_STS_OK, nr_bytes);
                return true;
        }
 
@@ -674,7 +674,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 out_end:
        if (blk_rq_is_scsi(rq) && rc == 0) {
                scsi_req(rq)->resid_len = 0;
-               blk_end_request_all(rq, 0);
+               blk_end_request_all(rq, BLK_STS_OK);
                hwif->rq = NULL;
        } else {
                if (sense && uptodate)
@@ -699,7 +699,7 @@ out_end:
                                scsi_req(rq)->resid_len += cmd->last_xfer_len;
                }
 
-               ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
+               ide_complete_rq(drive, uptodate ? BLK_STS_OK : BLK_STS_IOERR, blk_rq_bytes(rq));
 
                if (sense && rc == 2)
                        ide_error(drive, "request sense failure", stat);
@@ -844,7 +844,7 @@ out_end:
        if (nsectors == 0)
                nsectors = 1;
 
-       ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9);
+       ide_complete_rq(drive, uptodate ? BLK_STS_OK : BLK_STS_IOERR, nsectors << 9);
 
        return ide_stopped;
 }
index 51c81223e56d07d16645c978f968872316222e06..54d4d78ca46a672a3461e0e751b6f76ba4213adb 100644 (file)
@@ -104,7 +104,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *drive)
                        if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
                                ide_finish_cmd(drive, cmd, stat);
                        else
-                               ide_complete_rq(drive, 0,
+                               ide_complete_rq(drive, BLK_STS_OK,
                                                blk_rq_sectors(cmd->rq) << 9);
                        return ide_stopped;
                }
index 4b7ffd7d158dc23852c0989055b26d61c71277c3..47d5f33797480643a8558b4e021702ebcf00df9e 100644 (file)
@@ -135,7 +135,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
                        return ide_stopped;
                }
                scsi_req(rq)->result = err;
-               ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
+               ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
                return ide_stopped;
        }
 
@@ -143,7 +143,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
 }
 EXPORT_SYMBOL_GPL(ide_error);
 
-static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
+static inline void ide_complete_drive_reset(ide_drive_t *drive, blk_status_t err)
 {
        struct request *rq = drive->hwif->rq;
 
@@ -151,7 +151,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
            scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
                if (err <= 0 && scsi_req(rq)->result == 0)
                        scsi_req(rq)->result = -EIO;
-               ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
+               ide_complete_rq(drive, err, blk_rq_bytes(rq));
        }
 }
 
@@ -191,7 +191,7 @@ static ide_startstop_t atapi_reset_pollfunc(ide_drive_t *drive)
        }
        /* done polling */
        hwif->polling = 0;
-       ide_complete_drive_reset(drive, 0);
+       ide_complete_drive_reset(drive, BLK_STS_OK);
        return ide_stopped;
 }
 
@@ -225,7 +225,7 @@ static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
        ide_hwif_t *hwif = drive->hwif;
        const struct ide_port_ops *port_ops = hwif->port_ops;
        u8 tmp;
-       int err = 0;
+       blk_status_t err = BLK_STS_OK;
 
        if (port_ops && port_ops->reset_poll) {
                err = port_ops->reset_poll(drive);
@@ -247,7 +247,7 @@ static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
                printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n",
                        hwif->name, tmp);
                drive->failures++;
-               err = -EIO;
+               err = BLK_STS_IOERR;
        } else  {
                tmp = ide_read_error(drive);
 
@@ -257,7 +257,7 @@ static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
                } else {
                        ide_reset_report_error(hwif, tmp);
                        drive->failures++;
-                       err = -EIO;
+                       err = BLK_STS_IOERR;
                }
        }
 out:
@@ -392,7 +392,7 @@ static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi)
 
        if (io_ports->ctl_addr == 0) {
                spin_unlock_irqrestore(&hwif->lock, flags);
-               ide_complete_drive_reset(drive, -ENXIO);
+               ide_complete_drive_reset(drive, BLK_STS_IOERR);
                return ide_stopped;
        }
 
index 8ac6048cd2df9145daa13f5ac1dd4eb4deb96f3a..627b1f62a7496f86b952e2dee9f771260cc29de0 100644 (file)
@@ -143,7 +143,7 @@ static ide_startstop_t ide_floppy_issue_pc(ide_drive_t *drive,
 
                drive->failed_pc = NULL;
                drive->pc_callback(drive, 0);
-               ide_complete_rq(drive, -EIO, done);
+               ide_complete_rq(drive, BLK_STS_IOERR, done);
                return ide_stopped;
        }
 
@@ -248,7 +248,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 
                if (ata_misc_request(rq)) {
                        scsi_req(rq)->result = 0;
-                       ide_complete_rq(drive, 0, blk_rq_bytes(rq));
+                       ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));
                        return ide_stopped;
                } else
                        goto out_end;
@@ -303,7 +303,7 @@ out_end:
        drive->failed_pc = NULL;
        if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
                scsi_req(rq)->result = -EIO;
-       ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
+       ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
        return ide_stopped;
 }
 
index 323af721f8cb96e01393124b85c2342b6e9f8ff8..3a234701d92c4ac3965cce3e48edc6ef856ca9a0 100644 (file)
@@ -54,7 +54,7 @@
 #include <linux/uaccess.h>
 #include <asm/io.h>
 
-int ide_end_rq(ide_drive_t *drive, struct request *rq, int error,
+int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
               unsigned int nr_bytes)
 {
        /*
@@ -112,7 +112,7 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
        }
 }
 
-int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
+int ide_complete_rq(ide_drive_t *drive, blk_status_t error, unsigned int nr_bytes)
 {
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = hwif->rq;
@@ -122,7 +122,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
         * if failfast is set on a request, override number of sectors
         * and complete the whole request right now
         */
-       if (blk_noretry_request(rq) && error <= 0)
+       if (blk_noretry_request(rq) && error)
                nr_bytes = blk_rq_sectors(rq) << 9;
 
        rc = ide_end_rq(drive, rq, error, nr_bytes);
@@ -149,7 +149,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
                        scsi_req(rq)->result = -EIO;
        }
 
-       ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
+       ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
 }
 
 static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
@@ -272,7 +272,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
        printk("%s: DRIVE_CMD (null)\n", drive->name);
 #endif
        scsi_req(rq)->result = 0;
-       ide_complete_rq(drive, 0, blk_rq_bytes(rq));
+       ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));
 
        return ide_stopped;
 }
index 0977fc1f40ce431979163cd001f01961d79d49f2..08b54bb3b7058ba5cafd05acbbb2c05b488cb038 100644 (file)
@@ -40,7 +40,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
        return ret;
 }
 
-static void ide_end_sync_rq(struct request *rq, int error)
+static void ide_end_sync_rq(struct request *rq, blk_status_t error)
 {
        complete(rq->end_io_data);
 }
@@ -57,7 +57,7 @@ static int ide_pm_execute_rq(struct request *rq)
        if (unlikely(blk_queue_dying(q))) {
                rq->rq_flags |= RQF_QUIET;
                scsi_req(rq)->result = -ENXIO;
-               __blk_end_request_all(rq, 0);
+               __blk_end_request_all(rq, BLK_STS_OK);
                spin_unlock_irq(q->queue_lock);
                return -ENXIO;
        }
@@ -235,7 +235,7 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
 
        drive->hwif->rq = NULL;
 
-       if (blk_end_request(rq, 0, 0))
+       if (blk_end_request(rq, BLK_STS_OK, 0))
                BUG();
 }
 
index 023562565d118d11dbcc936d0f2374645a52341d..b3f85250dea9c66caf8debae0285f709b4871118 100644 (file)
@@ -773,6 +773,7 @@ static int ide_init_queue(ide_drive_t *drive)
        q->request_fn = do_ide_request;
        q->init_rq_fn = ide_init_rq;
        q->cmd_size = sizeof(struct ide_request);
+       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
        if (blk_init_allocated_queue(q) < 0) {
                blk_cleanup_queue(q);
                return 1;
index a0651f948b76ec22e72ad64c74bec0cb39627a8c..4d062c56877740509eec2d33d99c206ea170ec96 100644 (file)
@@ -474,7 +474,7 @@ static ide_startstop_t ide_tape_issue_pc(ide_drive_t *drive,
 
                drive->failed_pc = NULL;
                drive->pc_callback(drive, 0);
-               ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
+               ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
                return ide_stopped;
        }
        ide_debug_log(IDE_DBG_SENSE, "retry #%d, cmd: 0x%02x", pc->retries,
index d71199d23c9ec02ce36cbb25e3bd87616256f48d..ab1a32cdcb0ad95f3e3a98f0ee9ad80ce24a9343 100644 (file)
@@ -318,7 +318,7 @@ static void ide_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
                }
 
                if (nr_bytes > 0)
-                       ide_complete_rq(drive, 0, nr_bytes);
+                       ide_complete_rq(drive, BLK_STS_OK, nr_bytes);
        }
 }
 
@@ -336,7 +336,7 @@ void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat)
                ide_driveid_update(drive);
        }
 
-       ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
+       ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
 }
 
 /*
@@ -394,7 +394,7 @@ out_end:
        if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
                ide_finish_cmd(drive, cmd, stat);
        else
-               ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
+               ide_complete_rq(drive, BLK_STS_OK, blk_rq_sectors(cmd->rq) << 9);
        return ide_stopped;
 out_err:
        ide_error_cmd(drive, cmd);
index 6a1849bb476ce1ad78e438dee2f55120cf3c8924..57eea5a9047f5072093b598d1076b52008564bc6 100644 (file)
@@ -406,7 +406,7 @@ static int siimage_dma_test_irq(ide_drive_t *drive)
  *     yet.
  */
 
-static int sil_sata_reset_poll(ide_drive_t *drive)
+static blk_status_t sil_sata_reset_poll(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
        void __iomem *sata_status_addr
@@ -419,11 +419,11 @@ static int sil_sata_reset_poll(ide_drive_t *drive)
                if ((sata_stat & 0x03) != 0x03) {
                        printk(KERN_WARNING "%s: reset phy dead, status=0x%08x\n",
                                            hwif->name, sata_stat);
-                       return -ENXIO;
+                       return BLK_STS_IOERR;
                }
        }
 
-       return 0;
+       return BLK_STS_OK;
 }
 
 /**
index 21d38c8af21e560990915b93af8715827d33ee49..7f4f9c4150e3ebfc304cf491050166e7bdffc5a5 100644 (file)
@@ -143,7 +143,7 @@ static void iproc_adc_reg_dump(struct iio_dev *indio_dev)
        iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA);
 }
 
-static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
+static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
 {
        u32 channel_intr_status;
        u32 intr_status;
@@ -167,7 +167,7 @@ static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
        return IRQ_NONE;
 }
 
-static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
+static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
 {
        irqreturn_t retval = IRQ_NONE;
        struct iproc_adc_priv *adc_priv;
@@ -181,7 +181,7 @@ static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
        adc_priv = iio_priv(indio_dev);
 
        regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
-       dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n",
+       dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_handler(),INTRPT_STS:%x\n",
                        intr_status);
 
        intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR;
@@ -566,8 +566,8 @@ static int iproc_adc_probe(struct platform_device *pdev)
        }
 
        ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno,
-                               iproc_adc_interrupt_thread,
                                iproc_adc_interrupt_handler,
+                               iproc_adc_interrupt_thread,
                                IRQF_SHARED, "iproc-adc", indio_dev);
        if (ret) {
                dev_err(&pdev->dev, "request_irq error %d\n", ret);
index ec82106480e124d6188ddb0a000c13bfb3addb76..b0526e4b9530a00265f5051736a22ba845f8004f 100644 (file)
@@ -438,10 +438,10 @@ static ssize_t max9611_shunt_resistor_show(struct device *dev,
        struct max9611_dev *max9611 = iio_priv(dev_to_iio_dev(dev));
        unsigned int i, r;
 
-       i = max9611->shunt_resistor_uohm / 1000;
-       r = max9611->shunt_resistor_uohm % 1000;
+       i = max9611->shunt_resistor_uohm / 1000000;
+       r = max9611->shunt_resistor_uohm % 1000000;
 
-       return sprintf(buf, "%u.%03u\n", i, r);
+       return sprintf(buf, "%u.%06u\n", i, r);
 }
 
 static IIO_DEVICE_ATTR(in_power_shunt_resistor, 0444,
@@ -536,8 +536,8 @@ static int max9611_probe(struct i2c_client *client,
        int ret;
 
        indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*max9611));
-       if (IS_ERR(indio_dev))
-               return PTR_ERR(indio_dev);
+       if (!indio_dev)
+               return -ENOMEM;
 
        i2c_set_clientdata(client, indio_dev);
 
index b2352730908805f8a29822a6a00413452a3464cb..81d4c39e414a4da6b0f8df0ebd8922d371a909d7 100644 (file)
@@ -105,6 +105,8 @@ struct sun4i_gpadc_iio {
        bool                            no_irq;
        /* prevents concurrent reads of temperature and ADC */
        struct mutex                    mutex;
+       struct thermal_zone_device      *tzd;
+       struct device                   *sensor_device;
 };
 
 #define SUN4I_GPADC_ADC_CHANNEL(_channel, _name) {             \
@@ -502,7 +504,6 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
 {
        struct sun4i_gpadc_iio *info = iio_priv(indio_dev);
        const struct of_device_id *of_dev;
-       struct thermal_zone_device *tzd;
        struct resource *mem;
        void __iomem *base;
        int ret;
@@ -532,13 +533,14 @@ static int sun4i_gpadc_probe_dt(struct platform_device *pdev,
        if (!IS_ENABLED(CONFIG_THERMAL_OF))
                return 0;
 
-       tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, info,
-                                                  &sun4i_ts_tz_ops);
-       if (IS_ERR(tzd))
+       info->sensor_device = &pdev->dev;
+       info->tzd = thermal_zone_of_sensor_register(info->sensor_device, 0,
+                                                   info, &sun4i_ts_tz_ops);
+       if (IS_ERR(info->tzd))
                dev_err(&pdev->dev, "could not register thermal sensor: %ld\n",
-                       PTR_ERR(tzd));
+                       PTR_ERR(info->tzd));
 
-       return PTR_ERR_OR_ZERO(tzd);
+       return PTR_ERR_OR_ZERO(info->tzd);
 }
 
 static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
@@ -584,15 +586,15 @@ static int sun4i_gpadc_probe_mfd(struct platform_device *pdev,
                 * of_node, and the device from this driver as third argument to
                 * return the temperature.
                 */
-               struct thermal_zone_device *tzd;
-               tzd = devm_thermal_zone_of_sensor_register(pdev->dev.parent, 0,
-                                                          info,
-                                                          &sun4i_ts_tz_ops);
-               if (IS_ERR(tzd)) {
+               info->sensor_device = pdev->dev.parent;
+               info->tzd = thermal_zone_of_sensor_register(info->sensor_device,
+                                                           0, info,
+                                                           &sun4i_ts_tz_ops);
+               if (IS_ERR(info->tzd)) {
                        dev_err(&pdev->dev,
                                "could not register thermal sensor: %ld\n",
-                               PTR_ERR(tzd));
-                       return PTR_ERR(tzd);
+                               PTR_ERR(info->tzd));
+                       return PTR_ERR(info->tzd);
                }
        } else {
                indio_dev->num_channels =
@@ -688,7 +690,13 @@ static int sun4i_gpadc_remove(struct platform_device *pdev)
 
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       if (!info->no_irq && IS_ENABLED(CONFIG_THERMAL_OF))
+
+       if (!IS_ENABLED(CONFIG_THERMAL_OF))
+               return 0;
+
+       thermal_zone_of_sensor_unregister(info->sensor_device, info->tzd);
+
+       if (!info->no_irq)
                iio_map_array_unregister(indio_dev);
 
        return 0;
@@ -700,6 +708,7 @@ static const struct platform_device_id sun4i_gpadc_id[] = {
        { "sun6i-a31-gpadc-iio", (kernel_ulong_t)&sun6i_gpadc_data },
        { /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(platform, sun4i_gpadc_id);
 
 static struct platform_driver sun4i_gpadc_driver = {
        .driver = {
@@ -711,6 +720,7 @@ static struct platform_driver sun4i_gpadc_driver = {
        .probe = sun4i_gpadc_probe,
        .remove = sun4i_gpadc_remove,
 };
+MODULE_DEVICE_TABLE(of, sun4i_gpadc_of_id);
 
 module_platform_driver(sun4i_gpadc_driver);
 
index 4282ceca3d8f9f417a1a511b3d608896a7ad4159..6cbed7eb118a60d976afbb826d9edd32b62f101d 100644 (file)
@@ -614,7 +614,7 @@ static int tiadc_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*indio_dev));
+       indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
        if (indio_dev == NULL) {
                dev_err(&pdev->dev, "failed to allocate iio device\n");
                return -ENOMEM;
index 978e1592c2a37869eac3b07a48f27354dc6036ff..4061fed93f1f8f015cd99acb307c08962dd0bee0 100644 (file)
@@ -451,7 +451,8 @@ static ssize_t iio_trigger_write_current(struct device *dev,
        return len;
 
 out_trigger_put:
-       iio_trigger_put(trig);
+       if (trig)
+               iio_trigger_put(trig);
        return ret;
 }
 
index b30e0c1c6cc4b70eecab00d4ae49b616a26a7e94..67838edd8b37fbad0e6289f249c223e99f221740 100644 (file)
@@ -74,9 +74,9 @@ static const int int_time_mapping[] = {100000, 50000, 200000, 400000};
 static const struct reg_field reg_field_it =
                                REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4);
 static const struct reg_field reg_field_als_intr =
-                               REG_FIELD(LTR501_INTR, 0, 0);
-static const struct reg_field reg_field_ps_intr =
                                REG_FIELD(LTR501_INTR, 1, 1);
+static const struct reg_field reg_field_ps_intr =
+                               REG_FIELD(LTR501_INTR, 0, 0);
 static const struct reg_field reg_field_als_rate =
                                REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2);
 static const struct reg_field reg_field_ps_rate =
index ddf9bee89f777872ec27db426a46c18dd5da2fc3..aa4df0dcc8c9b81d67b3ebaedc9b9d25392bbfef 100644 (file)
@@ -40,9 +40,9 @@
 #define AS3935_AFE_PWR_BIT     BIT(0)
 
 #define AS3935_INT             0x03
-#define AS3935_INT_MASK                0x07
+#define AS3935_INT_MASK                0x0f
 #define AS3935_EVENT_INT       BIT(3)
-#define AS3935_NOISE_INT       BIT(1)
+#define AS3935_NOISE_INT       BIT(0)
 
 #define AS3935_DATA            0x07
 #define AS3935_DATA_MASK       0x3F
@@ -215,7 +215,7 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
 
        st->buffer[0] = val & AS3935_DATA_MASK;
        iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
-                                          pf->timestamp);
+                                          iio_get_time_ns(indio_dev));
 err_read:
        iio_trigger_notify_done(indio_dev->trig);
 
@@ -244,7 +244,7 @@ static void as3935_event_work(struct work_struct *work)
 
        switch (val) {
        case AS3935_EVENT_INT:
-               iio_trigger_poll(st->trig);
+               iio_trigger_poll_chained(st->trig);
                break;
        case AS3935_NOISE_INT:
                dev_warn(&st->spi->dev, "noise level is too high\n");
@@ -269,8 +269,6 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private)
 
 static void calibrate_as3935(struct as3935_state *st)
 {
-       mutex_lock(&st->lock);
-
        /* mask disturber interrupt bit */
        as3935_write(st, AS3935_INT, BIT(5));
 
@@ -280,8 +278,6 @@ static void calibrate_as3935(struct as3935_state *st)
 
        mdelay(2);
        as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
-
-       mutex_unlock(&st->lock);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -318,6 +314,8 @@ static int as3935_resume(struct device *dev)
        val &= ~AS3935_AFE_PWR_BIT;
        ret = as3935_write(st, AS3935_AFE_GAIN, val);
 
+       calibrate_as3935(st);
+
 err_resume:
        mutex_unlock(&st->lock);
 
index e73d968023f7ce7de418dcf1315b7c554773d604..f1fa1f172107722ef13d8c98e0c2d539d096c2aa 100644 (file)
@@ -1118,8 +1118,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
  * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
+ * Fujitsu LIFEBOOK E546   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E547   0x470f00        50, 12, 09      2 hw buttons
  * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
+ * Fujitsu LIFEBOOK E557   0x570f01        40, 14, 0c      2 hw buttons
  * Fujitsu T725            0x470f01        05, 12, 09      2 hw buttons
  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
@@ -1524,6 +1526,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
                },
        },
+       {
+               /* Fujitsu LIFEBOOK E546  does not work with crc_enabled == 0 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
+               },
+       },
        {
                /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
                .matches = {
@@ -1545,6 +1554,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
                },
        },
+       {
+               /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
+               },
+       },
        {
                /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
                .matches = {
index 77dad045a4683026460dca491419112d730897d7..ad71a5e768dc46432b18b84fb344a7bb514bb56c 100644 (file)
@@ -146,7 +146,7 @@ static int rmi_f03_register_pt(struct f03_data *f03)
        if (!serio)
                return -ENOMEM;
 
-       serio->id.type = SERIO_8042;
+       serio->id.type = SERIO_PS_PSTHRU;
        serio->write = rmi_f03_pt_write;
        serio->port_data = f03;
 
index 9f44ee8ea1bc8a10a6ff4c0dad297b78b8c2f57a..19779b88a47973eef332b88b05d1ac76193f788d 100644 (file)
@@ -118,6 +118,7 @@ static const struct iommu_ops
 
        ops = iommu_ops_from_fwnode(fwnode);
        if ((ops && !ops->of_xlate) ||
+           !of_device_is_available(iommu_spec->np) ||
            (!ops && !of_iommu_driver_present(iommu_spec->np)))
                return NULL;
 
@@ -236,6 +237,12 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
                        ops = ERR_PTR(err);
        }
 
+       /* Ignore all other errors apart from EPROBE_DEFER */
+       if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
+               dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
+               ops = NULL;
+       }
+
        return ops;
 }
 
index d07dd5196ffca59c11532051fb88e2ecdc7326c9..8aa158a091806fd7d3107ed56b8550198be7b9a0 100644 (file)
@@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
                       id);
                return NULL;
        } else {
-               rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL);
+               rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
                if (!rs)
                        return NULL;
                rs->state = CCPResetIdle;
index 8b7faea2ddf88b718c252dc049e5d1b8b5e8357b..422dced7c90ac26dcf0d366fedb32ab9edf44207 100644 (file)
@@ -75,7 +75,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
                if (sk->sk_state != MISDN_BOUND)
                        continue;
                if (!cskb)
-                       cskb = skb_copy(skb, GFP_KERNEL);
+                       cskb = skb_copy(skb, GFP_ATOMIC);
                if (!cskb) {
                        printk(KERN_WARNING "%s no skb\n", __func__);
                        break;
index 5e44768ccffa8f35ed372d68067af6e55d2280ed..4e0de995cd902ba95daa08640de25c61504e0f05 100644 (file)
@@ -296,8 +296,8 @@ void pblk_flush_writer(struct pblk *pblk)
                pr_err("pblk: tear down bio failed\n");
        }
 
-       if (bio->bi_error)
-               pr_err("pblk: flush sync write failed (%u)\n", bio->bi_error);
+       if (bio->bi_status)
+               pr_err("pblk: flush sync write failed (%u)\n", bio->bi_status);
 
        bio_put(bio);
 }
index 4a12f14d78c68e4a901ff5b012f2797af0022f28..762c0b73cb67a0cff1cf2e2a084ebc73971432e5 100644 (file)
@@ -114,7 +114,7 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
                pblk_log_read_err(pblk, rqd);
 #ifdef CONFIG_NVM_DEBUG
        else
-               WARN_ONCE(bio->bi_error, "pblk: corrupted read error\n");
+               WARN_ONCE(bio->bi_status, "pblk: corrupted read error\n");
 #endif
 
        if (rqd->nr_ppas > 1)
@@ -123,7 +123,7 @@ static void pblk_end_io_read(struct nvm_rq *rqd)
        bio_put(bio);
        if (r_ctx->orig_bio) {
 #ifdef CONFIG_NVM_DEBUG
-               WARN_ONCE(r_ctx->orig_bio->bi_error,
+               WARN_ONCE(r_ctx->orig_bio->bi_status,
                                                "pblk: corrupted read bio\n");
 #endif
                bio_endio(r_ctx->orig_bio);
index aef6fd7c4a0cbae0859398fcd07ad9854d4a1f77..79b90d8dbcb39c324212db394169dffd0d9dcc76 100644 (file)
@@ -186,7 +186,7 @@ static void pblk_end_io_write(struct nvm_rq *rqd)
        }
 #ifdef CONFIG_NVM_DEBUG
        else
-               WARN_ONCE(rqd->bio->bi_error, "pblk: corrupted write error\n");
+               WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
 #endif
 
        pblk_complete_write(pblk, rqd, c_ctx);
index cf0e28a0ff61d34844b629f7c22e3625f1efe2bf..8d3b53bb3307e537f73016566dc517b925c3ca68 100644 (file)
@@ -279,8 +279,8 @@ static void rrpc_end_sync_bio(struct bio *bio)
 {
        struct completion *waiting = bio->bi_private;
 
-       if (bio->bi_error)
-               pr_err("nvm: gc request failed (%u).\n", bio->bi_error);
+       if (bio->bi_status)
+               pr_err("nvm: gc request failed (%u).\n", bio->bi_status);
 
        complete(waiting);
 }
@@ -359,7 +359,7 @@ try:
                        goto finished;
                }
                wait_for_completion_io(&wait);
-               if (bio->bi_error) {
+               if (bio->bi_status) {
                        rrpc_inflight_laddr_release(rrpc, rqd);
                        goto finished;
                }
@@ -385,7 +385,7 @@ try:
                wait_for_completion_io(&wait);
 
                rrpc_inflight_laddr_release(rrpc, rqd);
-               if (bio->bi_error)
+               if (bio->bi_status)
                        goto finished;
 
                bio_reset(bio);
index c3ea03c9a1a8ef603a25934ccbe32ea4bfca3d66..dee542fff68ead0bcc0288e78661913729e4776d 100644 (file)
@@ -849,10 +849,11 @@ static inline void wake_up_allocators(struct cache_set *c)
 
 /* Forward declarations */
 
-void bch_count_io_errors(struct cache *, int, const char *);
+void bch_count_io_errors(struct cache *, blk_status_t, const char *);
 void bch_bbio_count_io_errors(struct cache_set *, struct bio *,
-                             int, const char *);
-void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
+                             blk_status_t, const char *);
+void bch_bbio_endio(struct cache_set *, struct bio *, blk_status_t,
+               const char *);
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
 
index 450d0e848ae436ee0e9517b90fa60f00ca5e4964..866dcf78ff8e691e051dace4506ae0ac760b2901 100644 (file)
@@ -307,7 +307,7 @@ static void bch_btree_node_read(struct btree *b)
        bch_submit_bbio(bio, b->c, &b->key, 0);
        closure_sync(&cl);
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                set_btree_node_io_error(b);
 
        bch_bbio_free(bio, b->c);
@@ -374,10 +374,10 @@ static void btree_node_write_endio(struct bio *bio)
        struct closure *cl = bio->bi_private;
        struct btree *b = container_of(cl, struct btree, io);
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                set_btree_node_io_error(b);
 
-       bch_bbio_count_io_errors(b->c, bio, bio->bi_error, "writing btree");
+       bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
        closure_put(cl);
 }
 
index db45a88c0ce9d76a11fd6a755c0f436aac0fb024..6a9b85095e7b5948d1403830c9b3bd734a1e7485 100644 (file)
@@ -50,7 +50,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
 
 /* IO errors */
 
-void bch_count_io_errors(struct cache *ca, int error, const char *m)
+void bch_count_io_errors(struct cache *ca, blk_status_t error, const char *m)
 {
        /*
         * The halflife of an error is:
@@ -103,7 +103,7 @@ void bch_count_io_errors(struct cache *ca, int error, const char *m)
 }
 
 void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
-                             int error, const char *m)
+                             blk_status_t error, const char *m)
 {
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct cache *ca = PTR_CACHE(c, &b->key, 0);
@@ -132,7 +132,7 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
 }
 
 void bch_bbio_endio(struct cache_set *c, struct bio *bio,
-                   int error, const char *m)
+                   blk_status_t error, const char *m)
 {
        struct closure *cl = bio->bi_private;
 
index 1198e53d5670317263e86d5dfdd7b4695fd264d2..0352d05e495c14509fbb0bd22e13771ad872196b 100644 (file)
@@ -549,7 +549,7 @@ static void journal_write_endio(struct bio *bio)
 {
        struct journal_write *w = bio->bi_private;
 
-       cache_set_err_on(bio->bi_error, w->c, "journal io error");
+       cache_set_err_on(bio->bi_status, w->c, "journal io error");
        closure_put(&w->c->journal.io);
 }
 
index 13b8a907006dd2be35c89c1188c0fa4932459fde..f633b30c962e197db1480f58eb6591c466dd9aab 100644 (file)
@@ -63,14 +63,14 @@ static void read_moving_endio(struct bio *bio)
        struct moving_io *io = container_of(bio->bi_private,
                                            struct moving_io, cl);
 
-       if (bio->bi_error)
-               io->op.error = bio->bi_error;
+       if (bio->bi_status)
+               io->op.status = bio->bi_status;
        else if (!KEY_DIRTY(&b->key) &&
                 ptr_stale(io->op.c, &b->key, 0)) {
-               io->op.error = -EINTR;
+               io->op.status = BLK_STS_IOERR;
        }
 
-       bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move");
+       bch_bbio_endio(io->op.c, bio, bio->bi_status, "reading data to move");
 }
 
 static void moving_init(struct moving_io *io)
@@ -92,7 +92,7 @@ static void write_moving(struct closure *cl)
        struct moving_io *io = container_of(cl, struct moving_io, cl);
        struct data_insert_op *op = &io->op;
 
-       if (!op->error) {
+       if (!op->status) {
                moving_init(io);
 
                io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
index 709c9cc34369fe5c0e0b206673debfcf0c3efe4f..019b3df9f1c603be4a7d2ad0b6d1de05ad6a2711 100644 (file)
@@ -81,7 +81,7 @@ static void bch_data_insert_keys(struct closure *cl)
        if (ret == -ESRCH) {
                op->replace_collision = true;
        } else if (ret) {
-               op->error               = -ENOMEM;
+               op->status              = BLK_STS_RESOURCE;
                op->insert_data_done    = true;
        }
 
@@ -178,17 +178,17 @@ static void bch_data_insert_endio(struct bio *bio)
        struct closure *cl = bio->bi_private;
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                /* TODO: We could try to recover from this. */
                if (op->writeback)
-                       op->error = bio->bi_error;
+                       op->status = bio->bi_status;
                else if (!op->replace)
                        set_closure_fn(cl, bch_data_insert_error, op->wq);
                else
                        set_closure_fn(cl, NULL, NULL);
        }
 
-       bch_bbio_endio(op->c, bio, bio->bi_error, "writing data to cache");
+       bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
 }
 
 static void bch_data_insert_start(struct closure *cl)
@@ -488,15 +488,15 @@ static void bch_cache_read_endio(struct bio *bio)
         * from the backing device.
         */
 
-       if (bio->bi_error)
-               s->iop.error = bio->bi_error;
+       if (bio->bi_status)
+               s->iop.status = bio->bi_status;
        else if (!KEY_DIRTY(&b->key) &&
                 ptr_stale(s->iop.c, &b->key, 0)) {
                atomic_long_inc(&s->iop.c->cache_read_races);
-               s->iop.error = -EINTR;
+               s->iop.status = BLK_STS_IOERR;
        }
 
-       bch_bbio_endio(s->iop.c, bio, bio->bi_error, "reading from cache");
+       bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
 }
 
 /*
@@ -593,9 +593,9 @@ static void request_endio(struct bio *bio)
 {
        struct closure *cl = bio->bi_private;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                struct search *s = container_of(cl, struct search, cl);
-               s->iop.error = bio->bi_error;
+               s->iop.status = bio->bi_status;
                /* Only cache read errors are recoverable */
                s->recoverable = false;
        }
@@ -611,7 +611,7 @@ static void bio_complete(struct search *s)
                                    &s->d->disk->part0, s->start_time);
 
                trace_bcache_request_end(s->d, s->orig_bio);
-               s->orig_bio->bi_error = s->iop.error;
+               s->orig_bio->bi_status = s->iop.status;
                bio_endio(s->orig_bio);
                s->orig_bio = NULL;
        }
@@ -664,7 +664,7 @@ static inline struct search *search_alloc(struct bio *bio,
        s->iop.inode            = d->id;
        s->iop.write_point      = hash_long((unsigned long) current, 16);
        s->iop.write_prio       = 0;
-       s->iop.error            = 0;
+       s->iop.status           = 0;
        s->iop.flags            = 0;
        s->iop.flush_journal    = op_is_flush(bio->bi_opf);
        s->iop.wq               = bcache_wq;
@@ -707,7 +707,7 @@ static void cached_dev_read_error(struct closure *cl)
                /* Retry from the backing device: */
                trace_bcache_read_retry(s->orig_bio);
 
-               s->iop.error = 0;
+               s->iop.status = 0;
                do_bio_hook(s, s->orig_bio);
 
                /* XXX: invalidate cache */
@@ -767,7 +767,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
                                  !s->cache_miss, s->iop.bypass);
        trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
 
-       if (s->iop.error)
+       if (s->iop.status)
                continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
        else if (s->iop.bio || verify(dc, &s->bio.bio))
                continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
index 1ff36875c2b30bcb29e650290b194c52cc57bc09..7689176951ce5bed3eeed3a0d33f9f8e28284fb9 100644 (file)
@@ -10,7 +10,7 @@ struct data_insert_op {
        unsigned                inode;
        uint16_t                write_point;
        uint16_t                write_prio;
-       short                   error;
+       blk_status_t            status;
 
        union {
                uint16_t        flags;
index e57353e39168120dccdfe34965ac9360b58fd2db..fbc4f5412decd9de92d8edd86ebccd18f25f1507 100644 (file)
@@ -271,7 +271,7 @@ static void write_super_endio(struct bio *bio)
 {
        struct cache *ca = bio->bi_private;
 
-       bch_count_io_errors(ca, bio->bi_error, "writing superblock");
+       bch_count_io_errors(ca, bio->bi_status, "writing superblock");
        closure_put(&ca->set->sb_write);
 }
 
@@ -321,7 +321,7 @@ static void uuid_endio(struct bio *bio)
        struct closure *cl = bio->bi_private;
        struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
 
-       cache_set_err_on(bio->bi_error, c, "accessing uuids");
+       cache_set_err_on(bio->bi_status, c, "accessing uuids");
        bch_bbio_free(bio, c);
        closure_put(cl);
 }
@@ -494,7 +494,7 @@ static void prio_endio(struct bio *bio)
 {
        struct cache *ca = bio->bi_private;
 
-       cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
+       cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
        bch_bbio_free(bio, ca->set);
        closure_put(&ca->prio);
 }
index 6ac2e48b92354474d9dcc0e98096be2b3ba86eeb..42c66e76f05e519ba05dc910695011071f929d0b 100644 (file)
@@ -167,7 +167,7 @@ static void dirty_endio(struct bio *bio)
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                SET_KEY_DIRTY(&w->key, false);
 
        closure_put(&io->cl);
@@ -195,7 +195,7 @@ static void read_dirty_endio(struct bio *bio)
        struct dirty_io *io = w->private;
 
        bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
-                           bio->bi_error, "reading dirty data from cache");
+                           bio->bi_status, "reading dirty data from cache");
 
        dirty_endio(bio);
 }
index ae7da2c30a5781353f39ef54b9b5e895e5738319..82d27384d31f523ec11bf580ba5c165b77319956 100644 (file)
@@ -229,7 +229,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
 EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
 
 void dm_cell_error(struct dm_bio_prison *prison,
-                  struct dm_bio_prison_cell *cell, int error)
+                  struct dm_bio_prison_cell *cell, blk_status_t error)
 {
        struct bio_list bios;
        struct bio *bio;
@@ -238,7 +238,7 @@ void dm_cell_error(struct dm_bio_prison *prison,
        dm_cell_release(prison, cell, &bios);
 
        while ((bio = bio_list_pop(&bios))) {
-               bio->bi_error = error;
+               bio->bi_status = error;
                bio_endio(bio);
        }
 }
index cddd4ac07e2cb2664d3e8b478193f7fe699ee85a..cec52ac5e1ae76a4836ac2ee24b6e71fd56d4530 100644 (file)
@@ -91,7 +91,7 @@ void dm_cell_release_no_holder(struct dm_bio_prison *prison,
                               struct dm_bio_prison_cell *cell,
                               struct bio_list *inmates);
 void dm_cell_error(struct dm_bio_prison *prison,
-                  struct dm_bio_prison_cell *cell, int error);
+                  struct dm_bio_prison_cell *cell, blk_status_t error);
 
 /*
  * Visits the cell and then releases.  Guarantees no new inmates are
index 840c1496b2b138ef504bde4c441b1082df183473..850ff6c6799449541cf8c0357cf7efa17d9e8c60 100644 (file)
@@ -145,8 +145,8 @@ struct dm_buffer {
        enum data_mode data_mode;
        unsigned char list_mode;                /* LIST_* */
        unsigned hold_count;
-       int read_error;
-       int write_error;
+       blk_status_t read_error;
+       blk_status_t write_error;
        unsigned long state;
        unsigned long last_accessed;
        struct dm_bufio_client *c;
@@ -555,7 +555,7 @@ static void dmio_complete(unsigned long error, void *context)
 {
        struct dm_buffer *b = context;
 
-       b->bio.bi_error = error ? -EIO : 0;
+       b->bio.bi_status = error ? BLK_STS_IOERR : 0;
        b->bio.bi_end_io(&b->bio);
 }
 
@@ -588,7 +588,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 
        r = dm_io(&io_req, 1, &region, NULL);
        if (r) {
-               b->bio.bi_error = r;
+               b->bio.bi_status = errno_to_blk_status(r);
                end_io(&b->bio);
        }
 }
@@ -596,7 +596,7 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
 static void inline_endio(struct bio *bio)
 {
        bio_end_io_t *end_fn = bio->bi_private;
-       int error = bio->bi_error;
+       blk_status_t status = bio->bi_status;
 
        /*
         * Reset the bio to free any attached resources
@@ -604,7 +604,7 @@ static void inline_endio(struct bio *bio)
         */
        bio_reset(bio);
 
-       bio->bi_error = error;
+       bio->bi_status = status;
        end_fn(bio);
 }
 
@@ -685,11 +685,12 @@ static void write_endio(struct bio *bio)
 {
        struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
-       b->write_error = bio->bi_error;
-       if (unlikely(bio->bi_error)) {
+       b->write_error = bio->bi_status;
+       if (unlikely(bio->bi_status)) {
                struct dm_bufio_client *c = b->c;
-               int error = bio->bi_error;
-               (void)cmpxchg(&c->async_write_error, 0, error);
+
+               (void)cmpxchg(&c->async_write_error, 0,
+                               blk_status_to_errno(bio->bi_status));
        }
 
        BUG_ON(!test_bit(B_WRITING, &b->state));
@@ -1063,7 +1064,7 @@ static void read_endio(struct bio *bio)
 {
        struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
 
-       b->read_error = bio->bi_error;
+       b->read_error = bio->bi_status;
 
        BUG_ON(!test_bit(B_READING, &b->state));
 
@@ -1107,7 +1108,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
        wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 
        if (b->read_error) {
-               int error = b->read_error;
+               int error = blk_status_to_errno(b->read_error);
 
                dm_bufio_release(b);
 
@@ -1257,7 +1258,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
  */
 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
 {
-       int a, f;
+       blk_status_t a;
+       int f;
        unsigned long buffers_processed = 0;
        struct dm_buffer *b, *tmp;
 
index d682a0511381aad0cadb7ab1f4eae9f815639aa1..c5ea03fc7ee1537914f222753b5018bf34e4a169 100644 (file)
@@ -119,7 +119,7 @@ static void iot_io_end(struct io_tracker *iot, sector_t len)
  */
 struct continuation {
        struct work_struct ws;
-       int input;
+       blk_status_t input;
 };
 
 static inline void init_continuation(struct continuation *k,
@@ -145,7 +145,7 @@ struct batcher {
        /*
         * The operation that everyone is waiting for.
         */
-       int (*commit_op)(void *context);
+       blk_status_t (*commit_op)(void *context);
        void *commit_context;
 
        /*
@@ -171,8 +171,7 @@ struct batcher {
 static void __commit(struct work_struct *_ws)
 {
        struct batcher *b = container_of(_ws, struct batcher, commit_work);
-
-       int r;
+       blk_status_t r;
        unsigned long flags;
        struct list_head work_items;
        struct work_struct *ws, *tmp;
@@ -205,7 +204,7 @@ static void __commit(struct work_struct *_ws)
 
        while ((bio = bio_list_pop(&bios))) {
                if (r) {
-                       bio->bi_error = r;
+                       bio->bi_status = r;
                        bio_endio(bio);
                } else
                        b->issue_op(bio, b->issue_context);
@@ -213,7 +212,7 @@ static void __commit(struct work_struct *_ws)
 }
 
 static void batcher_init(struct batcher *b,
-                        int (*commit_op)(void *),
+                        blk_status_t (*commit_op)(void *),
                         void *commit_context,
                         void (*issue_op)(struct bio *bio, void *),
                         void *issue_context,
@@ -955,7 +954,7 @@ static void writethrough_endio(struct bio *bio)
 
        dm_unhook_bio(&pb->hook_info, bio);
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                bio_endio(bio);
                return;
        }
@@ -1220,7 +1219,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
        struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
 
        if (read_err || write_err)
-               mg->k.input = -EIO;
+               mg->k.input = BLK_STS_IOERR;
 
        queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1266,8 +1265,8 @@ static void overwrite_endio(struct bio *bio)
 
        dm_unhook_bio(&pb->hook_info, bio);
 
-       if (bio->bi_error)
-               mg->k.input = bio->bi_error;
+       if (bio->bi_status)
+               mg->k.input = bio->bi_status;
 
        queue_continuation(mg->cache->wq, &mg->k);
 }
@@ -1323,8 +1322,10 @@ static void mg_complete(struct dm_cache_migration *mg, bool success)
                if (mg->overwrite_bio) {
                        if (success)
                                force_set_dirty(cache, cblock);
+                       else if (mg->k.input)
+                               mg->overwrite_bio->bi_status = mg->k.input;
                        else
-                               mg->overwrite_bio->bi_error = (mg->k.input ? : -EIO);
+                               mg->overwrite_bio->bi_status = BLK_STS_IOERR;
                        bio_endio(mg->overwrite_bio);
                } else {
                        if (success)
@@ -1504,7 +1505,7 @@ static void mg_copy(struct work_struct *ws)
                r = copy(mg, is_policy_promote);
                if (r) {
                        DMERR_LIMIT("%s: migration copy failed", cache_device_name(cache));
-                       mg->k.input = -EIO;
+                       mg->k.input = BLK_STS_IOERR;
                        mg_complete(mg, false);
                }
        }
@@ -1907,12 +1908,12 @@ static int commit(struct cache *cache, bool clean_shutdown)
 /*
  * Used by the batcher.
  */
-static int commit_op(void *context)
+static blk_status_t commit_op(void *context)
 {
        struct cache *cache = context;
 
        if (dm_cache_changed_this_transaction(cache->cmd))
-               return commit(cache, false);
+               return errno_to_blk_status(commit(cache, false));
 
        return 0;
 }
@@ -2018,7 +2019,7 @@ static void requeue_deferred_bios(struct cache *cache)
        bio_list_init(&cache->deferred_bios);
 
        while ((bio = bio_list_pop(&bios))) {
-               bio->bi_error = DM_ENDIO_REQUEUE;
+               bio->bi_status = BLK_STS_DM_REQUEUE;
                bio_endio(bio);
        }
 }
@@ -2820,7 +2821,8 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
        return r;
 }
 
-static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int cache_end_io(struct dm_target *ti, struct bio *bio,
+               blk_status_t *error)
 {
        struct cache *cache = ti->private;
        unsigned long flags;
@@ -2838,7 +2840,7 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
        bio_drop_shared_lock(cache, bio);
        accounted_complete(cache, bio);
 
-       return 0;
+       return DM_ENDIO_DONE;
 }
 
 static int write_dirty_bitset(struct cache *cache)
index ebf9e72d479b9c46e2316eb121917ce9862af5be..586cef085c6afcdafdd1c70c9c1421ac0e27509b 100644 (file)
@@ -71,7 +71,7 @@ struct dm_crypt_io {
        struct convert_context ctx;
 
        atomic_t io_pending;
-       int error;
+       blk_status_t error;
        sector_t sector;
 
        struct rb_node rb_node;
@@ -1292,7 +1292,7 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
 /*
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
-static int crypt_convert(struct crypt_config *cc,
+static blk_status_t crypt_convert(struct crypt_config *cc,
                         struct convert_context *ctx)
 {
        unsigned int tag_offset = 0;
@@ -1343,13 +1343,13 @@ static int crypt_convert(struct crypt_config *cc,
                 */
                case -EBADMSG:
                        atomic_dec(&ctx->cc_pending);
-                       return -EILSEQ;
+                       return BLK_STS_PROTECTION;
                /*
                 * There was an error while processing the request.
                 */
                default:
                        atomic_dec(&ctx->cc_pending);
-                       return -EIO;
+                       return BLK_STS_IOERR;
                }
        }
 
@@ -1463,7 +1463,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
        struct bio *base_bio = io->base_bio;
-       int error = io->error;
+       blk_status_t error = io->error;
 
        if (!atomic_dec_and_test(&io->io_pending))
                return;
@@ -1476,7 +1476,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
        else
                kfree(io->integrity_metadata);
 
-       base_bio->bi_error = error;
+       base_bio->bi_status = error;
        bio_endio(base_bio);
 }
 
@@ -1502,7 +1502,7 @@ static void crypt_endio(struct bio *clone)
        struct dm_crypt_io *io = clone->bi_private;
        struct crypt_config *cc = io->cc;
        unsigned rw = bio_data_dir(clone);
-       int error;
+       blk_status_t error;
 
        /*
         * free the processed pages
@@ -1510,7 +1510,7 @@ static void crypt_endio(struct bio *clone)
        if (rw == WRITE)
                crypt_free_buffer_pages(cc, clone);
 
-       error = clone->bi_error;
+       error = clone->bi_status;
        bio_put(clone);
 
        if (rw == READ && !error) {
@@ -1570,7 +1570,7 @@ static void kcryptd_io_read_work(struct work_struct *work)
 
        crypt_inc_pending(io);
        if (kcryptd_io_read(io, GFP_NOIO))
-               io->error = -ENOMEM;
+               io->error = BLK_STS_RESOURCE;
        crypt_dec_pending(io);
 }
 
@@ -1656,7 +1656,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
        sector_t sector;
        struct rb_node **rbp, *parent;
 
-       if (unlikely(io->error < 0)) {
+       if (unlikely(io->error)) {
                crypt_free_buffer_pages(cc, clone);
                bio_put(clone);
                crypt_dec_pending(io);
@@ -1697,7 +1697,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
        struct bio *clone;
        int crypt_finished;
        sector_t sector = io->sector;
-       int r;
+       blk_status_t r;
 
        /*
         * Prevent io from disappearing until this function completes.
@@ -1707,7 +1707,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
        clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
        if (unlikely(!clone)) {
-               io->error = -EIO;
+               io->error = BLK_STS_IOERR;
                goto dec;
        }
 
@@ -1718,7 +1718,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
        crypt_inc_pending(io);
        r = crypt_convert(cc, &io->ctx);
-       if (r < 0)
+       if (r)
                io->error = r;
        crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
 
@@ -1740,7 +1740,7 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
-       int r = 0;
+       blk_status_t r;
 
        crypt_inc_pending(io);
 
@@ -1748,7 +1748,7 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
                           io->sector);
 
        r = crypt_convert(cc, &io->ctx);
-       if (r < 0)
+       if (r)
                io->error = r;
 
        if (atomic_dec_and_test(&io->ctx.cc_pending))
@@ -1781,9 +1781,9 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
        if (error == -EBADMSG) {
                DMERR_LIMIT("INTEGRITY AEAD ERROR, sector %llu",
                            (unsigned long long)le64_to_cpu(*org_sector_of_dmreq(cc, dmreq)));
-               io->error = -EILSEQ;
+               io->error = BLK_STS_PROTECTION;
        } else if (error < 0)
-               io->error = -EIO;
+               io->error = BLK_STS_IOERR;
 
        crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
 
@@ -2795,10 +2795,10 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
         * and is aligned to this size as defined in IO hints.
         */
        if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        io = dm_per_bio_data(bio, cc->per_bio_data_size);
        crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
index 13305a182611080902cc944e45baf835818ebe38..3d04d5ce19d936b2ca46dce6d87d6826c023d679 100644 (file)
@@ -321,7 +321,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
                if (bio_data_dir(bio) == READ) {
                        if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags) &&
                            !test_bit(ERROR_WRITES, &fc->flags))
-                               return -EIO;
+                               return DM_MAPIO_KILL;
                        goto map_bio;
                }
 
@@ -349,7 +349,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
                /*
                 * By default, error all I/O.
                 */
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
 map_bio:
@@ -358,12 +358,13 @@ map_bio:
        return DM_MAPIO_REMAPPED;
 }
 
-static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int flakey_end_io(struct dm_target *ti, struct bio *bio,
+               blk_status_t *error)
 {
        struct flakey_c *fc = ti->private;
        struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
 
-       if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+       if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
                if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
                    all_corrupt_bio_flags_match(bio, fc)) {
                        /*
@@ -377,11 +378,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
                         * Error read during the down_interval if drop_writes
                         * and error_writes were not configured.
                         */
-                       return -EIO;
+                       *error = BLK_STS_IOERR;
                }
        }
 
-       return error;
+       return DM_ENDIO_DONE;
 }
 
 static void flakey_status(struct dm_target *ti, status_type_t type,
index 7910bfe50da4469c44b571363cc6696f74f5fa42..339af38459fc3738752e79e0b98647b3c7241262 100644 (file)
@@ -246,7 +246,7 @@ struct dm_integrity_io {
        unsigned metadata_offset;
 
        atomic_t in_flight;
-       int bi_error;
+       blk_status_t bi_status;
 
        struct completion *completion;
 
@@ -1115,8 +1115,8 @@ static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *
 static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
 {
        int r = dm_integrity_failed(ic);
-       if (unlikely(r) && !bio->bi_error)
-               bio->bi_error = r;
+       if (unlikely(r) && !bio->bi_status)
+               bio->bi_status = errno_to_blk_status(r);
        bio_endio(bio);
 }
 
@@ -1124,7 +1124,7 @@ static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *di
 {
        struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
 
-       if (unlikely(dio->fua) && likely(!bio->bi_error) && likely(!dm_integrity_failed(ic)))
+       if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
                submit_flush_bio(ic, dio);
        else
                do_endio(ic, bio);
@@ -1143,9 +1143,9 @@ static void dec_in_flight(struct dm_integrity_io *dio)
 
                bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
 
-               if (unlikely(dio->bi_error) && !bio->bi_error)
-                       bio->bi_error = dio->bi_error;
-               if (likely(!bio->bi_error) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
+               if (unlikely(dio->bi_status) && !bio->bi_status)
+                       bio->bi_status = dio->bi_status;
+               if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
                        dio->range.logical_sector += dio->range.n_sectors;
                        bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
                        INIT_WORK(&dio->work, integrity_bio_wait);
@@ -1319,7 +1319,7 @@ skip_io:
        dec_in_flight(dio);
        return;
 error:
-       dio->bi_error = r;
+       dio->bi_status = errno_to_blk_status(r);
        dec_in_flight(dio);
 }
 
@@ -1332,7 +1332,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
        sector_t area, offset;
 
        dio->ic = ic;
-       dio->bi_error = 0;
+       dio->bi_status = 0;
 
        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
                submit_flush_bio(ic, dio);
@@ -1353,13 +1353,13 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
                DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
                      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
                      (unsigned long long)ic->provided_data_sectors);
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
        if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
                DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
                      ic->sectors_per_block,
                      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        if (ic->sectors_per_block > 1) {
@@ -1369,7 +1369,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
                        if (unlikely((bv.bv_offset | bv.bv_len) & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
                                DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
                                        bv.bv_offset, bv.bv_len, ic->sectors_per_block);
-                               return -EIO;
+                               return DM_MAPIO_KILL;
                        }
                }
        }
@@ -1384,18 +1384,18 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
                                wanted_tag_size *= ic->tag_size;
                        if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
                                DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
-                               return -EIO;
+                               return DM_MAPIO_KILL;
                        }
                }
        } else {
                if (unlikely(bip != NULL)) {
                        DMERR("Unexpected integrity data when using internal hash");
-                       return -EIO;
+                       return DM_MAPIO_KILL;
                }
        }
 
        if (unlikely(ic->mode == 'R') && unlikely(dio->write))
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
        dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
index 3702e502466d37a902c64a74a1f5ad7b516770bb..c8f8f3004085e39712897fb34f9e78c4a91fa5b2 100644 (file)
@@ -124,7 +124,7 @@ static void complete_io(struct io *io)
        fn(error_bits, context);
 }
 
-static void dec_count(struct io *io, unsigned int region, int error)
+static void dec_count(struct io *io, unsigned int region, blk_status_t error)
 {
        if (error)
                set_bit(region, &io->error_bits);
@@ -137,9 +137,9 @@ static void endio(struct bio *bio)
 {
        struct io *io;
        unsigned region;
-       int error;
+       blk_status_t error;
 
-       if (bio->bi_error && bio_data_dir(bio) == READ)
+       if (bio->bi_status && bio_data_dir(bio) == READ)
                zero_fill_bio(bio);
 
        /*
@@ -147,7 +147,7 @@ static void endio(struct bio *bio)
         */
        retrieve_io_and_region_from_bio(bio, &io, &region);
 
-       error = bio->bi_error;
+       error = bio->bi_status;
        bio_put(bio);
 
        dec_count(io, region, error);
@@ -319,7 +319,7 @@ static void do_region(int op, int op_flags, unsigned region,
        if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES ||
             op == REQ_OP_WRITE_SAME)  &&
            special_cmd_max_sectors == 0) {
-               dec_count(io, region, -EOPNOTSUPP);
+               dec_count(io, region, BLK_STS_NOTSUPP);
                return;
        }
 
index 4dfe38655a49c495837b36e7984c28c60ef56100..a1da0eb58a93e51355c680bc3a056211042f3f54 100644 (file)
@@ -150,10 +150,10 @@ static void log_end_io(struct bio *bio)
 {
        struct log_writes_c *lc = bio->bi_private;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                unsigned long flags;
 
-               DMERR("Error writing log block, error=%d", bio->bi_error);
+               DMERR("Error writing log block, error=%d", bio->bi_status);
                spin_lock_irqsave(&lc->blocks_lock, flags);
                lc->logging_enabled = false;
                spin_unlock_irqrestore(&lc->blocks_lock, flags);
@@ -586,7 +586,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
                spin_lock_irq(&lc->blocks_lock);
                lc->logging_enabled = false;
                spin_unlock_irq(&lc->blocks_lock);
-               return -ENOMEM;
+               return DM_MAPIO_KILL;
        }
        INIT_LIST_HEAD(&block->list);
        pb->block = block;
@@ -639,7 +639,7 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
                        spin_lock_irq(&lc->blocks_lock);
                        lc->logging_enabled = false;
                        spin_unlock_irq(&lc->blocks_lock);
-                       return -ENOMEM;
+                       return DM_MAPIO_KILL;
                }
 
                src = kmap_atomic(bv.bv_page);
@@ -664,7 +664,8 @@ map_bio:
        return DM_MAPIO_REMAPPED;
 }
 
-static int normal_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int normal_end_io(struct dm_target *ti, struct bio *bio,
+               blk_status_t *error)
 {
        struct log_writes_c *lc = ti->private;
        struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
@@ -686,7 +687,7 @@ static int normal_end_io(struct dm_target *ti, struct bio *bio, int error)
                spin_unlock_irqrestore(&lc->blocks_lock, flags);
        }
 
-       return error;
+       return DM_ENDIO_DONE;
 }
 
 /*
index 3df056b73b6610927a57058394a5bbe08f1b3404..a7d2e0840cc5e97555f016f59a2b1ae1ae706b8b 100644 (file)
@@ -559,13 +559,13 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
                if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
                        return DM_MAPIO_REQUEUE;
                dm_report_EIO(m);
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        mpio->pgpath = pgpath;
        mpio->nr_bytes = nr_bytes;
 
-       bio->bi_error = 0;
+       bio->bi_status = 0;
        bio->bi_bdev = pgpath->path.dev->bdev;
        bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
 
@@ -621,11 +621,18 @@ static void process_queued_bios(struct work_struct *work)
        blk_start_plug(&plug);
        while ((bio = bio_list_pop(&bios))) {
                r = __multipath_map_bio(m, bio, get_mpio_from_bio(bio));
-               if (r < 0 || r == DM_MAPIO_REQUEUE) {
-                       bio->bi_error = r;
+               switch (r) {
+               case DM_MAPIO_KILL:
+                       bio->bi_status = BLK_STS_IOERR;
+                       bio_endio(bio);
+               case DM_MAPIO_REQUEUE:
+                       bio->bi_status = BLK_STS_DM_REQUEUE;
                        bio_endio(bio);
-               } else if (r == DM_MAPIO_REMAPPED)
+                       break;
+               case DM_MAPIO_REMAPPED:
                        generic_make_request(bio);
+                       break;
+               }
        }
        blk_finish_plug(&plug);
 }
@@ -1442,22 +1449,15 @@ static void activate_path_work(struct work_struct *work)
        activate_or_offline_path(pgpath);
 }
 
-static int noretry_error(int error)
+static int noretry_error(blk_status_t error)
 {
        switch (error) {
-       case -EBADE:
-               /*
-                * EBADE signals an reservation conflict.
-                * We shouldn't fail the path here as we can communicate with
-                * the target.  We should failover to the next path, but in
-                * doing so we might be causing a ping-pong between paths.
-                * So just return the reservation conflict error.
-                */
-       case -EOPNOTSUPP:
-       case -EREMOTEIO:
-       case -EILSEQ:
-       case -ENODATA:
-       case -ENOSPC:
+       case BLK_STS_NOTSUPP:
+       case BLK_STS_NOSPC:
+       case BLK_STS_TARGET:
+       case BLK_STS_NEXUS:
+       case BLK_STS_MEDIUM:
+       case BLK_STS_RESOURCE:
                return 1;
        }
 
@@ -1466,7 +1466,7 @@ static int noretry_error(int error)
 }
 
 static int multipath_end_io(struct dm_target *ti, struct request *clone,
-                           int error, union map_info *map_context)
+                           blk_status_t error, union map_info *map_context)
 {
        struct dm_mpath_io *mpio = get_mpio(map_context);
        struct pgpath *pgpath = mpio->pgpath;
@@ -1493,7 +1493,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 
                if (atomic_read(&m->nr_valid_paths) == 0 &&
                    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-                       if (error == -EIO)
+                       if (error == BLK_STS_IOERR)
                                dm_report_EIO(m);
                        /* complete with the original error */
                        r = DM_ENDIO_DONE;
@@ -1510,24 +1510,26 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
        return r;
 }
 
-static int do_end_io_bio(struct multipath *m, struct bio *clone,
-                        int error, struct dm_mpath_io *mpio)
+static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
+               blk_status_t *error)
 {
+       struct multipath *m = ti->private;
+       struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
+       struct pgpath *pgpath = mpio->pgpath;
        unsigned long flags;
+       int r = DM_ENDIO_DONE;
 
-       if (!error)
-               return 0;       /* I/O complete */
-
-       if (noretry_error(error))
-               return error;
+       if (!*error || noretry_error(*error))
+               goto done;
 
-       if (mpio->pgpath)
-               fail_path(mpio->pgpath);
+       if (pgpath)
+               fail_path(pgpath);
 
        if (atomic_read(&m->nr_valid_paths) == 0 &&
            !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
                dm_report_EIO(m);
-               return -EIO;
+               *error = BLK_STS_IOERR;
+               goto done;
        }
 
        /* Queue for the daemon to resubmit */
@@ -1539,23 +1541,11 @@ static int do_end_io_bio(struct multipath *m, struct bio *clone,
        if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
                queue_work(kmultipathd, &m->process_queued_bios);
 
-       return DM_ENDIO_INCOMPLETE;
-}
-
-static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, int error)
-{
-       struct multipath *m = ti->private;
-       struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
-       struct pgpath *pgpath;
-       struct path_selector *ps;
-       int r;
-
-       BUG_ON(!mpio);
-
-       r = do_end_io_bio(m, clone, error, mpio);
-       pgpath = mpio->pgpath;
+       r = DM_ENDIO_INCOMPLETE;
+done:
        if (pgpath) {
-               ps = &pgpath->pg->ps;
+               struct path_selector *ps = &pgpath->pg->ps;
+
                if (ps->type->end_io)
                        ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
        }
index e61c45047c25a9ba2683c313fbc2151c9051b178..3ab584b686e0ca9c64223737384e44a55f0cff34 100644 (file)
@@ -490,9 +490,9 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
                 * If device is suspended, complete the bio.
                 */
                if (dm_noflush_suspending(ms->ti))
-                       bio->bi_error = DM_ENDIO_REQUEUE;
+                       bio->bi_status = BLK_STS_DM_REQUEUE;
                else
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
 
                bio_endio(bio);
                return;
@@ -626,7 +626,7 @@ static void write_callback(unsigned long error, void *context)
         * degrade the array.
         */
        if (bio_op(bio) == REQ_OP_DISCARD) {
-               bio->bi_error = -EOPNOTSUPP;
+               bio->bi_status = BLK_STS_NOTSUPP;
                bio_endio(bio);
                return;
        }
@@ -1207,14 +1207,14 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 
        r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
        if (r < 0 && r != -EWOULDBLOCK)
-               return r;
+               return DM_MAPIO_KILL;
 
        /*
         * If region is not in-sync queue the bio.
         */
        if (!r || (r == -EWOULDBLOCK)) {
                if (bio->bi_opf & REQ_RAHEAD)
-                       return -EWOULDBLOCK;
+                       return DM_MAPIO_KILL;
 
                queue_bio(ms, bio, rw);
                return DM_MAPIO_SUBMITTED;
@@ -1226,7 +1226,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
         */
        m = choose_mirror(ms, bio->bi_iter.bi_sector);
        if (unlikely(!m))
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        dm_bio_record(&bio_record->details, bio);
        bio_record->m = m;
@@ -1236,7 +1236,8 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
        return DM_MAPIO_REMAPPED;
 }
 
-static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int mirror_end_io(struct dm_target *ti, struct bio *bio,
+               blk_status_t *error)
 {
        int rw = bio_data_dir(bio);
        struct mirror_set *ms = (struct mirror_set *) ti->private;
@@ -1252,16 +1253,16 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                if (!(bio->bi_opf & REQ_PREFLUSH) &&
                    bio_op(bio) != REQ_OP_DISCARD)
                        dm_rh_dec(ms->rh, bio_record->write_region);
-               return error;
+               return DM_ENDIO_DONE;
        }
 
-       if (error == -EOPNOTSUPP)
-               return error;
+       if (*error == BLK_STS_NOTSUPP)
+               return DM_ENDIO_DONE;
 
-       if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
-               return error;
+       if (bio->bi_opf & REQ_RAHEAD)
+               return DM_ENDIO_DONE;
 
-       if (unlikely(error)) {
+       if (unlikely(*error)) {
                m = bio_record->m;
 
                DMERR("Mirror read failed from %s. Trying alternative device.",
@@ -1277,7 +1278,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                        bd = &bio_record->details;
 
                        dm_bio_restore(bd, bio);
-                       bio->bi_error = 0;
+                       bio->bi_status = 0;
 
                        queue_bio(ms, bio, rw);
                        return DM_ENDIO_INCOMPLETE;
@@ -1285,7 +1286,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
                DMERR("All replicated volumes dead, failing I/O");
        }
 
-       return error;
+       return DM_ENDIO_DONE;
 }
 
 static void mirror_presuspend(struct dm_target *ti)
index b639fa7246eebec191aa8a084333391ab0435dfa..fafd5326e5726c9843548ac3e7803997ef7008ec 100644 (file)
@@ -119,7 +119,7 @@ static void end_clone_bio(struct bio *clone)
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_iter.bi_size;
-       int error = clone->bi_error;
+       blk_status_t error = clone->bi_status;
 
        bio_put(clone);
 
@@ -158,7 +158,7 @@ static void end_clone_bio(struct bio *clone)
         * Do not use blk_end_request() here, because it may complete
         * the original request before the clone, and break the ordering.
         */
-       blk_update_request(tio->orig, 0, nr_bytes);
+       blk_update_request(tio->orig, BLK_STS_OK, nr_bytes);
 }
 
 static struct dm_rq_target_io *tio_from_request(struct request *rq)
@@ -216,7 +216,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
  * Must be called without clone's queue lock held,
  * see end_clone_request() for more details.
  */
-static void dm_end_request(struct request *clone, int error)
+static void dm_end_request(struct request *clone, blk_status_t error)
 {
        int rw = rq_data_dir(clone);
        struct dm_rq_target_io *tio = clone->end_io_data;
@@ -285,7 +285,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
        rq_completed(md, rw, false);
 }
 
-static void dm_done(struct request *clone, int error, bool mapped)
+static void dm_done(struct request *clone, blk_status_t error, bool mapped)
 {
        int r = DM_ENDIO_DONE;
        struct dm_rq_target_io *tio = clone->end_io_data;
@@ -298,7 +298,7 @@ static void dm_done(struct request *clone, int error, bool mapped)
                        r = rq_end_io(tio->ti, clone, error, &tio->info);
        }
 
-       if (unlikely(error == -EREMOTEIO)) {
+       if (unlikely(error == BLK_STS_TARGET)) {
                if (req_op(clone) == REQ_OP_WRITE_SAME &&
                    !clone->q->limits.max_write_same_sectors)
                        disable_write_same(tio->md);
@@ -358,7 +358,7 @@ static void dm_softirq_done(struct request *rq)
  * Complete the clone and the original request with the error status
  * through softirq context.
  */
-static void dm_complete_request(struct request *rq, int error)
+static void dm_complete_request(struct request *rq, blk_status_t error)
 {
        struct dm_rq_target_io *tio = tio_from_request(rq);
 
@@ -375,7 +375,7 @@ static void dm_complete_request(struct request *rq, int error)
  * Target's rq_end_io() function isn't called.
  * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
  */
-static void dm_kill_unmapped_request(struct request *rq, int error)
+static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
 {
        rq->rq_flags |= RQF_FAILED;
        dm_complete_request(rq, error);
@@ -384,7 +384,7 @@ static void dm_kill_unmapped_request(struct request *rq, int error)
 /*
  * Called with the clone's queue lock held (in the case of .request_fn)
  */
-static void end_clone_request(struct request *clone, int error)
+static void end_clone_request(struct request *clone, blk_status_t error)
 {
        struct dm_rq_target_io *tio = clone->end_io_data;
 
@@ -401,7 +401,7 @@ static void end_clone_request(struct request *clone, int error)
 
 static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
 {
-       int r;
+       blk_status_t r;
 
        if (blk_queue_io_stat(clone->q))
                clone->rq_flags |= RQF_IO_STAT;
@@ -506,7 +506,7 @@ static int map_request(struct dm_rq_target_io *tio)
                break;
        case DM_MAPIO_KILL:
                /* The target wants to complete the I/O */
-               dm_kill_unmapped_request(rq, -EIO);
+               dm_kill_unmapped_request(rq, BLK_STS_IOERR);
                break;
        default:
                DMWARN("unimplemented target map return value: %d", r);
@@ -727,7 +727,7 @@ static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
        return __dm_rq_init_rq(set->driver_data, rq);
 }
 
-static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
 {
        struct request *rq = bd->rq;
@@ -744,7 +744,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
        }
 
        if (ti->type->busy && ti->type->busy(ti))
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
 
        dm_start_request(md, rq);
 
@@ -762,10 +762,10 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                rq_end_stats(md, rq);
                rq_completed(md, rq_data_dir(rq), false);
                blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
        }
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 static const struct blk_mq_ops dm_mq_ops = {
index f0020d21b95fcd52f9843ee4045cb11c3253a3dc..9813922e4fe583f64c16bdf5b9c51ab856e7c2b7 100644 (file)
@@ -24,7 +24,7 @@ struct dm_rq_target_io {
        struct dm_target *ti;
        struct request *orig, *clone;
        struct kthread_work work;
-       int error;
+       blk_status_t error;
        union map_info info;
        struct dm_stats_aux stats_aux;
        unsigned long duration_jiffies;
index e152d9817c81a078a1aec8e9243b7b3dc74be5d8..1ba41048b438b2fb3c470380387f6c16fea9bc55 100644 (file)
@@ -1590,7 +1590,7 @@ static void full_bio_end_io(struct bio *bio)
 {
        void *callback_data = bio->bi_private;
 
-       dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0);
+       dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
 }
 
 static void start_full_bio(struct dm_snap_pending_exception *pe,
@@ -1690,7 +1690,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
        /* Full snapshots are not usable */
        /* To get here the table must be live so s->active is always set. */
        if (!s->valid)
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        /* FIXME: should only take write lock if we need
         * to copy an exception */
@@ -1698,7 +1698,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
        if (!s->valid || (unlikely(s->snapshot_overflowed) &&
            bio_data_dir(bio) == WRITE)) {
-               r = -EIO;
+               r = DM_MAPIO_KILL;
                goto out_unlock;
        }
 
@@ -1723,7 +1723,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 
                        if (!s->valid || s->snapshot_overflowed) {
                                free_pending_exception(pe);
-                               r = -EIO;
+                               r = DM_MAPIO_KILL;
                                goto out_unlock;
                        }
 
@@ -1741,7 +1741,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
                                        DMERR("Snapshot overflowed: Unable to allocate exception.");
                                } else
                                        __invalidate_snapshot(s, -ENOMEM);
-                               r = -EIO;
+                               r = DM_MAPIO_KILL;
                                goto out_unlock;
                        }
                }
@@ -1851,14 +1851,15 @@ out_unlock:
        return r;
 }
 
-static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
+               blk_status_t *error)
 {
        struct dm_snapshot *s = ti->private;
 
        if (is_bio_tracked(bio))
                stop_tracking_chunk(s, bio);
 
-       return 0;
+       return DM_ENDIO_DONE;
 }
 
 static void snapshot_merge_presuspend(struct dm_target *ti)
index 75152482f3ad068b71e17001129903c091a5628d..11621a0af8870e63533031c191edd2314c4fe6eb 100644 (file)
@@ -375,20 +375,21 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
        }
 }
 
-static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
+static int stripe_end_io(struct dm_target *ti, struct bio *bio,
+               blk_status_t *error)
 {
        unsigned i;
        char major_minor[16];
        struct stripe_c *sc = ti->private;
 
-       if (!error)
-               return 0; /* I/O complete */
+       if (!*error)
+               return DM_ENDIO_DONE; /* I/O complete */
 
-       if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
-               return error;
+       if (bio->bi_opf & REQ_RAHEAD)
+               return DM_ENDIO_DONE;
 
-       if (error == -EOPNOTSUPP)
-               return error;
+       if (*error == BLK_STS_NOTSUPP)
+               return DM_ENDIO_DONE;
 
        memset(major_minor, 0, sizeof(major_minor));
        sprintf(major_minor, "%d:%d",
@@ -409,7 +410,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
                                schedule_work(&sc->trigger_event);
                }
 
-       return error;
+       return DM_ENDIO_DONE;
 }
 
 static int stripe_iterate_devices(struct dm_target *ti,
index b242b750542fd8465e21b28cfcb4a0e4b5b3abc1..c0d7e60820c45d5c3bcfcf1ebaa0bfed1e530448 100644 (file)
@@ -128,7 +128,7 @@ static void io_err_dtr(struct dm_target *tt)
 
 static int io_err_map(struct dm_target *tt, struct bio *bio)
 {
-       return -EIO;
+       return DM_MAPIO_KILL;
 }
 
 static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
index 17ad50daed08ef5022b8648ef2e8701208c85a9d..3490b300cbff1fa0d4d290c3abc747d9e8d68672 100644 (file)
@@ -383,8 +383,8 @@ static void end_discard(struct discard_op *op, int r)
         * Even if r is set, there could be sub discards in flight that we
         * need to wait for.
         */
-       if (r && !op->parent_bio->bi_error)
-               op->parent_bio->bi_error = r;
+       if (r && !op->parent_bio->bi_status)
+               op->parent_bio->bi_status = errno_to_blk_status(r);
        bio_endio(op->parent_bio);
 }
 
@@ -450,22 +450,20 @@ static void cell_release_no_holder(struct pool *pool,
 }
 
 static void cell_error_with_code(struct pool *pool,
-                                struct dm_bio_prison_cell *cell, int error_code)
+               struct dm_bio_prison_cell *cell, blk_status_t error_code)
 {
        dm_cell_error(pool->prison, cell, error_code);
        dm_bio_prison_free_cell(pool->prison, cell);
 }
 
-static int get_pool_io_error_code(struct pool *pool)
+static blk_status_t get_pool_io_error_code(struct pool *pool)
 {
-       return pool->out_of_data_space ? -ENOSPC : -EIO;
+       return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;
 }
 
 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
-       int error = get_pool_io_error_code(pool);
-
-       cell_error_with_code(pool, cell, error);
+       cell_error_with_code(pool, cell, get_pool_io_error_code(pool));
 }
 
 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
@@ -475,7 +473,7 @@ static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
 
 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
 {
-       cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
+       cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);
 }
 
 /*----------------------------------------------------------------*/
@@ -555,17 +553,18 @@ static void __merge_bio_list(struct bio_list *bios, struct bio_list *master)
        bio_list_init(master);
 }
 
-static void error_bio_list(struct bio_list *bios, int error)
+static void error_bio_list(struct bio_list *bios, blk_status_t error)
 {
        struct bio *bio;
 
        while ((bio = bio_list_pop(bios))) {
-               bio->bi_error = error;
+               bio->bi_status = error;
                bio_endio(bio);
        }
 }
 
-static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, int error)
+static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master,
+               blk_status_t error)
 {
        struct bio_list bios;
        unsigned long flags;
@@ -608,11 +607,11 @@ static void requeue_io(struct thin_c *tc)
        __merge_bio_list(&bios, &tc->retry_on_resume_list);
        spin_unlock_irqrestore(&tc->lock, flags);
 
-       error_bio_list(&bios, DM_ENDIO_REQUEUE);
+       error_bio_list(&bios, BLK_STS_DM_REQUEUE);
        requeue_deferred_cells(tc);
 }
 
-static void error_retry_list_with_code(struct pool *pool, int error)
+static void error_retry_list_with_code(struct pool *pool, blk_status_t error)
 {
        struct thin_c *tc;
 
@@ -624,9 +623,7 @@ static void error_retry_list_with_code(struct pool *pool, int error)
 
 static void error_retry_list(struct pool *pool)
 {
-       int error = get_pool_io_error_code(pool);
-
-       error_retry_list_with_code(pool, error);
+       error_retry_list_with_code(pool, get_pool_io_error_code(pool));
 }
 
 /*
@@ -774,7 +771,7 @@ struct dm_thin_new_mapping {
         */
        atomic_t prepare_actions;
 
-       int err;
+       blk_status_t status;
        struct thin_c *tc;
        dm_block_t virt_begin, virt_end;
        dm_block_t data_block;
@@ -814,7 +811,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 {
        struct dm_thin_new_mapping *m = context;
 
-       m->err = read_err || write_err ? -EIO : 0;
+       m->status = read_err || write_err ? BLK_STS_IOERR : 0;
        complete_mapping_preparation(m);
 }
 
@@ -825,7 +822,7 @@ static void overwrite_endio(struct bio *bio)
 
        bio->bi_end_io = m->saved_bi_end_io;
 
-       m->err = bio->bi_error;
+       m->status = bio->bi_status;
        complete_mapping_preparation(m);
 }
 
@@ -925,7 +922,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
        struct bio *bio = m->bio;
        int r;
 
-       if (m->err) {
+       if (m->status) {
                cell_error(pool, m->cell);
                goto out;
        }
@@ -1495,7 +1492,7 @@ static void retry_on_resume(struct bio *bio)
        spin_unlock_irqrestore(&tc->lock, flags);
 }
 
-static int should_error_unserviceable_bio(struct pool *pool)
+static blk_status_t should_error_unserviceable_bio(struct pool *pool)
 {
        enum pool_mode m = get_pool_mode(pool);
 
@@ -1503,27 +1500,27 @@ static int should_error_unserviceable_bio(struct pool *pool)
        case PM_WRITE:
                /* Shouldn't get here */
                DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
-               return -EIO;
+               return BLK_STS_IOERR;
 
        case PM_OUT_OF_DATA_SPACE:
-               return pool->pf.error_if_no_space ? -ENOSPC : 0;
+               return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
 
        case PM_READ_ONLY:
        case PM_FAIL:
-               return -EIO;
+               return BLK_STS_IOERR;
        default:
                /* Shouldn't get here */
                DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
-               return -EIO;
+               return BLK_STS_IOERR;
        }
 }
 
 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
 {
-       int error = should_error_unserviceable_bio(pool);
+       blk_status_t error = should_error_unserviceable_bio(pool);
 
        if (error) {
-               bio->bi_error = error;
+               bio->bi_status = error;
                bio_endio(bio);
        } else
                retry_on_resume(bio);
@@ -1533,7 +1530,7 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
 {
        struct bio *bio;
        struct bio_list bios;
-       int error;
+       blk_status_t error;
 
        error = should_error_unserviceable_bio(pool);
        if (error) {
@@ -2071,7 +2068,8 @@ static void process_thin_deferred_bios(struct thin_c *tc)
        unsigned count = 0;
 
        if (tc->requeue_mode) {
-               error_thin_bio_list(tc, &tc->deferred_bio_list, DM_ENDIO_REQUEUE);
+               error_thin_bio_list(tc, &tc->deferred_bio_list,
+                               BLK_STS_DM_REQUEUE);
                return;
        }
 
@@ -2322,7 +2320,7 @@ static void do_no_space_timeout(struct work_struct *ws)
        if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
                pool->pf.error_if_no_space = true;
                notify_of_pool_mode_change_to_oods(pool);
-               error_retry_list_with_code(pool, -ENOSPC);
+               error_retry_list_with_code(pool, BLK_STS_NOSPC);
        }
 }
 
@@ -2624,7 +2622,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
        thin_hook_bio(tc, bio);
 
        if (tc->requeue_mode) {
-               bio->bi_error = DM_ENDIO_REQUEUE;
+               bio->bi_status = BLK_STS_DM_REQUEUE;
                bio_endio(bio);
                return DM_MAPIO_SUBMITTED;
        }
@@ -4177,7 +4175,8 @@ static int thin_map(struct dm_target *ti, struct bio *bio)
        return thin_bio_map(ti, bio);
 }
 
-static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
+static int thin_endio(struct dm_target *ti, struct bio *bio,
+               blk_status_t *err)
 {
        unsigned long flags;
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -4212,7 +4211,7 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
        if (h->cell)
                cell_defer_no_holder(h->tc, h->cell);
 
-       return 0;
+       return DM_ENDIO_DONE;
 }
 
 static void thin_presuspend(struct dm_target *ti)
index 1ec9b2c51c076d99ba6003f90eae608d9c9e35af..b46705ebf01f6d55cfeb0cff8327d767d4fc7572 100644 (file)
@@ -538,13 +538,13 @@ static int verity_verify_io(struct dm_verity_io *io)
 /*
  * End one "io" structure with a given error.
  */
-static void verity_finish_io(struct dm_verity_io *io, int error)
+static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
 {
        struct dm_verity *v = io->v;
        struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 
        bio->bi_end_io = io->orig_bi_end_io;
-       bio->bi_error = error;
+       bio->bi_status = status;
 
        verity_fec_finish_io(io);
 
@@ -555,15 +555,15 @@ static void verity_work(struct work_struct *w)
 {
        struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
 
-       verity_finish_io(io, verity_verify_io(io));
+       verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
 }
 
 static void verity_end_io(struct bio *bio)
 {
        struct dm_verity_io *io = bio->bi_private;
 
-       if (bio->bi_error && !verity_fec_is_enabled(io->v)) {
-               verity_finish_io(io, bio->bi_error);
+       if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
+               verity_finish_io(io, bio->bi_status);
                return;
        }
 
@@ -643,17 +643,17 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
        if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
            ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
                DMERR_LIMIT("unaligned io");
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        if (bio_end_sector(bio) >>
            (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
                DMERR_LIMIT("io out of range");
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        if (bio_data_dir(bio) == WRITE)
-               return -EIO;
+               return DM_MAPIO_KILL;
 
        io = dm_per_bio_data(bio, ti->per_io_data_size);
        io->v = v;
index b616f11d84735a978b13e2a6392921bafc66b58b..b65ca8dcfbdc7f51ab4004ee3b8cb43a36dc8f97 100644 (file)
@@ -39,7 +39,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
        case REQ_OP_READ:
                if (bio->bi_opf & REQ_RAHEAD) {
                        /* readahead of null bytes only wastes buffer cache */
-                       return -EIO;
+                       return DM_MAPIO_KILL;
                }
                zero_fill_bio(bio);
                break;
@@ -47,7 +47,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
                /* writes get silently dropped */
                break;
        default:
-               return -EIO;
+               return DM_MAPIO_KILL;
        }
 
        bio_endio(bio);
index 37ccd73c79ecf2eeb4f33b5bc597f88ca5750d4b..c4b74f7398ac044eb880d623d01c0c7d1219c857 100644 (file)
@@ -63,7 +63,7 @@ static struct workqueue_struct *deferred_remove_workqueue;
  */
 struct dm_io {
        struct mapped_device *md;
-       int error;
+       blk_status_t status;
        atomic_t io_count;
        struct bio *bio;
        unsigned long start_time;
@@ -768,23 +768,24 @@ static int __noflush_suspending(struct mapped_device *md)
  * Decrements the number of outstanding ios that a bio has been
  * cloned into, completing the original io if necc.
  */
-static void dec_pending(struct dm_io *io, int error)
+static void dec_pending(struct dm_io *io, blk_status_t error)
 {
        unsigned long flags;
-       int io_error;
+       blk_status_t io_error;
        struct bio *bio;
        struct mapped_device *md = io->md;
 
        /* Push-back supersedes any I/O errors */
        if (unlikely(error)) {
                spin_lock_irqsave(&io->endio_lock, flags);
-               if (!(io->error > 0 && __noflush_suspending(md)))
-                       io->error = error;
+               if (!(io->status == BLK_STS_DM_REQUEUE &&
+                               __noflush_suspending(md)))
+                       io->status = error;
                spin_unlock_irqrestore(&io->endio_lock, flags);
        }
 
        if (atomic_dec_and_test(&io->io_count)) {
-               if (io->error == DM_ENDIO_REQUEUE) {
+               if (io->status == BLK_STS_DM_REQUEUE) {
                        /*
                         * Target requested pushing back the I/O.
                         */
@@ -793,16 +794,16 @@ static void dec_pending(struct dm_io *io, int error)
                                bio_list_add_head(&md->deferred, io->bio);
                        else
                                /* noflush suspend was interrupted. */
-                               io->error = -EIO;
+                               io->status = BLK_STS_IOERR;
                        spin_unlock_irqrestore(&md->deferred_lock, flags);
                }
 
-               io_error = io->error;
+               io_error = io->status;
                bio = io->bio;
                end_io_acct(io);
                free_io(md, io);
 
-               if (io_error == DM_ENDIO_REQUEUE)
+               if (io_error == BLK_STS_DM_REQUEUE)
                        return;
 
                if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
@@ -814,7 +815,7 @@ static void dec_pending(struct dm_io *io, int error)
                        queue_io(md, bio);
                } else {
                        /* done with normal IO or empty flush */
-                       bio->bi_error = io_error;
+                       bio->bi_status = io_error;
                        bio_endio(bio);
                }
        }
@@ -838,31 +839,13 @@ void disable_write_zeroes(struct mapped_device *md)
 
 static void clone_endio(struct bio *bio)
 {
-       int error = bio->bi_error;
-       int r = error;
+       blk_status_t error = bio->bi_status;
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
        struct dm_io *io = tio->io;
        struct mapped_device *md = tio->io->md;
        dm_endio_fn endio = tio->ti->type->end_io;
 
-       if (endio) {
-               r = endio(tio->ti, bio, error);
-               if (r < 0 || r == DM_ENDIO_REQUEUE)
-                       /*
-                        * error and requeue request are handled
-                        * in dec_pending().
-                        */
-                       error = r;
-               else if (r == DM_ENDIO_INCOMPLETE)
-                       /* The target will handle the io */
-                       return;
-               else if (r) {
-                       DMWARN("unimplemented target endio return value: %d", r);
-                       BUG();
-               }
-       }
-
-       if (unlikely(r == -EREMOTEIO)) {
+       if (unlikely(error == BLK_STS_TARGET)) {
                if (bio_op(bio) == REQ_OP_WRITE_SAME &&
                    !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors)
                        disable_write_same(md);
@@ -871,6 +854,23 @@ static void clone_endio(struct bio *bio)
                        disable_write_zeroes(md);
        }
 
+       if (endio) {
+               int r = endio(tio->ti, bio, &error);
+               switch (r) {
+               case DM_ENDIO_REQUEUE:
+                       error = BLK_STS_DM_REQUEUE;
+                       /*FALLTHRU*/
+               case DM_ENDIO_DONE:
+                       break;
+               case DM_ENDIO_INCOMPLETE:
+                       /* The target will handle the io */
+                       return;
+               default:
+                       DMWARN("unimplemented target endio return value: %d", r);
+                       BUG();
+               }
+       }
+
        free_tio(tio);
        dec_pending(io, error);
 }
@@ -1084,18 +1084,24 @@ static void __map_bio(struct dm_target_io *tio)
        r = ti->type->map(ti, clone);
        dm_offload_end(&o);
 
-       if (r == DM_MAPIO_REMAPPED) {
+       switch (r) {
+       case DM_MAPIO_SUBMITTED:
+               break;
+       case DM_MAPIO_REMAPPED:
                /* the bio has been remapped so dispatch it */
-
                trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
                                      tio->io->bio->bi_bdev->bd_dev, sector);
-
                generic_make_request(clone);
-       } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
-               /* error the io and bail out, or requeue it if needed */
-               dec_pending(tio->io, r);
+               break;
+       case DM_MAPIO_KILL:
+               dec_pending(tio->io, BLK_STS_IOERR);
+               free_tio(tio);
+               break;
+       case DM_MAPIO_REQUEUE:
+               dec_pending(tio->io, BLK_STS_DM_REQUEUE);
                free_tio(tio);
-       } else if (r != DM_MAPIO_SUBMITTED) {
+               break;
+       default:
                DMWARN("unimplemented target map return value: %d", r);
                BUG();
        }
@@ -1360,7 +1366,7 @@ static void __split_and_process_bio(struct mapped_device *md,
        ci.map = map;
        ci.md = md;
        ci.io = alloc_io(md);
-       ci.io->error = 0;
+       ci.io->status = 0;
        atomic_set(&ci.io->io_count, 1);
        ci.io->bio = bio;
        ci.io->md = md;
index a50302e42cb5fd34fe4ee5c446eb75b5ddcd704b..6d493b54d56c4d68f74c46e0c7598694d00715f1 100644 (file)
@@ -273,7 +273,7 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
        }
        if (mddev->ro == 1 && unlikely(rw == WRITE)) {
                if (bio_sectors(bio) != 0)
-                       bio->bi_error = -EROFS;
+                       bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return BLK_QC_T_NONE;
        }
@@ -719,8 +719,8 @@ static void super_written(struct bio *bio)
        struct md_rdev *rdev = bio->bi_private;
        struct mddev *mddev = rdev->mddev;
 
-       if (bio->bi_error) {
-               pr_err("md: super_written gets error=%d\n", bio->bi_error);
+       if (bio->bi_status) {
+               pr_err("md: super_written gets error=%d\n", bio->bi_status);
                md_error(mddev, rdev);
                if (!test_bit(Faulty, &rdev->flags)
                    && (bio->bi_opf & MD_FAILFAST)) {
@@ -801,7 +801,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
 
        submit_bio_wait(bio);
 
-       ret = !bio->bi_error;
+       ret = !bio->bi_status;
        bio_put(bio);
        return ret;
 }
@@ -5174,6 +5174,18 @@ static void mddev_delayed_delete(struct work_struct *ws)
 
 static void no_op(struct percpu_ref *r) {}
 
+int mddev_init_writes_pending(struct mddev *mddev)
+{
+       if (mddev->writes_pending.percpu_count_ptr)
+               return 0;
+       if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
+               return -ENOMEM;
+       /* We want to start with the refcount at zero */
+       percpu_ref_put(&mddev->writes_pending);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mddev_init_writes_pending);
+
 static int md_alloc(dev_t dev, char *name)
 {
        /*
@@ -5239,10 +5251,6 @@ static int md_alloc(dev_t dev, char *name)
        blk_queue_make_request(mddev->queue, md_make_request);
        blk_set_stacking_limits(&mddev->queue->limits);
 
-       if (percpu_ref_init(&mddev->writes_pending, no_op, 0, GFP_KERNEL) < 0)
-               goto abort;
-       /* We want to start with the refcount at zero */
-       percpu_ref_put(&mddev->writes_pending);
        disk = alloc_disk(1 << shift);
        if (!disk) {
                blk_cleanup_queue(mddev->queue);
index 11f15146ce5177de0468c706a5f82a037b42c132..0fa1de42c42bcb328276a42fc56809d53217a285 100644 (file)
@@ -648,6 +648,7 @@ extern void md_unregister_thread(struct md_thread **threadp);
 extern void md_wakeup_thread(struct md_thread *thread);
 extern void md_check_recovery(struct mddev *mddev);
 extern void md_reap_sync_thread(struct mddev *mddev);
+extern int mddev_init_writes_pending(struct mddev *mddev);
 extern void md_write_start(struct mddev *mddev, struct bio *bi);
 extern void md_write_inc(struct mddev *mddev, struct bio *bi);
 extern void md_write_end(struct mddev *mddev);
index e95d521d93e9b912caa561121593b2b7e54dd3a6..68d036e640418a6647db4d552edf2ddd3d1a5f80 100644 (file)
@@ -73,12 +73,12 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
  * operation and are ready to return a success/failure code to the buffer
  * cache layer.
  */
-static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
+static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status)
 {
        struct bio *bio = mp_bh->master_bio;
        struct mpconf *conf = mp_bh->mddev->private;
 
-       bio->bi_error = err;
+       bio->bi_status = status;
        bio_endio(bio);
        mempool_free(mp_bh, conf->pool);
 }
@@ -89,7 +89,7 @@ static void multipath_end_request(struct bio *bio)
        struct mpconf *conf = mp_bh->mddev->private;
        struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;
 
-       if (!bio->bi_error)
+       if (!bio->bi_status)
                multipath_end_bh_io(mp_bh, 0);
        else if (!(bio->bi_opf & REQ_RAHEAD)) {
                /*
@@ -102,7 +102,7 @@ static void multipath_end_request(struct bio *bio)
                        (unsigned long long)bio->bi_iter.bi_sector);
                multipath_reschedule_retry(mp_bh);
        } else
-               multipath_end_bh_io(mp_bh, bio->bi_error);
+               multipath_end_bh_io(mp_bh, bio->bi_status);
        rdev_dec_pending(rdev, conf->mddev);
 }
 
@@ -347,7 +347,7 @@ static void multipathd(struct md_thread *thread)
                        pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
                               bdevname(bio->bi_bdev,b),
                               (unsigned long long)bio->bi_iter.bi_sector);
-                       multipath_end_bh_io(mp_bh, -EIO);
+                       multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
                } else {
                        pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
                               bdevname(bio->bi_bdev,b),
index af5056d568788a53f6c3a2456a353cba3bbfe35a..a1a3cf0293df23fd102faa753a09de76b32893d3 100644 (file)
@@ -277,7 +277,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
        struct r1conf *conf = r1_bio->mddev->private;
 
        if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
 
        bio_endio(bio);
        /*
@@ -335,7 +335,7 @@ static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
 
 static void raid1_end_read_request(struct bio *bio)
 {
-       int uptodate = !bio->bi_error;
+       int uptodate = !bio->bi_status;
        struct r1bio *r1_bio = bio->bi_private;
        struct r1conf *conf = r1_bio->mddev->private;
        struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
@@ -426,12 +426,12 @@ static void raid1_end_write_request(struct bio *bio)
        struct md_rdev *rdev = conf->mirrors[mirror].rdev;
        bool discard_error;
 
-       discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
+       discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 
        /*
         * 'one mirror IO has finished' event handler:
         */
-       if (bio->bi_error && !discard_error) {
+       if (bio->bi_status && !discard_error) {
                set_bit(WriteErrorSeen, &rdev->flags);
                if (!test_and_set_bit(WantReplacement, &rdev->flags))
                        set_bit(MD_RECOVERY_NEEDED, &
@@ -802,7 +802,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
                bio->bi_next = NULL;
                bio->bi_bdev = rdev->bdev;
                if (test_bit(Faulty, &rdev->flags)) {
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
                                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1856,7 +1856,7 @@ static void end_sync_read(struct bio *bio)
         * or re-read if the read failed.
         * We don't do much here, just schedule handling by raid1d
         */
-       if (!bio->bi_error)
+       if (!bio->bi_status)
                set_bit(R1BIO_Uptodate, &r1_bio->state);
 
        if (atomic_dec_and_test(&r1_bio->remaining))
@@ -1865,7 +1865,7 @@ static void end_sync_read(struct bio *bio)
 
 static void end_sync_write(struct bio *bio)
 {
-       int uptodate = !bio->bi_error;
+       int uptodate = !bio->bi_status;
        struct r1bio *r1_bio = get_resync_r1bio(bio);
        struct mddev *mddev = r1_bio->mddev;
        struct r1conf *conf = mddev->private;
@@ -2058,7 +2058,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
                idx ++;
        }
        set_bit(R1BIO_Uptodate, &r1_bio->state);
-       bio->bi_error = 0;
+       bio->bi_status = 0;
        return 1;
 }
 
@@ -2082,16 +2082,16 @@ static void process_checks(struct r1bio *r1_bio)
        for (i = 0; i < conf->raid_disks * 2; i++) {
                int j;
                int size;
-               int error;
+               blk_status_t status;
                struct bio_vec *bi;
                struct bio *b = r1_bio->bios[i];
                struct resync_pages *rp = get_resync_pages(b);
                if (b->bi_end_io != end_sync_read)
                        continue;
                /* fixup the bio for reuse, but preserve errno */
-               error = b->bi_error;
+               status = b->bi_status;
                bio_reset(b);
-               b->bi_error = error;
+               b->bi_status = status;
                b->bi_vcnt = vcnt;
                b->bi_iter.bi_size = r1_bio->sectors << 9;
                b->bi_iter.bi_sector = r1_bio->sector +
@@ -2113,7 +2113,7 @@ static void process_checks(struct r1bio *r1_bio)
        }
        for (primary = 0; primary < conf->raid_disks * 2; primary++)
                if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
-                   !r1_bio->bios[primary]->bi_error) {
+                   !r1_bio->bios[primary]->bi_status) {
                        r1_bio->bios[primary]->bi_end_io = NULL;
                        rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
                        break;
@@ -2123,7 +2123,7 @@ static void process_checks(struct r1bio *r1_bio)
                int j;
                struct bio *pbio = r1_bio->bios[primary];
                struct bio *sbio = r1_bio->bios[i];
-               int error = sbio->bi_error;
+               blk_status_t status = sbio->bi_status;
                struct page **ppages = get_resync_pages(pbio)->pages;
                struct page **spages = get_resync_pages(sbio)->pages;
                struct bio_vec *bi;
@@ -2132,12 +2132,12 @@ static void process_checks(struct r1bio *r1_bio)
                if (sbio->bi_end_io != end_sync_read)
                        continue;
                /* Now we can 'fixup' the error value */
-               sbio->bi_error = 0;
+               sbio->bi_status = 0;
 
                bio_for_each_segment_all(bi, sbio, j)
                        page_len[j] = bi->bv_len;
 
-               if (!error) {
+               if (!status) {
                        for (j = vcnt; j-- ; ) {
                                if (memcmp(page_address(ppages[j]),
                                           page_address(spages[j]),
@@ -2149,7 +2149,7 @@ static void process_checks(struct r1bio *r1_bio)
                if (j >= 0)
                        atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
                if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
-                             && !error)) {
+                             && !status)) {
                        /* No need to write to this device. */
                        sbio->bi_end_io = NULL;
                        rdev_dec_pending(conf->mirrors[i].rdev, mddev);
@@ -2400,11 +2400,11 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
                struct bio *bio = r1_bio->bios[m];
                if (bio->bi_end_io == NULL)
                        continue;
-               if (!bio->bi_error &&
+               if (!bio->bi_status &&
                    test_bit(R1BIO_MadeGood, &r1_bio->state)) {
                        rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
                }
-               if (bio->bi_error &&
+               if (bio->bi_status &&
                    test_bit(R1BIO_WriteError, &r1_bio->state)) {
                        if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
                                md_error(conf->mddev, rdev);
@@ -3063,6 +3063,8 @@ static int raid1_run(struct mddev *mddev)
                        mdname(mddev));
                return -EIO;
        }
+       if (mddev_init_writes_pending(mddev) < 0)
+               return -ENOMEM;
        /*
         * copy the already verified devices into our private RAID1
         * bookkeeping area. [whatever we allocate in run(),
index 4343d7ff9916bee9a9a399572c2bd3313723fa3a..3178273a7253883fbe14ec7f97045b604c8a73db 100644 (file)
@@ -336,7 +336,7 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
        struct r10conf *conf = r10_bio->mddev->private;
 
        if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
 
        bio_endio(bio);
        /*
@@ -389,7 +389,7 @@ static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
 
 static void raid10_end_read_request(struct bio *bio)
 {
-       int uptodate = !bio->bi_error;
+       int uptodate = !bio->bi_status;
        struct r10bio *r10_bio = bio->bi_private;
        int slot, dev;
        struct md_rdev *rdev;
@@ -477,7 +477,7 @@ static void raid10_end_write_request(struct bio *bio)
        struct bio *to_put = NULL;
        bool discard_error;
 
-       discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
+       discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
 
        dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
 
@@ -491,7 +491,7 @@ static void raid10_end_write_request(struct bio *bio)
        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
-       if (bio->bi_error && !discard_error) {
+       if (bio->bi_status && !discard_error) {
                if (repl)
                        /* Never record new bad blocks to replacement,
                         * just fail it.
@@ -913,7 +913,7 @@ static void flush_pending_writes(struct r10conf *conf)
                        bio->bi_next = NULL;
                        bio->bi_bdev = rdev->bdev;
                        if (test_bit(Faulty, &rdev->flags)) {
-                               bio->bi_error = -EIO;
+                               bio->bi_status = BLK_STS_IOERR;
                                bio_endio(bio);
                        } else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
                                            !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1098,7 +1098,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
                bio->bi_next = NULL;
                bio->bi_bdev = rdev->bdev;
                if (test_bit(Faulty, &rdev->flags)) {
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                } else if (unlikely((bio_op(bio) ==  REQ_OP_DISCARD) &&
                                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
@@ -1888,7 +1888,7 @@ static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
 {
        struct r10conf *conf = r10_bio->mddev->private;
 
-       if (!bio->bi_error)
+       if (!bio->bi_status)
                set_bit(R10BIO_Uptodate, &r10_bio->state);
        else
                /* The write handler will notice the lack of
@@ -1972,7 +1972,7 @@ static void end_sync_write(struct bio *bio)
        else
                rdev = conf->mirrors[d].rdev;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                if (repl)
                        md_error(mddev, rdev);
                else {
@@ -2021,7 +2021,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 
        /* find the first device with a block */
        for (i=0; i<conf->copies; i++)
-               if (!r10_bio->devs[i].bio->bi_error)
+               if (!r10_bio->devs[i].bio->bi_status)
                        break;
 
        if (i == conf->copies)
@@ -2050,7 +2050,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                tpages = get_resync_pages(tbio)->pages;
                d = r10_bio->devs[i].devnum;
                rdev = conf->mirrors[d].rdev;
-               if (!r10_bio->devs[i].bio->bi_error) {
+               if (!r10_bio->devs[i].bio->bi_status) {
                        /* We know that the bi_io_vec layout is the same for
                         * both 'first' and 'i', so we just compare them.
                         * All vec entries are PAGE_SIZE;
@@ -2633,7 +2633,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
                        rdev = conf->mirrors[dev].rdev;
                        if (r10_bio->devs[m].bio == NULL)
                                continue;
-                       if (!r10_bio->devs[m].bio->bi_error) {
+                       if (!r10_bio->devs[m].bio->bi_status) {
                                rdev_clear_badblocks(
                                        rdev,
                                        r10_bio->devs[m].addr,
@@ -2649,7 +2649,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
                        if (r10_bio->devs[m].repl_bio == NULL)
                                continue;
 
-                       if (!r10_bio->devs[m].repl_bio->bi_error) {
+                       if (!r10_bio->devs[m].repl_bio->bi_status) {
                                rdev_clear_badblocks(
                                        rdev,
                                        r10_bio->devs[m].addr,
@@ -2675,7 +2675,7 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
                                        r10_bio->devs[m].addr,
                                        r10_bio->sectors, 0);
                                rdev_dec_pending(rdev, conf->mddev);
-                       } else if (bio != NULL && bio->bi_error) {
+                       } else if (bio != NULL && bio->bi_status) {
                                fail = true;
                                if (!narrow_write_error(r10_bio, m)) {
                                        md_error(conf->mddev, rdev);
@@ -3267,7 +3267,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                                r10_bio->devs[i].repl_bio->bi_end_io = NULL;
 
                        bio = r10_bio->devs[i].bio;
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
                        rcu_read_lock();
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
                        if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
@@ -3309,7 +3309,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 
                        /* Need to set up for writing to the replacement */
                        bio = r10_bio->devs[i].repl_bio;
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
 
                        sector = r10_bio->devs[i].addr;
                        bio->bi_next = biolist;
@@ -3375,7 +3375,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 
                if (bio->bi_end_io == end_sync_read) {
                        md_sync_acct(bio->bi_bdev, nr_sectors);
-                       bio->bi_error = 0;
+                       bio->bi_status = 0;
                        generic_make_request(bio);
                }
        }
@@ -3611,6 +3611,9 @@ static int raid10_run(struct mddev *mddev)
        int first = 1;
        bool discard_supported = false;
 
+       if (mddev_init_writes_pending(mddev) < 0)
+               return -ENOMEM;
+
        if (mddev->private == NULL) {
                conf = setup_conf(mddev);
                if (IS_ERR(conf))
@@ -4394,7 +4397,7 @@ read_more:
        read_bio->bi_end_io = end_reshape_read;
        bio_set_op_attrs(read_bio, REQ_OP_READ, 0);
        read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
-       read_bio->bi_error = 0;
+       read_bio->bi_status = 0;
        read_bio->bi_vcnt = 0;
        read_bio->bi_iter.bi_size = 0;
        r10_bio->master_bio = read_bio;
@@ -4638,7 +4641,7 @@ static void end_reshape_write(struct bio *bio)
                rdev = conf->mirrors[d].rdev;
        }
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                /* FIXME should record badblock */
                md_error(mddev, rdev);
        }
index 0a7af8b0a80a031a99a7af1742e2d64e6df0d106..3746c9c27e546ea15d992b6742d753b414a8c25b 100644 (file)
@@ -572,7 +572,7 @@ static void r5l_log_endio(struct bio *bio)
        struct r5l_log *log = io->log;
        unsigned long flags;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                md_error(log->rdev->mddev, log->rdev);
 
        bio_put(bio);
@@ -1247,7 +1247,7 @@ static void r5l_log_flush_endio(struct bio *bio)
        unsigned long flags;
        struct r5l_io_unit *io;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                md_error(log->rdev->mddev, log->rdev);
 
        spin_lock_irqsave(&log->io_list_lock, flags);
index ccce92e68d7fa5d8258bb7f2ca2bfa1bcd545709..e709ada0bf092290183372cedab3a28a5884a65d 100644 (file)
@@ -397,7 +397,7 @@ static void ppl_log_endio(struct bio *bio)
 
        pr_debug("%s: seq: %llu\n", __func__, io->seq);
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                md_error(ppl_conf->mddev, log->rdev);
 
        list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
index 722064689e822f3b876411f076921e244abbec2f..7171bfd48223dae8d3b5348ec775c86906b9c564 100644 (file)
@@ -2476,7 +2476,7 @@ static void raid5_end_read_request(struct bio * bi)
 
        pr_debug("end_read_request %llu/%d, count: %d, error %d.\n",
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
-               bi->bi_error);
+               bi->bi_status);
        if (i == disks) {
                bio_reset(bi);
                BUG();
@@ -2496,7 +2496,7 @@ static void raid5_end_read_request(struct bio * bi)
                s = sh->sector + rdev->new_data_offset;
        else
                s = sh->sector + rdev->data_offset;
-       if (!bi->bi_error) {
+       if (!bi->bi_status) {
                set_bit(R5_UPTODATE, &sh->dev[i].flags);
                if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
                        /* Note that this cannot happen on a
@@ -2613,7 +2613,7 @@ static void raid5_end_write_request(struct bio *bi)
        }
        pr_debug("end_write_request %llu/%d, count %d, error: %d.\n",
                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
-               bi->bi_error);
+               bi->bi_status);
        if (i == disks) {
                bio_reset(bi);
                BUG();
@@ -2621,14 +2621,14 @@ static void raid5_end_write_request(struct bio *bi)
        }
 
        if (replacement) {
-               if (bi->bi_error)
+               if (bi->bi_status)
                        md_error(conf->mddev, rdev);
                else if (is_badblock(rdev, sh->sector,
                                     STRIPE_SECTORS,
                                     &first_bad, &bad_sectors))
                        set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
        } else {
-               if (bi->bi_error) {
+               if (bi->bi_status) {
                        set_bit(STRIPE_DEGRADED, &sh->state);
                        set_bit(WriteErrorSeen, &rdev->flags);
                        set_bit(R5_WriteError, &sh->dev[i].flags);
@@ -2649,7 +2649,7 @@ static void raid5_end_write_request(struct bio *bi)
        }
        rdev_dec_pending(rdev, conf->mddev);
 
-       if (sh->batch_head && bi->bi_error && !replacement)
+       if (sh->batch_head && bi->bi_status && !replacement)
                set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
 
        bio_reset(bi);
@@ -3381,7 +3381,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                        sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 
-                       bi->bi_error = -EIO;
+                       bi->bi_status = BLK_STS_IOERR;
                        md_write_end(conf->mddev);
                        bio_endio(bi);
                        bi = nextbi;
@@ -3403,7 +3403,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                       sh->dev[i].sector + STRIPE_SECTORS) {
                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 
-                       bi->bi_error = -EIO;
+                       bi->bi_status = BLK_STS_IOERR;
                        md_write_end(conf->mddev);
                        bio_endio(bi);
                        bi = bi2;
@@ -3429,7 +3429,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
                                struct bio *nextbi =
                                        r5_next_bio(bi, sh->dev[i].sector);
 
-                               bi->bi_error = -EIO;
+                               bi->bi_status = BLK_STS_IOERR;
                                bio_endio(bi);
                                bi = nextbi;
                        }
@@ -5154,7 +5154,7 @@ static void raid5_align_endio(struct bio *bi)
        struct mddev *mddev;
        struct r5conf *conf;
        struct md_rdev *rdev;
-       int error = bi->bi_error;
+       blk_status_t error = bi->bi_status;
 
        bio_put(bi);
 
@@ -5731,7 +5731,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
                        release_stripe_plug(mddev, sh);
                } else {
                        /* cannot get stripe for read-ahead, just give-up */
-                       bi->bi_error = -EIO;
+                       bi->bi_status = BLK_STS_IOERR;
                        break;
                }
        }
@@ -7118,6 +7118,9 @@ static int raid5_run(struct mddev *mddev)
        long long min_offset_diff = 0;
        int first = 1;
 
+       if (mddev_init_writes_pending(mddev) < 0)
+               return -ENOMEM;
+
        if (mddev->recovery_cp != MaxSector)
                pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
                          mdname(mddev));
index b72edd27f880fbe99641cd36b44005c6ad9252c6..55d9c2b82b7eab11268d88f7d8e5cf586f910985 100644 (file)
@@ -2,6 +2,12 @@
 # Multimedia device configuration
 #
 
+config CEC_CORE
+       tristate
+
+config CEC_NOTIFIER
+       bool
+
 menuconfig MEDIA_SUPPORT
        tristate "Multimedia support"
        depends on HAS_IOMEM
index 523fea3648ad71749009fc9f7d0f543d36734128..044503aa8801744785da9c637bda4c020bfac766 100644 (file)
@@ -4,8 +4,6 @@
 
 media-objs     := media-device.o media-devnode.o media-entity.o
 
-obj-$(CONFIG_CEC_CORE) += cec/
-
 #
 # I2C drivers should come before other drivers, otherwise they'll fail
 # when compiled as builtin drivers
@@ -26,6 +24,8 @@ obj-$(CONFIG_DVB_CORE)  += dvb-core/
 # There are both core and drivers at RC subtree - merge before drivers
 obj-y += rc/
 
+obj-$(CONFIG_CEC_CORE) += cec/
+
 #
 # Finally, merge the drivers that require the core
 #
index f944d93e3167f4e338a1e9cb607af32311136b50..4e25a950ae6f5af73c7bc7bd305a51368ef7d52b 100644 (file)
@@ -1,19 +1,5 @@
-config CEC_CORE
-       tristate
-       depends on MEDIA_CEC_SUPPORT
-       default y
-
-config MEDIA_CEC_NOTIFIER
-       bool
-
 config MEDIA_CEC_RC
        bool "HDMI CEC RC integration"
        depends on CEC_CORE && RC_CORE
        ---help---
          Pass on CEC remote control messages to the RC framework.
-
-config MEDIA_CEC_DEBUG
-       bool "HDMI CEC debugfs interface"
-       depends on CEC_CORE && DEBUG_FS
-       ---help---
-         Turns on the DebugFS interface for CEC devices.
index 402a6c62a3e8b9bb4857ac553e1b9a14c6babc16..eaf408e646697adcb8cc95b95c8425574715bdee 100644 (file)
@@ -1,6 +1,6 @@
 cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
 
-ifeq ($(CONFIG_MEDIA_CEC_NOTIFIER),y)
+ifeq ($(CONFIG_CEC_NOTIFIER),y)
   cec-objs += cec-notifier.o
 endif
 
index f5fe01c9da8af17906805668b8448cd6aaf9da1e..9dfc79800c7191964557afe729603afa5cf2b5df 100644 (file)
@@ -1864,7 +1864,7 @@ void cec_monitor_all_cnt_dec(struct cec_adapter *adap)
                WARN_ON(call_op(adap, adap_monitor_all_enable, 0));
 }
 
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
 /*
  * Log the current state of the CEC adapter.
  * Very useful for debugging.
index f9ebff90f8ebc08b77088c8f1901d8aa7077b99f..2f87748ba4fceea377284be6a1c35b6478ca4788 100644 (file)
@@ -187,7 +187,7 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
        put_device(&devnode->dev);
 }
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
 static void cec_cec_notify(struct cec_adapter *adap, u16 pa)
 {
        cec_s_phys_addr(adap, pa, false);
@@ -323,7 +323,7 @@ int cec_register_adapter(struct cec_adapter *adap,
        }
 
        dev_set_drvdata(&adap->devnode.dev, adap);
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
        if (!top_cec_dir)
                return 0;
 
@@ -355,7 +355,7 @@ void cec_unregister_adapter(struct cec_adapter *adap)
        adap->rc = NULL;
 #endif
        debugfs_remove_recursive(adap->cec_dir);
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
        if (adap->notifier)
                cec_notifier_unregister(adap->notifier);
 #endif
@@ -395,7 +395,7 @@ static int __init cec_devnode_init(void)
                return ret;
        }
 
-#ifdef CONFIG_MEDIA_CEC_DEBUG
+#ifdef CONFIG_DEBUG_FS
        top_cec_dir = debugfs_create_dir("cec", NULL);
        if (IS_ERR_OR_NULL(top_cec_dir)) {
                pr_warn("cec: Failed to create debugfs cec dir\n");
index fd181c99ce117fc44c16ded99d50abe9a655b4f8..aaa9471c7d117eae0a00e4bbbf4ed20e3ca65b36 100644 (file)
@@ -220,7 +220,8 @@ config VIDEO_ADV7604
 
 config VIDEO_ADV7604_CEC
        bool "Enable Analog Devices ADV7604 CEC support"
-       depends on VIDEO_ADV7604 && CEC_CORE
+       depends on VIDEO_ADV7604
+       select CEC_CORE
        ---help---
          When selected the adv7604 will support the optional
          HDMI CEC feature.
@@ -240,7 +241,8 @@ config VIDEO_ADV7842
 
 config VIDEO_ADV7842_CEC
        bool "Enable Analog Devices ADV7842 CEC support"
-       depends on VIDEO_ADV7842 && CEC_CORE
+       depends on VIDEO_ADV7842
+       select CEC_CORE
        ---help---
          When selected the adv7842 will support the optional
          HDMI CEC feature.
@@ -478,7 +480,8 @@ config VIDEO_ADV7511
 
 config VIDEO_ADV7511_CEC
        bool "Enable Analog Devices ADV7511 CEC support"
-       depends on VIDEO_ADV7511 && CEC_CORE
+       depends on VIDEO_ADV7511
+       select CEC_CORE
        ---help---
          When selected the adv7511 will support the optional
          HDMI CEC feature.
index ac026ee1ca07484ffa6b0c51632d1c9958c0d3cd..041cb80a26b1ff22f049abaed44b0a62c9b91a9c 100644 (file)
@@ -501,8 +501,9 @@ if CEC_PLATFORM_DRIVERS
 
 config VIDEO_SAMSUNG_S5P_CEC
        tristate "Samsung S5P CEC driver"
-       depends on CEC_CORE && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST)
-       select MEDIA_CEC_NOTIFIER
+       depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST
+       select CEC_CORE
+       select CEC_NOTIFIER
        ---help---
          This is a driver for Samsung S5P HDMI CEC interface. It uses the
          generic CEC framework interface.
@@ -511,8 +512,9 @@ config VIDEO_SAMSUNG_S5P_CEC
 
 config VIDEO_STI_HDMI_CEC
        tristate "STMicroelectronics STiH4xx HDMI CEC driver"
-       depends on CEC_CORE && (ARCH_STI || COMPILE_TEST)
-       select MEDIA_CEC_NOTIFIER
+       depends on ARCH_STI || COMPILE_TEST
+       select CEC_CORE
+       select CEC_NOTIFIER
        ---help---
          This is a driver for STIH4xx HDMI CEC interface. It uses the
          generic CEC framework interface.
index b36ac19dc6e48d60afbc4053fb848d9439dfcd5d..154de92dd809e74ff3d7539787856fef631229fc 100644 (file)
@@ -26,7 +26,8 @@ config VIDEO_VIVID
 
 config VIDEO_VIVID_CEC
        bool "Enable CEC emulation support"
-       depends on VIDEO_VIVID && CEC_CORE
+       depends on VIDEO_VIVID
+       select CEC_CORE
        ---help---
          When selected the vivid module will emulate the optional
          HDMI CEC feature.
index 90f66dc7c0d74dbed7cf370f652546eadc563bf8..a2fc1a1d58b0e317539a1a679e32f96c12d3b00f 100644 (file)
@@ -211,7 +211,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
  */
 void ir_raw_event_handle(struct rc_dev *dev)
 {
-       if (!dev->raw)
+       if (!dev->raw || !dev->raw->thread)
                return;
 
        wake_up_process(dev->raw->thread);
@@ -490,6 +490,7 @@ int ir_raw_event_register(struct rc_dev *dev)
 {
        int rc;
        struct ir_raw_handler *handler;
+       struct task_struct *thread;
 
        if (!dev)
                return -EINVAL;
@@ -507,13 +508,15 @@ int ir_raw_event_register(struct rc_dev *dev)
         * because the event is coming from userspace
         */
        if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
-               dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
-                                              "rc%u", dev->minor);
+               thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
+                                    dev->minor);
 
-               if (IS_ERR(dev->raw->thread)) {
-                       rc = PTR_ERR(dev->raw->thread);
+               if (IS_ERR(thread)) {
+                       rc = PTR_ERR(thread);
                        goto out;
                }
+
+               dev->raw->thread = thread;
        }
 
        mutex_lock(&ir_raw_handler_lock);
index 8937f3986a01f1c89e3fcc5b6825cb6b754ddf20..18ead44824ba2bd02d27480e0f04ac69f39182fd 100644 (file)
@@ -1,6 +1,7 @@
 config USB_PULSE8_CEC
        tristate "Pulse Eight HDMI CEC"
-       depends on USB_ACM && CEC_CORE
+       depends on USB_ACM
+       select CEC_CORE
        select SERIO
        select SERIO_SERPORT
        ---help---
index 3eb86607efb8f627566af1ff374053d7f6b980ec..030ef01b1ff04137ede4a84ddf0357afe943343d 100644 (file)
@@ -1,6 +1,7 @@
 config USB_RAINSHADOW_CEC
        tristate "RainShadow Tech HDMI CEC"
-       depends on USB_ACM && CEC_CORE
+       depends on USB_ACM
+       select CEC_CORE
        select SERIO
        select SERIO_SERPORT
        ---help---
index 541ca543f71f4efe7e29e7c22bce114c2b18fc3d..71bd68548c9c87d3359a458efe9069c59a81e81a 100644 (file)
@@ -119,7 +119,7 @@ static void rain_irq_work_handler(struct work_struct *work)
 
        while (true) {
                unsigned long flags;
-               bool exit_loop;
+               bool exit_loop = false;
                char data;
 
                spin_lock_irqsave(&rain->buf_lock, flags);
index 35910f945bfad02823f7146c0746feb90aa2cda9..99e644cda4d13db301b713a5752788c0f646dfa1 100644 (file)
@@ -581,7 +581,7 @@ static int atmel_ebi_probe(struct platform_device *pdev)
        return of_platform_populate(np, NULL, NULL, dev);
 }
 
-static int atmel_ebi_resume(struct device *dev)
+static __maybe_unused int atmel_ebi_resume(struct device *dev)
 {
        struct atmel_ebi *ebi = dev_get_drvdata(dev);
        struct atmel_ebi_dev *ebid;
index 99e651c27fb7add156fad3d53af4c702fe750cc3..22de7f5ed03236cda482dc8e994471522b6b39b5 100644 (file)
@@ -1921,12 +1921,13 @@ static void msb_io_work(struct work_struct *work)
                spin_lock_irqsave(&msb->q_lock, flags);
 
                if (len)
-                       if (!__blk_end_request(msb->req, 0, len))
+                       if (!__blk_end_request(msb->req, BLK_STS_OK, len))
                                msb->req = NULL;
 
                if (error && msb->req) {
+                       blk_status_t ret = errno_to_blk_status(error);
                        dbg_verbose("IO: ending one sector of the request with error");
-                       if (!__blk_end_request(msb->req, error, msb->page_size))
+                       if (!__blk_end_request(msb->req, ret, msb->page_size))
                                msb->req = NULL;
                }
 
@@ -2014,7 +2015,7 @@ static void msb_submit_req(struct request_queue *q)
                WARN_ON(!msb->io_queue_stopped);
 
                while ((req = blk_fetch_request(q)) != NULL)
-                       __blk_end_request_all(req, -ENODEV);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                return;
        }
 
index c00d8a266878035cafa9bc6570ca73d67b1da372..8897962781bb1ad861a7a3b392f6ff881b78f5ec 100644 (file)
@@ -709,7 +709,8 @@ try_again:
                                               msb->req_sg);
 
                if (!msb->seg_count) {
-                       chunk = __blk_end_request_cur(msb->block_req, -ENOMEM);
+                       chunk = __blk_end_request_cur(msb->block_req,
+                                       BLK_STS_RESOURCE);
                        continue;
                }
 
@@ -776,7 +777,8 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
                if (error && !t_len)
                        t_len = blk_rq_cur_bytes(msb->block_req);
 
-               chunk = __blk_end_request(msb->block_req, error, t_len);
+               chunk = __blk_end_request(msb->block_req,
+                               errno_to_blk_status(error), t_len);
 
                error = mspro_block_issue_req(card, chunk);
 
@@ -838,7 +840,7 @@ static void mspro_block_submit_req(struct request_queue *q)
 
        if (msb->eject) {
                while ((req = blk_fetch_request(q)) != NULL)
-                       __blk_end_request_all(req, -ENODEV);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
 
                return;
        }
index 17b433f1ce23b7deeb2e3f35139d3bef57ef20d0..0761271d68c5613b23152c03522f6a388950ea86 100644 (file)
@@ -159,11 +159,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
 
        /* Do this outside the status_mutex to avoid a circular dependency with
         * the locking in cxl_mmap_fault() */
-       if (copy_from_user(&work, uwork,
-                          sizeof(struct cxl_ioctl_start_work))) {
-               rc = -EFAULT;
-               goto out;
-       }
+       if (copy_from_user(&work, uwork, sizeof(work)))
+               return -EFAULT;
 
        mutex_lock(&ctx->status_mutex);
        if (ctx->status != OPENED) {
index 871a2f09c71845b2803bab920618c36de634e4fc..8d6ea9712dbd1830fcdc5d6eecda3d28d69a9376 100644 (file)
@@ -1302,13 +1302,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
 
 void cxl_native_release_psl_err_irq(struct cxl *adapter)
 {
-       if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq))
+       if (adapter->native->err_virq == 0 ||
+           adapter->native->err_virq !=
+           irq_find_mapping(NULL, adapter->native->err_hwirq))
                return;
 
        cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
        cxl_unmap_irq(adapter->native->err_virq, adapter);
        cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
        kfree(adapter->irq_name);
+       adapter->native->err_virq = 0;
 }
 
 int cxl_native_register_serr_irq(struct cxl_afu *afu)
@@ -1346,13 +1349,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)
 
 void cxl_native_release_serr_irq(struct cxl_afu *afu)
 {
-       if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
+       if (afu->serr_virq == 0 ||
+           afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
                return;
 
        cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
        cxl_unmap_irq(afu->serr_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
        kfree(afu->err_irq_name);
+       afu->serr_virq = 0;
 }
 
 int cxl_native_register_psl_irq(struct cxl_afu *afu)
@@ -1375,12 +1380,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
 
 void cxl_native_release_psl_irq(struct cxl_afu *afu)
 {
-       if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq))
+       if (afu->native->psl_virq == 0 ||
+           afu->native->psl_virq !=
+           irq_find_mapping(NULL, afu->native->psl_hwirq))
                return;
 
        cxl_unmap_irq(afu->native->psl_virq, afu);
        cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
        kfree(afu->psl_irq_name);
+       afu->native->psl_virq = 0;
 }
 
 static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
index d1928fdd0f435630609c92056a66ad1af000a4ae..07aad85763348b0a7ec85ccf85ab21eb0031b43a 100644 (file)
@@ -763,8 +763,10 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
 {
        struct mei_cl_device *cldev = to_mei_cl_device(dev);
        const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
+       u8 version = mei_me_cl_ver(cldev->me_cl);
 
-       return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid);
+       return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
+                        cldev->name, uuid, version);
 }
 static DEVICE_ATTR_RO(modalias);
 
index 8273b078686d0a939d1c5dc1b6ce58e8c72ef94a..6ff94a948a4b2d496d2036e9d33cf3fdab46d74f 100644 (file)
@@ -1184,9 +1184,10 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_DISCARD;
+       blk_status_t status = BLK_STS_OK;
 
        if (!mmc_can_erase(card)) {
-               err = -EOPNOTSUPP;
+               status = BLK_STS_NOTSUPP;
                goto fail;
        }
 
@@ -1212,10 +1213,12 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
                if (!err)
                        err = mmc_erase(card, from, nr, arg);
        } while (err == -EIO && !mmc_blk_reset(md, card->host, type));
-       if (!err)
+       if (err)
+               status = BLK_STS_IOERR;
+       else
                mmc_blk_reset_success(md, type);
 fail:
-       blk_end_request(req, err, blk_rq_bytes(req));
+       blk_end_request(req, status, blk_rq_bytes(req));
 }
 
 static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
@@ -1225,9 +1228,10 @@ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0, type = MMC_BLK_SECDISCARD;
+       blk_status_t status = BLK_STS_OK;
 
        if (!(mmc_can_secure_erase_trim(card))) {
-               err = -EOPNOTSUPP;
+               status = BLK_STS_NOTSUPP;
                goto out;
        }
 
@@ -1254,8 +1258,10 @@ retry:
        err = mmc_erase(card, from, nr, arg);
        if (err == -EIO)
                goto out_retry;
-       if (err)
+       if (err) {
+               status = BLK_STS_IOERR;
                goto out;
+       }
 
        if (arg == MMC_SECURE_TRIM1_ARG) {
                if (card->quirks & MMC_QUIRK_INAND_CMD38) {
@@ -1270,8 +1276,10 @@ retry:
                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
                if (err == -EIO)
                        goto out_retry;
-               if (err)
+               if (err) {
+                       status = BLK_STS_IOERR;
                        goto out;
+               }
        }
 
 out_retry:
@@ -1280,7 +1288,7 @@ out_retry:
        if (!err)
                mmc_blk_reset_success(md, type);
 out:
-       blk_end_request(req, err, blk_rq_bytes(req));
+       blk_end_request(req, status, blk_rq_bytes(req));
 }
 
 static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
@@ -1290,10 +1298,7 @@ static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
        int ret = 0;
 
        ret = mmc_flush_cache(card);
-       if (ret)
-               ret = -EIO;
-
-       blk_end_request_all(req, ret);
+       blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
 }
 
 /*
@@ -1641,7 +1646,7 @@ static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
 {
        if (mmc_card_removed(card))
                req->rq_flags |= RQF_QUIET;
-       while (blk_end_request(req, -EIO, blk_rq_cur_bytes(req)));
+       while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
        mmc_queue_req_free(mq, mqrq);
 }
 
@@ -1661,7 +1666,7 @@ static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
         */
        if (mmc_card_removed(mq->card)) {
                req->rq_flags |= RQF_QUIET;
-               blk_end_request_all(req, -EIO);
+               blk_end_request_all(req, BLK_STS_IOERR);
                mmc_queue_req_free(mq, mqrq);
                return;
        }
@@ -1743,7 +1748,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
                         */
                        mmc_blk_reset_success(md, type);
 
-                       req_pending = blk_end_request(old_req, 0,
+                       req_pending = blk_end_request(old_req, BLK_STS_OK,
                                                      brq->data.bytes_xfered);
                        /*
                         * If the blk_end_request function returns non-zero even
@@ -1811,7 +1816,7 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
                         * time, so we only reach here after trying to
                         * read a single sector.
                         */
-                       req_pending = blk_end_request(old_req, -EIO,
+                       req_pending = blk_end_request(old_req, BLK_STS_IOERR,
                                                      brq->data.blksz);
                        if (!req_pending) {
                                mmc_queue_req_free(mq, mq_rq);
@@ -1860,7 +1865,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        ret = mmc_blk_part_switch(card, md);
        if (ret) {
                if (req) {
-                       blk_end_request_all(req, -EIO);
+                       blk_end_request_all(req, BLK_STS_IOERR);
                }
                goto out;
        }
index 5c37b6be3e7b62db3f4f2104cc282a74cd033444..7f20298d892b01b0a731f50a914dac89483bb757 100644 (file)
@@ -133,7 +133,7 @@ static void mmc_request_fn(struct request_queue *q)
        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->rq_flags |= RQF_QUIET;
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                }
                return;
        }
index 6b8d5cd7dbf6bdc3442c1d44ae136a3c6f885aff..91c17fba76598f78b7e8968dc9477a92c6ea5f49 100644 (file)
@@ -73,7 +73,7 @@ static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
 }
 
 
-static int do_blktrans_request(struct mtd_blktrans_ops *tr,
+static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
 {
@@ -84,33 +84,37 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
        nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
        buf = bio_data(req->bio);
 
-       if (req_op(req) == REQ_OP_FLUSH)
-               return tr->flush(dev);
+       if (req_op(req) == REQ_OP_FLUSH) {
+               if (tr->flush(dev))
+                       return BLK_STS_IOERR;
+               return BLK_STS_OK;
+       }
 
        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
            get_capacity(req->rq_disk))
-               return -EIO;
+               return BLK_STS_IOERR;
 
        switch (req_op(req)) {
        case REQ_OP_DISCARD:
-               return tr->discard(dev, block, nsect);
+               if (tr->discard(dev, block, nsect))
+                       return BLK_STS_IOERR;
+               return BLK_STS_OK;
        case REQ_OP_READ:
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->readsect(dev, block, buf))
-                               return -EIO;
+                               return BLK_STS_IOERR;
                rq_flush_dcache_pages(req);
-               return 0;
+               return BLK_STS_OK;
        case REQ_OP_WRITE:
                if (!tr->writesect)
-                       return -EIO;
+                       return BLK_STS_IOERR;
 
                rq_flush_dcache_pages(req);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->writesect(dev, block, buf))
-                               return -EIO;
-               return 0;
+                               return BLK_STS_IOERR;
        default:
-               return -EIO;
+               return BLK_STS_IOERR;
        }
 }
 
@@ -132,7 +136,7 @@ static void mtd_blktrans_work(struct work_struct *work)
        spin_lock_irq(rq->queue_lock);
 
        while (1) {
-               int res;
+               blk_status_t res;
 
                dev->bg_stop = false;
                if (!req && !(req = blk_fetch_request(rq))) {
@@ -178,7 +182,7 @@ static void mtd_blktrans_request(struct request_queue *rq)
 
        if (!dev)
                while ((req = blk_fetch_request(rq)) != NULL)
-                       __blk_end_request_all(req, -ENODEV);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
        else
                queue_work(dev->wq, &dev->work);
 }
index 5497e65439df6458cc02d1e6cef0b42c581a7add..c3963f88044818d15a3b6a45d7a940f56d1972da 100644 (file)
@@ -313,10 +313,10 @@ static void ubiblock_do_work(struct work_struct *work)
        ret = ubiblock_read(pdu);
        rq_flush_dcache_pages(req);
 
-       blk_mq_end_request(req, ret);
+       blk_mq_end_request(req, errno_to_blk_status(ret));
 }
 
-static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
                             const struct blk_mq_queue_data *bd)
 {
        struct request *req = bd->rq;
@@ -327,9 +327,9 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
        case REQ_OP_READ:
                ubi_sgl_init(&pdu->usgl);
                queue_work(dev->wq, &pdu->work);
-               return BLK_MQ_RQ_QUEUE_OK;
+               return BLK_STS_OK;
        default:
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
        }
 
 }
index 96046bb12ca17333530f237fddb46438c3298dea..14c0be98e0a4d449aa4122c2db6e9ef6af007c84 100644 (file)
@@ -114,13 +114,13 @@ static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
        return -EOPNOTSUPP;
 }
 
-int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev,
-                          int src_port, u16 data)
+static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip,
+                                        int src_dev, int src_port, u16 data)
 {
        return -EOPNOTSUPP;
 }
 
-int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
+static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip)
 {
        return -EOPNOTSUPP;
 }
index b3bc87fe3764e397e4a9ce19518866cb97530771..0a98c369df2045ccbb9fbf7a55af848530a5f464 100644 (file)
@@ -324,7 +324,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
                              struct xgbe_ring *ring,
                              struct xgbe_ring_data *rdata)
 {
-       int order, ret;
+       int ret;
 
        if (!ring->rx_hdr_pa.pages) {
                ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
@@ -333,9 +333,8 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
        }
 
        if (!ring->rx_buf_pa.pages) {
-               order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
                ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
-                                      order);
+                                      PAGE_ALLOC_COSTLY_ORDER);
                if (ret)
                        return ret;
        }
index 099b374c1b17bbd8e9cabe68cdc7cd991a258737..5274501428e4fb05850bada0d4ff3cd8a346f59f 100644 (file)
@@ -2026,9 +2026,12 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        priv->num_rx_desc_words = params->num_rx_desc_words;
 
        priv->irq0 = platform_get_irq(pdev, 0);
-       if (!priv->is_lite)
+       if (!priv->is_lite) {
                priv->irq1 = platform_get_irq(pdev, 1);
-       priv->wol_irq = platform_get_irq(pdev, 2);
+               priv->wol_irq = platform_get_irq(pdev, 2);
+       } else {
+               priv->wol_irq = platform_get_irq(pdev, 1);
+       }
        if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
                dev_err(&pdev->dev, "invalid interrupts\n");
                ret = -EINVAL;
index eccb3d1b6abb748c14567d0efcdc28a405b64fb5..5f49334dcad5a8c8602cc3aa2e8795b2d489bb43 100644 (file)
@@ -1926,7 +1926,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
        }
 
        /* select a non-FCoE queue */
-       return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+       return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }
 
 void bnx2x_set_num_queues(struct bnx2x *bp)
index 38a5c6764bb50f45124c212db37e11d2cc777076..77ed2f628f9ca23854ae8b062ff919ce6d2e3425 100644 (file)
@@ -2196,10 +2196,14 @@ static int cxgb_up(struct adapter *adap)
                if (err)
                        goto irq_err;
        }
+
+       mutex_lock(&uld_mutex);
        enable_rx(adap);
        t4_sge_start(adap);
        t4_intr_enable(adap);
        adap->flags |= FULL_INIT_DONE;
+       mutex_unlock(&uld_mutex);
+
        notify_ulds(adap, CXGB4_STATE_UP);
 #if IS_ENABLED(CONFIG_IPV6)
        update_clip(adap);
@@ -2771,6 +2775,9 @@ void t4_fatal_err(struct adapter *adap)
 {
        int port;
 
+       if (pci_channel_offline(adap->pdev))
+               return;
+
        /* Disable the SGE since ULDs are going to free resources that
         * could be exposed to the adapter.  RDMA MWs for example...
         */
@@ -3882,9 +3889,10 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
        spin_lock(&adap->stats_lock);
        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
-
-               netif_device_detach(dev);
-               netif_carrier_off(dev);
+               if (dev) {
+                       netif_device_detach(dev);
+                       netif_carrier_off(dev);
+               }
        }
        spin_unlock(&adap->stats_lock);
        disable_interrupts(adap);
@@ -3963,12 +3971,13 @@ static void eeh_resume(struct pci_dev *pdev)
        rtnl_lock();
        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
-
-               if (netif_running(dev)) {
-                       link_start(dev);
-                       cxgb_set_rxmode(dev);
+               if (dev) {
+                       if (netif_running(dev)) {
+                               link_start(dev);
+                               cxgb_set_rxmode(dev);
+                       }
+                       netif_device_attach(dev);
                }
-               netif_device_attach(dev);
        }
        rtnl_unlock();
 }
index aded42b96f6d966ba7e814c0a89c738968a655b6..3a34aa629f7dd81a56e6b5c1c63bfdeb685c3f25 100644 (file)
@@ -4557,8 +4557,13 @@ void t4_intr_enable(struct adapter *adapter)
  */
 void t4_intr_disable(struct adapter *adapter)
 {
-       u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
-       u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+       u32 whoami, pf;
+
+       if (pci_channel_offline(adapter->pdev))
+               return;
+
+       whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+       pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
                        SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
 
        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
index 3549d387627888a2629b5f07dd1b001d2db1fc70..f2d623a7aee04e21f1e4e52645d66788a59341ab 100644 (file)
@@ -37,7 +37,7 @@
 
 #define T4FW_VERSION_MAJOR 0x01
 #define T4FW_VERSION_MINOR 0x10
-#define T4FW_VERSION_MICRO 0x2B
+#define T4FW_VERSION_MICRO 0x2D
 #define T4FW_VERSION_BUILD 0x00
 
 #define T4FW_MIN_VERSION_MAJOR 0x01
@@ -46,7 +46,7 @@
 
 #define T5FW_VERSION_MAJOR 0x01
 #define T5FW_VERSION_MINOR 0x10
-#define T5FW_VERSION_MICRO 0x2B
+#define T5FW_VERSION_MICRO 0x2D
 #define T5FW_VERSION_BUILD 0x00
 
 #define T5FW_MIN_VERSION_MAJOR 0x00
@@ -55,7 +55,7 @@
 
 #define T6FW_VERSION_MAJOR 0x01
 #define T6FW_VERSION_MINOR 0x10
-#define T6FW_VERSION_MICRO 0x2B
+#define T6FW_VERSION_MICRO 0x2D
 #define T6FW_VERSION_BUILD 0x00
 
 #define T6FW_MIN_VERSION_MAJOR 0x00
index e863ba74d005d7f255931b336825df2abadd2fc8..8bb0db990c8fcf8258201f1af5fcf3fa9976b5f9 100644 (file)
@@ -739,6 +739,8 @@ static int ethoc_open(struct net_device *dev)
        if (ret)
                return ret;
 
+       napi_enable(&priv->napi);
+
        ethoc_init_ring(priv, dev->mem_start);
        ethoc_reset(priv);
 
@@ -754,7 +756,6 @@ static int ethoc_open(struct net_device *dev)
        priv->old_duplex = -1;
 
        phy_start(dev->phydev);
-       napi_enable(&priv->napi);
 
        if (netif_msg_ifup(priv)) {
                dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
index 446c7b374ff5c36712d5813cf94b4e9b4ca0b01e..a10de1e9c157d2590eb19122f27fd5dda1a4816b 100644 (file)
@@ -381,7 +381,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 {
        const struct of_device_id *id =
                of_match_device(fsl_pq_mdio_match, &pdev->dev);
-       const struct fsl_pq_mdio_data *data = id->data;
+       const struct fsl_pq_mdio_data *data;
        struct device_node *np = pdev->dev.of_node;
        struct resource res;
        struct device_node *tbi;
@@ -389,6 +389,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
        struct mii_bus *new_bus;
        int err;
 
+       if (!id) {
+               dev_err(&pdev->dev, "Failed to match device\n");
+               return -ENODEV;
+       }
+
+       data = id->data;
+
        dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
 
        new_bus = mdiobus_alloc_size(sizeof(*priv));
index 4f2d329dba998308eeb2ddaed52733cb3059f605..a93757c255f77445e2245ee8b065d2c6ee31cf3f 100644 (file)
@@ -81,7 +81,7 @@
 static const char ibmvnic_driver_name[] = "ibmvnic";
 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
 
-MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
+MODULE_AUTHOR("Santiago Leon");
 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
index d5c9c9e06ff57e21c1e28d09b9eea18af6e22f1a..150caf6ca2b4bb1da5e0ea63f37fd086c058efad 100644 (file)
@@ -295,7 +295,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
  **/
 void i40e_service_event_schedule(struct i40e_pf *pf)
 {
-       if (!test_bit(__I40E_VSI_DOWN, pf->state) &&
+       if (!test_bit(__I40E_DOWN, pf->state) &&
            !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
                queue_work(i40e_wq, &pf->service_task);
 }
@@ -3611,7 +3611,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
                 * this is not a performance path and napi_schedule()
                 * can deal with rescheduling.
                 */
-               if (!test_bit(__I40E_VSI_DOWN, pf->state))
+               if (!test_bit(__I40E_DOWN, pf->state))
                        napi_schedule_irqoff(&q_vector->napi);
        }
 
@@ -3687,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
 enable_intr:
        /* re-enable interrupt causes */
        wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
-       if (!test_bit(__I40E_VSI_DOWN, pf->state)) {
+       if (!test_bit(__I40E_DOWN, pf->state)) {
                i40e_service_event_schedule(pf);
                i40e_irq_dynamic_enable_icr0(pf, false);
        }
@@ -6203,7 +6203,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
 {
 
        /* if interface is down do nothing */
-       if (test_bit(__I40E_VSI_DOWN, pf->state))
+       if (test_bit(__I40E_DOWN, pf->state))
                return;
 
        if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
@@ -6344,7 +6344,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
        int i;
 
        /* if interface is down do nothing */
-       if (test_bit(__I40E_VSI_DOWN, pf->state) ||
+       if (test_bit(__I40E_DOWN, pf->state) ||
            test_bit(__I40E_CONFIG_BUSY, pf->state))
                return;
 
@@ -6399,9 +6399,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
                reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
                clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
        }
-       if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) {
-               reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED);
-               clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state);
+       if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
+               reset_flags |= BIT(__I40E_DOWN_REQUESTED);
+               clear_bit(__I40E_DOWN_REQUESTED, pf->state);
        }
 
        /* If there's a recovery already waiting, it takes
@@ -6415,7 +6415,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 
        /* If we're already down or resetting, just bail */
        if (reset_flags &&
-           !test_bit(__I40E_VSI_DOWN, pf->state) &&
+           !test_bit(__I40E_DOWN, pf->state) &&
            !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
                rtnl_lock();
                i40e_do_reset(pf, reset_flags, true);
@@ -7002,7 +7002,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
        u32 val;
        int v;
 
-       if (test_bit(__I40E_VSI_DOWN, pf->state))
+       if (test_bit(__I40E_DOWN, pf->state))
                goto clear_recovery;
        dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
 
@@ -9767,7 +9767,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
                return -ENODEV;
        }
        if (vsi == pf->vsi[pf->lan_vsi] &&
-           !test_bit(__I40E_VSI_DOWN, pf->state)) {
+           !test_bit(__I40E_DOWN, pf->state)) {
                dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
                return -ENODEV;
        }
@@ -11003,7 +11003,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        pf->next_vsi = 0;
        pf->pdev = pdev;
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
 
        hw = &pf->hw;
        hw->back = pf;
@@ -11293,7 +11293,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         * before setting up the misc vector or we get a race and the vector
         * ends up disabled forever.
         */
-       clear_bit(__I40E_VSI_DOWN, pf->state);
+       clear_bit(__I40E_DOWN, pf->state);
 
        /* In case of MSIX we are going to setup the misc vector right here
         * to handle admin queue events etc. In case of legacy and MSI
@@ -11448,7 +11448,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* Unwind what we've done if something failed in the setup */
 err_vsis:
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
        i40e_clear_interrupt_scheme(pf);
        kfree(pf->vsi);
 err_switch_setup:
@@ -11500,7 +11500,7 @@ static void i40e_remove(struct pci_dev *pdev)
 
        /* no more scheduling of any task */
        set_bit(__I40E_SUSPENDED, pf->state);
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
        if (pf->service_timer.data)
                del_timer_sync(&pf->service_timer);
        if (pf->service_task.func)
@@ -11740,7 +11740,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
        struct i40e_hw *hw = &pf->hw;
 
        set_bit(__I40E_SUSPENDED, pf->state);
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
        rtnl_lock();
        i40e_prep_for_reset(pf, true);
        rtnl_unlock();
@@ -11789,7 +11789,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
        int retval = 0;
 
        set_bit(__I40E_SUSPENDED, pf->state);
-       set_bit(__I40E_VSI_DOWN, pf->state);
+       set_bit(__I40E_DOWN, pf->state);
 
        if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
                i40e_enable_mc_magic_wake(pf);
@@ -11841,7 +11841,7 @@ static int i40e_resume(struct pci_dev *pdev)
 
        /* handling the reset will rebuild the device state */
        if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) {
-               clear_bit(__I40E_VSI_DOWN, pf->state);
+               clear_bit(__I40E_DOWN, pf->state);
                rtnl_lock();
                i40e_reset_and_rebuild(pf, false, true);
                rtnl_unlock();
index 29321a6167a6675757e74ab4e3cd1a100cb423b0..cd894f4023b1b68cc4e202ff7064e63e2f8be031 100644 (file)
@@ -1854,7 +1854,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-       unsigned int truesize = SKB_DATA_ALIGN(size);
+       unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+                               SKB_DATA_ALIGN(I40E_SKB_PAD + size);
 #endif
        struct sk_buff *skb;
 
index dfe241a12ad0756d10a77b954486d2194a31c5ca..12b02e5305038d55fcee5d2109b4ab534a09b1bb 100644 (file)
@@ -1190,7 +1190,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
-       unsigned int truesize = SKB_DATA_ALIGN(size);
+       unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+                               SKB_DATA_ALIGN(I40E_SKB_PAD + size);
 #endif
        struct sk_buff *skb;
 
index ae5fdc2df65412afce4e72b8384c9c4b3302c56e..ffbcb27c05e55f43630a812249bab21609886dd9 100644 (file)
@@ -1562,11 +1562,6 @@ static int mlx4_en_flow_replace(struct net_device *dev,
                qpn = priv->drop_qp.qpn;
        else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
                qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
-               if (qpn < priv->rss_map.base_qpn ||
-                   qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) {
-                       en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn);
-                       return -EINVAL;
-               }
        } else {
                if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
                        en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
index 1a670b68155550fe9f61fb2bc2c7a3688391249c..0710b367746468f1d4faeb5b8a8f3266ca941674 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/etherdevice.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
 #include <linux/export.h>
 
 #include "mlx4.h"
@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
 
+       if (!mlx4_qp_lookup(dev, rule->qpn)) {
+               mlx4_err_rule(dev, "QP doesn't exist\n", rule);
+               ret = -EINVAL;
+               goto out;
+       }
+
        trans_rule_ctrl_to_hw(rule, mailbox->buf);
 
        size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
 
        list_for_each_entry(cur, &rule->list, list) {
                ret = parse_trans_rule(dev, cur, mailbox->buf + size);
-               if (ret < 0) {
-                       mlx4_free_cmd_mailbox(dev, mailbox);
-                       return ret;
-               }
+               if (ret < 0)
+                       goto out;
+
                size += ret;
        }
 
@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
                }
        }
 
+out:
        mlx4_free_cmd_mailbox(dev, mailbox);
 
        return ret;
index 2d6abd4662b143612769ef9b91783249bcd2ac8b..5a310d313e94d08d035c265e6b538c27dcf957c3 100644 (file)
@@ -384,6 +384,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
                __mlx4_qp_free_icm(dev, qpn);
 }
 
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+{
+       struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+       struct mlx4_qp *qp;
+
+       spin_lock(&qp_table->lock);
+
+       qp = __mlx4_qp_lookup(dev, qpn);
+
+       spin_unlock(&qp_table->lock);
+       return qp;
+}
+
 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -471,6 +484,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
        }
 
        if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
+               if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
+                       mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
+                       err = -EOPNOTSUPP;
+                       goto out;
+               }
+
                qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
                cmd->qp_context.qos_vport = params->qos_vport;
        }
index 07516545474f3ac76e750aaa4af2532b6ac81207..812783865205715e8e88ad66d4ccbfe7172ec6e5 100644 (file)
@@ -5255,6 +5255,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 }
 
+static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
+                          struct mlx4_vf_immed_vlan_work *work)
+{
+       ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
+       ctx->qp_context.qos_vport = work->qos_vport;
+}
+
 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 {
        struct mlx4_vf_immed_vlan_work *work =
@@ -5369,11 +5376,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
                                        qp->sched_queue & 0xC7;
                                upd_context->qp_context.pri_path.sched_queue |=
                                        ((work->qos & 0x7) << 3);
-                               upd_context->qp_mask |=
-                                       cpu_to_be64(1ULL <<
-                                                   MLX4_UPD_QP_MASK_QOS_VPP);
-                               upd_context->qp_context.qos_vport =
-                                       work->qos_vport;
+
+                               if (dev->caps.flags2 &
+                                   MLX4_DEV_CAP_FLAG2_QOS_VPP)
+                                       update_qos_vpp(upd_context, work);
                        }
 
                        err = mlx4_cmd(dev, mailbox->dma,
index fe5546bb41537f0af0c4bcfe9054ccceaa42bbb2..af945edfee1905dbe676218cb53123535a37171f 100644 (file)
@@ -621,10 +621,9 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
        cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
                        priv->irq_info[i].mask);
 
-#ifdef CONFIG_SMP
-       if (irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+       if (IS_ENABLED(CONFIG_SMP) &&
+           irq_set_affinity_hint(irq, priv->irq_info[i].mask))
                mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
-#endif
 
        return 0;
 }
index 537d1236a4fec0a2973d52ed34cf7f73d8a4b052..715b3aaf83ac4d65cdea4eb15f6f2089e402c469 100644 (file)
@@ -1730,7 +1730,8 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
                qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
                break;
        default:
-               DP_ERR(cdev, "Invalid protocol type = %d\n", type);
+               DP_VERBOSE(cdev, QED_MSG_SP,
+                          "Invalid protocol type = %d\n", type);
                return;
        }
 }
index 7245b1072518fff31566c471b6eb32b512e41846..81312924df1407092fd1dd43cc0555d16976160b 100644 (file)
@@ -1824,22 +1824,44 @@ struct qlcnic_hardware_ops {
        u32 (*get_cap_size)(void *, int);
        void (*set_sys_info)(void *, int, u32);
        void (*store_cap_mask)(void *, u32);
+       bool (*encap_rx_offload) (struct qlcnic_adapter *adapter);
+       bool (*encap_tx_offload) (struct qlcnic_adapter *adapter);
 };
 
 extern struct qlcnic_nic_template qlcnic_vf_ops;
 
-static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter)
 {
        return adapter->ahw->extra_capability[0] &
               QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
 }
 
-static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter)
 {
        return adapter->ahw->extra_capability[0] &
               QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
 }
 
+static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+       return false;
+}
+
+static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+        return false;
+}
+
+static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+        return adapter->ahw->hw_ops->encap_rx_offload(adapter);
+}
+
+static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+        return adapter->ahw->hw_ops->encap_tx_offload(adapter);
+}
+
 static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
 {
        return adapter->nic_ops->start_firmware(adapter);
index 4fb68797630e9531e7ffc4e7bc7c015313a44063..f7080d0ab8746263c6955163640964eac2fd2cb0 100644 (file)
@@ -242,6 +242,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
        .get_cap_size                   = qlcnic_83xx_get_cap_size,
        .set_sys_info                   = qlcnic_83xx_set_sys_info,
        .store_cap_mask                 = qlcnic_83xx_store_cap_mask,
+       .encap_rx_offload               = qlcnic_83xx_encap_rx_offload,
+       .encap_tx_offload               = qlcnic_83xx_encap_tx_offload,
 };
 
 static struct qlcnic_nic_template qlcnic_83xx_ops = {
index 838cc0ceafd8d0824495206bf30cdaf53f98bc59..7848cf04b29a83f0a356b3eb5360d6a6e65871f0 100644 (file)
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
                        }
                        return -EIO;
                }
-               usleep_range(1000, 1500);
+               udelay(1200);
        }
 
        if (id_reg)
index b6628aaa6e4a45a8eaecd9d5fc4ea8136d7a07af..1b5f7d57b6f8fed6a8b232adfdee76b2cbaff13f 100644 (file)
@@ -632,6 +632,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
        .get_cap_size                   = qlcnic_82xx_get_cap_size,
        .set_sys_info                   = qlcnic_82xx_set_sys_info,
        .store_cap_mask                 = qlcnic_82xx_store_cap_mask,
+       .encap_rx_offload               = qlcnic_82xx_encap_rx_offload,
+       .encap_tx_offload               = qlcnic_82xx_encap_tx_offload,
 };
 
 static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
index 2f656f395f39699e4cec53f4ff25ea7e745d1041..c58180f408448e9a86a7ce40c6fb286063a6afb0 100644 (file)
@@ -77,6 +77,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
        .free_mac_list                  = qlcnic_sriov_vf_free_mac_list,
        .enable_sds_intr                = qlcnic_83xx_enable_sds_intr,
        .disable_sds_intr               = qlcnic_83xx_disable_sds_intr,
+       .encap_rx_offload               = qlcnic_83xx_encap_rx_offload,
+       .encap_tx_offload               = qlcnic_83xx_encap_tx_offload,
 };
 
 static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
index cc065ffbe4b5584a6498237d1e4a929ff1d6ebd0..bcd4708b374574fb06faf28d9b0a6cc90bc9c56d 100644 (file)
@@ -931,7 +931,7 @@ int emac_mac_up(struct emac_adapter *adpt)
        emac_mac_config(adpt);
        emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
 
-       adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
+       adpt->phydev->irq = PHY_POLL;
        ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
                                 PHY_INTERFACE_MODE_SGMII);
        if (ret) {
index 441c1936648993fa394e5c676b7bd9957e52efa3..18461fcb981501efd7015634999cb787041c01a7 100644 (file)
 /* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
  */
 
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_net.h>
 #include <linux/of_mdio.h>
 #include <linux/phy.h>
 #include <linux/iopoll.h>
 #include <linux/acpi.h>
 #include "emac.h"
-#include "emac-mac.h"
 
 /* EMAC base register offsets */
 #define EMAC_MDIO_CTRL                                        0x001414
 
 #define MDIO_WAIT_TIMES                                           1000
 
-#define EMAC_LINK_SPEED_DEFAULT (\
-               EMAC_LINK_SPEED_10_HALF  |\
-               EMAC_LINK_SPEED_10_FULL  |\
-               EMAC_LINK_SPEED_100_HALF |\
-               EMAC_LINK_SPEED_100_FULL |\
-               EMAC_LINK_SPEED_1GB_FULL)
-
-/**
- * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
- * @adpt: the emac adapter
- *
- * The autopoll feature takes over the MDIO bus.  In order for
- * the PHY driver to be able to talk to the PHY over the MDIO
- * bus, we need to temporarily disable the autopoll feature.
- */
-static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt)
-{
-       u32 val;
-
-       /* disable autopoll */
-       emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);
-
-       /* wait for any mdio polling to complete */
-       if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val,
-                               !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100))
-               return 0;
-
-       /* failed to disable; ensure it is enabled before returning */
-       emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
-
-       return -EBUSY;
-}
-
-/**
- * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
- * @adpt: the emac adapter
- *
- * The EMAC has the ability to poll the external PHY on the MDIO
- * bus for link state changes.  This eliminates the need for the
- * driver to poll the phy.  If if the link state does change,
- * the EMAC issues an interrupt on behalf of the PHY.
- */
-static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt)
-{
-       emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
-}
-
 static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
 {
        struct emac_adapter *adpt = bus->priv;
        u32 reg;
-       int ret;
-
-       ret = emac_phy_mdio_autopoll_disable(adpt);
-       if (ret)
-               return ret;
 
        emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
                          (addr << PHY_ADDR_SHFT));
@@ -122,24 +66,15 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
        if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
                               !(reg & (MDIO_START | MDIO_BUSY)),
                               100, MDIO_WAIT_TIMES * 100))
-               ret = -EIO;
-       else
-               ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
+               return -EIO;
 
-       emac_phy_mdio_autopoll_enable(adpt);
-
-       return ret;
+       return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
 }
 
 static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
 {
        struct emac_adapter *adpt = bus->priv;
        u32 reg;
-       int ret;
-
-       ret = emac_phy_mdio_autopoll_disable(adpt);
-       if (ret)
-               return ret;
 
        emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
                          (addr << PHY_ADDR_SHFT));
@@ -155,11 +90,9 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
        if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
                               !(reg & (MDIO_START | MDIO_BUSY)), 100,
                               MDIO_WAIT_TIMES * 100))
-               ret = -EIO;
+               return -EIO;
 
-       emac_phy_mdio_autopoll_enable(adpt);
-
-       return ret;
+       return 0;
 }
 
 /* Configure the MDIO bus and connect the external PHY */
index 28a8cdc364851e56a5757a8f2970853c0a462cc4..98a326faea294eec0c59f9b0ffe15d57046ce5eb 100644 (file)
 #define DMAR_DLY_CNT_DEF                                   15
 #define DMAW_DLY_CNT_DEF                                    4
 
-#define IMR_NORMAL_MASK         (\
-               ISR_ERROR       |\
-               ISR_GPHY_LINK   |\
-               ISR_TX_PKT      |\
-               GPHY_WAKEUP_INT)
-
-#define IMR_EXTENDED_MASK       (\
-               SW_MAN_INT      |\
-               ISR_OVER        |\
-               ISR_ERROR       |\
-               ISR_GPHY_LINK   |\
-               ISR_TX_PKT      |\
-               GPHY_WAKEUP_INT)
+#define IMR_NORMAL_MASK                (ISR_ERROR | ISR_OVER | ISR_TX_PKT)
 
 #define ISR_TX_PKT      (\
        TX_PKT_INT      |\
        TX_PKT_INT2     |\
        TX_PKT_INT3)
 
-#define ISR_GPHY_LINK        (\
-       GPHY_LINK_UP_INT     |\
-       GPHY_LINK_DOWN_INT)
-
 #define ISR_OVER        (\
        RFD0_UR_INT     |\
        RFD1_UR_INT     |\
@@ -187,10 +171,6 @@ irqreturn_t emac_isr(int _irq, void *data)
        if (status & ISR_OVER)
                net_warn_ratelimited("warning: TX/RX overflow\n");
 
-       /* link event */
-       if (status & ISR_GPHY_LINK)
-               phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT));
-
 exit:
        /* enable the interrupt */
        writel(irq->mask, adpt->base + EMAC_INT_MASK);
index 3cd7989c007dfe46947e2ddb366a904f1af90198..784782da3a85b638e9e2a195fe66b15c72fe0fe5 100644 (file)
@@ -230,18 +230,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
        int ring_size;
        int i;
 
-       /* Free RX skb ringbuffer */
-       if (priv->rx_skb[q]) {
-               for (i = 0; i < priv->num_rx_ring[q]; i++)
-                       dev_kfree_skb(priv->rx_skb[q][i]);
-       }
-       kfree(priv->rx_skb[q]);
-       priv->rx_skb[q] = NULL;
-
-       /* Free aligned TX buffers */
-       kfree(priv->tx_align[q]);
-       priv->tx_align[q] = NULL;
-
        if (priv->rx_ring[q]) {
                for (i = 0; i < priv->num_rx_ring[q]; i++) {
                        struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
@@ -270,6 +258,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
                priv->tx_ring[q] = NULL;
        }
 
+       /* Free RX skb ringbuffer */
+       if (priv->rx_skb[q]) {
+               for (i = 0; i < priv->num_rx_ring[q]; i++)
+                       dev_kfree_skb(priv->rx_skb[q][i]);
+       }
+       kfree(priv->rx_skb[q]);
+       priv->rx_skb[q] = NULL;
+
+       /* Free aligned TX buffers */
+       kfree(priv->tx_align[q]);
+       priv->tx_align[q] = NULL;
+
        /* Free TX skb ringbuffer.
         * SKBs are freed by ravb_tx_free() call above.
         */
index 489ef146201e61c629c17010f672a621642e94b3..6a9c954492f225987d5dc63713034548e7aabdbb 100644 (file)
@@ -37,6 +37,7 @@
 #define TSE_PCS_CONTROL_AN_EN_MASK                     BIT(12)
 #define TSE_PCS_CONTROL_REG                            0x00
 #define TSE_PCS_CONTROL_RESTART_AN_MASK                        BIT(9)
+#define TSE_PCS_CTRL_AUTONEG_SGMII                     0x1140
 #define TSE_PCS_IF_MODE_REG                            0x28
 #define TSE_PCS_LINK_TIMER_0_REG                       0x24
 #define TSE_PCS_LINK_TIMER_1_REG                       0x26
@@ -65,6 +66,7 @@
 #define TSE_PCS_SW_RESET_TIMEOUT                       100
 #define TSE_PCS_USE_SGMII_AN_MASK                      BIT(1)
 #define TSE_PCS_USE_SGMII_ENA                          BIT(0)
+#define TSE_PCS_IF_USE_SGMII                           0x03
 
 #define SGMII_ADAPTER_CTRL_REG                         0x00
 #define SGMII_ADAPTER_DISABLE                          0x0001
@@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs)
 {
        int ret = 0;
 
-       writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG);
+       writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
+
+       writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
 
        writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
        writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
index a74c481401c46ee659b1244b0124966cabeafdbd..12236daf7bb6d5358fdafe50e37227e19b95bc33 100644 (file)
@@ -1208,7 +1208,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
        u32 rx_count = priv->plat->rx_queues_to_use;
        unsigned int bfsize = 0;
        int ret = -ENOMEM;
-       u32 queue;
+       int queue;
        int i;
 
        if (priv->hw->mode->set_16kib_bfsize)
@@ -2724,7 +2724,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
 
                priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
                        0, 1,
-                       (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
+                       (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
                        0, 0);
 
                tmp_len -= TSO_MAX_BUFF_SIZE;
@@ -2947,7 +2947,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        int i, csum_insertion = 0, is_jumbo = 0;
        u32 queue = skb_get_queue_mapping(skb);
        int nfrags = skb_shinfo(skb)->nr_frags;
-       unsigned int entry, first_entry;
+       int entry;
+       unsigned int first_entry;
        struct dma_desc *desc, *first;
        struct stmmac_tx_queue *tx_q;
        unsigned int enh_desc;
index 959fd12d2e670dfa52d7d9d11f835e990c82aa7c..6ebb0f559a427fdb4d27d9b668b46d7151650043 100644 (file)
@@ -1133,7 +1133,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
 
        /* make enough headroom for basic scenario */
        encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
-       if (ip_tunnel_info_af(info) == AF_INET) {
+       if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
                encap_len += sizeof(struct iphdr);
                dev->max_mtu -= sizeof(struct iphdr);
        } else {
index 8c3633c1d0789718fc528b9873e8295e6ab6b09e..97e3bc60c3e7d111184f4c60ce4afe50757d1398 100644 (file)
@@ -576,6 +576,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        case HDLCDRVCTL_CALIBRATE:
                if(!capable(CAP_SYS_RAWIO))
                        return -EPERM;
+               if (s->par.bitrate <= 0)
+                       return -EINVAL;
                if (bi.data.calibrate > INT_MAX / s->par.bitrate)
                        return -EINVAL;
                s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
index 9097e42bec2e42d8ee864edea4a6be5d8c0a92cc..57297ba239871c631baef1dcfc2c30a66fdf3bba 100644 (file)
@@ -1127,8 +1127,6 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
                if (adv < 0)
                        return adv;
 
-               lpa &= adv;
-
                if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
                        phydev->duplex = DUPLEX_FULL;
                else
index 8e73f5f36e7120a5aa28b6e0dfb992eca3330e3e..f99c21f78b639fc1e6b984a20383f62021b757eb 100644 (file)
@@ -658,6 +658,18 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
        return 0;
 }
 
+static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       int rc;
+
+       /* Some devices have extra OF data and an OF-style MODALIAS */
+       rc = of_device_uevent_modalias(dev, env);
+       if (rc != -ENODEV)
+               return rc;
+
+       return 0;
+}
+
 #ifdef CONFIG_PM
 static int mdio_bus_suspend(struct device *dev)
 {
@@ -708,6 +720,7 @@ static const struct dev_pm_ops mdio_bus_pm_ops = {
 struct bus_type mdio_bus_type = {
        .name           = "mdio_bus",
        .match          = mdio_bus_match,
+       .uevent         = mdio_uevent,
        .pm             = MDIO_BUS_PM_OPS,
 };
 EXPORT_SYMBOL(mdio_bus_type);
index 6a5fd18f062c4ea400bc8036d787a94fe5108a34..b9252b8d81ffb720272ca5f0b25910c021eb28a3 100644 (file)
@@ -268,23 +268,12 @@ out:
        return ret;
 }
 
-static int kszphy_config_init(struct phy_device *phydev)
+/* Some config bits need to be set again on resume, handle them here. */
+static int kszphy_config_reset(struct phy_device *phydev)
 {
        struct kszphy_priv *priv = phydev->priv;
-       const struct kszphy_type *type;
        int ret;
 
-       if (!priv)
-               return 0;
-
-       type = priv->type;
-
-       if (type->has_broadcast_disable)
-               kszphy_broadcast_disable(phydev);
-
-       if (type->has_nand_tree_disable)
-               kszphy_nand_tree_disable(phydev);
-
        if (priv->rmii_ref_clk_sel) {
                ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
                if (ret) {
@@ -295,11 +284,30 @@ static int kszphy_config_init(struct phy_device *phydev)
        }
 
        if (priv->led_mode >= 0)
-               kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
+               kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode);
 
        return 0;
 }
 
+static int kszphy_config_init(struct phy_device *phydev)
+{
+       struct kszphy_priv *priv = phydev->priv;
+       const struct kszphy_type *type;
+
+       if (!priv)
+               return 0;
+
+       type = priv->type;
+
+       if (type->has_broadcast_disable)
+               kszphy_broadcast_disable(phydev);
+
+       if (type->has_nand_tree_disable)
+               kszphy_nand_tree_disable(phydev);
+
+       return kszphy_config_reset(phydev);
+}
+
 static int ksz8041_config_init(struct phy_device *phydev)
 {
        struct device_node *of_node = phydev->mdio.dev.of_node;
@@ -700,8 +708,14 @@ static int kszphy_suspend(struct phy_device *phydev)
 
 static int kszphy_resume(struct phy_device *phydev)
 {
+       int ret;
+
        genphy_resume(phydev);
 
+       ret = kszphy_config_reset(phydev);
+       if (ret)
+               return ret;
+
        /* Enable PHY Interrupts */
        if (phy_interrupt_is_valid(phydev)) {
                phydev->interrupts = PHY_INTERRUPT_ENABLED;
index 82ab8fb82587553fefc1d05bbff06d4a76bc9679..7524caa0f29d9806e11826c7ecfd57842bff1822 100644 (file)
@@ -241,7 +241,7 @@ static const struct phy_setting settings[] = {
  * phy_lookup_setting - lookup a PHY setting
  * @speed: speed to match
  * @duplex: duplex to match
- * @feature: allowed link modes
+ * @features: allowed link modes
  * @exact: an exact match is required
  *
  * Search the settings array for a setting that matches the speed and
index 3e9246cc49c3784ebc045868a53318d35bf01075..a871f45ecc79a438b2b43465d3719f240ff25cb5 100644 (file)
@@ -869,7 +869,7 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
        unsigned int len;
 
        len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
-                               rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len);
+                               rq->min_buf_len, PAGE_SIZE - hdr_len);
        return ALIGN(len, L1_CACHE_BYTES);
 }
 
@@ -2144,7 +2144,8 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
        unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
        unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
 
-       return max(min_buf_len, hdr_len);
+       return max(max(min_buf_len, hdr_len) - hdr_len,
+                  (unsigned int)GOOD_PACKET_LEN);
 }
 
 static int virtnet_find_vqs(struct virtnet_info *vi)
index 328b4712683c334bf1de66a3a3789d0a04d734c3..a6b5052c1d36bb99260dd4232842fa9e8df2621c 100644 (file)
@@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2];
 
 static int vxlan_sock_add(struct vxlan_dev *vxlan);
 
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
+
 /* per-network namespace private data for this module */
 struct vxlan_net {
        struct list_head  vxlan_list;
@@ -740,6 +742,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
        call_rcu(&f->rcu, vxlan_fdb_free);
 }
 
+static void vxlan_dst_free(struct rcu_head *head)
+{
+       struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
+
+       dst_cache_destroy(&rd->dst_cache);
+       kfree(rd);
+}
+
+static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+                                 struct vxlan_rdst *rd)
+{
+       list_del_rcu(&rd->list);
+       vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+       call_rcu(&rd->rcu, vxlan_dst_free);
+}
+
 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
                           union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
                           __be32 *vni, u32 *ifindex)
@@ -864,9 +882,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
         * otherwise destroy the fdb entry
         */
        if (rd && !list_is_singular(&f->remotes)) {
-               list_del_rcu(&rd->list);
-               vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
-               kfree_rcu(rd, rcu);
+               vxlan_fdb_dst_destroy(vxlan, f, rd);
                goto out;
        }
 
@@ -1067,6 +1083,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
        rcu_assign_pointer(vxlan->vn4_sock, NULL);
        synchronize_net();
 
+       vxlan_vs_del_dev(vxlan);
+
        if (__vxlan_sock_release_prep(sock4)) {
                udp_tunnel_sock_release(sock4->sock);
                kfree(sock4);
@@ -2342,6 +2360,15 @@ static void vxlan_cleanup(unsigned long arg)
        mod_timer(&vxlan->age_timer, next_timer);
 }
 
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
+{
+       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+
+       spin_lock(&vn->sock_lock);
+       hlist_del_init_rcu(&vxlan->hlist);
+       spin_unlock(&vn->sock_lock);
+}
+
 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
 {
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
@@ -3286,15 +3313,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 
        vxlan_flush(vxlan, true);
 
-       spin_lock(&vn->sock_lock);
-       if (!hlist_unhashed(&vxlan->hlist))
-               hlist_del_rcu(&vxlan->hlist);
-       spin_unlock(&vn->sock_lock);
-
        gro_cells_destroy(&vxlan->gro_cells);
        list_del(&vxlan->next);
        unregister_netdevice_queue(dev, head);
index d5e993dc9b238c6fd306b801083346570eec8790..517a315e259b79f05f2d8d3e88c371a9f39ad9e9 100644 (file)
@@ -1271,6 +1271,8 @@ static int wcn36xx_remove(struct platform_device *pdev)
        qcom_smem_state_put(wcn->tx_enable_state);
        qcom_smem_state_put(wcn->tx_rings_empty_state);
 
+       rpmsg_destroy_ept(wcn->smd_channel);
+
        iounmap(wcn->dxe_base);
        iounmap(wcn->ccu_base);
 
index fc64b8913aa6a11c0111fec3b9d900174dc250c3..e03450059b06c0bfe510148f985c19668bcd3dff 100644 (file)
@@ -3422,7 +3422,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
                /* otherwise, set txglomalign */
                value = sdiodev->settings->bus.sdio.sd_sgentry_align;
                /* SDIO ADMA requires at least 32 bit alignment */
-               value = max_t(u32, value, 4);
+               value = max_t(u32, value, ALIGNMENT);
                err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value,
                                           sizeof(u32));
        }
index 3b3e076571d6d7089ae2df09d2dfbd2008cf81e4..45e2efc70d19e5f44c7a5e2a1cde2cf9a7448f91 100644 (file)
@@ -79,8 +79,8 @@
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  17
 #define IWL7265_UCODE_API_MIN  17
-#define IWL7265D_UCODE_API_MIN 17
-#define IWL3168_UCODE_API_MIN  20
+#define IWL7265D_UCODE_API_MIN 22
+#define IWL3168_UCODE_API_MIN  22
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
index b9718c0cf17480dc4c1ab212fdbfabecb3125775..89137717c1fce778d63a55af99f16f970f3b34d6 100644 (file)
@@ -74,8 +74,8 @@
 #define IWL8265_UCODE_API_MAX  30
 
 /* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN  17
-#define IWL8265_UCODE_API_MIN  20
+#define IWL8000_UCODE_API_MIN  22
+#define IWL8265_UCODE_API_MIN  22
 
 /* NVM versions */
 #define IWL8000_NVM_VERSION            0x0a1d
index 306bc967742ee9a7b5629bab00f20cab77309fa1..77efbb78e867bfac47ae992c2703275e1b5062b2 100644 (file)
 #define MON_DMARB_RD_DATA_ADDR         (0xa03c5c)
 
 #define DBGC_IN_SAMPLE                 (0xa03c00)
+#define DBGC_OUT_CTRL                  (0xa03c0c)
 
 /* enable the ID buf for read */
 #define WFPM_PS_CTL_CLR                        0xA0300C
index 1b7d265ffb0acb476228b5b2a10040c09e41d61c..a10c6aae9ab98de7c752b05b9cc33059337b1410 100644 (file)
@@ -307,6 +307,11 @@ enum {
 /* Bit 1-3: LQ command color. Used to match responses to LQ commands */
 #define LQ_FLAG_COLOR_POS               1
 #define LQ_FLAG_COLOR_MSK               (7 << LQ_FLAG_COLOR_POS)
+#define LQ_FLAG_COLOR_GET(_f)          (((_f) & LQ_FLAG_COLOR_MSK) >>\
+                                        LQ_FLAG_COLOR_POS)
+#define LQ_FLAGS_COLOR_INC(_c)         ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\
+                                        LQ_FLAG_COLOR_MSK)
+#define LQ_FLAG_COLOR_SET(_f, _c)      ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))
 
 /* Bit 4-5: Tx RTS BW Signalling
  * (0) No RTS BW signalling
index 81b98915b1a42e21a318d293c459015688489c7f..1360ebfdc51bc6475c2c68301781b2296bb03369 100644 (file)
@@ -519,8 +519,11 @@ struct agg_tx_status {
  * bit-7 invalid rate indication
  */
 #define TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define TX_RES_RATE_TABLE_COLOR_POS 4
 #define TX_RES_RATE_TABLE_COLOR_MSK 0x70
 #define TX_RES_INV_RATE_INDEX_MSK 0x80
+#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
+                                      TX_RES_RATE_TABLE_COLOR_POS)
 
 #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
 #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
index 7b86a4f1b574c6f507fbde87c1276241fd534a4a..c8712e6eea74187af9c0d8ef3a73622ab7d1d5fd 100644 (file)
@@ -1002,14 +1002,6 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm,
        return 0;
 }
 
-static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm)
-{
-       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
-               iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
-       else
-               iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1);
-}
-
 int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
 {
        u8 *ptr;
@@ -1023,10 +1015,8 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id)
        /* EARLY START - firmware's configuration is hard coded */
        if ((!mvm->fw->dbg_conf_tlv[conf_id] ||
             !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
-           conf_id == FW_DBG_START_FROM_ALIVE) {
-               iwl_mvm_restart_early_start(mvm);
+           conf_id == FW_DBG_START_FROM_ALIVE)
                return 0;
-       }
 
        if (!mvm->fw->dbg_conf_tlv[conf_id])
                return -EINVAL;
index 0f1831b419159b606967889ff8ea20c7ef86f0f2..fd2fc46e2fe51d8e8f1930af05e40ee9400c84a6 100644 (file)
@@ -1040,7 +1040,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
                struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
                struct iwl_mac_beacon_cmd_v7 beacon_cmd;
        } u = {};
-       struct iwl_mac_beacon_cmd beacon_cmd;
+       struct iwl_mac_beacon_cmd beacon_cmd = {};
        struct ieee80211_tx_info *info;
        u32 beacon_skb_len;
        u32 rate, tx_flags;
index 4e74a6b90e70626d6e0e8bb8092796f078c7d55d..52f8d7a6a7dcec95d32089a11c7c9e37b53748a7 100644 (file)
@@ -1730,8 +1730,11 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
  */
 static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
 {
+       u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE :
+               IWL_MVM_CMD_QUEUE;
+
        return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
-               ~BIT(IWL_MVM_CMD_QUEUE));
+               ~BIT(cmd_queue));
 }
 
 static inline
@@ -1753,6 +1756,7 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
        if (!iwl_mvm_has_new_tx_api(mvm))
                iwl_free_fw_paging(mvm);
        mvm->ucode_loaded = false;
+       mvm->fw_dbg_conf = FW_DBG_INVALID;
        iwl_trans_stop_device(mvm->trans);
 }
 
index 9ffff6ed813386418800cf38906b91989c5b6f52..3da5ec40aaead90731224bce3686071ae9dd9344 100644 (file)
@@ -1149,21 +1149,37 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
 
        mutex_lock(&mvm->mutex);
 
-       /* stop recording */
        if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               /* stop recording */
                iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+
+               iwl_mvm_fw_error_dump(mvm);
+
+               /* start recording again if the firmware is not crashed */
+               if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+                   mvm->fw->dbg_dest_tlv)
+                       iwl_clear_bits_prph(mvm->trans,
+                                           MON_BUFF_SAMPLE_CTL, 0x100);
        } else {
+               u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE);
+               u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL);
+
+               /* stop recording */
                iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
-               /* wait before we collect the data till the DBGC stop */
                udelay(100);
-       }
+               iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0);
+               /* wait before we collect the data till the DBGC stop */
+               udelay(500);
 
-       iwl_mvm_fw_error_dump(mvm);
+               iwl_mvm_fw_error_dump(mvm);
 
-       /* start recording again if the firmware is not crashed */
-       WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) &&
-                    mvm->fw->dbg_dest_tlv &&
-                    iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf));
+               /* start recording again if the firmware is not crashed */
+               if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) &&
+                   mvm->fw->dbg_dest_tlv) {
+                       iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample);
+                       iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl);
+               }
+       }
 
        mutex_unlock(&mvm->mutex);
 
index 7788eefcd2bdd3066c0b40d5f5575e44f2102c0e..aa785cf3cf68399eb724b33d24d9168602f791a1 100644 (file)
@@ -2,7 +2,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -1083,34 +1083,6 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
                rs_get_lower_rate_in_column(lq_sta, rate);
 }
 
-/* Check if both rates are identical
- * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B
- * with a rate indicating STBC/BFER and ANT_AB.
- */
-static inline bool rs_rate_equal(struct rs_rate *a,
-                                struct rs_rate *b,
-                                bool allow_ant_mismatch)
-
-{
-       bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) &&
-               (a->bfer == b->bfer);
-
-       if (allow_ant_mismatch) {
-               if (a->stbc || a->bfer) {
-                       WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d",
-                                 a->stbc, a->bfer, a->ant);
-                       ant_match |= (b->ant == ANT_A || b->ant == ANT_B);
-               } else if (b->stbc || b->bfer) {
-                       WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d",
-                                 b->stbc, b->bfer, b->ant);
-                       ant_match |= (a->ant == ANT_A || a->ant == ANT_B);
-               }
-       }
-
-       return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) &&
-               (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match;
-}
-
 /* Check if both rates share the same column */
 static inline bool rs_rate_column_match(struct rs_rate *a,
                                        struct rs_rate *b)
@@ -1182,12 +1154,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        u32 lq_hwrate;
        struct rs_rate lq_rate, tx_resp_rate;
        struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
-       u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0];
+       u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
+       u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
+       u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
        u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
-       bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa,
-                                            IWL_UCODE_TLV_API_LQ_SS_PARAMS);
 
        /* Treat uninitialized rate scaling data same as non-existing. */
        if (!lq_sta) {
@@ -1262,10 +1234,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate);
 
        /* Here we actually compare this rate to the latest LQ command */
-       if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) {
+       if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
                IWL_DEBUG_RATE(mvm,
-                              "initial tx resp rate 0x%x does not match 0x%x\n",
-                              tx_resp_hwrate, lq_hwrate);
+                              "tx resp color 0x%x does not match 0x%x\n",
+                              lq_color, LQ_FLAG_COLOR_GET(table->flags));
 
                /*
                 * Since rates mis-match, the last LQ command may have failed.
@@ -3326,6 +3298,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
        u8 valid_tx_ant = 0;
        struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
        bool toggle_ant = false;
+       u32 color;
 
        memcpy(&rate, initial_rate, sizeof(rate));
 
@@ -3380,6 +3353,9 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
                                 num_rates, num_retries, valid_tx_ant,
                                 toggle_ant);
 
+       /* update the color of the LQ command (as a counter at bits 1-3) */
+       color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags));
+       lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color);
 }
 
 struct rs_bfer_active_iter_data {
index ee207f2c0a90c797e84659473f0bd1a455b00923..3abde1cb03034f9068230420072961371a187262 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -357,6 +358,20 @@ struct iwl_lq_sta {
        } pers;
 };
 
+/* ieee80211_tx_info's status_driver_data[0] is packed with lq color and txp
+ * Note, it's iwlmvm <-> mac80211 interface.
+ * bits 0-7: reduced tx power
+ * bits 8-10: LQ command's color
+ */
+#define RS_DRV_DATA_TXP_MSK 0xff
+#define RS_DRV_DATA_LQ_COLOR_POS 8
+#define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\
+                                     RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\
+                                 (((uintptr_t)_p) |\
+                                  ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))
+
 /* Initialize station's rate scaling information after adding station */
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                          enum nl80211_band band, bool init);
index f5c786ddc52631087b56067807bb4d48e0869664..614d67810d051c539bd093b9452c64dc4e9c8a2f 100644 (file)
@@ -2120,7 +2120,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        if (!iwl_mvm_is_dqa_supported(mvm))
                return 0;
 
-       if (WARN_ON(vif->type != NL80211_IFTYPE_AP))
+       if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+                   vif->type != NL80211_IFTYPE_ADHOC))
                return -ENOTSUPP;
 
        /*
@@ -2155,6 +2156,16 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                mvmvif->cab_queue = queue;
        } else if (!fw_has_api(&mvm->fw->ucode_capa,
                               IWL_UCODE_TLV_API_STA_TYPE)) {
+               /*
+                * In IBSS, ieee80211_check_queues() sets the cab_queue to be
+                * invalid, so make sure we use the queue we want.
+                * Note that this is done here as we want to avoid making DQA
+                * changes in mac80211 layer.
+                */
+               if (vif->type == NL80211_IFTYPE_ADHOC) {
+                       vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+                       mvmvif->cab_queue = vif->cab_queue;
+               }
                iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
                                   &cfg, timeout);
        }
@@ -3321,18 +3332,15 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 
        /* Get the station from the mvm local station table */
        mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
-       if (!mvm_sta) {
-               IWL_ERR(mvm, "Failed to find station\n");
-               return -EINVAL;
-       }
-       sta_id = mvm_sta->sta_id;
+       if (mvm_sta)
+               sta_id = mvm_sta->sta_id;
 
        IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
                      keyconf->keyidx, sta_id);
 
-       if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-           keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-           keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+       if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+                       keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+                       keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
                return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
 
        if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
index 2716cb5483bf5ab9851e7dbf1e5af04b887296e7..ad62b67dceb2836cefe354fd69af60aedc856f8e 100644 (file)
@@ -313,6 +313,7 @@ enum iwl_mvm_agg_state {
  *     This is basically (last acked packet++).
  * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
  *     Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @lq_color: the color of the LQ command as it appears in tx response.
  * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
  * @state: state of the BA agreement establishment / tear down.
  * @txq_id: Tx queue used by the BA session / DQA
@@ -331,6 +332,7 @@ struct iwl_mvm_tid_data {
        u16 next_reclaimed;
        /* The rest is Tx AGG related */
        u32 rate_n_flags;
+       u8 lq_color;
        bool amsdu_in_ampdu_allowed;
        enum iwl_mvm_agg_state state;
        u16 txq_id;
index f9cbd197246f7ba6ba9e5e0af816cbec54eb9790..506d58104e1cc007ba9d767a16dd77f9df8f0481 100644 (file)
@@ -790,11 +790,13 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
        struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
        int ret;
 
-       if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
-               return -EIO;
-
        mutex_lock(&mvm->mutex);
 
+       if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) {
+               ret = -EIO;
+               goto unlock;
+       }
+
        if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
                ret = -EINVAL;
                goto unlock;
index bcaceb64a6e8c230c5127ee5cc4b790b1f4f1d57..f21901cd4a4fdf75dac1b55d7b9525e84f9e65dd 100644 (file)
@@ -1323,6 +1323,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
        struct iwl_mvm_sta *mvmsta;
        struct sk_buff_head skbs;
        u8 skb_freed = 0;
+       u8 lq_color;
        u16 next_reclaimed, seq_ctl;
        bool is_ndp = false;
 
@@ -1405,8 +1406,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                info->status.tx_time =
                        le16_to_cpu(tx_resp->wireless_media_time);
                BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+               lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
                info->status.status_driver_data[0] =
-                               (void *)(uintptr_t)tx_resp->reduced_tpc;
+                       RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
 
                ieee80211_tx_status(mvm->hw, skb);
        }
@@ -1638,6 +1640,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
                        le32_to_cpu(tx_resp->initial_rate);
                mvmsta->tid_data[tid].tx_time =
                        le16_to_cpu(tx_resp->wireless_media_time);
+               mvmsta->tid_data[tid].lq_color =
+                       (tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >>
+                       TX_RES_RATE_TABLE_COLOR_POS;
        }
 
        rcu_read_unlock();
@@ -1707,6 +1712,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
        iwl_mvm_check_ratid_empty(mvm, sta, tid);
 
        freed = 0;
+
+       /* pack lq color from tid_data along the reduced txp */
+       ba_info->status.status_driver_data[0] =
+               RS_DRV_DATA_PACK(tid_data->lq_color,
+                                ba_info->status.status_driver_data[0]);
        ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
 
        skb_queue_walk(&reclaimed_skbs, skb) {
index 70acf850a9f19f9750296ae2019d8f4a8c277b67..93cbc7a69bcd55d3560529c88b6a127451606cd7 100644 (file)
@@ -2803,7 +2803,8 @@ static struct iwl_trans_dump_data
 #ifdef CONFIG_PM_SLEEP
 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 {
-       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+           (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
                return iwl_pci_fw_enter_d0i3(trans);
 
        return 0;
@@ -2811,7 +2812,8 @@ static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
 
 static void iwl_trans_pcie_resume(struct iwl_trans *trans)
 {
-       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+           (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
                iwl_pci_fw_exit_d0i3(trans);
 }
 #endif /* CONFIG_PM_SLEEP */
index 9fb46a6f47cf416e55d4416b228be50cdd460e4e..9c9bfbbabdf11ee597dfa7a3614e59d2e49236fe 100644 (file)
@@ -906,7 +906,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 
        if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
                ret = -EINVAL;
-               goto error;
+               goto error_free_resp;
        }
 
        rsp = (void *)hcmd.resp_pkt->data;
@@ -915,13 +915,13 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
        if (qid > ARRAY_SIZE(trans_pcie->txq)) {
                WARN_ONCE(1, "queue index %d unsupported", qid);
                ret = -EIO;
-               goto error;
+               goto error_free_resp;
        }
 
        if (test_and_set_bit(qid, trans_pcie->queue_used)) {
                WARN_ONCE(1, "queue %d already used", qid);
                ret = -EIO;
-               goto error;
+               goto error_free_resp;
        }
 
        txq->id = qid;
@@ -934,8 +934,11 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
                           (txq->write_ptr) | (qid << 16));
        IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
 
+       iwl_free_resp(&hcmd);
        return qid;
 
+error_free_resp:
+       iwl_free_resp(&hcmd);
 error:
        iwl_pcie_gen2_txq_free_memory(trans, txq);
        return ret;
index 822198a75e96a599a7c9c55e14f61164bf35ba77..79eb9fb358d5315c677d580572866518b0c74f57 100644 (file)
@@ -186,7 +186,7 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
         * another kernel subsystem, and we just pass it through.
         */
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
                goto out;
        }
 
@@ -205,7 +205,7 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
                                        "io error in %s sector %lld, len %d,\n",
                                        (rw == READ) ? "READ" : "WRITE",
                                        (unsigned long long) iter.bi_sector, len);
-                       bio->bi_error = err;
+                       bio->bi_status = errno_to_blk_status(err);
                        break;
                }
        }
index 983718b8fd9b44f9ab5af4dff0129fccc085689d..31b2d14e210d5ee139c0e934ce3a9d4de021a9a0 100644 (file)
@@ -1210,7 +1210,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
         * another kernel subsystem, and we just pass it through.
         */
        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
                goto out;
        }
 
@@ -1232,7 +1232,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
                                        (op_is_write(bio_op(bio))) ? "WRITE" :
                                        "READ",
                                        (unsigned long long) iter.bi_sector, len);
-                       bio->bi_error = err;
+                       bio->bi_status = errno_to_blk_status(err);
                        break;
                }
        }
index c544d466ea51071a3c09a53544df61d8a1bae759..7bd383aeea14df8c95254aa3b440a81319d72461 100644 (file)
@@ -49,19 +49,19 @@ static struct nd_region *to_region(struct pmem_device *pmem)
        return to_nd_region(to_dev(pmem)->parent);
 }
 
-static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
-               unsigned int len)
+static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
+               phys_addr_t offset, unsigned int len)
 {
        struct device *dev = to_dev(pmem);
        sector_t sector;
        long cleared;
-       int rc = 0;
+       blk_status_t rc = BLK_STS_OK;
 
        sector = (offset - pmem->data_offset) / 512;
 
        cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
        if (cleared < len)
-               rc = -EIO;
+               rc = BLK_STS_IOERR;
        if (cleared > 0 && cleared / 512) {
                cleared /= 512;
                dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
@@ -84,7 +84,7 @@ static void write_pmem(void *pmem_addr, struct page *page,
        kunmap_atomic(mem);
 }
 
-static int read_pmem(struct page *page, unsigned int off,
+static blk_status_t read_pmem(struct page *page, unsigned int off,
                void *pmem_addr, unsigned int len)
 {
        int rc;
@@ -93,15 +93,15 @@ static int read_pmem(struct page *page, unsigned int off,
        rc = memcpy_mcsafe(mem + off, pmem_addr, len);
        kunmap_atomic(mem);
        if (rc)
-               return -EIO;
-       return 0;
+               return BLK_STS_IOERR;
+       return BLK_STS_OK;
 }
 
-static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
+static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
                        unsigned int len, unsigned int off, bool is_write,
                        sector_t sector)
 {
-       int rc = 0;
+       blk_status_t rc = BLK_STS_OK;
        bool bad_pmem = false;
        phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
        void *pmem_addr = pmem->virt_addr + pmem_off;
@@ -111,7 +111,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 
        if (!is_write) {
                if (unlikely(bad_pmem))
-                       rc = -EIO;
+                       rc = BLK_STS_IOERR;
                else {
                        rc = read_pmem(page, off, pmem_addr, len);
                        flush_dcache_page(page);
@@ -149,7 +149,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 
 static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 {
-       int rc = 0;
+       blk_status_t rc = 0;
        bool do_acct;
        unsigned long start;
        struct bio_vec bvec;
@@ -166,7 +166,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
                                bvec.bv_offset, op_is_write(bio_op(bio)),
                                iter.bi_sector);
                if (rc) {
-                       bio->bi_error = rc;
+                       bio->bi_status = rc;
                        break;
                }
        }
@@ -184,7 +184,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, bool is_write)
 {
        struct pmem_device *pmem = bdev->bd_queue->queuedata;
-       int rc;
+       blk_status_t rc;
 
        rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);
 
@@ -197,7 +197,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
        if (rc == 0)
                page_endio(page, is_write, 0);
 
-       return rc;
+       return blk_status_to_errno(rc);
 }
 
 /* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
index a60926410438b98c2e414de081f7c8093bac5862..032cce3311e747a8696706946990e7550632eea3 100644 (file)
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(max_retries, "max number of retries a command may have");
 static int nvme_char_major;
 module_param(nvme_char_major, int, 0);
 
-static unsigned long default_ps_max_latency_us = 25000;
+static unsigned long default_ps_max_latency_us = 100000;
 module_param(default_ps_max_latency_us, ulong, 0644);
 MODULE_PARM_DESC(default_ps_max_latency_us,
                 "max power saving latency for new devices; use PM QOS to change per device");
@@ -70,29 +70,21 @@ static DEFINE_SPINLOCK(dev_list_lock);
 
 static struct class *nvme_class;
 
-static int nvme_error_status(struct request *req)
+static blk_status_t nvme_error_status(struct request *req)
 {
        switch (nvme_req(req)->status & 0x7ff) {
        case NVME_SC_SUCCESS:
-               return 0;
+               return BLK_STS_OK;
        case NVME_SC_CAP_EXCEEDED:
-               return -ENOSPC;
-       default:
-               return -EIO;
-
-       /*
-        * XXX: these errors are a nasty side-band protocol to
-        * drivers/md/dm-mpath.c:noretry_error() that aren't documented
-        * anywhere..
-        */
-       case NVME_SC_CMD_SEQ_ERROR:
-               return -EILSEQ;
+               return BLK_STS_NOSPC;
        case NVME_SC_ONCS_NOT_SUPPORTED:
-               return -EOPNOTSUPP;
+               return BLK_STS_NOTSUPP;
        case NVME_SC_WRITE_FAULT:
        case NVME_SC_READ_ERROR:
        case NVME_SC_UNWRITTEN_BLOCK:
-               return -ENODATA;
+               return BLK_STS_MEDIUM;
+       default:
+               return BLK_STS_IOERR;
        }
 }
 
@@ -291,7 +283,7 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
        cmnd->common.nsid = cpu_to_le32(ns->ns_id);
 }
 
-static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmnd)
 {
        unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
@@ -300,7 +292,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 
        range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
 
        __rq_for_each_bio(bio, req) {
                u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
@@ -314,7 +306,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 
        if (WARN_ON_ONCE(n != segments)) {
                kfree(range);
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
        }
 
        memset(cmnd, 0, sizeof(*cmnd));
@@ -328,7 +320,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
@@ -372,10 +364,10 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 }
 
-int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd)
 {
-       int ret = BLK_MQ_RQ_QUEUE_OK;
+       blk_status_t ret = BLK_STS_OK;
 
        if (!(req->rq_flags & RQF_DONTPREP)) {
                nvme_req(req)->retries = 0;
@@ -402,7 +394,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                break;
        default:
                WARN_ON_ONCE(1);
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
        }
 
        cmd->common.command_id = req->tag;
@@ -555,15 +547,16 @@ int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                        result, timeout);
 }
 
-static void nvme_keep_alive_end_io(struct request *rq, int error)
+static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 {
        struct nvme_ctrl *ctrl = rq->end_io_data;
 
        blk_mq_free_request(rq);
 
-       if (error) {
+       if (status) {
                dev_err(ctrl->device,
-                       "failed nvme_keep_alive_end_io error=%d\n", error);
+                       "failed nvme_keep_alive_end_io error=%d\n",
+                               status);
                return;
        }
 
@@ -1342,7 +1335,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
         * transitioning between power states.  Therefore, when running
         * in any given state, we will enter the next lower-power
         * non-operational state after waiting 50 * (enlat + exlat)
-        * microseconds, as long as that state's total latency is under
+        * microseconds, as long as that state's exit latency is under
         * the requested maximum latency.
         *
         * We will not autonomously enter any non-operational state for
@@ -1387,7 +1380,7 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
                 * lowest-power state, not the number of states.
                 */
                for (state = (int)ctrl->npss; state >= 0; state--) {
-                       u64 total_latency_us, transition_ms;
+                       u64 total_latency_us, exit_latency_us, transition_ms;
 
                        if (target)
                                table->entries[state] = target;
@@ -1408,12 +1401,15 @@ static void nvme_configure_apst(struct nvme_ctrl *ctrl)
                              NVME_PS_FLAGS_NON_OP_STATE))
                                continue;
 
-                       total_latency_us =
-                               (u64)le32_to_cpu(ctrl->psd[state].entry_lat) +
-                               + le32_to_cpu(ctrl->psd[state].exit_lat);
-                       if (total_latency_us > ctrl->ps_max_latency_us)
+                       exit_latency_us =
+                               (u64)le32_to_cpu(ctrl->psd[state].exit_lat);
+                       if (exit_latency_us > ctrl->ps_max_latency_us)
                                continue;
 
+                       total_latency_us =
+                               exit_latency_us +
+                               le32_to_cpu(ctrl->psd[state].entry_lat);
+
                        /*
                         * This state is good.  Use it as the APST idle
                         * target for higher power states.
@@ -2438,6 +2434,10 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
        struct nvme_ns *ns;
 
        mutex_lock(&ctrl->namespaces_mutex);
+
+       /* Forcibly start all queues to avoid having stuck requests */
+       blk_mq_start_hw_queues(ctrl->admin_q);
+
        list_for_each_entry(ns, &ctrl->namespaces, list) {
                /*
                 * Revalidating a dead namespace sets capacity to 0. This will
index 96b983bb44bd166bc3a6406d8d64edc3ac3fd608..1df653ae3638a5d764c9699eb554c85ac182fd4f 100644 (file)
@@ -1138,6 +1138,7 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 /* *********************** NVME Ctrl Routines **************************** */
 
 static void __nvme_fc_final_op_cleanup(struct request *rq);
+static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
 
 static int
 nvme_fc_reinit_request(void *data, struct request *rq)
@@ -1264,7 +1265,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
        struct nvme_command *sqe = &op->cmd_iu.sqe;
        __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
        union nvme_result result;
-       bool complete_rq;
+       bool complete_rq, terminate_assoc = true;
 
        /*
         * WARNING:
@@ -1293,6 +1294,14 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
         * fabricate a CQE, the following fields will not be set as they
         * are not referenced:
         *      cqe.sqid,  cqe.sqhd,  cqe.command_id
+        *
+        * Failure or error of an individual i/o, in a transport
+        * detected fashion unrelated to the nvme completion status,
+        * potentially cause the initiator and target sides to get out
+        * of sync on SQ head/tail (aka outstanding io count allowed).
+        * Per FC-NVME spec, failure of an individual command requires
+        * the connection to be terminated, which in turn requires the
+        * association to be terminated.
         */
 
        fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
@@ -1358,6 +1367,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
                goto done;
        }
 
+       terminate_assoc = false;
+
 done:
        if (op->flags & FCOP_FLAGS_AEN) {
                nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
@@ -1365,7 +1376,7 @@ done:
                atomic_set(&op->state, FCPOP_STATE_IDLE);
                op->flags = FCOP_FLAGS_AEN;     /* clear other flags */
                nvme_fc_ctrl_put(ctrl);
-               return;
+               goto check_error;
        }
 
        complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
@@ -1378,6 +1389,10 @@ done:
                nvme_end_request(rq, status, result);
        } else
                __nvme_fc_final_op_cleanup(rq);
+
+check_error:
+       if (terminate_assoc)
+               nvme_fc_error_recovery(ctrl, "transport detected io error");
 }
 
 static int
@@ -1872,7 +1887,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
  * level FC exchange resource that is also outstanding. This must be
  * considered in all cleanup operations.
  */
-static int
+static blk_status_t
 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
        struct nvme_fc_fcp_op *op, u32 data_len,
        enum nvmefc_fcp_datadir io_dir)
@@ -1887,10 +1902,10 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
         * the target device is present
         */
        if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
 
        if (!nvme_fc_ctrl_get(ctrl))
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
 
        /* format the FC-NVME CMD IU and fcp_req */
        cmdiu->connection_id = cpu_to_be64(queue->connection_id);
@@ -1938,8 +1953,9 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                if (ret < 0) {
                        nvme_cleanup_cmd(op->rq);
                        nvme_fc_ctrl_put(ctrl);
-                       return (ret == -ENOMEM || ret == -EAGAIN) ?
-                               BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+                       if (ret == -ENOMEM || ret == -EAGAIN)
+                               return BLK_STS_RESOURCE;
+                       return BLK_STS_IOERR;
                }
        }
 
@@ -1965,19 +1981,19 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                nvme_fc_ctrl_put(ctrl);
 
                if (ret != -EBUSY)
-                       return BLK_MQ_RQ_QUEUE_ERROR;
+                       return BLK_STS_IOERR;
 
                if (op->rq) {
                        blk_mq_stop_hw_queues(op->rq->q);
                        blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
                }
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
        }
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
-static int
+static blk_status_t
 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
                        const struct blk_mq_queue_data *bd)
 {
@@ -1990,7 +2006,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_command *sqe = &cmdiu->sqe;
        enum nvmefc_fcp_datadir io_dir;
        u32 data_len;
-       int ret;
+       blk_status_t ret;
 
        ret = nvme_setup_cmd(ns, rq, sqe);
        if (ret)
@@ -2045,7 +2061,7 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
        struct nvme_fc_fcp_op *aen_op;
        unsigned long flags;
        bool terminating = false;
-       int ret;
+       blk_status_t ret;
 
        if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
                return;
@@ -2790,6 +2806,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
                ctrl->ctrl.opts = NULL;
                /* initiate nvme ctrl ref counting teardown */
                nvme_uninit_ctrl(&ctrl->ctrl);
+               nvme_put_ctrl(&ctrl->ctrl);
 
                /* as we're past the point where we transition to the ref
                 * counting teardown path, if we return a bad pointer here,
index f5df78ed1e10974ffb9e239b57ce260f0bb5bae9..2d7a2889866f09234874ac1a9b133dec5054ca29 100644 (file)
@@ -480,7 +480,7 @@ static inline void nvme_nvm_rqtocmd(struct nvm_rq *rqd, struct nvme_ns *ns,
                                        rqd->bio->bi_iter.bi_sector));
 }
 
-static void nvme_nvm_end_io(struct request *rq, int error)
+static void nvme_nvm_end_io(struct request *rq, blk_status_t status)
 {
        struct nvm_rq *rqd = rq->end_io_data;
 
@@ -571,13 +571,6 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
        .max_phys_sect          = 64,
 };
 
-static void nvme_nvm_end_user_vio(struct request *rq, int error)
-{
-       struct completion *waiting = rq->end_io_data;
-
-       complete(waiting);
-}
-
 static int nvme_nvm_submit_user_cmd(struct request_queue *q,
                                struct nvme_ns *ns,
                                struct nvme_nvm_command *vcmd,
@@ -608,7 +601,6 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
        rq->timeout = timeout ? timeout : ADMIN_TIMEOUT;
 
        rq->cmd_flags &= ~REQ_FAILFAST_DRIVER;
-       rq->end_io_data = &wait;
 
        if (ppa_buf && ppa_len) {
                ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);
@@ -662,9 +654,7 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
        }
 
 submit:
-       blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_user_vio);
-
-       wait_for_completion_io(&wait);
+       blk_execute_rq(q, NULL, rq, 0);
 
        if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
                ret = -EINTR;
index 9d6a070d43914dcc5388b2a7a03f477a00b8bd1f..22ee60b2a3e8501c5becdacd62ace48559e561b4 100644 (file)
@@ -296,7 +296,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, unsigned int flags, int qid);
-int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                void *buf, unsigned bufflen);
index d52701df72457d0fa2b85a168c500fd022b8b717..f4b6ed9bccd0285ce0349a00c995c1bdce010cf1 100644 (file)
@@ -427,7 +427,7 @@ static __le64 **iod_list(struct request *req)
        return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
-static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
+static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
        int nseg = blk_rq_nr_phys_segments(rq);
@@ -436,7 +436,7 @@ static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
        if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
                iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
                if (!iod->sg)
-                       return BLK_MQ_RQ_QUEUE_BUSY;
+                       return BLK_STS_RESOURCE;
        } else {
                iod->sg = iod->inline_sg;
        }
@@ -446,7 +446,7 @@ static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
        iod->nents = 0;
        iod->length = size;
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
@@ -616,21 +616,21 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
        return true;
 }
 
-static int nvme_map_data(struct nvme_dev *dev, struct request *req,
+static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                struct nvme_command *cmnd)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct request_queue *q = req->q;
        enum dma_data_direction dma_dir = rq_data_dir(req) ?
                        DMA_TO_DEVICE : DMA_FROM_DEVICE;
-       int ret = BLK_MQ_RQ_QUEUE_ERROR;
+       blk_status_t ret = BLK_STS_IOERR;
 
        sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
        iod->nents = blk_rq_map_sg(q, req, iod->sg);
        if (!iod->nents)
                goto out;
 
-       ret = BLK_MQ_RQ_QUEUE_BUSY;
+       ret = BLK_STS_RESOURCE;
        if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
                                DMA_ATTR_NO_WARN))
                goto out;
@@ -638,7 +638,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
        if (!nvme_setup_prps(dev, req))
                goto out_unmap;
 
-       ret = BLK_MQ_RQ_QUEUE_ERROR;
+       ret = BLK_STS_IOERR;
        if (blk_integrity_rq(req)) {
                if (blk_rq_count_integrity_sg(q, req->bio) != 1)
                        goto out_unmap;
@@ -658,7 +658,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
        cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
        if (blk_integrity_rq(req))
                cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 
 out_unmap:
        dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
@@ -688,7 +688,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 /*
  * NOTE: ns is NULL when called on the admin queue.
  */
-static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
 {
        struct nvme_ns *ns = hctx->queue->queuedata;
@@ -696,7 +696,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_dev *dev = nvmeq->dev;
        struct request *req = bd->rq;
        struct nvme_command cmnd;
-       int ret = BLK_MQ_RQ_QUEUE_OK;
+       blk_status_t ret = BLK_STS_OK;
 
        /*
         * If formated with metadata, require the block layer provide a buffer
@@ -705,38 +705,36 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
         */
        if (ns && ns->ms && !blk_integrity_rq(req)) {
                if (!(ns->pi_type && ns->ms == 8) &&
-                   !blk_rq_is_passthrough(req)) {
-                       blk_mq_end_request(req, -EFAULT);
-                       return BLK_MQ_RQ_QUEUE_OK;
-               }
+                   !blk_rq_is_passthrough(req))
+                       return BLK_STS_NOTSUPP;
        }
 
        ret = nvme_setup_cmd(ns, req, &cmnd);
-       if (ret != BLK_MQ_RQ_QUEUE_OK)
+       if (ret)
                return ret;
 
        ret = nvme_init_iod(req, dev);
-       if (ret != BLK_MQ_RQ_QUEUE_OK)
+       if (ret)
                goto out_free_cmd;
 
-       if (blk_rq_nr_phys_segments(req))
+       if (blk_rq_nr_phys_segments(req)) {
                ret = nvme_map_data(dev, req, &cmnd);
-
-       if (ret != BLK_MQ_RQ_QUEUE_OK)
-               goto out_cleanup_iod;
+               if (ret)
+                       goto out_cleanup_iod;
+       }
 
        blk_mq_start_request(req);
 
        spin_lock_irq(&nvmeq->q_lock);
        if (unlikely(nvmeq->cq_vector < 0)) {
-               ret = BLK_MQ_RQ_QUEUE_ERROR;
+               ret = BLK_STS_IOERR;
                spin_unlock_irq(&nvmeq->q_lock);
                goto out_cleanup_iod;
        }
        __nvme_submit_cmd(nvmeq, &cmnd);
        nvme_process_cq(nvmeq);
        spin_unlock_irq(&nvmeq->q_lock);
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 out_cleanup_iod:
        nvme_free_iod(dev, req);
 out_free_cmd:
@@ -939,7 +937,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
        return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
 }
 
-static void abort_endio(struct request *req, int error)
+static void abort_endio(struct request *req, blk_status_t error)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = iod->nvmeq;
@@ -1367,7 +1365,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
        bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
 
        /* If there is a reset ongoing, we shouldn't reset again. */
-       if (work_busy(&dev->reset_work))
+       if (dev->ctrl.state == NVME_CTRL_RESETTING)
                return false;
 
        /* We shouldn't reset unless the controller is on fatal error state
@@ -1586,7 +1584,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        return nvme_create_io_queues(dev);
 }
 
-static void nvme_del_queue_end(struct request *req, int error)
+static void nvme_del_queue_end(struct request *req, blk_status_t error)
 {
        struct nvme_queue *nvmeq = req->end_io_data;
 
@@ -1594,7 +1592,7 @@ static void nvme_del_queue_end(struct request *req, int error)
        complete(&nvmeq->dev->ioq_wait);
 }
 
-static void nvme_del_cq_end(struct request *req, int error)
+static void nvme_del_cq_end(struct request *req, blk_status_t error)
 {
        struct nvme_queue *nvmeq = req->end_io_data;
 
@@ -1903,7 +1901,7 @@ static void nvme_reset_work(struct work_struct *work)
        bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
        int result = -ENODEV;
 
-       if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING))
+       if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
                goto out;
 
        /*
@@ -1913,9 +1911,6 @@ static void nvme_reset_work(struct work_struct *work)
        if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
                nvme_dev_disable(dev, false);
 
-       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
-               goto out;
-
        result = nvme_pci_enable(dev);
        if (result)
                goto out;
@@ -2009,8 +2004,8 @@ static int nvme_reset(struct nvme_dev *dev)
 {
        if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
                return -ENODEV;
-       if (work_busy(&dev->reset_work))
-               return -ENODEV;
+       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+               return -EBUSY;
        if (!queue_work(nvme_workq, &dev->reset_work))
                return -EBUSY;
        return 0;
@@ -2136,6 +2131,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto release_pools;
 
+       nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING);
        dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
        queue_work(nvme_workq, &dev->reset_work);
@@ -2179,6 +2175,7 @@ static void nvme_remove(struct pci_dev *pdev)
 
        nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
 
+       cancel_work_sync(&dev->reset_work);
        pci_set_drvdata(pdev, NULL);
 
        if (!pci_device_is_present(pdev)) {
index 28bd255c144dcca10aa60cede2c9a51cd101426a..e84a74479dd8fee8572b7aa07541a56a0a021ddc 100644 (file)
@@ -753,28 +753,26 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        if (ret)
                goto requeue;
 
-       blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
-
        ret = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (ret)
-               goto stop_admin_q;
+               goto requeue;
 
        set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
 
        ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
        if (ret)
-               goto stop_admin_q;
+               goto requeue;
 
        nvme_start_keep_alive(&ctrl->ctrl);
 
        if (ctrl->queue_count > 1) {
                ret = nvme_rdma_init_io_queues(ctrl);
                if (ret)
-                       goto stop_admin_q;
+                       goto requeue;
 
                ret = nvme_rdma_connect_io_queues(ctrl);
                if (ret)
-                       goto stop_admin_q;
+                       goto requeue;
        }
 
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
@@ -782,7 +780,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        ctrl->ctrl.opts->nr_reconnects = 0;
 
        if (ctrl->queue_count > 1) {
-               nvme_start_queues(&ctrl->ctrl);
                nvme_queue_scan(&ctrl->ctrl);
                nvme_queue_async_events(&ctrl->ctrl);
        }
@@ -791,8 +788,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
 
        return;
 
-stop_admin_q:
-       blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
 requeue:
        dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
                        ctrl->ctrl.opts->nr_reconnects);
@@ -823,6 +818,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
 
+       /*
+        * queues are not alive anymore, so restart the queues to fail fast
+        * new IO
+        */
+       blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
+       nvme_start_queues(&ctrl->ctrl);
+
        nvme_rdma_reconnect_or_remove(ctrl);
 }
 
@@ -1433,22 +1435,32 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
 /*
  * We cannot accept any other command until the Connect command has completed.
  */
-static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
-               struct request *rq)
+static inline blk_status_t
+nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
 {
        if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
                struct nvme_command *cmd = nvme_req(rq)->cmd;
 
                if (!blk_rq_is_passthrough(rq) ||
                    cmd->common.opcode != nvme_fabrics_command ||
-                   cmd->fabrics.fctype != nvme_fabrics_type_connect)
-                       return false;
+                   cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+                       /*
+                        * reconnecting state means transport disruption, which
+                        * can take a long time and even might fail permanently,
+                        * so we can't let incoming I/O be requeued forever.
+                        * fail it fast to allow upper layers a chance to
+                        * failover.
+                        */
+                       if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING)
+                               return BLK_STS_IOERR;
+                       return BLK_STS_RESOURCE; /* try again later */
+               }
        }
 
-       return true;
+       return 0;
 }
 
-static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
        struct nvme_ns *ns = hctx->queue->queuedata;
@@ -1459,27 +1471,29 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_command *c = sqe->data;
        bool flush = false;
        struct ib_device *dev;
-       int ret;
+       blk_status_t ret;
+       int err;
 
        WARN_ON_ONCE(rq->tag < 0);
 
-       if (!nvme_rdma_queue_is_ready(queue, rq))
-               return BLK_MQ_RQ_QUEUE_BUSY;
+       ret = nvme_rdma_queue_is_ready(queue, rq);
+       if (unlikely(ret))
+               return ret;
 
        dev = queue->device->dev;
        ib_dma_sync_single_for_cpu(dev, sqe->dma,
                        sizeof(struct nvme_command), DMA_TO_DEVICE);
 
        ret = nvme_setup_cmd(ns, rq, c);
-       if (ret != BLK_MQ_RQ_QUEUE_OK)
+       if (ret)
                return ret;
 
        blk_mq_start_request(rq);
 
-       ret = nvme_rdma_map_data(queue, rq, c);
-       if (ret < 0) {
+       err = nvme_rdma_map_data(queue, rq, c);
+       if (err < 0) {
                dev_err(queue->ctrl->ctrl.device,
-                            "Failed to map data (%d)\n", ret);
+                            "Failed to map data (%d)\n", err);
                nvme_cleanup_cmd(rq);
                goto err;
        }
@@ -1489,17 +1503,18 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        if (req_op(rq) == REQ_OP_FLUSH)
                flush = true;
-       ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
+       err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
                        req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
-       if (ret) {
+       if (err) {
                nvme_rdma_unmap_data(queue, rq);
                goto err;
        }
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 err:
-       return (ret == -ENOMEM || ret == -EAGAIN) ?
-               BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+       if (err == -ENOMEM || err == -EAGAIN)
+               return BLK_STS_RESOURCE;
+       return BLK_STS_IOERR;
 }
 
 static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
index c77940d80fc8e7386e3e968efc4be058d1abfab0..40128793e61350f59c2bb136e91efcb6e83dc649 100644 (file)
@@ -21,7 +21,7 @@ static void nvmet_bio_done(struct bio *bio)
        struct nvmet_req *req = bio->bi_private;
 
        nvmet_req_complete(req,
-               bio->bi_error ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+               bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
 
        if (bio != &req->inline_bio)
                bio_put(bio);
@@ -145,7 +145,7 @@ static void nvmet_execute_discard(struct nvmet_req *req)
                bio->bi_private = req;
                bio->bi_end_io = nvmet_bio_done;
                if (status) {
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                } else {
                        submit_bio(bio);
index e503cfff03372fb9cc7605c800743dd4c5891318..db8ebadf885b9d8f139bd44d4b4f641028f569bc 100644 (file)
@@ -159,17 +159,17 @@ nvme_loop_timeout(struct request *rq, bool reserved)
        return BLK_EH_HANDLED;
 }
 
-static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
        struct nvme_ns *ns = hctx->queue->queuedata;
        struct nvme_loop_queue *queue = hctx->driver_data;
        struct request *req = bd->rq;
        struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
-       int ret;
+       blk_status_t ret;
 
        ret = nvme_setup_cmd(ns, req, &iod->cmd);
-       if (ret != BLK_MQ_RQ_QUEUE_OK)
+       if (ret)
                return ret;
 
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
@@ -179,16 +179,15 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                nvme_cleanup_cmd(req);
                blk_mq_start_request(req);
                nvme_loop_queue_response(&iod->req);
-               return BLK_MQ_RQ_QUEUE_OK;
+               return BLK_STS_OK;
        }
 
        if (blk_rq_bytes(req)) {
                iod->sg_table.sgl = iod->first_sgl;
-               ret = sg_alloc_table_chained(&iod->sg_table,
+               if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
-                               iod->sg_table.sgl);
-               if (ret)
-                       return BLK_MQ_RQ_QUEUE_BUSY;
+                               iod->sg_table.sgl))
+                       return BLK_STS_RESOURCE;
 
                iod->req.sg = iod->sg_table.sgl;
                iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
@@ -197,7 +196,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_mq_start_request(req);
 
        schedule_work(&iod->work);
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 }
 
 static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
index 9416d052cb89474e811d8c4e8ca5ca3a81732fbb..28c38c756f92858906ca2aee54a9c922522f9253 100644 (file)
@@ -144,8 +144,8 @@ int of_dma_configure(struct device *dev, struct device_node *np)
                coherent ? " " : " not ");
 
        iommu = of_iommu_configure(dev, np);
-       if (IS_ERR(iommu))
-               return PTR_ERR(iommu);
+       if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
 
        dev_dbg(dev, "device is%sbehind an iommu\n",
                iommu ? " " : " not ");
index 727e23be7cac40cfff7ac7103fcf75e6e176c420..78ca62897784abf2704e5f8748155c8e8995515e 100644 (file)
@@ -844,7 +844,7 @@ static int qcom_qmp_phy_vreg_init(struct device *dev)
        int num = qmp->cfg->num_vregs;
        int i;
 
-       qmp->vregs = devm_kcalloc(dev, num, sizeof(qmp->vregs), GFP_KERNEL);
+       qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
        if (!qmp->vregs)
                return -ENOMEM;
 
@@ -983,16 +983,16 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id)
         * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2.
         */
        qphy->tx = of_iomap(np, 0);
-       if (IS_ERR(qphy->tx))
-               return PTR_ERR(qphy->tx);
+       if (!qphy->tx)
+               return -ENOMEM;
 
        qphy->rx = of_iomap(np, 1);
-       if (IS_ERR(qphy->rx))
-               return PTR_ERR(qphy->rx);
+       if (!qphy->rx)
+               return -ENOMEM;
 
        qphy->pcs = of_iomap(np, 2);
-       if (IS_ERR(qphy->pcs))
-               return PTR_ERR(qphy->pcs);
+       if (!qphy->pcs)
+               return -ENOMEM;
 
        /*
         * Get PHY's Pipe clock, if any. USB3 and PCIe are PIPE3
index 2de1e603bd2b1d60afdf4261ce6b8c265c630d01..5f3672153b123b90bdfa4e93fd1fd8f7ec215546 100644 (file)
@@ -704,7 +704,7 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
                /* Reallocate the array */
                u32 new_capacity = 2 * dev->pipes_capacity;
                struct goldfish_pipe **pipes =
-                       kcalloc(new_capacity, sizeof(*pipes), GFP_KERNEL);
+                       kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
                if (!pipes)
                        return -ENOMEM;
                memcpy(pipes, dev->pipes, sizeof(*pipes) * dev->pipes_capacity);
index 35ce53edabf90009efcd228e71a109e410337708..d5e5229308f2291136ebe7c6061346507a733907 100644 (file)
@@ -155,3 +155,5 @@ static int __init hi6220_reset_init(void)
 }
 
 postcore_initcall(hi6220_reset_init);
+
+MODULE_LICENSE("GPL v2");
index 6fb3fd5efc11a2f777245255820021b269481cc3..b7cbd5d2cdea177d4aa58b7f1bdab33c0c077509 100644 (file)
@@ -2672,7 +2672,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
         */
        if (basedev->state < DASD_STATE_READY) {
                while ((req = blk_fetch_request(block->request_queue)))
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                return;
        }
 
@@ -2692,7 +2692,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                                      "Rejecting write request %p",
                                      req);
                        blk_start_request(req);
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                        continue;
                }
                if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
@@ -2702,7 +2702,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                                      "Rejecting failfast request %p",
                                      req);
                        blk_start_request(req);
-                       __blk_end_request_all(req, -ETIMEDOUT);
+                       __blk_end_request_all(req, BLK_STS_TIMEOUT);
                        continue;
                }
                cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -2734,7 +2734,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                                      "on request %p",
                                      PTR_ERR(cqr), req);
                        blk_start_request(req);
-                       __blk_end_request_all(req, -EIO);
+                       __blk_end_request_all(req, BLK_STS_IOERR);
                        continue;
                }
                /*
@@ -2755,21 +2755,29 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
 {
        struct request *req;
        int status;
-       int error = 0;
+       blk_status_t error = BLK_STS_OK;
 
        req = (struct request *) cqr->callback_data;
        dasd_profile_end(cqr->block, cqr, req);
+
        status = cqr->block->base->discipline->free_cp(cqr, req);
        if (status < 0)
-               error = status;
+               error = errno_to_blk_status(status);
        else if (status == 0) {
-               if (cqr->intrc == -EPERM)
-                       error = -EBADE;
-               else if (cqr->intrc == -ENOLINK ||
-                        cqr->intrc == -ETIMEDOUT)
-                       error = cqr->intrc;
-               else
-                       error = -EIO;
+               switch (cqr->intrc) {
+               case -EPERM:
+                       error = BLK_STS_NEXUS;
+                       break;
+               case -ENOLINK:
+                       error = BLK_STS_TRANSPORT;
+                       break;
+               case -ETIMEDOUT:
+                       error = BLK_STS_TIMEOUT;
+                       break;
+               default:
+                       error = BLK_STS_IOERR;
+                       break;
+               }
        }
        __blk_end_request_all(req, error);
 }
@@ -3190,7 +3198,7 @@ static void dasd_flush_request_queue(struct dasd_block *block)
 
        spin_lock_irq(&block->request_queue_lock);
        while ((req = blk_fetch_request(block->request_queue)))
-               __blk_end_request_all(req, -EIO);
+               __blk_end_request_all(req, BLK_STS_IOERR);
        spin_unlock_irq(&block->request_queue_lock);
 }
 
index 152de6817875cb6d47cfe45c76c6f677daf83520..3c2c84b728772d78dd8d0cb22a2611f198854356 100644 (file)
@@ -231,7 +231,7 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
        aob->request.data = (u64) aobrq;
        scmrq->bdev = bdev;
        scmrq->retries = 4;
-       scmrq->error = 0;
+       scmrq->error = BLK_STS_OK;
        /* We don't use all msbs - place aidaws at the end of the aob page. */
        scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
        scm_request_cluster_init(scmrq);
@@ -364,7 +364,7 @@ static void __scmrq_log_error(struct scm_request *scmrq)
 {
        struct aob *aob = scmrq->aob;
 
-       if (scmrq->error == -ETIMEDOUT)
+       if (scmrq->error == BLK_STS_TIMEOUT)
                SCM_LOG(1, "Request timeout");
        else {
                SCM_LOG(1, "Request error");
@@ -377,7 +377,7 @@ static void __scmrq_log_error(struct scm_request *scmrq)
                       scmrq->error);
 }
 
-void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
+void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
 {
        struct scm_request *scmrq = data;
        struct scm_blk_dev *bdev = scmrq->bdev;
@@ -397,7 +397,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq)
        struct scm_blk_dev *bdev = scmrq->bdev;
        unsigned long flags;
 
-       if (scmrq->error != -EIO)
+       if (scmrq->error != BLK_STS_IOERR)
                goto restart;
 
        /* For -EIO the response block is valid. */
index 09218cdc51299d42326cbdec1bcbbae332d8dc0e..cd598d1a4eaedc1464f22702ab9b7ed2c304f06b 100644 (file)
@@ -35,7 +35,7 @@ struct scm_request {
        struct aob *aob;
        struct list_head list;
        u8 retries;
-       int error;
+       blk_status_t error;
 #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
        struct {
                enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
@@ -50,7 +50,7 @@ struct scm_request {
 int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
 void scm_blk_dev_cleanup(struct scm_blk_dev *);
 void scm_blk_set_available(struct scm_blk_dev *);
-void scm_blk_irq(struct scm_device *, void *, int);
+void scm_blk_irq(struct scm_device *, void *, blk_status_t);
 
 void scm_request_finish(struct scm_request *);
 void scm_request_requeue(struct scm_request *);
index b3f44bc7f64489e32c40d76304b7c73b88456bc8..0f11f3bcac8284d9a5191913ad9aaf3647cd3293 100644 (file)
@@ -135,7 +135,7 @@ static void eadm_subchannel_irq(struct subchannel *sch)
        struct eadm_private *private = get_eadm_private(sch);
        struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
        struct irb *irb = this_cpu_ptr(&cio_irb);
-       int error = 0;
+       blk_status_t error = BLK_STS_OK;
 
        EADM_LOG(6, "irq");
        EADM_LOG_HEX(6, irb, sizeof(*irb));
@@ -144,10 +144,10 @@ static void eadm_subchannel_irq(struct subchannel *sch)
 
        if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
            && scsw->eswf == 1 && irb->esw.eadm.erw.r)
-               error = -EIO;
+               error = BLK_STS_IOERR;
 
        if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
-               error = -ETIMEDOUT;
+               error = BLK_STS_TIMEOUT;
 
        eadm_subchannel_set_timeout(sch, 0);
 
index 15268edc54aea979c581e9135bb3c325b0d31dde..1fa53ecdc2aaa2ec1a81b7bf65b5d0dcf32a16c2 100644 (file)
@@ -71,7 +71,7 @@ void scm_driver_unregister(struct scm_driver *scmdrv)
 }
 EXPORT_SYMBOL_GPL(scm_driver_unregister);
 
-void scm_irq_handler(struct aob *aob, int error)
+void scm_irq_handler(struct aob *aob, blk_status_t error)
 {
        struct aob_rq_header *aobrq = (void *) aob->request.data;
        struct scm_device *scmdev = aobrq->scmdev;
index 62fed9dc893ef41ba6cdaafba34b24027a7c03f1..35a69949f92d484bd06c27a61a79e158f58b4ca8 100644 (file)
@@ -214,7 +214,7 @@ static void jsfd_request(void)
                struct jsfd_part *jdp = req->rq_disk->private_data;
                unsigned long offset = blk_rq_pos(req) << 9;
                size_t len = blk_rq_cur_bytes(req);
-               int err = -EIO;
+               blk_status_t err = BLK_STS_IOERR;
 
                if ((offset + len) > jdp->dsize)
                        goto end;
@@ -230,7 +230,7 @@ static void jsfd_request(void)
                }
 
                jsfd_read(bio_data(req->bio), jdp->dbase + offset, len);
-               err = 0;
+               err = BLK_STS_OK;
        end:
                if (!__blk_end_request_cur(req, err))
                        req = jsfd_next_request();
index 4fc8ed5fe067e1dfb4ca38a934c1f8f21904dba9..1f424e40afdf5c0a608b8a9bd72208c9e2b20e04 100644 (file)
@@ -191,6 +191,7 @@ struct bnx2fc_hba {
        struct bnx2fc_cmd_mgr *cmd_mgr;
        spinlock_t hba_lock;
        struct mutex hba_mutex;
+       struct mutex hba_stats_mutex;
        unsigned long adapter_state;
                #define ADAPTER_STATE_UP                0
                #define ADAPTER_STATE_GOING_DOWN        1
index 93b5a0012417dbc0c6aab8dd8e657e502cbf8681..902722dc4ce3d0d7d1a38d076458a480bc4197ff 100644 (file)
@@ -663,15 +663,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
        if (!fw_stats)
                return NULL;
 
+       mutex_lock(&hba->hba_stats_mutex);
+
        bnx2fc_stats = fc_get_host_stats(shost);
 
        init_completion(&hba->stat_req_done);
        if (bnx2fc_send_stat_req(hba))
-               return bnx2fc_stats;
+               goto unlock_stats_mutex;
        rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
        if (!rc) {
                BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
-               return bnx2fc_stats;
+               goto unlock_stats_mutex;
        }
        BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
        bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
@@ -693,6 +695,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
 
        memcpy(&hba->prev_stats, hba->stats_buffer,
               sizeof(struct fcoe_statistics_params));
+
+unlock_stats_mutex:
+       mutex_unlock(&hba->hba_stats_mutex);
        return bnx2fc_stats;
 }
 
@@ -1340,6 +1345,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
        }
        spin_lock_init(&hba->hba_lock);
        mutex_init(&hba->hba_mutex);
+       mutex_init(&hba->hba_stats_mutex);
 
        hba->cnic = cnic;
 
index 1076c157832291b4794f2d4eaf0ea49489fb1abb..0aae094ab91c8543722fe93e0d8a9c96dd7b72aa 100644 (file)
@@ -1595,7 +1595,6 @@ static void release_offload_resources(struct cxgbi_sock *csk)
                cxgbi_sock_put(csk);
        }
        csk->dst = NULL;
-       csk->cdev = NULL;
 }
 
 static int init_act_open(struct cxgbi_sock *csk)
index fb06974c88c15c2b23864e44779e7d61826546bf..e4c83b7c96a8180c856627562549c4aae362dcdf 100644 (file)
@@ -867,7 +867,8 @@ static void need_active_close(struct cxgbi_sock *csk)
        log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
                csk, (csk)->state, (csk)->flags, (csk)->tid);
        spin_lock_bh(&csk->lock);
-       dst_confirm(csk->dst);
+       if (csk->dst)
+               dst_confirm(csk->dst);
        data_lost = skb_queue_len(&csk->receive_queue);
        __skb_queue_purge(&csk->receive_queue);
 
@@ -882,7 +883,8 @@ static void need_active_close(struct cxgbi_sock *csk)
        }
 
        if (close_req) {
-               if (data_lost)
+               if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) ||
+                   data_lost)
                        csk->cdev->csk_send_abort_req(csk);
                else
                        csk->cdev->csk_send_close_req(csk);
@@ -1186,9 +1188,10 @@ static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
                                cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
                skb = next;
        }
-done:
+
        if (likely(skb_queue_len(&csk->write_queue)))
                cdev->csk_push_tx_frames(csk, 1);
+done:
        spin_unlock_bh(&csk->lock);
        return copied;
 
@@ -1568,9 +1571,12 @@ static inline int read_pdu_skb(struct iscsi_conn *conn,
        }
 }
 
-static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
+static int
+skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn,
+                struct sk_buff *skb)
 {
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+       int err;
 
        log_debug(1 << CXGBI_DBG_PDU_RX,
                "conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
@@ -1608,7 +1614,16 @@ static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
                }
        }
 
-       return read_pdu_skb(conn, skb, 0, 0);
+       err = read_pdu_skb(conn, skb, 0, 0);
+       if (likely(err >= 0)) {
+               struct iscsi_hdr *hdr = (struct iscsi_hdr *)skb->data;
+               u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+               if (unlikely(opcode == ISCSI_OP_LOGOUT_RSP))
+                       cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD);
+       }
+
+       return err;
 }
 
 static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
@@ -1713,7 +1728,7 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
                        cxgbi_skcb_rx_pdulen(skb));
 
                if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
-                       err = skb_read_pdu_bhs(conn, skb);
+                       err = skb_read_pdu_bhs(csk, conn, skb);
                        if (err < 0) {
                                pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
                                        "f 0x%lx, plen %u.\n",
@@ -1731,7 +1746,7 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
                                        cxgbi_skcb_flags(skb),
                                        cxgbi_skcb_rx_pdulen(skb));
                } else {
-                       err = skb_read_pdu_bhs(conn, skb);
+                       err = skb_read_pdu_bhs(csk, conn, skb);
                        if (err < 0) {
                                pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
                                        "f 0x%lx, plen %u.\n",
index 239462a7576051dca167ad246ba34f07777209cf..37f07aaab1e463bc78a46e3f363365ed814ac491 100644 (file)
@@ -187,6 +187,7 @@ enum cxgbi_sock_flags {
        CTPF_HAS_ATID,          /* reserved atid */
        CTPF_HAS_TID,           /* reserved hw tid */
        CTPF_OFFLOAD_DOWN,      /* offload function off */
+       CTPF_LOGOUT_RSP_RCVD,   /* received logout response */
 };
 
 struct cxgbi_skb_rx_cb {
index 8912767e7bc88cc407ea3fb372f242e2cbccd0de..da669dce12feb09c6df8564d16f3b0a3fc14c150 100644 (file)
@@ -127,7 +127,7 @@ int lpfc_disc_state_machine(struct lpfc_vport *, struct lpfc_nodelist *, void *,
 void lpfc_do_scr_ns_plogi(struct lpfc_hba *, struct lpfc_vport *);
 int lpfc_check_sparm(struct lpfc_vport *, struct lpfc_nodelist *,
                     struct serv_parm *, uint32_t, int);
-int lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
+void lpfc_els_abort(struct lpfc_hba *, struct lpfc_nodelist *);
 void lpfc_more_plogi(struct lpfc_vport *);
 void lpfc_more_adisc(struct lpfc_vport *);
 void lpfc_end_rscn(struct lpfc_vport *);
index f2cd19c6c2df9fd77516d18fddf2de04cf531437..24ce96dcc94d4ce0fffd8109e89a80193b71bb16 100644 (file)
@@ -978,9 +978,10 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                                         ndlp, did, ndlp->nlp_fc4_type,
                                         FC_TYPE_FCP, FC_TYPE_NVME);
                        ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+
+                       lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+                       lpfc_issue_els_prli(vport, ndlp, 0);
                }
-               lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
-               lpfc_issue_els_prli(vport, ndlp, 0);
        } else
                lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                                 "3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
index bff3de053df475365193ea47b153c13795f9c816..f74cb0142fd4edac4e1378db1e85d14ec21c71a8 100644 (file)
@@ -206,7 +206,7 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
  * associated with a LPFC_NODELIST entry. This
  * routine effectively results in a "software abort".
  */
-int
+void
 lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 {
        LIST_HEAD(abort_list);
@@ -215,6 +215,10 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
 
        pring = lpfc_phba_elsring(phba);
 
+       /* In case of error recovery path, we might have a NULL pring here */
+       if (!pring)
+               return;
+
        /* Abort outstanding I/O on NPort <nlp_DID> */
        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
                         "2819 Abort outstanding I/O on NPort x%x "
@@ -273,7 +277,6 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
                              IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 
        lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
-       return 0;
 }
 
 static int
index 074a6b5e7763510555d9b7f9f7e34e095af1b0f4..518b15e6f22236c7289a174388ad6ae5916db725 100644 (file)
@@ -799,8 +799,8 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
        }
        spin_unlock_irqrestore(&ctxp->ctxlock, flags);
 
-       lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
-                        ctxp->state, 0);
+       lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
+                        ctxp->state, aborting);
 
        atomic_inc(&lpfc_nvmep->xmt_fcp_release);
 
index 8a1b948164191c322aa01e97b54a930efadd301a..1e69a43b279d335867210ff828098632389e21be 100644 (file)
@@ -446,7 +446,7 @@ static void _put_request(struct request *rq)
         *       code paths.
         */
        if (unlikely(rq->bio))
-               blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq));
+               blk_end_request(rq, BLK_STS_IOERR, blk_rq_bytes(rq));
        else
                blk_put_request(rq);
 }
@@ -474,10 +474,10 @@ void osd_end_request(struct osd_request *or)
 EXPORT_SYMBOL(osd_end_request);
 
 static void _set_error_resid(struct osd_request *or, struct request *req,
-                            int error)
+                            blk_status_t error)
 {
        or->async_error = error;
-       or->req_errors = scsi_req(req)->result ? : error;
+       or->req_errors = scsi_req(req)->result;
        or->sense_len = scsi_req(req)->sense_len;
        if (or->sense_len)
                memcpy(or->sense, scsi_req(req)->sense, or->sense_len);
@@ -489,17 +489,19 @@ static void _set_error_resid(struct osd_request *or, struct request *req,
 
 int osd_execute_request(struct osd_request *or)
 {
-       int error;
-
        blk_execute_rq(or->request->q, NULL, or->request, 0);
-       error = scsi_req(or->request)->result ? -EIO : 0;
 
-       _set_error_resid(or, or->request, error);
-       return error;
+       if (scsi_req(or->request)->result) {
+               _set_error_resid(or, or->request, BLK_STS_IOERR);
+               return -EIO;
+       }
+
+       _set_error_resid(or, or->request, BLK_STS_OK);
+       return 0;
 }
 EXPORT_SYMBOL(osd_execute_request);
 
-static void osd_request_async_done(struct request *req, int error)
+static void osd_request_async_done(struct request *req, blk_status_t error)
 {
        struct osd_request *or = req->end_io_data;
 
@@ -1914,7 +1916,7 @@ analyze:
                /* scsi sense is Empty, the request was never issued to target
                 * linux return code might tell us what happened.
                 */
-               if (or->async_error == -ENOMEM)
+               if (or->async_error == BLK_STS_RESOURCE)
                        osi->osd_err_pri = OSD_ERR_PRI_RESOURCE;
                else
                        osi->osd_err_pri = OSD_ERR_PRI_UNREACHABLE;
index 67cbed92f07dd05001f1e3f5a3ad40a00e59a4e6..d54689c9216ee29ee0d023d57334ad56b4a2f1ef 100644 (file)
@@ -320,7 +320,7 @@ static int osst_chk_result(struct osst_tape * STp, struct osst_request * SRpnt)
 
 
 /* Wakeup from interrupt */
-static void osst_end_async(struct request *req, int update)
+static void osst_end_async(struct request *req, blk_status_t status)
 {
        struct scsi_request *rq = scsi_req(req);
        struct osst_request *SRpnt = req->end_io_data;
index 16d1cd50feed5eee4744ff168709d589672a132b..ca3420de5a013f4e39760fb6da1edbb5358d26b1 100644 (file)
@@ -730,6 +730,8 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
                return -EIO;
        }
 
+       memset(&elreq, 0, sizeof(elreq));
+
        elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
                bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
                DMA_TO_DEVICE);
@@ -795,10 +797,9 @@ qla2x00_process_loopback(struct bsg_job *bsg_job)
 
        if (atomic_read(&vha->loop_state) == LOOP_READY &&
            (ha->current_topology == ISP_CFG_F ||
-           ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
-           le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
-           && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
-               elreq.options == EXTERNAL_LOOPBACK) {
+           (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
+            req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
+           elreq.options == EXTERNAL_LOOPBACK) {
                type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
                ql_dbg(ql_dbg_user, vha, 0x701e,
                    "BSG request type: %s.\n", type);
index 51b4179469d1851be96872ee39b24739fc34135e..88748a6ab73f6fc59f4a2b9fb202fa9efe24c7de 100644 (file)
@@ -1131,7 +1131,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        /* Mailbox registers. */
        mbx_reg = &reg->mailbox0;
-       for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
+       for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
                fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
 
        /* Transfer sequence registers. */
@@ -2090,7 +2090,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        /* Mailbox registers. */
        mbx_reg = &reg->mailbox0;
-       for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++)
+       for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
                fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
 
        /* Transfer sequence registers. */
index ae119018dfaae9fe65c5cfe1869cdc655b27a3ea..eddbc1218a39ba511700d3d4f446771eb06524f8 100644 (file)
@@ -3425,6 +3425,7 @@ struct qla_hw_data {
        uint8_t         max_req_queues;
        uint8_t         max_rsp_queues;
        uint8_t         max_qpairs;
+       uint8_t         num_qpairs;
        struct qla_qpair *base_qpair;
        struct qla_npiv_entry *npiv_info;
        uint16_t        nvram_npiv_size;
index 034743309adaa93f7093cf79d9548fe05fb4bb3b..0391fc3170035e10bd5a34423cd58601eca0850e 100644 (file)
@@ -7543,12 +7543,13 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
                /* Assign available que pair id */
                mutex_lock(&ha->mq_lock);
                qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
-               if (qpair_id >= ha->max_qpairs) {
+               if (ha->num_qpairs >= ha->max_qpairs) {
                        mutex_unlock(&ha->mq_lock);
                        ql_log(ql_log_warn, vha, 0x0183,
                            "No resources to create additional q pair.\n");
                        goto fail_qid_map;
                }
+               ha->num_qpairs++;
                set_bit(qpair_id, ha->qpair_qid_map);
                ha->queue_pair_map[qpair_id] = qpair;
                qpair->id = qpair_id;
@@ -7635,6 +7636,7 @@ fail_rsp:
 fail_msix:
        ha->queue_pair_map[qpair_id] = NULL;
        clear_bit(qpair_id, ha->qpair_qid_map);
+       ha->num_qpairs--;
        mutex_unlock(&ha->mq_lock);
 fail_qid_map:
        kfree(qpair);
@@ -7660,6 +7662,7 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
        mutex_lock(&ha->mq_lock);
        ha->queue_pair_map[qpair->id] = NULL;
        clear_bit(qpair->id, ha->qpair_qid_map);
+       ha->num_qpairs--;
        list_del(&qpair->qp_list_elem);
        if (list_empty(&vha->qp_list))
                vha->flags.qpairs_available = 0;
index 66df6cec59da4059f064410536c48995635c80ee..c61a6a871c8e0d5bee96b566639abf84d4fe544f 100644 (file)
@@ -129,28 +129,16 @@ qla2x00_clear_loop_id(fc_port_t *fcport) {
 }
 
 static inline void
-qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp,
-       struct qla_tgt_cmd *tc)
+qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
 {
-       struct dsd_dma *dsd_ptr, *tdsd_ptr;
-       struct crc_context *ctx;
-
-       if (sp)
-               ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
-       else if (tc)
-               ctx = (struct crc_context *)tc->ctx;
-       else {
-               BUG();
-               return;
-       }
+       struct dsd_dma *dsd, *tdsd;
 
        /* clean up allocated prev pool */
-       list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
-           &ctx->dsd_list, list) {
-               dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
-                   dsd_ptr->dsd_list_dma);
-               list_del(&dsd_ptr->list);
-               kfree(dsd_ptr);
+       list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
+               dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
+                   dsd->dsd_list_dma);
+               list_del(&dsd->list);
+               kfree(dsd);
        }
        INIT_LIST_HEAD(&ctx->dsd_list);
 }
index aac03504d9a359c9d48a88bf12ff2b3c51eba308..2572121b765b488b4ee5dbec726e9240e0df25a9 100644 (file)
@@ -3282,7 +3282,7 @@ msix_register_fail:
        }
 
        /* Enable MSI-X vector for response queue update for queue 0 */
-       if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+       if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                if (ha->msixbase && ha->mqiobase &&
                    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
                     ql2xmqsupport))
index a113ab3592a7f86eb16ce8f76d82337557cab029..cba1fc5e8be9d58fce789694b508734790689a83 100644 (file)
@@ -3676,15 +3676,6 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                                qlt_update_host_map(vha, id);
                        }
 
-                       fc_host_port_name(vha->host) =
-                           wwn_to_u64(vha->port_name);
-
-                       if (qla_ini_mode_enabled(vha))
-                               ql_dbg(ql_dbg_mbx, vha, 0x1018,
-                                   "FA-WWN portname %016llx (%x)\n",
-                                   fc_host_port_name(vha->host),
-                                   rptid_entry->vp_status);
-
                        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
                        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
                } else {
@@ -4821,9 +4812,9 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
 
        memset(mcp->mb, 0 , sizeof(mcp->mb));
        mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
-       mcp->mb[1] = mreq->options | BIT_6;     /* BIT_6 specifies 64bit address */
+       /* BIT_6 specifies 64bit address */
+       mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
        if (IS_CNA_CAPABLE(ha)) {
-               mcp->mb[1] |= BIT_15;
                mcp->mb[2] = vha->fcoe_fcf_idx;
        }
        mcp->mb[16] = LSW(mreq->rcv_dma);
index 1c79579032835af29aefa337448f50548f7fb37d..79f050256c55c735c612c4553124ccf9e80fda0c 100644 (file)
@@ -630,29 +630,34 @@ qla2x00_sp_free_dma(void *ptr)
                sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
        }
 
+       if (!ctx)
+               goto end;
+
        if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
                /* List assured to be having elements */
-               qla2x00_clean_dsd_pool(ha, sp, NULL);
+               qla2x00_clean_dsd_pool(ha, ctx);
                sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
        }
 
        if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
-               dma_pool_free(ha->dl_dma_pool, ctx,
-                   ((struct crc_context *)ctx)->crc_ctx_dma);
+               struct crc_context *ctx0 = ctx;
+
+               dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
                sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
        }
 
        if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
-               struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
+               struct ct6_dsd *ctx1 = ctx;
 
                dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
-                       ctx1->fcp_cmnd_dma);
+                   ctx1->fcp_cmnd_dma);
                list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
                ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
                ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
                mempool_free(ctx1, ha->ctx_mempool);
        }
 
+end:
        CMD_SP(cmd) = NULL;
        qla2x00_rel_sp(sp);
 }
@@ -699,21 +704,24 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
                sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
        }
 
+       if (!ctx)
+               goto end;
+
        if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
                /* List assured to be having elements */
-               qla2x00_clean_dsd_pool(ha, sp, NULL);
+               qla2x00_clean_dsd_pool(ha, ctx);
                sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
        }
 
        if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
-               dma_pool_free(ha->dl_dma_pool, ctx,
-                   ((struct crc_context *)ctx)->crc_ctx_dma);
+               struct crc_context *ctx0 = ctx;
+
+               dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma);
                sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
        }
 
        if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
-               struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
-
+               struct ct6_dsd *ctx1 = ctx;
                dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
                    ctx1->fcp_cmnd_dma);
                list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
@@ -721,7 +729,7 @@ qla2xxx_qpair_sp_free_dma(void *ptr)
                ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
                mempool_free(ctx1, ha->ctx_mempool);
        }
-
+end:
        CMD_SP(cmd) = NULL;
        qla2xxx_rel_qpair_sp(sp->qpair, sp);
 }
@@ -1632,7 +1640,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
 void
 qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 {
-       int que, cnt;
+       int que, cnt, status;
        unsigned long flags;
        srb_t *sp;
        struct qla_hw_data *ha = vha->hw;
@@ -1662,8 +1670,12 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
                                         */
                                        sp_get(sp);
                                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
-                                       qla2xxx_eh_abort(GET_CMD_SP(sp));
+                                       status = qla2xxx_eh_abort(GET_CMD_SP(sp));
                                        spin_lock_irqsave(&ha->hardware_lock, flags);
+                                       /* Get rid of extra reference if immediate exit
+                                        * from ql2xxx_eh_abort */
+                                       if (status == FAILED && (qla2x00_isp_reg_stat(ha)))
+                                               atomic_dec(&sp->ref_count);
                                }
                                req->outstanding_cmds[cnt] = NULL;
                                sp->done(sp, res);
@@ -2623,10 +2635,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
        if (mem_only) {
                if (pci_enable_device_mem(pdev))
-                       goto probe_out;
+                       return ret;
        } else {
                if (pci_enable_device(pdev))
-                       goto probe_out;
+                       return ret;
        }
 
        /* This may fail but that's ok */
@@ -2636,7 +2648,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        if (!ha) {
                ql_log_pci(ql_log_fatal, pdev, 0x0009,
                    "Unable to allocate memory for ha.\n");
-               goto probe_out;
+               goto disable_device;
        }
        ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
            "Memory allocated for ha=%p.\n", ha);
@@ -3254,7 +3266,7 @@ iospace_config_failed:
        pci_release_selected_regions(ha->pdev, ha->bars);
        kfree(ha);
 
-probe_out:
+disable_device:
        pci_disable_device(pdev);
        return ret;
 }
index 0e03ca2ab3e52358c817cdd2cdc667ba2bfb1ba3..e766d8412384fd63ec598354cf6f75aa7a8decfe 100644 (file)
@@ -2245,11 +2245,13 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
                pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
                        cmd->dma_data_direction);
 
+       if (!cmd->ctx)
+               return;
+
        if (cmd->ctx_dsd_alloced)
-               qla2x00_clean_dsd_pool(ha, NULL, cmd);
+               qla2x00_clean_dsd_pool(ha, cmd->ctx);
 
-       if (cmd->ctx)
-               dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
+       dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
 }
 
 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
index 8a58ef3adab4425ba69a992dd2f51bd9357f44c9..c197972a3e2d465e1c64509c6c34839390ec1db7 100644 (file)
@@ -371,7 +371,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
                goto done;
        }
 
-       if (end <= start || start == 0 || end == 0) {
+       if (end < start || start == 0 || end == 0) {
                ql_dbg(ql_dbg_misc, vha, 0xd023,
                    "%s: unusable range (start=%x end=%x)\n", __func__,
                    ent->t262.end_addr, ent->t262.start_addr);
index 35ee09644cfb6319e0081a39e153350c501fbe0d..3be980d472681a4410d681e99d649b99cddb3cea 100644 (file)
@@ -1404,7 +1404,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
        arr[4] = SDEBUG_LONG_INQ_SZ - 5;
        arr[5] = (int)have_dif_prot;    /* PROTECT bit */
        if (sdebug_vpd_use_hostno == 0)
-               arr[5] = 0x10; /* claim: implicit TGPS */
+               arr[5] |= 0x10; /* claim: implicit TPGS */
        arr[6] = 0x10; /* claim: MultiP */
        /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
        arr[7] = 0xa; /* claim: LINKED + CMDQUE */
index ecc07dab893dc473c831227e567cddbbb04e1852..44904f41924cdb579f090014962b530bf3580efd 100644 (file)
@@ -1874,7 +1874,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
        }
 }
 
-static void eh_lock_door_done(struct request *req, int uptodate)
+static void eh_lock_door_done(struct request *req, blk_status_t status)
 {
        __blk_put_request(req->q, req);
 }
index 99e16ac479e365d343840f44c9f7b0c50b4042cc..b5f310b9e91079802f152b6e5ed9f03a7f5b6569 100644 (file)
@@ -635,7 +635,7 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
        cmd->request->next_rq->special = NULL;
 }
 
-static bool scsi_end_request(struct request *req, int error,
+static bool scsi_end_request(struct request *req, blk_status_t error,
                unsigned int bytes, unsigned int bidi_bytes)
 {
        struct scsi_cmnd *cmd = req->special;
@@ -694,45 +694,28 @@ static bool scsi_end_request(struct request *req, int error,
  * @cmd:       SCSI command (unused)
  * @result:    scsi error code
  *
- * Translate SCSI error code into standard UNIX errno.
- * Return values:
- * -ENOLINK    temporary transport failure
- * -EREMOTEIO  permanent target failure, do not retry
- * -EBADE      permanent nexus failure, retry on other path
- * -ENOSPC     No write space available
- * -ENODATA    Medium error
- * -EIO                unspecified I/O error
+ * Translate SCSI error code into block errors.
  */
-static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
+static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd,
+               int result)
 {
-       int error = 0;
-
-       switch(host_byte(result)) {
+       switch (host_byte(result)) {
        case DID_TRANSPORT_FAILFAST:
-               error = -ENOLINK;
-               break;
+               return BLK_STS_TRANSPORT;
        case DID_TARGET_FAILURE:
                set_host_byte(cmd, DID_OK);
-               error = -EREMOTEIO;
-               break;
+               return BLK_STS_TARGET;
        case DID_NEXUS_FAILURE:
-               set_host_byte(cmd, DID_OK);
-               error = -EBADE;
-               break;
+               return BLK_STS_NEXUS;
        case DID_ALLOC_FAILURE:
                set_host_byte(cmd, DID_OK);
-               error = -ENOSPC;
-               break;
+               return BLK_STS_NOSPC;
        case DID_MEDIUM_ERROR:
                set_host_byte(cmd, DID_OK);
-               error = -ENODATA;
-               break;
+               return BLK_STS_MEDIUM;
        default:
-               error = -EIO;
-               break;
+               return BLK_STS_IOERR;
        }
-
-       return error;
 }
 
 /*
@@ -769,7 +752,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
        int result = cmd->result;
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
-       int error = 0;
+       blk_status_t error = BLK_STS_OK;
        struct scsi_sense_hdr sshdr;
        bool sense_valid = false;
        int sense_deferred = 0, level = 0;
@@ -808,7 +791,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                         * both sides at once.
                         */
                        scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
-                       if (scsi_end_request(req, 0, blk_rq_bytes(req),
+                       if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req),
                                        blk_rq_bytes(req->next_rq)))
                                BUG();
                        return;
@@ -850,7 +833,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                        scsi_print_sense(cmd);
                result = 0;
                /* for passthrough error may be set */
-               error = 0;
+               error = BLK_STS_OK;
        }
 
        /*
@@ -922,18 +905,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                                action = ACTION_REPREP;
                        } else if (sshdr.asc == 0x10) /* DIX */ {
                                action = ACTION_FAIL;
-                               error = -EILSEQ;
+                               error = BLK_STS_PROTECTION;
                        /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
                        } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
                                action = ACTION_FAIL;
-                               error = -EREMOTEIO;
+                               error = BLK_STS_TARGET;
                        } else
                                action = ACTION_FAIL;
                        break;
                case ABORTED_COMMAND:
                        action = ACTION_FAIL;
                        if (sshdr.asc == 0x10) /* DIF */
-                               error = -EILSEQ;
+                               error = BLK_STS_PROTECTION;
                        break;
                case NOT_READY:
                        /* If the device is in the process of becoming
@@ -1829,15 +1812,15 @@ out_delay:
                blk_delay_queue(q, SCSI_QUEUE_DELAY);
 }
 
-static inline int prep_to_mq(int ret)
+static inline blk_status_t prep_to_mq(int ret)
 {
        switch (ret) {
        case BLKPREP_OK:
-               return BLK_MQ_RQ_QUEUE_OK;
+               return BLK_STS_OK;
        case BLKPREP_DEFER:
-               return BLK_MQ_RQ_QUEUE_BUSY;
+               return BLK_STS_RESOURCE;
        default:
-               return BLK_MQ_RQ_QUEUE_ERROR;
+               return BLK_STS_IOERR;
        }
 }
 
@@ -1909,7 +1892,7 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
        blk_mq_complete_request(cmd->request);
 }
 
-static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
 {
        struct request *req = bd->rq;
@@ -1917,14 +1900,14 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost = sdev->host;
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-       int ret;
+       blk_status_t ret;
        int reason;
 
        ret = prep_to_mq(scsi_prep_state_check(sdev, req));
-       if (ret != BLK_MQ_RQ_QUEUE_OK)
+       if (ret != BLK_STS_OK)
                goto out;
 
-       ret = BLK_MQ_RQ_QUEUE_BUSY;
+       ret = BLK_STS_RESOURCE;
        if (!get_device(&sdev->sdev_gendev))
                goto out;
 
@@ -1937,7 +1920,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        if (!(req->rq_flags & RQF_DONTPREP)) {
                ret = prep_to_mq(scsi_mq_prep_fn(req));
-               if (ret != BLK_MQ_RQ_QUEUE_OK)
+               if (ret != BLK_STS_OK)
                        goto out_dec_host_busy;
                req->rq_flags |= RQF_DONTPREP;
        } else {
@@ -1955,11 +1938,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
        reason = scsi_dispatch_cmd(cmd);
        if (reason) {
                scsi_set_blocked(cmd, reason);
-               ret = BLK_MQ_RQ_QUEUE_BUSY;
+               ret = BLK_STS_RESOURCE;
                goto out_dec_host_busy;
        }
 
-       return BLK_MQ_RQ_QUEUE_OK;
+       return BLK_STS_OK;
 
 out_dec_host_busy:
        atomic_dec(&shost->host_busy);
@@ -1972,12 +1955,14 @@ out_put_device:
        put_device(&sdev->sdev_gendev);
 out:
        switch (ret) {
-       case BLK_MQ_RQ_QUEUE_BUSY:
+       case BLK_STS_OK:
+               break;
+       case BLK_STS_RESOURCE:
                if (atomic_read(&sdev->device_busy) == 0 &&
                    !scsi_device_blocked(sdev))
                        blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
                break;
-       case BLK_MQ_RQ_QUEUE_ERROR:
+       default:
                /*
                 * Make sure to release all allocated ressources when
                 * we hit an error, as we will never see this command
@@ -1986,8 +1971,6 @@ out:
                if (req->rq_flags & RQF_DONTPREP)
                        scsi_mq_uninit_cmd(cmd);
                break;
-       default:
-               break;
        }
        return ret;
 }
@@ -2057,6 +2040,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 {
        struct device *dev = shost->dma_dev;
 
+       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+
        /*
         * this limit is imposed by hardware restrictions
         */
index 0ebe2f1bb908c594123887d5f78f58219b79f427..cc970c811bcbc207cc6f366faf0bad85e7e160ab 100644 (file)
@@ -172,7 +172,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
                            struct sas_rphy *rphy)
 {
        struct request *req;
-       int ret;
+       blk_status_t ret;
        int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
 
        while ((req = blk_fetch_request(q)) != NULL) {
@@ -264,6 +264,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
                q->queuedata = shost;
 
        queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
+       queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
        return 0;
 
 out_cleanup_queue:
index 82c33a6edbeaa7a00e6f7840ef4b5d8cdb8a084f..f3387c6089c51c99668455a040458aefd8265b20 100644 (file)
@@ -177,7 +177,7 @@ typedef struct sg_device { /* holds the state of each scsi generic device */
 } Sg_device;
 
 /* tasklet or soft irq callback */
-static void sg_rq_end_io(struct request *rq, int uptodate);
+static void sg_rq_end_io(struct request *rq, blk_status_t status);
 static int sg_start_req(Sg_request *srp, unsigned char *cmd);
 static int sg_finish_rem_req(Sg_request * srp);
 static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
@@ -808,7 +808,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
        if (atomic_read(&sdp->detaching)) {
                if (srp->bio) {
                        scsi_req_free_cmd(scsi_req(srp->rq));
-                       blk_end_request_all(srp->rq, -EIO);
+                       blk_end_request_all(srp->rq, BLK_STS_IOERR);
                        srp->rq = NULL;
                }
 
@@ -1300,7 +1300,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
  * level when a command is completed (or has failed).
  */
 static void
-sg_rq_end_io(struct request *rq, int uptodate)
+sg_rq_end_io(struct request *rq, blk_status_t status)
 {
        struct sg_request *srp = rq->end_io_data;
        struct scsi_request *req = scsi_req(rq);
index 1ea34d6f54370f0beece52e2de1d7b3aa76516fe..6b1c4ac54e662b77751cfad349545aeddb31ddbb 100644 (file)
@@ -511,7 +511,7 @@ static void st_do_stats(struct scsi_tape *STp, struct request *req)
        atomic64_dec(&STp->stats->in_flight);
 }
 
-static void st_scsi_execute_end(struct request *req, int uptodate)
+static void st_scsi_execute_end(struct request *req, blk_status_t status)
 {
        struct st_request *SRpnt = req->end_io_data;
        struct scsi_request *rq = scsi_req(req);
index ae627049c499aaaa716f80864a77f4dd796a6eaa..4be87f503e3be8f611fcd893edd53255032c93cd 100644 (file)
@@ -1,6 +1,6 @@
 config CRYPTO_DEV_CCREE
        tristate "Support for ARM TrustZone CryptoCell C7XX family of Crypto accelerators"
-       depends on CRYPTO_HW && OF && HAS_DMA
+       depends on CRYPTO && CRYPTO_HW && OF && HAS_DMA
        default n
        select CRYPTO_HASH
        select CRYPTO_BLKCIPHER
index 038e2ff5e545f4b10e7c6ef34a30926aaffc5496..6471d3d2d3752ff52ff6f7ba064858b81bfed9a4 100644 (file)
@@ -216,7 +216,8 @@ void ssi_buffer_mgr_copy_scatterlist_portion(
        uint32_t nents, lbytes;
 
        nents = ssi_buffer_mgr_get_sgl_nents(sg, end, &lbytes, NULL);
-       sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip), 0, (direct == SSI_SG_TO_BUF));
+       sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
+                      (direct == SSI_SG_TO_BUF));
 }
 
 static inline int ssi_buffer_mgr_render_buff_to_mlli(
index 2e1bd47337fd84954f37965b379f86d1d363b9d4..e6727cefde05bb536cb29f600edb7ab8312861ac 100644 (file)
@@ -293,18 +293,10 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
        size_t lmmk_size;
        size_t lum_size;
        int rc;
-       mm_segment_t seg;
 
        if (!lsm)
                return -ENODATA;
 
-       /*
-        * "Switch to kernel segment" to allow copying from kernel space by
-        * copy_{to,from}_user().
-        */
-       seg = get_fs();
-       set_fs(KERNEL_DS);
-
        if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
                CERROR("bad LSM MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
                       lsm->lsm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
@@ -406,6 +398,5 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
 out_free:
        kvfree(lmmk);
 out:
-       set_fs(seg);
        return rc;
 }
index 8ea01904c0eae72b9273919c0214510072e89813..466517c7c8e618112dcdea3716631be9f277d400 100644 (file)
@@ -19,5 +19,3 @@ obj-$(CONFIG_VIDEO_AP1302)     += ap1302.o
 
 obj-$(CONFIG_VIDEO_LM3554) += lm3554.o
 
-ccflags-y += -Werror
-
index 1d7f7ab94cac3b7ebf454b16a890dab9dc255314..6b13a3a66e49e3ee064887fa324685ddf05d1a0c 100644 (file)
@@ -4,5 +4,3 @@ imx1x5-objs := imx.o drv201.o ad5816g.o dw9714.o dw9719.o dw9718.o vcm.o otp.o o
 
 ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o
 obj-$(CONFIG_VIDEO_OV8858)     += ov8858_driver.o
-
-ccflags-y += -Werror
index fceb9e9b881bac608e81ae76ed50dad139e2432e..c9c0e1245858470147768987c12de5727e68ad22 100644 (file)
@@ -1,3 +1 @@
 obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
-
-ccflags-y += -Werror
index 3fa7c1c1479f330367b7ab01bf41d8c502a69349..f126a89a08e93ff6511b603960285bfbe3a3c4f7 100644 (file)
@@ -351,5 +351,5 @@ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
 DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0
 DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400
 
-ccflags-y += $(INCLUDES) $(DEFINES) -fno-common -Werror
+ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
 
index bb069ebe4aa6c1abcce203c7133a446493abc7eb..75373624604b6fcce1b651071c6d06c927255579 100644 (file)
@@ -296,8 +296,8 @@ static void iblock_bio_done(struct bio *bio)
        struct se_cmd *cmd = bio->bi_private;
        struct iblock_req *ibr = cmd->priv;
 
-       if (bio->bi_error) {
-               pr_err("bio error: %p,  err: %d\n", bio, bio->bi_error);
+       if (bio->bi_status) {
+               pr_err("bio error: %p,  err: %d\n", bio, bio->bi_status);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
@@ -354,11 +354,11 @@ static void iblock_end_io_flush(struct bio *bio)
 {
        struct se_cmd *cmd = bio->bi_private;
 
-       if (bio->bi_error)
-               pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error);
+       if (bio->bi_status)
+               pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);
 
        if (cmd) {
-               if (bio->bi_error)
+               if (bio->bi_status)
                        target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
                else
                        target_complete_cmd(cmd, SAM_STAT_GOOD);
index 3e4abb13f8ea4b46ad78de13eae74fd17ccda67a..323ab47645d01f3dd153253beb1dad0c672ae325 100644 (file)
@@ -55,7 +55,7 @@ static inline struct pscsi_dev_virt *PSCSI_DEV(struct se_device *dev)
 }
 
 static sense_reason_t pscsi_execute_cmd(struct se_cmd *cmd);
-static void pscsi_req_done(struct request *, int);
+static void pscsi_req_done(struct request *, blk_status_t);
 
 /*     pscsi_attach_hba():
  *
@@ -1045,7 +1045,7 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
        return 0;
 }
 
-static void pscsi_req_done(struct request *req, int uptodate)
+static void pscsi_req_done(struct request *req, blk_status_t status)
 {
        struct se_cmd *cmd = req->end_io_data;
        struct pscsi_plugin_task *pt = cmd->priv;
index 9e217b1361ea76af9b9a5bb999b81559643e8e25..fe4fe24407296d52ba28a0408e39ba131b4e3737 100644 (file)
@@ -843,7 +843,10 @@ static ssize_t ci_role_show(struct device *dev, struct device_attribute *attr,
 {
        struct ci_hdrc *ci = dev_get_drvdata(dev);
 
-       return sprintf(buf, "%s\n", ci_role(ci)->name);
+       if (ci->role != CI_ROLE_END)
+               return sprintf(buf, "%s\n", ci_role(ci)->name);
+
+       return 0;
 }
 
 static ssize_t ci_role_store(struct device *dev,
index 6d23eede4d8cda5014b041f965f3e060d201be74..1c31e8a088101ff70d3aa37553d1e9b2d906b324 100644 (file)
@@ -294,7 +294,8 @@ static int ci_role_show(struct seq_file *s, void *data)
 {
        struct ci_hdrc *ci = s->private;
 
-       seq_printf(s, "%s\n", ci_role(ci)->name);
+       if (ci->role != CI_ROLE_END)
+               seq_printf(s, "%s\n", ci_role(ci)->name);
 
        return 0;
 }
index 56d2d32130765dfc655ecbb6e105f8d411070d1d..d68b125796f987d7133fef50fc8486363a5ef305 100644 (file)
@@ -1993,6 +1993,7 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci)
 int ci_hdrc_gadget_init(struct ci_hdrc *ci)
 {
        struct ci_role_driver *rdrv;
+       int ret;
 
        if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
                return -ENXIO;
@@ -2005,7 +2006,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
        rdrv->stop      = udc_id_switch_for_host;
        rdrv->irq       = udc_irq;
        rdrv->name      = "gadget";
-       ci->roles[CI_ROLE_GADGET] = rdrv;
 
-       return udc_start(ci);
+       ret = udc_start(ci);
+       if (!ret)
+               ci->roles[CI_ROLE_GADGET] = rdrv;
+
+       return ret;
 }
index e77a4ed4f021da17f5c1f366e22b8447c366474a..9f4a0185dd609c09f6873f85b849c9553821dca1 100644 (file)
@@ -108,6 +108,8 @@ struct imx_usbmisc {
        const struct usbmisc_ops *ops;
 };
 
+static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data);
+
 static int usbmisc_imx25_init(struct imx_usbmisc_data *data)
 {
        struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
@@ -242,10 +244,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
                        val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
                                | MX53_USB_UHx_CTRL_ULPI_INT_EN;
                        writel(val, reg);
-                       /* Disable internal 60Mhz clock */
-                       reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
-                       val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
-                       writel(val, reg);
+                       if (is_imx53_usbmisc(data)) {
+                               /* Disable internal 60Mhz clock */
+                               reg = usbmisc->base +
+                                       MX53_USB_CLKONOFF_CTRL_OFFSET;
+                               val = readl(reg) |
+                                       MX53_USB_CLKONOFF_CTRL_H2_INT60CKOFF;
+                               writel(val, reg);
+                       }
+
                }
                if (data->disable_oc) {
                        reg = usbmisc->base + MX53_USB_UH2_CTRL_OFFSET;
@@ -267,10 +274,15 @@ static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
                        val = readl(reg) | MX53_USB_UHx_CTRL_WAKE_UP_EN
                                | MX53_USB_UHx_CTRL_ULPI_INT_EN;
                        writel(val, reg);
-                       /* Disable internal 60Mhz clock */
-                       reg = usbmisc->base + MX53_USB_CLKONOFF_CTRL_OFFSET;
-                       val = readl(reg) | MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
-                       writel(val, reg);
+
+                       if (is_imx53_usbmisc(data)) {
+                               /* Disable internal 60Mhz clock */
+                               reg = usbmisc->base +
+                                       MX53_USB_CLKONOFF_CTRL_OFFSET;
+                               val = readl(reg) |
+                                       MX53_USB_CLKONOFF_CTRL_H3_INT60CKOFF;
+                               writel(val, reg);
+                       }
                }
                if (data->disable_oc) {
                        reg = usbmisc->base + MX53_USB_UH3_CTRL_OFFSET;
@@ -456,6 +468,10 @@ static const struct usbmisc_ops imx27_usbmisc_ops = {
        .init = usbmisc_imx27_init,
 };
 
+static const struct usbmisc_ops imx51_usbmisc_ops = {
+       .init = usbmisc_imx53_init,
+};
+
 static const struct usbmisc_ops imx53_usbmisc_ops = {
        .init = usbmisc_imx53_init,
 };
@@ -479,6 +495,13 @@ static const struct usbmisc_ops imx7d_usbmisc_ops = {
        .set_wakeup = usbmisc_imx7d_set_wakeup,
 };
 
+static inline bool is_imx53_usbmisc(struct imx_usbmisc_data *data)
+{
+       struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+
+       return usbmisc->ops == &imx53_usbmisc_ops;
+}
+
 int imx_usbmisc_init(struct imx_usbmisc_data *data)
 {
        struct imx_usbmisc *usbmisc;
@@ -536,7 +559,7 @@ static const struct of_device_id usbmisc_imx_dt_ids[] = {
        },
        {
                .compatible = "fsl,imx51-usbmisc",
-               .data = &imx53_usbmisc_ops,
+               .data = &imx51_usbmisc_ops,
        },
        {
                .compatible = "fsl,imx53-usbmisc",
index 9cd8722f24f65994d83cdb0378387c021e773e5f..a3ffe97170ffd7ccdaf42cc892963ec22793d380 100644 (file)
@@ -144,6 +144,8 @@ const struct of_device_id dwc2_of_match_table[] = {
        { .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
        { .compatible = "snps,dwc2" },
        { .compatible = "samsung,s3c6400-hsotg" },
+       { .compatible = "amlogic,meson8-usb",
+         .data = dwc2_set_amlogic_params },
        { .compatible = "amlogic,meson8b-usb",
          .data = dwc2_set_amlogic_params },
        { .compatible = "amlogic,meson-gxbb-usb",
index 4c8aacc232c07b300b5efffebe25999f8d9d244c..74d57d6994da1602ad72798aabc922ecbea9a0b1 100644 (file)
@@ -396,7 +396,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
 /* Caller must hold fsg->lock */
 static void wakeup_thread(struct fsg_common *common)
 {
-       smp_wmb();      /* ensure the write of bh->state is complete */
+       /*
+        * Ensure the reading of thread_wakeup_needed
+        * and the writing of bh->state are completed
+        */
+       smp_mb();
        /* Tell the main thread that something has happened */
        common->thread_wakeup_needed = 1;
        if (common->thread_task)
@@ -627,7 +631,12 @@ static int sleep_thread(struct fsg_common *common, bool can_freeze)
        }
        __set_current_state(TASK_RUNNING);
        common->thread_wakeup_needed = 0;
-       smp_rmb();      /* ensure the latest bh->state is visible */
+
+       /*
+        * Ensure the writing of thread_wakeup_needed
+        * and the reading of bh->state are completed
+        */
+       smp_mb();
        return rc;
 }
 
index 5a2d845fb1a68708a3ea27b3822ae8de7afb85df..cd4c885297213bd3e1a5b024d0f2fdde3e6232d6 100644 (file)
@@ -623,7 +623,6 @@ static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3)
 {
        usb3_disconnect(usb3);
        usb3_write(usb3, 0, USB3_P0_INT_ENA);
-       usb3_write(usb3, 0, USB3_PN_INT_ENA);
        usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA);
        usb3_write(usb3, 0, USB3_USB_INT_ENA_1);
        usb3_write(usb3, 0, USB3_USB_INT_ENA_2);
@@ -1475,7 +1474,13 @@ static void usb3_request_done_pipen(struct renesas_usb3 *usb3,
                                    struct renesas_usb3_request *usb3_req,
                                    int status)
 {
-       usb3_pn_stop(usb3);
+       unsigned long flags;
+
+       spin_lock_irqsave(&usb3->lock, flags);
+       if (usb3_pn_change(usb3, usb3_ep->num))
+               usb3_pn_stop(usb3);
+       spin_unlock_irqrestore(&usb3->lock, flags);
+
        usb3_disable_pipe_irq(usb3, usb3_ep->num);
        usb3_request_done(usb3_ep, usb3_req, status);
 
@@ -1504,30 +1509,46 @@ static void usb3_irq_epc_pipen_bfrdy(struct renesas_usb3 *usb3, int num)
 {
        struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num);
        struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep);
+       bool done = false;
 
        if (!usb3_req)
                return;
 
+       spin_lock(&usb3->lock);
+       if (usb3_pn_change(usb3, num))
+               goto out;
+
        if (usb3_ep->dir_in) {
                /* Do not stop the IN pipe here to detect LSTTR interrupt */
                if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE))
                        usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA);
        } else {
                if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ))
-                       usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
+                       done = true;
        }
+
+out:
+       /* need to unlock because usb3_request_done_pipen() locks it */
+       spin_unlock(&usb3->lock);
+
+       if (done)
+               usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0);
 }
 
 static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num)
 {
        u32 pn_int_sta;
 
-       if (usb3_pn_change(usb3, num) < 0)
+       spin_lock(&usb3->lock);
+       if (usb3_pn_change(usb3, num) < 0) {
+               spin_unlock(&usb3->lock);
                return;
+       }
 
        pn_int_sta = usb3_read(usb3, USB3_PN_INT_STA);
        pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA);
        usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA);
+       spin_unlock(&usb3->lock);
        if (pn_int_sta & PN_INT_LSTTR)
                usb3_irq_epc_pipen_lsttr(usb3, num);
        if (pn_int_sta & PN_INT_BFRDY)
@@ -1660,6 +1681,7 @@ static int usb3_disable_pipe_n(struct renesas_usb3_ep *usb3_ep)
 
        spin_lock_irqsave(&usb3->lock, flags);
        if (!usb3_pn_change(usb3, usb3_ep->num)) {
+               usb3_write(usb3, 0, USB3_PN_INT_ENA);
                usb3_write(usb3, 0, USB3_PN_RAMMAP);
                usb3_clear_bit(usb3, PN_CON_EN, USB3_PN_CON);
        }
@@ -1799,6 +1821,9 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
        /* hook up the driver */
        usb3->driver = driver;
 
+       pm_runtime_enable(usb3_to_dev(usb3));
+       pm_runtime_get_sync(usb3_to_dev(usb3));
+
        renesas_usb3_init_controller(usb3);
 
        return 0;
@@ -1807,14 +1832,14 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
 static int renesas_usb3_stop(struct usb_gadget *gadget)
 {
        struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget);
-       unsigned long flags;
 
-       spin_lock_irqsave(&usb3->lock, flags);
        usb3->softconnect = false;
        usb3->gadget.speed = USB_SPEED_UNKNOWN;
        usb3->driver = NULL;
        renesas_usb3_stop_controller(usb3);
-       spin_unlock_irqrestore(&usb3->lock, flags);
+
+       pm_runtime_put(usb3_to_dev(usb3));
+       pm_runtime_disable(usb3_to_dev(usb3));
 
        return 0;
 }
@@ -1891,9 +1916,6 @@ static int renesas_usb3_remove(struct platform_device *pdev)
 
        device_remove_file(&pdev->dev, &dev_attr_role);
 
-       pm_runtime_put(&pdev->dev);
-       pm_runtime_disable(&pdev->dev);
-
        usb_del_gadget_udc(&usb3->gadget);
 
        __renesas_usb3_ep_free_request(usb3->ep0_req);
@@ -2099,9 +2121,6 @@ static int renesas_usb3_probe(struct platform_device *pdev)
 
        usb3->workaround_for_vbus = priv->workaround_for_vbus;
 
-       pm_runtime_enable(&pdev->dev);
-       pm_runtime_get_sync(&pdev->dev);
-
        dev_info(&pdev->dev, "probed\n");
 
        return 0;
index 9c7ee26ef388062bdc5e1f1fb2097fc010882950..bc6a9be2ccc55696cb703f04b97e3d0fb08ca59c 100644 (file)
@@ -245,6 +245,11 @@ static int dsps_check_status(struct musb *musb, void *unused)
                dsps_mod_timer_optional(glue);
                break;
        case OTG_STATE_A_WAIT_BCON:
+               /* keep VBUS on for host-only mode */
+               if (musb->port_mode == MUSB_PORT_MODE_HOST) {
+                       dsps_mod_timer_optional(glue);
+                       break;
+               }
                musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
                skip_session = 1;
                /* fall */
index 7a92a5e1d40c6f17227936ee2b6925286deab511..feca75b07fddce01e6e121542d3e0b74d321f85f 100644 (file)
@@ -362,8 +362,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
                                st->global_error = 1;
                }
        }
-       st->va += PAGE_SIZE * nr;
-       st->index += nr;
+       st->va += XEN_PAGE_SIZE * nr;
+       st->index += nr / XEN_PFN_PER_PAGE;
 
        return 0;
 }
index 519599dddd3692ee373a9eb00d95d5757556ad42..bcd8e16a34e185c0c1cfb12420b8943f8e44d11a 100644 (file)
@@ -262,8 +262,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
        if (vecs != inline_vecs)
                kfree(vecs);
 
-       if (unlikely(bio.bi_error))
-               return bio.bi_error;
+       if (unlikely(bio.bi_status))
+               return blk_status_to_errno(bio.bi_status);
        return ret;
 }
 
@@ -288,16 +288,18 @@ static void blkdev_bio_end_io(struct bio *bio)
        bool should_dirty = dio->should_dirty;
 
        if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
-               if (bio->bi_error && !dio->bio.bi_error)
-                       dio->bio.bi_error = bio->bi_error;
+               if (bio->bi_status && !dio->bio.bi_status)
+                       dio->bio.bi_status = bio->bi_status;
        } else {
                if (!dio->is_sync) {
                        struct kiocb *iocb = dio->iocb;
-                       ssize_t ret = dio->bio.bi_error;
+                       ssize_t ret;
 
-                       if (likely(!ret)) {
+                       if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
+                       } else {
+                               ret = blk_status_to_errno(dio->bio.bi_status);
                        }
 
                        dio->iocb->ki_complete(iocb, ret, 0);
@@ -334,7 +336,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        loff_t pos = iocb->ki_pos;
        blk_qc_t qc = BLK_QC_T_NONE;
-       int ret;
+       int ret = 0;
 
        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
@@ -363,7 +365,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
-                       bio->bi_error = ret;
+                       bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }
@@ -412,7 +414,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        }
        __set_current_state(TASK_RUNNING);
 
-       ret = dio->bio.bi_error;
+       if (!ret)
+               ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;
 
index b8622e4d1744de68180f96036ad5ddbc3c195ca8..d87ac27a5f2b4cbf2e415e6a34ac88f66647b008 100644 (file)
@@ -310,7 +310,8 @@ struct btrfs_dio_private {
         * The original bio may be split to several sub-bios, this is
         * done during endio of sub-bios
         */
-       int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int);
+       blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *,
+                       blk_status_t);
 };
 
 /*
index ab14c2e635ca9bc5a0c61330bbd6f7ade04af18c..4ded1c3f92b8b6de1c0eacbba7829a269d18bdff 100644 (file)
@@ -2129,7 +2129,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
        /* mutex is not held! This is not save if IO is not yet completed
         * on umount */
        iodone_w_error = 0;
-       if (bp->bi_error)
+       if (bp->bi_status)
                iodone_w_error = 1;
 
        BUG_ON(NULL == block);
@@ -2143,7 +2143,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
                if ((dev_state->state->print_mask &
                     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
                        pr_info("bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
-                              bp->bi_error,
+                              bp->bi_status,
                               btrfsic_get_block_type(dev_state->state, block),
                               block->logical_bytenr, dev_state->name,
                               block->dev_bytenr, block->mirror_num);
index 10e6b282d09d6e8d31b4ac8740d0119ccad3e786..9ac55b266e78d865bfbdb4cf8ee3c417162ec906 100644 (file)
@@ -155,7 +155,7 @@ static void end_compressed_bio_read(struct bio *bio)
        unsigned long index;
        int ret;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                cb->errors = 1;
 
        /* if there are more bios still pending for this compressed
@@ -268,7 +268,7 @@ static void end_compressed_bio_write(struct bio *bio)
        struct page *page;
        unsigned long index;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                cb->errors = 1;
 
        /* if there are more bios still pending for this compressed
@@ -287,7 +287,7 @@ static void end_compressed_bio_write(struct bio *bio)
                                         cb->start,
                                         cb->start + cb->len - 1,
                                         NULL,
-                                        bio->bi_error ? 0 : 1);
+                                        bio->bi_status ? 0 : 1);
        cb->compressed_pages[0]->mapping = NULL;
 
        end_compressed_writeback(inode, cb);
@@ -320,7 +320,7 @@ out:
  * This also checksums the file bytes and gets things ready for
  * the end io hooks.
  */
-int btrfs_submit_compressed_write(struct inode *inode, u64 start,
+blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                 unsigned long len, u64 disk_start,
                                 unsigned long compressed_len,
                                 struct page **compressed_pages,
@@ -335,13 +335,13 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
        struct page *page;
        u64 first_byte = disk_start;
        struct block_device *bdev;
-       int ret;
+       blk_status_t ret;
        int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
        WARN_ON(start & ((u64)PAGE_SIZE - 1));
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
        if (!cb)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
        refcount_set(&cb->pending_bios, 0);
        cb->errors = 0;
        cb->inode = inode;
@@ -358,7 +358,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
        bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
        if (!bio) {
                kfree(cb);
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
        }
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
        bio->bi_private = cb;
@@ -368,17 +368,17 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
        /* create and submit bios for the compressed pages */
        bytes_left = compressed_len;
        for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
+               int submit = 0;
+
                page = compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                if (bio->bi_iter.bi_size)
-                       ret = io_tree->ops->merge_bio_hook(page, 0,
+                       submit = io_tree->ops->merge_bio_hook(page, 0,
                                                           PAGE_SIZE,
                                                           bio, 0);
-               else
-                       ret = 0;
 
                page->mapping = NULL;
-               if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
+               if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
                    PAGE_SIZE) {
                        bio_get(bio);
 
@@ -400,7 +400,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
                        ret = btrfs_map_bio(fs_info, bio, 0, 1);
                        if (ret) {
-                               bio->bi_error = ret;
+                               bio->bi_status = ret;
                                bio_endio(bio);
                        }
 
@@ -434,7 +434,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
        ret = btrfs_map_bio(fs_info, bio, 0, 1);
        if (ret) {
-               bio->bi_error = ret;
+               bio->bi_status = ret;
                bio_endio(bio);
        }
 
@@ -569,7 +569,7 @@ next:
  * After the compressed pages are read, we copy the bytes into the
  * bio we were passed and then call the bio end_io calls
  */
-int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -586,7 +586,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        u64 em_len;
        u64 em_start;
        struct extent_map *em;
-       int ret = -ENOMEM;
+       blk_status_t ret = BLK_STS_RESOURCE;
        int faili = 0;
        u32 *sums;
 
@@ -600,7 +600,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                   PAGE_SIZE);
        read_unlock(&em_tree->lock);
        if (!em)
-               return -EIO;
+               return BLK_STS_IOERR;
 
        compressed_len = em->block_len;
        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
@@ -659,19 +659,19 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        refcount_set(&cb->pending_bios, 1);
 
        for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+               int submit = 0;
+
                page = cb->compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                page->index = em_start >> PAGE_SHIFT;
 
                if (comp_bio->bi_iter.bi_size)
-                       ret = tree->ops->merge_bio_hook(page, 0,
+                       submit = tree->ops->merge_bio_hook(page, 0,
                                                        PAGE_SIZE,
                                                        comp_bio, 0);
-               else
-                       ret = 0;
 
                page->mapping = NULL;
-               if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
+               if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
                    PAGE_SIZE) {
                        bio_get(comp_bio);
 
@@ -697,7 +697,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
                        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
                        if (ret) {
-                               comp_bio->bi_error = ret;
+                               comp_bio->bi_status = ret;
                                bio_endio(comp_bio);
                        }
 
@@ -726,7 +726,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
        if (ret) {
-               comp_bio->bi_error = ret;
+               comp_bio->bi_status = ret;
                bio_endio(comp_bio);
        }
 
index 39ec43ab8df1b72bf9a7cc57b0a90ea4e5fb8602..680d4265d601a7dbc0ad84750751373a751a6910 100644 (file)
@@ -48,12 +48,12 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
                              unsigned long total_out, u64 disk_start,
                              struct bio *bio);
 
-int btrfs_submit_compressed_write(struct inode *inode, u64 start,
+blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                  unsigned long len, u64 disk_start,
                                  unsigned long compressed_len,
                                  struct page **compressed_pages,
                                  unsigned long nr_pages);
-int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags);
 
 enum btrfs_compression_type {
index 643c70d2b2e65ab96a93ff4c022756ea7e59d179..a0d0c79d95eddbba273e377d0ab2d9091fb857c9 100644 (file)
@@ -2563,7 +2563,7 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
 static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
                                                 unsigned num_items)
 {
-       return fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
+       return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
 }
 
 /*
@@ -2573,7 +2573,7 @@ static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
 static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
                                                 unsigned num_items)
 {
-       return fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
+       return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
 }
 
 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
@@ -3078,8 +3078,8 @@ int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
 struct btrfs_dio_private;
 int btrfs_del_csums(struct btrfs_trans_handle *trans,
                    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len);
-int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
-int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
+blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
                              u64 logical_offset);
 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
@@ -3094,7 +3094,7 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_ordered_sum *sums);
-int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
                       u64 file_start, int contig);
 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
                             struct list_head *list, int search_commit);
index 60a750678a82b335ab7fd0e9432288d013c52e64..c24d615e3d7f60694d25d6a5c34f2dff45121ca7 100644 (file)
@@ -468,7 +468,7 @@ int verify_dir_item(struct btrfs_fs_info *fs_info,
 
        if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
                btrfs_crit(fs_info, "invalid dir item name len: %u",
-                      (unsigned)btrfs_dir_data_len(leaf, dir_item));
+                      (unsigned)btrfs_dir_name_len(leaf, dir_item));
                return 1;
        }
 
index 8685d67185d01bf90bcd2cf6d7cdd168e044c777..6036d15b47b851e6d2acff6615f2316eb415cd60 100644 (file)
@@ -87,7 +87,7 @@ struct btrfs_end_io_wq {
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
-       int error;
+       blk_status_t status;
        enum btrfs_wq_endio_type metadata;
        struct list_head list;
        struct btrfs_work work;
@@ -131,7 +131,7 @@ struct async_submit_bio {
         */
        u64 bio_offset;
        struct btrfs_work work;
-       int error;
+       blk_status_t status;
 };
 
 /*
@@ -799,7 +799,7 @@ static void end_workqueue_bio(struct bio *bio)
        btrfs_work_func_t func;
 
        fs_info = end_io_wq->info;
-       end_io_wq->error = bio->bi_error;
+       end_io_wq->status = bio->bi_status;
 
        if (bio_op(bio) == REQ_OP_WRITE) {
                if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
@@ -836,19 +836,19 @@ static void end_workqueue_bio(struct bio *bio)
        btrfs_queue_work(wq, &end_io_wq->work);
 }
 
-int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
+blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        enum btrfs_wq_endio_type metadata)
 {
        struct btrfs_end_io_wq *end_io_wq;
 
        end_io_wq = kmem_cache_alloc(btrfs_end_io_wq_cache, GFP_NOFS);
        if (!end_io_wq)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
-       end_io_wq->error = 0;
+       end_io_wq->status = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;
 
@@ -868,14 +868,14 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
 static void run_one_async_start(struct btrfs_work *work)
 {
        struct async_submit_bio *async;
-       int ret;
+       blk_status_t ret;
 
        async = container_of(work, struct  async_submit_bio, work);
        ret = async->submit_bio_start(async->inode, async->bio,
                                      async->mirror_num, async->bio_flags,
                                      async->bio_offset);
        if (ret)
-               async->error = ret;
+               async->status = ret;
 }
 
 static void run_one_async_done(struct btrfs_work *work)
@@ -898,8 +898,8 @@ static void run_one_async_done(struct btrfs_work *work)
                wake_up(&fs_info->async_submit_wait);
 
        /* If an error occurred we just want to clean up the bio and move on */
-       if (async->error) {
-               async->bio->bi_error = async->error;
+       if (async->status) {
+               async->bio->bi_status = async->status;
                bio_endio(async->bio);
                return;
        }
@@ -916,18 +916,17 @@ static void run_one_async_free(struct btrfs_work *work)
        kfree(async);
 }
 
-int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
-                       struct bio *bio, int mirror_num,
-                       unsigned long bio_flags,
-                       u64 bio_offset,
-                       extent_submit_bio_hook_t *submit_bio_start,
-                       extent_submit_bio_hook_t *submit_bio_done)
+blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
+               struct inode *inode, struct bio *bio, int mirror_num,
+               unsigned long bio_flags, u64 bio_offset,
+               extent_submit_bio_hook_t *submit_bio_start,
+               extent_submit_bio_hook_t *submit_bio_done)
 {
        struct async_submit_bio *async;
 
        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        async->inode = inode;
        async->bio = bio;
@@ -941,7 +940,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
        async->bio_flags = bio_flags;
        async->bio_offset = bio_offset;
 
-       async->error = 0;
+       async->status = 0;
 
        atomic_inc(&fs_info->nr_async_submits);
 
@@ -959,7 +958,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
        return 0;
 }
 
-static int btree_csum_one_bio(struct bio *bio)
+static blk_status_t btree_csum_one_bio(struct bio *bio)
 {
        struct bio_vec *bvec;
        struct btrfs_root *root;
@@ -972,12 +971,12 @@ static int btree_csum_one_bio(struct bio *bio)
                        break;
        }
 
-       return ret;
+       return errno_to_blk_status(ret);
 }
 
-static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
-                                   int mirror_num, unsigned long bio_flags,
-                                   u64 bio_offset)
+static blk_status_t __btree_submit_bio_start(struct inode *inode,
+               struct bio *bio, int mirror_num, unsigned long bio_flags,
+               u64 bio_offset)
 {
        /*
         * when we're called for a write, we're already in the async
@@ -986,11 +985,11 @@ static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
        return btree_csum_one_bio(bio);
 }
 
-static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
-                                int mirror_num, unsigned long bio_flags,
-                                u64 bio_offset)
+static blk_status_t __btree_submit_bio_done(struct inode *inode,
+               struct bio *bio, int mirror_num, unsigned long bio_flags,
+               u64 bio_offset)
 {
-       int ret;
+       blk_status_t ret;
 
        /*
         * when we're called for a write, we're already in the async
@@ -998,7 +997,7 @@ static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
         */
        ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
        if (ret) {
-               bio->bi_error = ret;
+               bio->bi_status = ret;
                bio_endio(bio);
        }
        return ret;
@@ -1015,13 +1014,13 @@ static int check_async_write(unsigned long bio_flags)
        return 1;
 }
 
-static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
+static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int async = check_async_write(bio_flags);
-       int ret;
+       blk_status_t ret;
 
        if (bio_op(bio) != REQ_OP_WRITE) {
                /*
@@ -1054,7 +1053,7 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
        return 0;
 
 out_w_error:
-       bio->bi_error = ret;
+       bio->bi_status = ret;
        bio_endio(bio);
        return ret;
 }
@@ -1820,7 +1819,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
        end_io_wq = container_of(work, struct btrfs_end_io_wq, work);
        bio = end_io_wq->bio;
 
-       bio->bi_error = end_io_wq->error;
+       bio->bi_status = end_io_wq->status;
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq);
@@ -3467,10 +3466,12 @@ static int write_dev_supers(struct btrfs_device *device,
                 * we fua the first super.  The others we allow
                 * to go down lazy.
                 */
-               if (i == 0)
-                       ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh);
-               else
+               if (i == 0) {
+                       ret = btrfsic_submit_bh(REQ_OP_WRITE,
+                                               REQ_SYNC | REQ_FUA, bh);
+               } else {
                        ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
+               }
                if (ret)
                        errors++;
        }
@@ -3495,11 +3496,11 @@ static void btrfs_end_empty_barrier(struct bio *bio)
  * any device where the flush fails with eopnotsupp are flagged as not-barrier
  * capable
  */
-static int write_dev_flush(struct btrfs_device *device, int wait)
+static blk_status_t write_dev_flush(struct btrfs_device *device, int wait)
 {
        struct request_queue *q = bdev_get_queue(device->bdev);
        struct bio *bio;
-       int ret = 0;
+       blk_status_t ret = 0;
 
        if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return 0;
@@ -3511,8 +3512,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 
                wait_for_completion(&device->flush_wait);
 
-               if (bio->bi_error) {
-                       ret = bio->bi_error;
+               if (bio->bi_status) {
+                       ret = bio->bi_status;
                        btrfs_dev_stat_inc_and_print(device,
                                BTRFS_DEV_STAT_FLUSH_ERRS);
                }
@@ -3531,11 +3532,11 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
        device->flush_bio = NULL;
        bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
        if (!bio)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        bio->bi_end_io = btrfs_end_empty_barrier;
        bio->bi_bdev = device->bdev;
-       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+       bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
        init_completion(&device->flush_wait);
        bio->bi_private = &device->flush_wait;
        device->flush_bio = bio;
@@ -3556,7 +3557,7 @@ static int barrier_all_devices(struct btrfs_fs_info *info)
        struct btrfs_device *dev;
        int errors_send = 0;
        int errors_wait = 0;
-       int ret;
+       blk_status_t ret;
 
        /* send down all the barriers */
        head = &info->fs_devices->devices;
index 21f1ceb85b76737a67c1ffbc02cbd725b09fb510..c581927555f3da9e2eb90f86aff23149175c4274 100644 (file)
@@ -118,13 +118,13 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
 u32 btrfs_csum_data(const char *data, u32 seed, size_t len);
 void btrfs_csum_final(u32 crc, u8 *result);
-int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
+blk_status_t btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        enum btrfs_wq_endio_type metadata);
-int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
-                       struct bio *bio, int mirror_num,
-                       unsigned long bio_flags, u64 bio_offset,
-                       extent_submit_bio_hook_t *submit_bio_start,
-                       extent_submit_bio_hook_t *submit_bio_done);
+blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info,
+               struct inode *inode, struct bio *bio, int mirror_num,
+               unsigned long bio_flags, u64 bio_offset,
+               extent_submit_bio_hook_t *submit_bio_start,
+               extent_submit_bio_hook_t *submit_bio_done);
 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
 int btrfs_write_tree_block(struct extent_buffer *buf);
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
index e390451c72e6cdb93492e519cea82d5d7b3dfaf9..33d979e9ea2a307802434f8425a6b3a33c5d59d6 100644 (file)
@@ -3993,6 +3993,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
                                    info->space_info_kobj, "%s",
                                    alloc_name(found->flags));
        if (ret) {
+               percpu_counter_destroy(&found->total_bytes_pinned);
                kfree(found);
                return ret;
        }
@@ -4844,7 +4845,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
        spin_unlock(&delayed_rsv->lock);
 
 commit:
-       trans = btrfs_join_transaction(fs_info->fs_root);
+       trans = btrfs_join_transaction(fs_info->extent_root);
        if (IS_ERR(trans))
                return -ENOSPC;
 
@@ -4862,7 +4863,7 @@ static int flush_space(struct btrfs_fs_info *fs_info,
                       struct btrfs_space_info *space_info, u64 num_bytes,
                       u64 orig_bytes, int state)
 {
-       struct btrfs_root *root = fs_info->fs_root;
+       struct btrfs_root *root = fs_info->extent_root;
        struct btrfs_trans_handle *trans;
        int nr;
        int ret = 0;
@@ -5062,7 +5063,7 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
        int flush_state = FLUSH_DELAYED_ITEMS_NR;
 
        spin_lock(&space_info->lock);
-       to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
+       to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->extent_root,
                                                      space_info);
        if (!to_reclaim) {
                spin_unlock(&space_info->lock);
index d8da3edf2ac39ebcc0bde0ede7da74f0f81ad9ea..8f66e55e7ba1b7cefabee4729c5d483bb2a7c49f 100644 (file)
@@ -2399,6 +2399,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
        struct bio *bio;
        int read_mode = 0;
+       blk_status_t status;
        int ret;
 
        BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -2431,11 +2432,12 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
                "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
                read_mode, failrec->this_mirror, failrec->in_validation);
 
-       ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
+       status = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
                                         failrec->bio_flags, 0);
-       if (ret) {
+       if (status) {
                free_io_failure(BTRFS_I(inode), failrec);
                bio_put(bio);
+               ret = blk_status_to_errno(status);
        }
 
        return ret;
@@ -2458,7 +2460,7 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
        if (!uptodate) {
                ClearPageUptodate(page);
                SetPageError(page);
-               ret = ret < 0 ? ret : -EIO;
+               ret = err < 0 ? err : -EIO;
                mapping_set_error(page->mapping, ret);
        }
 }
@@ -2474,6 +2476,7 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
  */
 static void end_bio_extent_writepage(struct bio *bio)
 {
+       int error = blk_status_to_errno(bio->bi_status);
        struct bio_vec *bvec;
        u64 start;
        u64 end;
@@ -2503,7 +2506,7 @@ static void end_bio_extent_writepage(struct bio *bio)
                start = page_offset(page);
                end = start + bvec->bv_offset + bvec->bv_len - 1;
 
-               end_extent_writepage(page, bio->bi_error, start, end);
+               end_extent_writepage(page, error, start, end);
                end_page_writeback(page);
        }
 
@@ -2536,7 +2539,7 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
 static void end_bio_extent_readpage(struct bio *bio)
 {
        struct bio_vec *bvec;
-       int uptodate = !bio->bi_error;
+       int uptodate = !bio->bi_status;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
        struct extent_io_tree *tree;
        u64 offset = 0;
@@ -2556,7 +2559,7 @@ static void end_bio_extent_readpage(struct bio *bio)
 
                btrfs_debug(fs_info,
                        "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
-                       (u64)bio->bi_iter.bi_sector, bio->bi_error,
+                       (u64)bio->bi_iter.bi_sector, bio->bi_status,
                        io_bio->mirror_num);
                tree = &BTRFS_I(inode)->io_tree;
 
@@ -2615,7 +2618,7 @@ static void end_bio_extent_readpage(struct bio *bio)
                                ret = bio_readpage_error(bio, offset, page,
                                                         start, end, mirror);
                                if (ret == 0) {
-                                       uptodate = !bio->bi_error;
+                                       uptodate = !bio->bi_status;
                                        offset += len;
                                        continue;
                                }
@@ -2673,7 +2676,7 @@ readpage_ok:
                endio_readpage_release_extent(tree, extent_start, extent_len,
                                              uptodate);
        if (io_bio->end_io)
-               io_bio->end_io(io_bio, bio->bi_error);
+               io_bio->end_io(io_bio, blk_status_to_errno(bio->bi_status));
        bio_put(bio);
 }
 
@@ -2743,7 +2746,7 @@ struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
                                       unsigned long bio_flags)
 {
-       int ret = 0;
+       blk_status_t ret = 0;
        struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
        struct page *page = bvec->bv_page;
        struct extent_io_tree *tree = bio->bi_private;
@@ -2761,7 +2764,7 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
                btrfsic_submit_bio(bio);
 
        bio_put(bio);
-       return ret;
+       return blk_status_to_errno(ret);
 }
 
 static int merge_bio(struct extent_io_tree *tree, struct page *page,
@@ -3707,7 +3710,7 @@ static void end_bio_extent_buffer_writepage(struct bio *bio)
                BUG_ON(!eb);
                done = atomic_dec_and_test(&eb->io_pages);
 
-               if (bio->bi_error ||
+               if (bio->bi_status ||
                    test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) {
                        ClearPageUptodate(page);
                        set_btree_ioerr(page);
@@ -4377,6 +4380,123 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
        return NULL;
 }
 
+/*
+ * To cache previous fiemap extent
+ *
+ * Will be used for merging fiemap extent
+ */
+struct fiemap_cache {
+       u64 offset;
+       u64 phys;
+       u64 len;
+       u32 flags;
+       bool cached;
+};
+
+/*
+ * Helper to submit fiemap extent.
+ *
+ * Will try to merge current fiemap extent specified by @offset, @phys,
+ * @len and @flags with cached one.
+ * Only when we fail to merge will the cached one be submitted as a
+ * fiemap extent.
+ *
+ * Return value is the same as fiemap_fill_next_extent().
+ */
+static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
+                               struct fiemap_cache *cache,
+                               u64 offset, u64 phys, u64 len, u32 flags)
+{
+       int ret = 0;
+
+       if (!cache->cached)
+               goto assign;
+
+       /*
+        * Sanity check, extent_fiemap() should have ensured that new
+        * fiemap extent won't overlap with the cached one.
+        * Not recoverable.
+        *
+        * NOTE: Physical address can overlap, due to compression
+        */
+       if (cache->offset + cache->len > offset) {
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       /*
+        * Only merges fiemap extents if
+        * 1) Their logical addresses are continuous
+        *
+        * 2) Their physical addresses are continuous
+        *    So truly compressed (physical size smaller than logical size)
+        *    extents won't get merged with each other
+        *
+        * 3) Share same flags except FIEMAP_EXTENT_LAST
+        *    So regular extent won't get merged with prealloc extent
+        */
+       if (cache->offset + cache->len  == offset &&
+           cache->phys + cache->len == phys  &&
+           (cache->flags & ~FIEMAP_EXTENT_LAST) ==
+                       (flags & ~FIEMAP_EXTENT_LAST)) {
+               cache->len += len;
+               cache->flags |= flags;
+               goto try_submit_last;
+       }
+
+       /* Not mergeable, need to submit cached one */
+       ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
+                                     cache->len, cache->flags);
+       cache->cached = false;
+       if (ret)
+               return ret;
+assign:
+       cache->cached = true;
+       cache->offset = offset;
+       cache->phys = phys;
+       cache->len = len;
+       cache->flags = flags;
+try_submit_last:
+       if (cache->flags & FIEMAP_EXTENT_LAST) {
+               ret = fiemap_fill_next_extent(fieinfo, cache->offset,
+                               cache->phys, cache->len, cache->flags);
+               cache->cached = false;
+       }
+       return ret;
+}
+
+/*
+ * Sanity check for fiemap cache
+ *
+ * All fiemap cache should be submitted by emit_fiemap_extent()
+ * Iteration should be terminated either by last fiemap extent or
+ * fieinfo->fi_extents_max.
+ * So no cached fiemap should exist.
+ */
+static int check_fiemap_cache(struct btrfs_fs_info *fs_info,
+                              struct fiemap_extent_info *fieinfo,
+                              struct fiemap_cache *cache)
+{
+       int ret;
+
+       if (!cache->cached)
+               return 0;
+
+       /* Small and recoverable problem, only to inform the developer */
+#ifdef CONFIG_BTRFS_DEBUG
+       WARN_ON(1);
+#endif
+       btrfs_warn(fs_info,
+                  "unhandled fiemap cache detected: offset=%llu phys=%llu len=%llu flags=0x%x",
+                  cache->offset, cache->phys, cache->len, cache->flags);
+       ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
+                                     cache->len, cache->flags);
+       cache->cached = false;
+       if (ret > 0)
+               ret = 0;
+       return ret;
+}
+
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len, get_extent_t *get_extent)
 {
@@ -4394,6 +4514,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        struct extent_state *cached_state = NULL;
        struct btrfs_path *path;
        struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct fiemap_cache cache = { 0 };
        int end = 0;
        u64 em_start = 0;
        u64 em_len = 0;
@@ -4573,8 +4694,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        flags |= FIEMAP_EXTENT_LAST;
                        end = 1;
                }
-               ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
-                                             em_len, flags);
+               ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
+                                          em_len, flags);
                if (ret) {
                        if (ret == 1)
                                ret = 0;
@@ -4582,6 +4703,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                }
        }
 out_free:
+       if (!ret)
+               ret = check_fiemap_cache(root->fs_info, fieinfo, &cache);
        free_extent_map(em);
 out:
        btrfs_free_path(path);
index 1eafa2f0ede370ae802bb882557b3d4ad5c26340..487ca0207cb659d327bb4a8135d2e1f0fc1f9f9d 100644 (file)
@@ -92,9 +92,9 @@ struct btrfs_inode;
 struct btrfs_io_bio;
 struct io_failure_record;
 
-typedef        int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio,
-                                      int mirror_num, unsigned long bio_flags,
-                                      u64 bio_offset);
+typedef        blk_status_t (extent_submit_bio_hook_t)(struct inode *inode,
+               struct bio *bio, int mirror_num, unsigned long bio_flags,
+               u64 bio_offset);
 struct extent_io_ops {
        /*
         * The following callbacks must be always defined, the function
index 64fcb31d71633c2731d6241b1236f7c57b1f5b6f..5b1c7090e546f0198e82bf15b6df789a20430c37 100644 (file)
@@ -160,7 +160,7 @@ static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
        kfree(bio->csum_allocated);
 }
 
-static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
+static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
                                   u64 logical_offset, u32 *dst, int dio)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -182,7 +182,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 
        path = btrfs_alloc_path();
        if (!path)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
        if (!dst) {
@@ -191,7 +191,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
                                        csum_size, GFP_NOFS);
                        if (!btrfs_bio->csum_allocated) {
                                btrfs_free_path(path);
-                               return -ENOMEM;
+                               return BLK_STS_RESOURCE;
                        }
                        btrfs_bio->csum = btrfs_bio->csum_allocated;
                        btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
@@ -303,12 +303,12 @@ next:
        return 0;
 }
 
-int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
+blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
 {
        return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
 }
 
-int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
+blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
 {
        return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
 }
@@ -433,7 +433,7 @@ fail:
        return ret;
 }
 
-int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
+blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
                       u64 file_start, int contig)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -452,7 +452,7 @@ int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
        sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
                       GFP_NOFS);
        if (!sums)
-               return -ENOMEM;
+               return BLK_STS_RESOURCE;
 
        sums->len = bio->bi_iter.bi_size;
        INIT_LIST_HEAD(&sums->list);
index 17cbe9306fafd9b9e7247bec4308494ed770b28b..f942293dd7e796f89a5602ad9afad295ea018403 100644 (file)
@@ -842,13 +842,12 @@ retry:
                                NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
                                PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
                                PAGE_SET_WRITEBACK);
-               ret = btrfs_submit_compressed_write(inode,
+               if (btrfs_submit_compressed_write(inode,
                                    async_extent->start,
                                    async_extent->ram_size,
                                    ins.objectid,
                                    ins.offset, async_extent->pages,
-                                   async_extent->nr_pages);
-               if (ret) {
+                                   async_extent->nr_pages)) {
                        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
                        struct page *p = async_extent->pages[0];
                        const u64 start = async_extent->start;
@@ -1901,11 +1900,11 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
  * At IO completion time the cums attached on the ordered extent record
  * are inserted into the btree
  */
-static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
-                                   int mirror_num, unsigned long bio_flags,
-                                   u64 bio_offset)
+static blk_status_t __btrfs_submit_bio_start(struct inode *inode,
+               struct bio *bio, int mirror_num, unsigned long bio_flags,
+               u64 bio_offset)
 {
-       int ret = 0;
+       blk_status_t ret = 0;
 
        ret = btrfs_csum_one_bio(inode, bio, 0, 0);
        BUG_ON(ret); /* -ENOMEM */
@@ -1920,16 +1919,16 @@ static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
  * At IO completion time the cums attached on the ordered extent record
  * are inserted into the btree
  */
-static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
-                         int mirror_num, unsigned long bio_flags,
-                         u64 bio_offset)
+static blk_status_t __btrfs_submit_bio_done(struct inode *inode,
+               struct bio *bio, int mirror_num, unsigned long bio_flags,
+               u64 bio_offset)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-       int ret;
+       blk_status_t ret;
 
        ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
        if (ret) {
-               bio->bi_error = ret;
+               bio->bi_status = ret;
                bio_endio(bio);
        }
        return ret;
@@ -1939,14 +1938,14 @@ static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
  * extent_io.c submission hook. This does the right thing for csum calculation
  * on write, or reading the csums from the tree before a read
  */
-static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
+static blk_status_t btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
                          int mirror_num, unsigned long bio_flags,
                          u64 bio_offset)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
-       int ret = 0;
+       blk_status_t ret = 0;
        int skip_sum;
        int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
 
@@ -1991,8 +1990,8 @@ mapit:
        ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
 
 out:
-       if (ret < 0) {
-               bio->bi_error = ret;
+       if (ret) {
+               bio->bi_status = ret;
                bio_endio(bio);
        }
        return ret;
@@ -2952,7 +2951,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 
        ret = test_range_bit(io_tree, ordered_extent->file_offset,
                        ordered_extent->file_offset + ordered_extent->len - 1,
-                       EXTENT_DEFRAG, 1, cached_state);
+                       EXTENT_DEFRAG, 0, cached_state);
        if (ret) {
                u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
                if (0 && last_snapshot >= BTRFS_I(inode)->generation)
@@ -7483,8 +7482,8 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
        int found = false;
        void **pagep = NULL;
        struct page *page = NULL;
-       int start_idx;
-       int end_idx;
+       unsigned long start_idx;
+       unsigned long end_idx;
 
        start_idx = start >> PAGE_SHIFT;
 
@@ -8037,7 +8036,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio)
        struct bio_vec *bvec;
        int i;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                goto end;
 
        ASSERT(bio->bi_vcnt == 1);
@@ -8116,7 +8115,7 @@ static void btrfs_retry_endio(struct bio *bio)
        int ret;
        int i;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                goto end;
 
        uptodate = 1;
@@ -8141,8 +8140,8 @@ end:
        bio_put(bio);
 }
 
-static int __btrfs_subio_endio_read(struct inode *inode,
-                                   struct btrfs_io_bio *io_bio, int err)
+static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
+               struct btrfs_io_bio *io_bio, blk_status_t err)
 {
        struct btrfs_fs_info *fs_info;
        struct bio_vec *bvec;
@@ -8184,7 +8183,7 @@ try_again:
                                io_bio->mirror_num,
                                btrfs_retry_endio, &done);
                if (ret) {
-                       err = ret;
+                       err = errno_to_blk_status(ret);
                        goto next;
                }
 
@@ -8211,8 +8210,8 @@ next:
        return err;
 }
 
-static int btrfs_subio_endio_read(struct inode *inode,
-                                 struct btrfs_io_bio *io_bio, int err)
+static blk_status_t btrfs_subio_endio_read(struct inode *inode,
+               struct btrfs_io_bio *io_bio, blk_status_t err)
 {
        bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
@@ -8232,7 +8231,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
        struct inode *inode = dip->inode;
        struct bio *dio_bio;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
-       int err = bio->bi_error;
+       blk_status_t err = bio->bi_status;
 
        if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
                err = btrfs_subio_endio_read(inode, io_bio, err);
@@ -8243,11 +8242,11 @@ static void btrfs_endio_direct_read(struct bio *bio)
 
        kfree(dip);
 
-       dio_bio->bi_error = bio->bi_error;
-       dio_end_io(dio_bio, bio->bi_error);
+       dio_bio->bi_status = bio->bi_status;
+       dio_end_io(dio_bio);
 
        if (io_bio->end_io)
-               io_bio->end_io(io_bio, err);
+               io_bio->end_io(io_bio, blk_status_to_errno(err));
        bio_put(bio);
 }
 
@@ -8299,20 +8298,20 @@ static void btrfs_endio_direct_write(struct bio *bio)
        struct bio *dio_bio = dip->dio_bio;
 
        __endio_write_update_ordered(dip->inode, dip->logical_offset,
-                                    dip->bytes, !bio->bi_error);
+                                    dip->bytes, !bio->bi_status);
 
        kfree(dip);
 
-       dio_bio->bi_error = bio->bi_error;
-       dio_end_io(dio_bio, bio->bi_error);
+       dio_bio->bi_status = bio->bi_status;
+       dio_end_io(dio_bio);
        bio_put(bio);
 }
 
-static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
+static blk_status_t __btrfs_submit_bio_start_direct_io(struct inode *inode,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags, u64 offset)
 {
-       int ret;
+       blk_status_t ret;
        ret = btrfs_csum_one_bio(inode, bio, offset, 1);
        BUG_ON(ret); /* -ENOMEM */
        return 0;
@@ -8321,7 +8320,7 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
 static void btrfs_end_dio_bio(struct bio *bio)
 {
        struct btrfs_dio_private *dip = bio->bi_private;
-       int err = bio->bi_error;
+       blk_status_t err = bio->bi_status;
 
        if (err)
                btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
@@ -8351,7 +8350,7 @@ static void btrfs_end_dio_bio(struct bio *bio)
        if (dip->errors) {
                bio_io_error(dip->orig_bio);
        } else {
-               dip->dio_bio->bi_error = 0;
+               dip->dio_bio->bi_status = 0;
                bio_endio(dip->orig_bio);
        }
 out:
@@ -8368,14 +8367,14 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
        return bio;
 }
 
-static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode,
+static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
                                                 struct btrfs_dio_private *dip,
                                                 struct bio *bio,
                                                 u64 file_offset)
 {
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
        struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
-       int ret;
+       blk_status_t ret;
 
        /*
         * We load all the csum data we need when we submit
@@ -8406,7 +8405,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_dio_private *dip = bio->bi_private;
        bool write = bio_op(bio) == REQ_OP_WRITE;
-       int ret;
+       blk_status_t ret;
 
        if (async_submit)
                async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
@@ -8649,7 +8648,7 @@ free_ordered:
         * callbacks - they require an allocated dip and a clone of dio_bio.
         */
        if (io_bio && dip) {
-               io_bio->bi_error = -EIO;
+               io_bio->bi_status = BLK_STS_IOERR;
                bio_endio(io_bio);
                /*
                 * The end io callbacks free our dip, do the final put on io_bio
@@ -8668,12 +8667,12 @@ free_ordered:
                        unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
                              file_offset + dio_bio->bi_iter.bi_size - 1);
 
-               dio_bio->bi_error = -EIO;
+               dio_bio->bi_status = BLK_STS_IOERR;
                /*
                 * Releases and cleans up our dio_bio, no need to bio_put()
                 * nor bio_endio()/bio_io_error() against dio_bio.
                 */
-               dio_end_io(dio_bio, ret);
+               dio_end_io(dio_bio);
        }
        if (io_bio)
                bio_put(io_bio);
index d8ea0eb76325e9b25d42dfa4a99c63918981aa2f..f3d30d9ea8f93c652015d2c6a0e71b3bb3de1d32 100644 (file)
@@ -871,7 +871,7 @@ static void free_raid_bio(struct btrfs_raid_bio *rbio)
  * this frees the rbio and runs through all the bios in the
  * bio_list and calls end_io on them
  */
-static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
+static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
 {
        struct bio *cur = bio_list_get(&rbio->bio_list);
        struct bio *next;
@@ -884,7 +884,7 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
        while (cur) {
                next = cur->bi_next;
                cur->bi_next = NULL;
-               cur->bi_error = err;
+               cur->bi_status = err;
                bio_endio(cur);
                cur = next;
        }
@@ -897,7 +897,7 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
 static void raid_write_end_io(struct bio *bio)
 {
        struct btrfs_raid_bio *rbio = bio->bi_private;
-       int err = bio->bi_error;
+       blk_status_t err = bio->bi_status;
        int max_errors;
 
        if (err)
@@ -914,7 +914,7 @@ static void raid_write_end_io(struct bio *bio)
        max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
                     0 : rbio->bbio->max_errors;
        if (atomic_read(&rbio->error) > max_errors)
-               err = -EIO;
+               err = BLK_STS_IOERR;
 
        rbio_orig_end_io(rbio, err);
 }
@@ -1092,7 +1092,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
                 * devices or if they are not contiguous
                 */
                if (last_end == disk_start && stripe->dev->bdev &&
-                   !last->bi_error &&
+                   !last->bi_status &&
                    last->bi_bdev == stripe->dev->bdev) {
                        ret = bio_add_page(last, page, PAGE_SIZE, 0);
                        if (ret == PAGE_SIZE)
@@ -1448,7 +1448,7 @@ static void raid_rmw_end_io(struct bio *bio)
 {
        struct btrfs_raid_bio *rbio = bio->bi_private;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                fail_bio_stripe(rbio, bio);
        else
                set_bio_pages_uptodate(bio);
@@ -1991,7 +1991,7 @@ static void raid_recover_end_io(struct bio *bio)
         * we only read stripe pages off the disk, set them
         * up to date if there were no errors
         */
-       if (bio->bi_error)
+       if (bio->bi_status)
                fail_bio_stripe(rbio, bio);
        else
                set_bio_pages_uptodate(bio);
@@ -2530,7 +2530,7 @@ static void raid56_parity_scrub_end_io(struct bio *bio)
 {
        struct btrfs_raid_bio *rbio = bio->bi_private;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                fail_bio_stripe(rbio, bio);
        else
                set_bio_pages_uptodate(bio);
index c7b45eb2403d09e94b2538dabcb5a1f0116c55dd..ba5595d19de105e0ca0a94eb002d995a55f04094 100644 (file)
@@ -95,7 +95,7 @@ struct scrub_bio {
        struct scrub_ctx        *sctx;
        struct btrfs_device     *dev;
        struct bio              *bio;
-       int                     err;
+       blk_status_t            status;
        u64                     logical;
        u64                     physical;
 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
@@ -1668,14 +1668,14 @@ leave_nomem:
 
 struct scrub_bio_ret {
        struct completion event;
-       int error;
+       blk_status_t status;
 };
 
 static void scrub_bio_wait_endio(struct bio *bio)
 {
        struct scrub_bio_ret *ret = bio->bi_private;
 
-       ret->error = bio->bi_error;
+       ret->status = bio->bi_status;
        complete(&ret->event);
 }
 
@@ -1693,7 +1693,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
        int ret;
 
        init_completion(&done.event);
-       done.error = 0;
+       done.status = 0;
        bio->bi_iter.bi_sector = page->logical >> 9;
        bio->bi_private = &done;
        bio->bi_end_io = scrub_bio_wait_endio;
@@ -1705,7 +1705,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
                return ret;
 
        wait_for_completion(&done.event);
-       if (done.error)
+       if (done.status)
                return -EIO;
 
        return 0;
@@ -1937,7 +1937,7 @@ again:
                bio->bi_bdev = sbio->dev->bdev;
                bio->bi_iter.bi_sector = sbio->physical >> 9;
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-               sbio->err = 0;
+               sbio->status = 0;
        } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
                   spage->physical_for_dev_replace ||
                   sbio->logical + sbio->page_count * PAGE_SIZE !=
@@ -1992,7 +1992,7 @@ static void scrub_wr_bio_end_io(struct bio *bio)
        struct scrub_bio *sbio = bio->bi_private;
        struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
 
-       sbio->err = bio->bi_error;
+       sbio->status = bio->bi_status;
        sbio->bio = bio;
 
        btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
@@ -2007,7 +2007,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
        int i;
 
        WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
-       if (sbio->err) {
+       if (sbio->status) {
                struct btrfs_dev_replace *dev_replace =
                        &sbio->sctx->fs_info->dev_replace;
 
@@ -2341,7 +2341,7 @@ again:
                bio->bi_bdev = sbio->dev->bdev;
                bio->bi_iter.bi_sector = sbio->physical >> 9;
                bio_set_op_attrs(bio, REQ_OP_READ, 0);
-               sbio->err = 0;
+               sbio->status = 0;
        } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
                   spage->physical ||
                   sbio->logical + sbio->page_count * PAGE_SIZE !=
@@ -2377,7 +2377,7 @@ static void scrub_missing_raid56_end_io(struct bio *bio)
        struct scrub_block *sblock = bio->bi_private;
        struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                sblock->no_io_error_seen = 0;
 
        bio_put(bio);
@@ -2588,7 +2588,7 @@ static void scrub_bio_end_io(struct bio *bio)
        struct scrub_bio *sbio = bio->bi_private;
        struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
 
-       sbio->err = bio->bi_error;
+       sbio->status = bio->bi_status;
        sbio->bio = bio;
 
        btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
@@ -2601,7 +2601,7 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
        int i;
 
        BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
-       if (sbio->err) {
+       if (sbio->status) {
                for (i = 0; i < sbio->page_count; i++) {
                        struct scrub_page *spage = sbio->pagev[i];
 
@@ -3004,7 +3004,7 @@ static void scrub_parity_bio_endio(struct bio *bio)
        struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
        struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
                          sparity->nsectors);
 
index 017b67daa3bbf375919019e5c089b94f97d3e2a5..84a495967e0a8bb76afcaad47897b7565000c1d5 100644 (file)
@@ -6042,9 +6042,10 @@ static void btrfs_end_bio(struct bio *bio)
        struct btrfs_bio *bbio = bio->bi_private;
        int is_orig_bio = 0;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                atomic_inc(&bbio->error);
-               if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) {
+               if (bio->bi_status == BLK_STS_IOERR ||
+                   bio->bi_status == BLK_STS_TARGET) {
                        unsigned int stripe_index =
                                btrfs_io_bio(bio)->stripe_index;
                        struct btrfs_device *dev;
@@ -6082,13 +6083,13 @@ static void btrfs_end_bio(struct bio *bio)
                 * beyond the tolerance of the btrfs bio
                 */
                if (atomic_read(&bbio->error) > bbio->max_errors) {
-                       bio->bi_error = -EIO;
+                       bio->bi_status = BLK_STS_IOERR;
                } else {
                        /*
                         * this bio is actually up to date, we didn't
                         * go over the max number of errors
                         */
-                       bio->bi_error = 0;
+                       bio->bi_status = 0;
                }
 
                btrfs_end_bbio(bbio, bio);
@@ -6199,7 +6200,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 
                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
                bio->bi_iter.bi_sector = logical >> 9;
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
                btrfs_end_bbio(bbio, bio);
        }
 }
index 161be58c5cb0f738754b79d87eda879aa3bb9553..306b720f73838228b20b3a70921c951e54e9c7b1 100644 (file)
@@ -3038,7 +3038,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
        if (unlikely(bio_flagged(bio, BIO_QUIET)))
                set_bit(BH_Quiet, &bh->b_state);
 
-       bh->b_end_io(bh, !bio->bi_error);
+       bh->b_end_io(bh, !bio->bi_status);
        bio_put(bio);
 }
 
index a409a84f1bcab289d676bfd1a2c9e0c39e14c1ad..6181e9526860708df8f906830f76ba454e2bb2af 100644 (file)
@@ -129,7 +129,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
                        goto errout;
                }
                err = submit_bio_wait(bio);
-               if ((err == 0) && bio->bi_error)
+               if (err == 0 && bio->bi_status)
                        err = -EIO;
                bio_put(bio);
                if (err)
index a04ebea77de89b4a9bd74cd99f7a1740db7bbb4a..e8baaabebf13b3368a13e706c3209f304339606e 100644 (file)
@@ -294,7 +294,7 @@ static void dio_aio_complete_work(struct work_struct *work)
        dio_complete(dio, 0, true);
 }
 
-static int dio_bio_complete(struct dio *dio, struct bio *bio);
+static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
 
 /*
  * Asynchronous IO callback. 
@@ -348,13 +348,12 @@ static void dio_bio_end_io(struct bio *bio)
 /**
  * dio_end_io - handle the end io action for the given bio
  * @bio: The direct io bio thats being completed
- * @error: Error if there was one
  *
  * This is meant to be called by any filesystem that uses their own dio_submit_t
  * so that the DIO specific endio actions are dealt with after the filesystem
  * has done it's completion work.
  */
-void dio_end_io(struct bio *bio, int error)
+void dio_end_io(struct bio *bio)
 {
        struct dio *dio = bio->bi_private;
 
@@ -474,17 +473,16 @@ static struct bio *dio_await_one(struct dio *dio)
 /*
  * Process one completed BIO.  No locks are held.
  */
-static int dio_bio_complete(struct dio *dio, struct bio *bio)
+static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio)
 {
        struct bio_vec *bvec;
        unsigned i;
-       int err;
+       blk_status_t err = bio->bi_status;
 
-       if (bio->bi_error)
+       if (err)
                dio->io_error = -EIO;
 
        if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) {
-               err = bio->bi_error;
                bio_check_pages_dirty(bio);     /* transfers ownership */
        } else {
                bio_for_each_segment_all(bvec, bio, i) {
@@ -495,7 +493,6 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
                                set_page_dirty_lock(page);
                        put_page(page);
                }
-               err = bio->bi_error;
                bio_put(bio);
        }
        return err;
@@ -539,7 +536,7 @@ static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
                        bio = dio->bio_list;
                        dio->bio_list = bio->bi_private;
                        spin_unlock_irqrestore(&dio->bio_lock, flags);
-                       ret2 = dio_bio_complete(dio, bio);
+                       ret2 = blk_status_to_errno(dio_bio_complete(dio, bio));
                        if (ret == 0)
                                ret = ret2;
                }
index fd389935ecd1629f402128518f63fd7d851e2a84..3ec0e46de95fc8dece943c4d753eb401e6343cad 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
  */
 
+#include <linux/quotaops.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
 #include "xattr.h"
@@ -232,6 +233,9 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
        handle_t *handle;
        int error, retries = 0;
 
+       error = dquot_initialize(inode);
+       if (error)
+               return error;
 retry:
        handle = ext4_journal_start(inode, EXT4_HT_XATTR,
                                    ext4_jbd2_credits_xattr(inode));
index 8e8046104f4d8a945f2eae5e3bca7cb214d3cbb8..32191548abed3b02eb107bfd5a23196f40200a70 100644 (file)
@@ -2523,7 +2523,6 @@ extern int ext4_search_dir(struct buffer_head *bh,
                           int buf_size,
                           struct inode *dir,
                           struct ext4_filename *fname,
-                          const struct qstr *d_name,
                           unsigned int offset,
                           struct ext4_dir_entry_2 **res_dir);
 extern int ext4_generic_delete_entry(handle_t *handle,
@@ -3007,7 +3006,6 @@ extern int htree_inlinedir_to_tree(struct file *dir_file,
                                   int *has_inline_data);
 extern struct buffer_head *ext4_find_inline_entry(struct inode *dir,
                                        struct ext4_filename *fname,
-                                       const struct qstr *d_name,
                                        struct ext4_dir_entry_2 **res_dir,
                                        int *has_inline_data);
 extern int ext4_delete_inline_entry(handle_t *handle,
index 2a97dff87b961771383c33e97dc57997eb6586fb..3e36508610b796b6612cc1e670a0fc2c8c3c282a 100644 (file)
@@ -3413,13 +3413,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        struct ext4_sb_info *sbi;
        struct ext4_extent_header *eh;
        struct ext4_map_blocks split_map;
-       struct ext4_extent zero_ex;
+       struct ext4_extent zero_ex1, zero_ex2;
        struct ext4_extent *ex, *abut_ex;
        ext4_lblk_t ee_block, eof_block;
        unsigned int ee_len, depth, map_len = map->m_len;
        int allocated = 0, max_zeroout = 0;
        int err = 0;
-       int split_flag = 0;
+       int split_flag = EXT4_EXT_DATA_VALID2;
 
        ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
                "block %llu, max_blocks %u\n", inode->i_ino,
@@ -3436,7 +3436,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        ex = path[depth].p_ext;
        ee_block = le32_to_cpu(ex->ee_block);
        ee_len = ext4_ext_get_actual_len(ex);
-       zero_ex.ee_len = 0;
+       zero_ex1.ee_len = 0;
+       zero_ex2.ee_len = 0;
 
        trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
 
@@ -3576,62 +3577,52 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        if (ext4_encrypted_inode(inode))
                max_zeroout = 0;
 
-       /* If extent is less than s_max_zeroout_kb, zeroout directly */
-       if (max_zeroout && (ee_len <= max_zeroout)) {
-               err = ext4_ext_zeroout(inode, ex);
-               if (err)
-                       goto out;
-               zero_ex.ee_block = ex->ee_block;
-               zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
-               ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
-
-               err = ext4_ext_get_access(handle, inode, path + depth);
-               if (err)
-                       goto out;
-               ext4_ext_mark_initialized(ex);
-               ext4_ext_try_to_merge(handle, inode, path, ex);
-               err = ext4_ext_dirty(handle, inode, path + path->p_depth);
-               goto out;
-       }
-
        /*
-        * four cases:
+        * five cases:
         * 1. split the extent into three extents.
-        * 2. split the extent into two extents, zeroout the first half.
-        * 3. split the extent into two extents, zeroout the second half.
+        * 2. split the extent into two extents, zeroout the head of the first
+        *    extent.
+        * 3. split the extent into two extents, zeroout the tail of the second
+        *    extent.
         * 4. split the extent into two extents with out zeroout.
+        * 5. no splitting needed, just possibly zeroout the head and / or the
+        *    tail of the extent.
         */
        split_map.m_lblk = map->m_lblk;
        split_map.m_len = map->m_len;
 
-       if (max_zeroout && (allocated > map->m_len)) {
+       if (max_zeroout && (allocated > split_map.m_len)) {
                if (allocated <= max_zeroout) {
-                       /* case 3 */
-                       zero_ex.ee_block =
-                                        cpu_to_le32(map->m_lblk);
-                       zero_ex.ee_len = cpu_to_le16(allocated);
-                       ext4_ext_store_pblock(&zero_ex,
-                               ext4_ext_pblock(ex) + map->m_lblk - ee_block);
-                       err = ext4_ext_zeroout(inode, &zero_ex);
+                       /* case 3 or 5 */
+                       zero_ex1.ee_block =
+                                cpu_to_le32(split_map.m_lblk +
+                                            split_map.m_len);
+                       zero_ex1.ee_len =
+                               cpu_to_le16(allocated - split_map.m_len);
+                       ext4_ext_store_pblock(&zero_ex1,
+                               ext4_ext_pblock(ex) + split_map.m_lblk +
+                               split_map.m_len - ee_block);
+                       err = ext4_ext_zeroout(inode, &zero_ex1);
                        if (err)
                                goto out;
-                       split_map.m_lblk = map->m_lblk;
                        split_map.m_len = allocated;
-               } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
-                       /* case 2 */
-                       if (map->m_lblk != ee_block) {
-                               zero_ex.ee_block = ex->ee_block;
-                               zero_ex.ee_len = cpu_to_le16(map->m_lblk -
+               }
+               if (split_map.m_lblk - ee_block + split_map.m_len <
+                                                               max_zeroout) {
+                       /* case 2 or 5 */
+                       if (split_map.m_lblk != ee_block) {
+                               zero_ex2.ee_block = ex->ee_block;
+                               zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
                                                        ee_block);
-                               ext4_ext_store_pblock(&zero_ex,
+                               ext4_ext_store_pblock(&zero_ex2,
                                                      ext4_ext_pblock(ex));
-                               err = ext4_ext_zeroout(inode, &zero_ex);
+                               err = ext4_ext_zeroout(inode, &zero_ex2);
                                if (err)
                                        goto out;
                        }
 
+                       split_map.m_len += split_map.m_lblk - ee_block;
                        split_map.m_lblk = ee_block;
-                       split_map.m_len = map->m_lblk - ee_block + map->m_len;
                        allocated = map->m_len;
                }
        }
@@ -3642,8 +3633,11 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                err = 0;
 out:
        /* If we have gotten a failure, don't zero out status tree */
-       if (!err)
-               err = ext4_zeroout_es(inode, &zero_ex);
+       if (!err) {
+               err = ext4_zeroout_es(inode, &zero_ex1);
+               if (!err)
+                       err = ext4_zeroout_es(inode, &zero_ex2);
+       }
        return err ? err : allocated;
 }
 
@@ -4883,6 +4877,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 
        /* Zero out partial block at the edges of the range */
        ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+       if (ret >= 0)
+               ext4_update_inode_fsync_trans(handle, inode, 1);
 
        if (file->f_flags & O_SYNC)
                ext4_handle_sync(handle);
@@ -5569,6 +5565,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
                ext4_handle_sync(handle);
        inode->i_mtime = inode->i_ctime = current_time(inode);
        ext4_mark_inode_dirty(handle, inode);
+       ext4_update_inode_fsync_trans(handle, inode, 1);
 
 out_stop:
        ext4_journal_stop(handle);
@@ -5742,6 +5739,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
        up_write(&EXT4_I(inode)->i_data_sem);
        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
+       if (ret >= 0)
+               ext4_update_inode_fsync_trans(handle, inode, 1);
 
 out_stop:
        ext4_journal_stop(handle);
index 831fd6beebf01bfa65c1c4a3ae505ed84bd35526..02ce7e7bbdf5ba4da0db50e50d8d4cab49cde4a3 100644 (file)
@@ -474,57 +474,37 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
        endoff = (loff_t)end_blk << blkbits;
 
        index = startoff >> PAGE_SHIFT;
-       end = endoff >> PAGE_SHIFT;
+       end = (endoff - 1) >> PAGE_SHIFT;
 
        pagevec_init(&pvec, 0);
        do {
                int i, num;
                unsigned long nr_pages;
 
-               num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+               num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
-               if (nr_pages == 0) {
-                       if (whence == SEEK_DATA)
-                               break;
-
-                       BUG_ON(whence != SEEK_HOLE);
-                       /*
-                        * If this is the first time to go into the loop and
-                        * offset is not beyond the end offset, it will be a
-                        * hole at this offset
-                        */
-                       if (lastoff == startoff || lastoff < endoff)
-                               found = 1;
-                       break;
-               }
-
-               /*
-                * If this is the first time to go into the loop and
-                * offset is smaller than the first page offset, it will be a
-                * hole at this offset.
-                */
-               if (lastoff == startoff && whence == SEEK_HOLE &&
-                   lastoff < page_offset(pvec.pages[0])) {
-                       found = 1;
+               if (nr_pages == 0)
                        break;
-               }
 
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;
 
                        /*
-                        * If the current offset is not beyond the end of given
-                        * range, it will be a hole.
+                        * If current offset is smaller than the page offset,
+                        * there is a hole at this offset.
                         */
-                       if (lastoff < endoff && whence == SEEK_HOLE &&
-                           page->index > end) {
+                       if (whence == SEEK_HOLE && lastoff < endoff &&
+                           lastoff < page_offset(pvec.pages[i])) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }
 
+                       if (page->index > end)
+                               goto out;
+
                        lock_page(page);
 
                        if (unlikely(page->mapping != inode->i_mapping)) {
@@ -564,20 +544,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
                        unlock_page(page);
                }
 
-               /*
-                * The no. of pages is less than our desired, that would be a
-                * hole in there.
-                */
-               if (nr_pages < num && whence == SEEK_HOLE) {
-                       found = 1;
-                       *offset = lastoff;
+               /* The no. of pages is less than our desired, we are done. */
+               if (nr_pages < num)
                        break;
-               }
 
                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);
 
+       if (whence == SEEK_HOLE && lastoff < endoff) {
+               found = 1;
+               *offset = lastoff;
+       }
 out:
        pagevec_release(&pvec);
        return found;
index d5dea4c293ef46be022c538d0465e60284b20fe4..8d141c0c8ff9de32a5be08a48f573de7fa618dc5 100644 (file)
@@ -1627,7 +1627,6 @@ out:
 
 struct buffer_head *ext4_find_inline_entry(struct inode *dir,
                                        struct ext4_filename *fname,
-                                       const struct qstr *d_name,
                                        struct ext4_dir_entry_2 **res_dir,
                                        int *has_inline_data)
 {
@@ -1649,7 +1648,7 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
                                                EXT4_INLINE_DOTDOT_SIZE;
        inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
        ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
-                             dir, fname, d_name, 0, res_dir);
+                             dir, fname, 0, res_dir);
        if (ret == 1)
                goto out_find;
        if (ret < 0)
@@ -1662,7 +1661,7 @@ struct buffer_head *ext4_find_inline_entry(struct inode *dir,
        inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
 
        ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
-                             dir, fname, d_name, 0, res_dir);
+                             dir, fname, 0, res_dir);
        if (ret == 1)
                goto out_find;
 
index 1bd0bfa547f6deee46d8dfe039625ed1736d14ed..5cf82d03968ca2c0eb240461fa696e784694c1ea 100644 (file)
@@ -2124,15 +2124,29 @@ static int ext4_writepage(struct page *page,
 static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
 {
        int len;
-       loff_t size = i_size_read(mpd->inode);
+       loff_t size;
        int err;
 
        BUG_ON(page->index != mpd->first_page);
+       clear_page_dirty_for_io(page);
+       /*
+        * We have to be very careful here!  Nothing protects writeback path
+        * against i_size changes and the page can be writeably mapped into
+        * page tables. So an application can be growing i_size and writing
+        * data through mmap while writeback runs. clear_page_dirty_for_io()
+        * write-protects our page in page tables and the page cannot get
+        * written to again until we release page lock. So only after
+        * clear_page_dirty_for_io() we are safe to sample i_size for
+        * ext4_bio_write_page() to zero-out tail of the written page. We rely
+        * on the barrier provided by TestClearPageDirty in
+        * clear_page_dirty_for_io() to make sure i_size is really sampled only
+        * after page tables are updated.
+        */
+       size = i_size_read(mpd->inode);
        if (page->index == size >> PAGE_SHIFT)
                len = size & ~PAGE_MASK;
        else
                len = PAGE_SIZE;
-       clear_page_dirty_for_io(page);
        err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
        if (!err)
                mpd->wbc->nr_to_write--;
@@ -3629,9 +3643,6 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
                get_block_func = ext4_dio_get_block_unwritten_async;
                dio_flags = DIO_LOCKING;
        }
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-       BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
-#endif
        ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
                                   get_block_func, ext4_end_io_dio, NULL,
                                   dio_flags);
@@ -3713,7 +3724,7 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
         */
        inode_lock_shared(inode);
        ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
-                                          iocb->ki_pos + count);
+                                          iocb->ki_pos + count - 1);
        if (ret)
                goto out_unlock;
        ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
@@ -4207,6 +4218,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 
        inode->i_mtime = inode->i_ctime = current_time(inode);
        ext4_mark_inode_dirty(handle, inode);
+       if (ret >= 0)
+               ext4_update_inode_fsync_trans(handle, inode, 1);
 out_stop:
        ext4_journal_stop(handle);
 out_dio:
@@ -5637,8 +5650,9 @@ static int ext4_expand_extra_isize(struct inode *inode,
        /* No extended attributes present */
        if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
            header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
-               memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
-                       new_extra_isize);
+               memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+                      EXT4_I(inode)->i_extra_isize, 0,
+                      new_extra_isize - EXT4_I(inode)->i_extra_isize);
                EXT4_I(inode)->i_extra_isize = new_extra_isize;
                return 0;
        }
index 5083bce20ac4dc2243ace1bba19ef506b90d9c29..b7928cddd539ee223584db33ab8cb3341faa2e86 100644 (file)
@@ -3887,7 +3887,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 
        err = ext4_mb_load_buddy(sb, group, &e4b);
        if (err) {
-               ext4_error(sb, "Error loading buddy information for %u", group);
+               ext4_warning(sb, "Error %d loading buddy information for %u",
+                            err, group);
                put_bh(bitmap_bh);
                return 0;
        }
@@ -4044,10 +4045,11 @@ repeat:
                BUG_ON(pa->pa_type != MB_INODE_PA);
                group = ext4_get_group_number(sb, pa->pa_pstart);
 
-               err = ext4_mb_load_buddy(sb, group, &e4b);
+               err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+                                            GFP_NOFS|__GFP_NOFAIL);
                if (err) {
-                       ext4_error(sb, "Error loading buddy information for %u",
-                                       group);
+                       ext4_error(sb, "Error %d loading buddy information for %u",
+                                  err, group);
                        continue;
                }
 
@@ -4303,11 +4305,14 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
        spin_unlock(&lg->lg_prealloc_lock);
 
        list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
+               int err;
 
                group = ext4_get_group_number(sb, pa->pa_pstart);
-               if (ext4_mb_load_buddy(sb, group, &e4b)) {
-                       ext4_error(sb, "Error loading buddy information for %u",
-                                       group);
+               err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
+                                            GFP_NOFS|__GFP_NOFAIL);
+               if (err) {
+                       ext4_error(sb, "Error %d loading buddy information for %u",
+                                  err, group);
                        continue;
                }
                ext4_lock_group(sb, group);
@@ -5127,8 +5132,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 
        ret = ext4_mb_load_buddy(sb, group, &e4b);
        if (ret) {
-               ext4_error(sb, "Error in loading buddy "
-                               "information for %u", group);
+               ext4_warning(sb, "Error %d loading buddy information for %u",
+                            ret, group);
                return ret;
        }
        bitmap = e4b.bd_bitmap;
index b81f7d46f344d482d4a7ab15a5cc1ad3654b3259..404256caf9cff0df28517350113dff0d8b239cc9 100644 (file)
@@ -1155,12 +1155,11 @@ errout:
 static inline int search_dirblock(struct buffer_head *bh,
                                  struct inode *dir,
                                  struct ext4_filename *fname,
-                                 const struct qstr *d_name,
                                  unsigned int offset,
                                  struct ext4_dir_entry_2 **res_dir)
 {
        return ext4_search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
-                              fname, d_name, offset, res_dir);
+                              fname, offset, res_dir);
 }
 
 /*
@@ -1262,7 +1261,6 @@ static inline bool ext4_match(const struct ext4_filename *fname,
  */
 int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
                    struct inode *dir, struct ext4_filename *fname,
-                   const struct qstr *d_name,
                    unsigned int offset, struct ext4_dir_entry_2 **res_dir)
 {
        struct ext4_dir_entry_2 * de;
@@ -1355,7 +1353,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
 
        if (ext4_has_inline_data(dir)) {
                int has_inline_data = 1;
-               ret = ext4_find_inline_entry(dir, &fname, d_name, res_dir,
+               ret = ext4_find_inline_entry(dir, &fname, res_dir,
                                             &has_inline_data);
                if (has_inline_data) {
                        if (inlined)
@@ -1447,7 +1445,7 @@ restart:
                        goto next;
                }
                set_buffer_verified(bh);
-               i = search_dirblock(bh, dir, &fname, d_name,
+               i = search_dirblock(bh, dir, &fname,
                            block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
                if (i == 1) {
                        EXT4_I(dir)->i_dir_start_lookup = block;
@@ -1488,7 +1486,6 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
 {
        struct super_block * sb = dir->i_sb;
        struct dx_frame frames[2], *frame;
-       const struct qstr *d_name = fname->usr_fname;
        struct buffer_head *bh;
        ext4_lblk_t block;
        int retval;
@@ -1505,7 +1502,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
                if (IS_ERR(bh))
                        goto errout;
 
-               retval = search_dirblock(bh, dir, fname, d_name,
+               retval = search_dirblock(bh, dir, fname,
                                         block << EXT4_BLOCK_SIZE_BITS(sb),
                                         res_dir);
                if (retval == 1)
@@ -1530,7 +1527,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
 
        bh = NULL;
 errout:
-       dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
+       dxtrace(printk(KERN_DEBUG "%s not found\n", fname->usr_fname->name));
 success:
        dx_release(frames);
        return bh;
index 1a82138ba7391ac8fd960c0294715d13051fb232..930ca0fc9a0fa227941011451958fdec42e91f41 100644 (file)
@@ -85,7 +85,7 @@ static void ext4_finish_bio(struct bio *bio)
                }
 #endif
 
-               if (bio->bi_error) {
+               if (bio->bi_status) {
                        SetPageError(page);
                        mapping_set_error(page->mapping, -EIO);
                }
@@ -104,7 +104,7 @@ static void ext4_finish_bio(struct bio *bio)
                                continue;
                        }
                        clear_buffer_async_write(bh);
-                       if (bio->bi_error)
+                       if (bio->bi_status)
                                buffer_io_error(bh);
                } while ((bh = bh->b_this_page) != head);
                bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
@@ -303,24 +303,25 @@ static void ext4_end_bio(struct bio *bio)
                      bdevname(bio->bi_bdev, b),
                      (long long) bio->bi_iter.bi_sector,
                      (unsigned) bio_sectors(bio),
-                     bio->bi_error)) {
+                     bio->bi_status)) {
                ext4_finish_bio(bio);
                bio_put(bio);
                return;
        }
        bio->bi_end_io = NULL;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                struct inode *inode = io_end->inode;
 
                ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
                             "(offset %llu size %ld starting block %llu)",
-                            bio->bi_error, inode->i_ino,
+                            bio->bi_status, inode->i_ino,
                             (unsigned long long) io_end->offset,
                             (long) io_end->size,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
-               mapping_set_error(inode->i_mapping, bio->bi_error);
+               mapping_set_error(inode->i_mapping,
+                               blk_status_to_errno(bio->bi_status));
        }
 
        if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
index a81b829d56def34de7e6b1f7402784cb21011742..40a5497b0f605c8bbc9b2192591bb49acdb0d2b7 100644 (file)
@@ -73,7 +73,7 @@ static void mpage_end_io(struct bio *bio)
        int i;
 
        if (ext4_bio_encrypted(bio)) {
-               if (bio->bi_error) {
+               if (bio->bi_status) {
                        fscrypt_release_ctx(bio->bi_private);
                } else {
                        fscrypt_decrypt_bio_pages(bio->bi_private, bio);
@@ -83,7 +83,7 @@ static void mpage_end_io(struct bio *bio)
        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;
 
-               if (!bio->bi_error) {
+               if (!bio->bi_status) {
                        SetPageUptodate(page);
                } else {
                        ClearPageUptodate(page);
index 6e3b4186a22fad11eb58ce26171c243c0b2a6705..9006cb5857b802e301fa5923feafb8a3e87db884 100644 (file)
@@ -848,14 +848,9 @@ static inline void ext4_quota_off_umount(struct super_block *sb)
 {
        int type;
 
-       if (ext4_has_feature_quota(sb)) {
-               dquot_disable(sb, -1,
-                             DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
-       } else {
-               /* Use our quota_off function to clear inode flags etc. */
-               for (type = 0; type < EXT4_MAXQUOTAS; type++)
-                       ext4_quota_off(sb, type);
-       }
+       /* Use our quota_off function to clear inode flags etc. */
+       for (type = 0; type < EXT4_MAXQUOTAS; type++)
+               ext4_quota_off(sb, type);
 }
 #else
 static inline void ext4_quota_off_umount(struct super_block *sb)
@@ -1179,6 +1174,9 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
                return res;
        }
 
+       res = dquot_initialize(inode);
+       if (res)
+               return res;
 retry:
        handle = ext4_journal_start(inode, EXT4_HT_MISC,
                        ext4_jbd2_credits_xattr(inode));
@@ -5485,7 +5483,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
                goto out;
 
        err = dquot_quota_off(sb, type);
-       if (err)
+       if (err || ext4_has_feature_quota(sb))
                goto out_put;
 
        inode_lock(inode);
@@ -5505,6 +5503,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
 out_unlock:
        inode_unlock(inode);
 out_put:
+       lockdep_set_quota_inode(inode, I_DATA_SEM_NORMAL);
        iput(inode);
        return err;
 out:
index 8fb7ce14e6ebe3a4ba1f2bd2da36b314819d24a5..5d3c2536641c7db6c1fa5b19b4ad10e630e09986 100644 (file)
@@ -888,6 +888,8 @@ inserted:
                        else {
                                u32 ref;
 
+                               WARN_ON_ONCE(dquot_initialize_needed(inode));
+
                                /* The old block is released after updating
                                   the inode. */
                                error = dquot_alloc_block(inode,
@@ -954,6 +956,8 @@ inserted:
                        /* We need to allocate a new block */
                        ext4_fsblk_t goal, block;
 
+                       WARN_ON_ONCE(dquot_initialize_needed(inode));
+
                        goal = ext4_group_first_block_no(sb,
                                                EXT4_I(inode)->i_block_group);
 
@@ -1166,6 +1170,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
                return -EINVAL;
        if (strlen(name) > 255)
                return -ERANGE;
+
        ext4_write_lock_xattr(inode, &no_expand);
 
        error = ext4_reserve_inode_write(handle, inode, &is.iloc);
@@ -1267,6 +1272,9 @@ ext4_xattr_set(struct inode *inode, int name_index, const char *name,
        int error, retries = 0;
        int credits = ext4_jbd2_credits_xattr(inode);
 
+       error = dquot_initialize(inode);
+       if (error)
+               return error;
 retry:
        handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
        if (IS_ERR(handle)) {
index 7c0f6bdf817d4370b74b36de9096fad73c75fbd4..36fe82012a337ec35eaa8bed5edc3a05288a4b42 100644 (file)
@@ -58,12 +58,12 @@ static void f2fs_read_end_io(struct bio *bio)
 #ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
                f2fs_show_injection_info(FAULT_IO);
-               bio->bi_error = -EIO;
+               bio->bi_status = BLK_STS_IOERR;
        }
 #endif
 
        if (f2fs_bio_encrypted(bio)) {
-               if (bio->bi_error) {
+               if (bio->bi_status) {
                        fscrypt_release_ctx(bio->bi_private);
                } else {
                        fscrypt_decrypt_bio_pages(bio->bi_private, bio);
@@ -74,7 +74,7 @@ static void f2fs_read_end_io(struct bio *bio)
        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;
 
-               if (!bio->bi_error) {
+               if (!bio->bi_status) {
                        if (!PageUptodate(page))
                                SetPageUptodate(page);
                } else {
@@ -102,14 +102,14 @@ static void f2fs_write_end_io(struct bio *bio)
                        unlock_page(page);
                        mempool_free(page, sbi->write_io_dummy);
 
-                       if (unlikely(bio->bi_error))
+                       if (unlikely(bio->bi_status))
                                f2fs_stop_checkpoint(sbi, true);
                        continue;
                }
 
                fscrypt_pullback_bio_page(&page, true);
 
-               if (unlikely(bio->bi_error)) {
+               if (unlikely(bio->bi_status)) {
                        mapping_set_error(page->mapping, -EIO);
                        f2fs_stop_checkpoint(sbi, true);
                }
index 96845854e7ee808c2df5227ddf68a614fcc779ba..ea9f455d94ba76d6a4d128b6333021d1301b1f70 100644 (file)
@@ -749,7 +749,7 @@ static void f2fs_submit_discard_endio(struct bio *bio)
 {
        struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
 
-       dc->error = bio->bi_error;
+       dc->error = blk_status_to_errno(bio->bi_status);
        dc->state = D_DONE;
        complete(&dc->wait);
        bio_put(bio);
index b7cf65d13561fedf98a72dfc66b36e70d84100cd..aa3d44527fa2e92a9372761cc727e3b5c404e785 100644 (file)
@@ -815,7 +815,6 @@ struct gfs2_sbd {
        atomic_t sd_log_in_flight;
        struct bio *sd_log_bio;
        wait_queue_head_t sd_log_flush_wait;
-       int sd_log_error;
 
        atomic_t sd_reserving_log;
        wait_queue_head_t sd_reserving_log_wait;
index b1f9144b42c7fd6ae4f0f24f82adaf142bfa4922..885d36e7a29f4ad44e3b9458c158349776fecd50 100644 (file)
@@ -170,7 +170,7 @@ static u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
  */
 
 static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
-                                 int error)
+                                 blk_status_t error)
 {
        struct buffer_head *bh, *next;
        struct page *page = bvec->bv_page;
@@ -209,15 +209,13 @@ static void gfs2_end_log_write(struct bio *bio)
        struct page *page;
        int i;
 
-       if (bio->bi_error) {
-               sdp->sd_log_error = bio->bi_error;
-               fs_err(sdp, "Error %d writing to log\n", bio->bi_error);
-       }
+       if (bio->bi_status)
+               fs_err(sdp, "Error %d writing to log\n", bio->bi_status);
 
        bio_for_each_segment_all(bvec, bio, i) {
                page = bvec->bv_page;
                if (page_has_buffers(page))
-                       gfs2_end_log_write_bh(sdp, bvec, bio->bi_error);
+                       gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
                else
                        mempool_free(page, gfs2_page_pool);
        }
index 663ffc135ef365a436ae658d3f71965b31cee06e..fabe1614f879525827290d05eb79ca53ad76a57e 100644 (file)
@@ -201,7 +201,7 @@ static void gfs2_meta_read_endio(struct bio *bio)
                do {
                        struct buffer_head *next = bh->b_this_page;
                        len -= bh->b_size;
-                       bh->b_end_io(bh, !bio->bi_error);
+                       bh->b_end_io(bh, !bio->bi_status);
                        bh = next;
                } while (bh && len);
        }
index b92135c202c25cc409812d095ac5b7ea4936d5dc..e76058d34b7468b3762a6955f401af822c7a8e9a 100644 (file)
@@ -176,10 +176,10 @@ static void end_bio_io_page(struct bio *bio)
 {
        struct page *page = bio->bi_private;
 
-       if (!bio->bi_error)
+       if (!bio->bi_status)
                SetPageUptodate(page);
        else
-               pr_warn("error %d reading superblock\n", bio->bi_error);
+               pr_warn("error %d reading superblock\n", bio->bi_status);
        unlock_page(page);
 }
 
index 4b10892967a5ae9a4ece5d8571e47c01c72f74da..18f2f2b8ba2c8802585e651131cfebb8375efc89 100644 (file)
@@ -672,8 +672,8 @@ static void iomap_dio_bio_end_io(struct bio *bio)
        struct iomap_dio *dio = bio->bi_private;
        bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);
 
-       if (bio->bi_error)
-               iomap_dio_set_error(dio, bio->bi_error);
+       if (bio->bi_status)
+               iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));
 
        if (atomic_dec_and_test(&dio->ref)) {
                if (is_sync_kiocb(dio->iocb)) {
index 9ee4832b6f8b3664430e31bcf3775b412e1b81d8..2d30a6da7013112adafd08545b32cb00d179311c 100644 (file)
@@ -680,6 +680,12 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
 
        rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
        handle->h_buffer_credits = nblocks;
+       /*
+        * Restore the original nofs context because the journal restart
+        * is basically the same thing as journal stop and start.
+        * start_this_handle will start a new nofs context.
+        */
+       memalloc_nofs_restore(handle->saved_alloc_context);
        ret = start_this_handle(journal, handle, gfp_mask);
        return ret;
 }
index bb1da1feafeb8202d904b4215f13dd1868dd2b4e..a21f0e9eecd45ef34765a4ae567cd4e3df498dbf 100644 (file)
@@ -2205,7 +2205,7 @@ static void lbmIODone(struct bio *bio)
 
        bp->l_flag |= lbmDONE;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                bp->l_flag |= lbmERROR;
 
                jfs_err("lbmIODone: I/O error in JFS log");
index 489aaa1403e57c0c0886ba2f127bc16dc0dc2274..ce93db3aef3c450990ecec5c4daac05c9d1c066e 100644 (file)
@@ -280,7 +280,7 @@ static void metapage_read_end_io(struct bio *bio)
 {
        struct page *page = bio->bi_private;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                printk(KERN_ERR "metapage_read_end_io: I/O error\n");
                SetPageError(page);
        }
@@ -337,7 +337,7 @@ static void metapage_write_end_io(struct bio *bio)
 
        BUG_ON(!PagePrivate(page));
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                printk(KERN_ERR "metapage_write_end_io: I/O error\n");
                SetPageError(page);
        }
index baff8f820c290e6256c274056171eee1f18c1cf7..9524fdde00c2fa01dc26edd56c16e636fe17d350 100644 (file)
@@ -50,7 +50,8 @@ static void mpage_end_io(struct bio *bio)
 
        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;
-               page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
+               page_endio(page, op_is_write(bio_op(bio)),
+                               blk_status_to_errno(bio->bi_status));
        }
 
        bio_put(bio);
index 0ca370d23ddb2a771b9172b260b010da2969d81d..d8863a804b15756632a15dcd3d0076db4a0bf136 100644 (file)
@@ -188,7 +188,7 @@ static void bl_end_io_read(struct bio *bio)
 {
        struct parallel_io *par = bio->bi_private;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                struct nfs_pgio_header *header = par->data;
 
                if (!header->pnfs_error)
@@ -319,7 +319,7 @@ static void bl_end_io_write(struct bio *bio)
        struct parallel_io *par = bio->bi_private;
        struct nfs_pgio_header *header = par->data;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                if (!header->pnfs_error)
                        header->pnfs_error = -EIO;
                pnfs_set_lo_fail(header->lseg);
index fb5213afc854e2c28edafc238f374a5a93c01d9f..47ed19c53f2e37bb3e3c8f4cc37d4e2aed1545c7 100644 (file)
@@ -219,6 +219,9 @@ static int nfsd4_scsi_identify_device(struct block_device *bdev,
        u8 *buf, *d, type, assoc;
        int error;
 
+       if (WARN_ON_ONCE(!blk_queue_scsi_passthrough(q)))
+               return -EINVAL;
+
        buf = kzalloc(bufflen, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
index 6f87b2ac1aeb001c4e6c73997e584b5894b67a6c..e73c86d9855ccd186b4c3f75f7a56b6e61060e47 100644 (file)
@@ -338,7 +338,7 @@ static void nilfs_end_bio_write(struct bio *bio)
 {
        struct nilfs_segment_buffer *segbuf = bio->bi_private;
 
-       if (bio->bi_error)
+       if (bio->bi_status)
                atomic_inc(&segbuf->sb_err);
 
        bio_put(bio);
index 0da0332725aafcf253707f7ca59c9385cd6b4fb4..ffe003982d95622decb1ff65ddaa22605cedea58 100644 (file)
@@ -516,9 +516,9 @@ static void o2hb_bio_end_io(struct bio *bio)
 {
        struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
 
-       if (bio->bi_error) {
-               mlog(ML_ERROR, "IO Error %d\n", bio->bi_error);
-               wc->wc_error = bio->bi_error;
+       if (bio->bi_status) {
+               mlog(ML_ERROR, "IO Error %d\n", bio->bi_status);
+               wc->wc_error = blk_status_to_errno(bio->bi_status);
        }
 
        o2hb_bio_wait_dec(wc, 1);
index ebf80c7739e15ebdd2bc55896d66943f3aecef9a..48813aeaab8067e921d67b110e28e2a8dd94a3c0 100644 (file)
@@ -1512,6 +1512,22 @@ int dquot_initialize(struct inode *inode)
 }
 EXPORT_SYMBOL(dquot_initialize);
 
+bool dquot_initialize_needed(struct inode *inode)
+{
+       struct dquot **dquots;
+       int i;
+
+       if (!dquot_active(inode))
+               return false;
+
+       dquots = i_dquot(inode);
+       for (i = 0; i < MAXQUOTAS; i++)
+               if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
+                       return true;
+       return false;
+}
+EXPORT_SYMBOL(dquot_initialize_needed);
+
 /*
  * Release all quotas referenced by inode.
  *
index f494b182c7c785232b1d686aae893ebbd70937c7..c35610845ab19f8c17e4ef96e851cddc2205bba9 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -672,6 +672,7 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes)
                inode->i_bytes -= 512;
        }
 }
+EXPORT_SYMBOL(__inode_add_bytes);
 
 void inode_add_bytes(struct inode *inode, loff_t bytes)
 {
index a0376a2c1c29c7adc4a7a60976efd0bf49c8c9dd..d642cc0a8271b06b6fea356d7b2d8893111f465d 100644 (file)
@@ -82,7 +82,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
                        ufs_error (sb, "ufs_free_fragments",
                                   "bit already cleared for fragment %u", i);
        }
-       
+
+       inode_sub_bytes(inode, count << uspi->s_fshift);
        fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
        uspi->cs_total.cs_nffree += count;
        fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
@@ -184,6 +185,7 @@ do_more:
                        ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
                }
                ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
+               inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
                if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
                        ufs_clusteracct (sb, ucpi, blkno, 1);
 
@@ -494,6 +496,20 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
        return 0;
 }              
 
+static bool try_add_frags(struct inode *inode, unsigned frags)
+{
+       unsigned size = frags * i_blocksize(inode);
+       spin_lock(&inode->i_lock);
+       __inode_add_bytes(inode, size);
+       if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
+               __inode_sub_bytes(inode, size);
+               spin_unlock(&inode->i_lock);
+               return false;
+       }
+       spin_unlock(&inode->i_lock);
+       return true;
+}
+
 static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
                             unsigned oldcount, unsigned newcount)
 {
@@ -530,6 +546,9 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
        for (i = oldcount; i < newcount; i++)
                if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
                        return 0;
+
+       if (!try_add_frags(inode, count))
+               return 0;
        /*
         * Block can be extended
         */
@@ -647,6 +666,7 @@ cg_found:
                        ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
                i = uspi->s_fpb - count;
 
+               inode_sub_bytes(inode, i << uspi->s_fshift);
                fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
                uspi->cs_total.cs_nffree += i;
                fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
@@ -657,6 +677,8 @@ cg_found:
        result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
        if (result == INVBLOCK)
                return 0;
+       if (!try_add_frags(inode, count))
+               return 0;
        for (i = 0; i < count; i++)
                ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
        
@@ -716,6 +738,8 @@ norot:
                return INVBLOCK;
        ucpi->c_rotor = result;
 gotit:
+       if (!try_add_frags(inode, uspi->s_fpb))
+               return 0;
        blkno = ufs_fragstoblks(result);
        ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
        if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
index 7e41aee7b69a660914e7cb26765714746929a306..da553ffec85b459f0675c25033f173bbaeccb772 100644 (file)
@@ -235,7 +235,8 @@ ufs_extend_tail(struct inode *inode, u64 writes_to,
 
        p = ufs_get_direct_data_ptr(uspi, ufsi, block);
        tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
-                               new_size, err, locked_page);
+                               new_size - (lastfrag & uspi->s_fpbmask), err,
+                               locked_page);
        return tmp != 0;
 }
 
@@ -284,7 +285,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned index,
                        goal += uspi->s_fpb;
        }
        tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
-                               goal, uspi->s_fpb, err, locked_page);
+                               goal, nfrags, err, locked_page);
 
        if (!tmp) {
                *err = -ENOSPC;
@@ -402,7 +403,9 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
 
        if (!create) {
                phys64 = ufs_frag_map(inode, offsets, depth);
-               goto out;
+               if (phys64)
+                       map_bh(bh_result, sb, phys64 + frag);
+               return 0;
        }
 
         /* This code entered only while writing ....? */
@@ -841,8 +844,11 @@ void ufs_evict_inode(struct inode * inode)
        truncate_inode_pages_final(&inode->i_data);
        if (want_delete) {
                inode->i_size = 0;
-               if (inode->i_blocks)
+               if (inode->i_blocks &&
+                   (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+                    S_ISLNK(inode->i_mode)))
                        ufs_truncate_blocks(inode);
+               ufs_update_inode(inode, inode_needs_sync(inode));
        }
 
        invalidate_inode_buffers(inode);
@@ -1100,7 +1106,7 @@ out:
        return err;
 }
 
-static void __ufs_truncate_blocks(struct inode *inode)
+static void ufs_truncate_blocks(struct inode *inode)
 {
        struct ufs_inode_info *ufsi = UFS_I(inode);
        struct super_block *sb = inode->i_sb;
@@ -1183,7 +1189,7 @@ static int ufs_truncate(struct inode *inode, loff_t size)
 
        truncate_setsize(inode, size);
 
-       __ufs_truncate_blocks(inode);
+       ufs_truncate_blocks(inode);
        inode->i_mtime = inode->i_ctime = current_time(inode);
        mark_inode_dirty(inode);
 out:
@@ -1191,16 +1197,6 @@ out:
        return err;
 }
 
-static void ufs_truncate_blocks(struct inode *inode)
-{
-       if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-             S_ISLNK(inode->i_mode)))
-               return;
-       if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
-               return;
-       __ufs_truncate_blocks(inode);
-}
-
 int ufs_setattr(struct dentry *dentry, struct iattr *attr)
 {
        struct inode *inode = d_inode(dentry);
index 29ecaf739449c4036e6ed3ebed5499b8489079c4..878cc6264f1af4a87bf68bb71f2cb031bdb4b1a1 100644 (file)
@@ -746,6 +746,23 @@ static void ufs_put_super(struct super_block *sb)
        return;
 }
 
+static u64 ufs_max_bytes(struct super_block *sb)
+{
+       struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+       int bits = uspi->s_apbshift;
+       u64 res;
+
+       if (bits > 21)
+               res = ~0ULL;
+       else
+               res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) +
+                       (1LL << (3*bits));
+
+       if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift))
+               return MAX_LFS_FILESIZE;
+       return res << uspi->s_bshift;
+}
+
 static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct ufs_sb_info * sbi;
@@ -1211,6 +1228,7 @@ magic_found:
                            "fast symlink size (%u)\n", uspi->s_maxsymlinklen);
                uspi->s_maxsymlinklen = maxsymlen;
        }
+       sb->s_maxbytes = ufs_max_bytes(sb);
        sb->s_max_links = UFS_LINK_MAX;
 
        inode = ufs_iget(sb, UFS_ROOTINO);
index b7fbf53dbc81a044e2bd10428bf2601b094cb750..398019fb144816875f2c717c2252823a4dd76b99 100644 (file)
@@ -473,15 +473,19 @@ static inline unsigned _ubh_find_last_zero_bit_(
 static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
        struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
 {
+       u8 mask;
        switch (uspi->s_fpb) {
        case 8:
                return (*ubh_get_addr (ubh, begin + block) == 0xff);
        case 4:
-               return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
+               mask = 0x0f << ((block & 0x01) << 2);
+               return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
        case 2:
-               return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
+               mask = 0x03 << ((block & 0x03) << 1);
+               return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
        case 1:
-               return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
+               mask = 0x01 << (block & 0x07);
+               return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
        }
        return 0;       
 }
index 09af0f7cd55e278312881999755d3d8d0793d5c8..76b6f988e2fa969103b8e986b1861ab41585867f 100644 (file)
@@ -276,7 +276,7 @@ xfs_end_io(
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
-       int                     error = ioend->io_bio->bi_error;
+       int                     error;
 
        /*
         * Just clean up the in-memory strutures if the fs has been shut down.
@@ -289,6 +289,7 @@ xfs_end_io(
        /*
         * Clean up any COW blocks on an I/O error.
         */
+       error = blk_status_to_errno(ioend->io_bio->bi_status);
        if (unlikely(error)) {
                switch (ioend->io_type) {
                case XFS_IO_COW:
@@ -332,7 +333,7 @@ xfs_end_bio(
        else if (ioend->io_append_trans)
                queue_work(mp->m_data_workqueue, &ioend->io_work);
        else
-               xfs_destroy_ioend(ioend, bio->bi_error);
+               xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
 }
 
 STATIC int
@@ -500,7 +501,7 @@ xfs_submit_ioend(
         * time.
         */
        if (status) {
-               ioend->io_bio->bi_error = status;
+               ioend->io_bio->bi_status = errno_to_blk_status(status);
                bio_endio(ioend->io_bio);
                return status;
        }
index 07b77b73b0240c5cca4187d29e8e403cbf4f0714..290b58464043a25d3fba9f733ae400ff947a0727 100644 (file)
@@ -1227,8 +1227,11 @@ xfs_buf_bio_end_io(
         * don't overwrite existing errors - otherwise we can lose errors on
         * buffers that require multiple bios to complete.
         */
-       if (bio->bi_error)
-               cmpxchg(&bp->b_io_error, 0, bio->bi_error);
+       if (bio->bi_status) {
+               int error = blk_status_to_errno(bio->bi_status);
+
+               cmpxchg(&bp->b_io_error, 0, error);
+       }
 
        if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
                invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
index d1b04b0e99cf8c293d4ded6eccb2b0aa2fce0d41..9455aada1399944ee7912d5d1163cd36af856e96 100644 (file)
@@ -414,7 +414,7 @@ extern void bio_endio(struct bio *);
 
 static inline void bio_io_error(struct bio *bio)
 {
-       bio->bi_error = -EIO;
+       bio->bi_status = BLK_STS_IOERR;
        bio_endio(bio);
 }
 
index fcd641032f8d3a87162b0e9f1a63e08ae16d10df..b144b7b0e1046bedd582b30a5f146d83c06b2279 100644 (file)
@@ -87,7 +87,8 @@ struct blk_mq_queue_data {
        bool last;
 };
 
-typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
+               const struct blk_mq_queue_data *);
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@@ -155,10 +156,6 @@ struct blk_mq_ops {
 };
 
 enum {
-       BLK_MQ_RQ_QUEUE_OK      = 0,    /* queued fine */
-       BLK_MQ_RQ_QUEUE_BUSY    = 1,    /* requeue IO for later */
-       BLK_MQ_RQ_QUEUE_ERROR   = 2,    /* end IO with error */
-
        BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
        BLK_MQ_F_TAG_SHARED     = 1 << 1,
        BLK_MQ_F_SG_MERGE       = 1 << 2,
@@ -230,8 +227,8 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 
 int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
-void blk_mq_end_request(struct request *rq, int error);
-void __blk_mq_end_request(struct request *rq, int error);
+void blk_mq_end_request(struct request *rq, blk_status_t error);
+void __blk_mq_end_request(struct request *rq, blk_status_t error);
 
 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
index 61339bc444006a477bf3e84b32a1d8199a9e5f01..dcd45b15a3a5d670fcc7253eca615bef0860f154 100644 (file)
@@ -17,6 +17,25 @@ struct io_context;
 struct cgroup_subsys_state;
 typedef void (bio_end_io_t) (struct bio *);
 
+/*
+ * Block error status values.  See block/blk-core:blk_errors for the details.
+ */
+typedef u8 __bitwise blk_status_t;
+#define        BLK_STS_OK 0
+#define BLK_STS_NOTSUPP                ((__force blk_status_t)1)
+#define BLK_STS_TIMEOUT                ((__force blk_status_t)2)
+#define BLK_STS_NOSPC          ((__force blk_status_t)3)
+#define BLK_STS_TRANSPORT      ((__force blk_status_t)4)
+#define BLK_STS_TARGET         ((__force blk_status_t)5)
+#define BLK_STS_NEXUS          ((__force blk_status_t)6)
+#define BLK_STS_MEDIUM         ((__force blk_status_t)7)
+#define BLK_STS_PROTECTION     ((__force blk_status_t)8)
+#define BLK_STS_RESOURCE       ((__force blk_status_t)9)
+#define BLK_STS_IOERR          ((__force blk_status_t)10)
+
+/* hack for device mapper, don't use elsewhere: */
+#define BLK_STS_DM_REQUEUE    ((__force blk_status_t)11)
+
 struct blk_issue_stat {
        u64 stat;
 };
@@ -28,7 +47,7 @@ struct blk_issue_stat {
 struct bio {
        struct bio              *bi_next;       /* request queue link */
        struct block_device     *bi_bdev;
-       int                     bi_error;
+       blk_status_t            bi_status;
        unsigned int            bi_opf;         /* bottom bits req flags,
                                                 * top bits REQ_OP. Use
                                                 * accessors.
index ab92c4ea138b7c665c45b765c0e1f8a4958339ca..76b6df862a128db892fee315bb04dca852aafce3 100644 (file)
@@ -55,7 +55,7 @@ struct blk_stat_callback;
  */
 #define BLKCG_MAX_POLS         3
 
-typedef void (rq_end_io_fn)(struct request *, int);
+typedef void (rq_end_io_fn)(struct request *, blk_status_t);
 
 #define BLK_RL_SYNCFULL                (1U << 0)
 #define BLK_RL_ASYNCFULL       (1U << 1)
@@ -618,6 +618,7 @@ struct request_queue {
 #define QUEUE_FLAG_STATS       27      /* track rq completion times */
 #define QUEUE_FLAG_POLL_STATS  28      /* collecting stats for hybrid polling */
 #define QUEUE_FLAG_REGISTERED  29      /* queue has been registered to a disk */
+#define QUEUE_FLAG_SCSI_PASSTHROUGH 30 /* queue supports SCSI commands */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -708,6 +709,8 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_secure_erase(q) \
        (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
 #define blk_queue_dax(q)       test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
+#define blk_queue_scsi_passthrough(q)  \
+       test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
 
 #define blk_noretry_request(rq) \
        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@@ -937,7 +940,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
                             int (*bio_ctr)(struct bio *, struct bio *, void *),
                             void *data);
 extern void blk_rq_unprep_clone(struct request *rq);
-extern int blk_insert_cloned_request(struct request_queue *q,
+extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
                                     struct request *rq);
 extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
@@ -977,6 +980,9 @@ extern void blk_execute_rq(struct request_queue *, struct gendisk *,
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
                                  struct request *, int, rq_end_io_fn *);
 
+int blk_status_to_errno(blk_status_t status);
+blk_status_t errno_to_blk_status(int errno);
+
 bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
@@ -1109,16 +1115,16 @@ extern struct request *blk_fetch_request(struct request_queue *q);
  * blk_end_request() for parts of the original function.
  * This prevents code duplication in drivers.
  */
-extern bool blk_update_request(struct request *rq, int error,
+extern bool blk_update_request(struct request *rq, blk_status_t error,
                               unsigned int nr_bytes);
-extern void blk_finish_request(struct request *rq, int error);
-extern bool blk_end_request(struct request *rq, int error,
+extern void blk_finish_request(struct request *rq, blk_status_t error);
+extern bool blk_end_request(struct request *rq, blk_status_t error,
                            unsigned int nr_bytes);
-extern void blk_end_request_all(struct request *rq, int error);
-extern bool __blk_end_request(struct request *rq, int error,
+extern void blk_end_request_all(struct request *rq, blk_status_t error);
+extern bool __blk_end_request(struct request *rq, blk_status_t error,
                              unsigned int nr_bytes);
-extern void __blk_end_request_all(struct request *rq, int error);
-extern bool __blk_end_request_cur(struct request *rq, int error);
+extern void __blk_end_request_all(struct request *rq, blk_status_t error);
+extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
 
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
@@ -1776,7 +1782,7 @@ struct blk_integrity_iter {
        const char              *disk_name;
 };
 
-typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
+typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
 
 struct blk_integrity_profile {
        integrity_processing_fn         *generate_fn;
index 21745946cae154f53cd87311e9350465388f70a5..ec47101cb1bf80f0867dbcff1d6aa10878df7418 100644 (file)
@@ -48,6 +48,7 @@ enum {
        CSS_ONLINE      = (1 << 1), /* between ->css_online() and ->css_offline() */
        CSS_RELEASED    = (1 << 2), /* refcnt reached zero, released */
        CSS_VISIBLE     = (1 << 3), /* css is visible to userland */
+       CSS_DYING       = (1 << 4), /* css is dying */
 };
 
 /* bits in struct cgroup flags field */
index ed2573e149faf070714e04f8160f7056b2fb1d3e..710a005c6b7a652bb9c32b5457dbd64196f6f4a0 100644 (file)
@@ -343,6 +343,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css)
        return true;
 }
 
+/**
+ * css_is_dying - test whether the specified css is dying
+ * @css: target css
+ *
+ * Test whether @css is in the process of offlining or already offline.  In
+ * most cases, ->css_online() and ->css_offline() callbacks should be
+ * enough; however, the actual offline operations are RCU delayed and this
+ * test returns %true also when @css is scheduled to be offlined.
+ *
+ * This is useful, for example, when the use case requires synchronous
+ * behavior with respect to cgroup removal.  cgroup removal schedules css
+ * offlining but the css can seem alive while the operation is being
+ * delayed.  If the delay affects user visible semantics, this test can be
+ * used to resolve the situation.
+ */
+static inline bool css_is_dying(struct cgroup_subsys_state *css)
+{
+       return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+}
+
 /**
  * css_put - put a css reference
  * @css: target css
index de179993e039d41d7e9034b5744be40954f81c09..d614c5ea1b5ea4a1772594b35525f1e64aec68ff 100644 (file)
  * with any version that can compile the kernel
  */
 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+/*
+ * GCC does not warn about unused static inline functions for
+ * -Wunused-function.  This turns out to avoid the need for complex #ifdef
+ * directives.  Suppress the warning in clang as well.
+ */
+#undef inline
+#define inline inline __attribute__((unused)) notrace
index f4c639c0c362fd4c5106c5a6d2d6ceae1f3c795a..456da5017b32b57248c7f1ebb2f167d38434b27e 100644 (file)
@@ -72,9 +72,9 @@ typedef void (*dm_release_clone_request_fn) (struct request *clone);
  * 2   : The target wants to push back the io
  */
 typedef int (*dm_endio_fn) (struct dm_target *ti,
-                           struct bio *bio, int error);
+                           struct bio *bio, blk_status_t *error);
 typedef int (*dm_request_endio_fn) (struct dm_target *ti,
-                                   struct request *clone, int error,
+                                   struct request *clone, blk_status_t error,
                                    union map_info *map_context);
 
 typedef void (*dm_presuspend_fn) (struct dm_target *ti);
index 4eac2670bfa1a016679971f36ad698fe1d9f6a4e..92f20832fd28770c16ab9d34c5560efce2a888c6 100644 (file)
@@ -78,6 +78,7 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 
 struct iommu_domain;
 struct msi_msg;
+struct device;
 
 static inline int iommu_dma_init(void)
 {
index 9ec5e22846e0f302e7e1b1d174d9ead17e619ee3..0e306c5a86d6ee90debc824aa5d18e8f6d078f4d 100644 (file)
@@ -153,7 +153,7 @@ struct elevator_type
 #endif
 
        /* managed by elevator core */
-       char icq_cache_name[ELV_NAME_MAX + 5];  /* elvname + "_io_cq" */
+       char icq_cache_name[ELV_NAME_MAX + 6];  /* elvname + "_io_cq" */
        struct list_head list;
 };
 
index 3e68cabb8457e5a9ae5b53bd9d7aa0f844ee99fa..023f0324762b4ba39844cdd90c438ca80f4b539e 100644 (file)
@@ -2844,7 +2844,7 @@ enum {
        DIO_SKIP_DIO_COUNT = 0x08,
 };
 
-void dio_end_io(struct bio *bio, int error);
+void dio_end_io(struct bio *bio);
 
 ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
                             struct block_device *bdev, struct iov_iter *iter,
index 6980ca322074b9cd80f69517195a02ea837c3f34..dc152e4b7f732ed294bde4f1f2e87fc2277d6262 100644 (file)
@@ -671,7 +671,7 @@ struct ide_port_ops {
        void    (*init_dev)(ide_drive_t *);
        void    (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
        void    (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
-       int     (*reset_poll)(ide_drive_t *);
+       blk_status_t (*reset_poll)(ide_drive_t *);
        void    (*pre_reset)(ide_drive_t *);
        void    (*resetproc)(ide_drive_t *);
        void    (*maskproc)(ide_drive_t *, int);
@@ -1092,7 +1092,7 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l
 extern int ide_vlb_clk;
 extern int ide_pci_clk;
 
-int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
+int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
 void ide_kill_rq(ide_drive_t *, struct request *);
 
 void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
@@ -1123,7 +1123,7 @@ extern int ide_devset_execute(ide_drive_t *drive,
                              const struct ide_devset *setting, int arg);
 
 void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
-int ide_complete_rq(ide_drive_t *, int, unsigned int);
+int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int);
 
 void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
 void ide_tf_dump(const char *, struct ide_cmd *);
index fffb91202bc9366a523002cc61fc074d12f84725..1fa293a37f4a7e18bff0b2989764454dd0732df7 100644 (file)
 #define ICH_HCR_EN                     (1 << 0)
 #define ICH_HCR_UIE                    (1 << 1)
 
+#define ICH_VMCR_ACK_CTL_SHIFT         2
+#define ICH_VMCR_ACK_CTL_MASK          (1 << ICH_VMCR_ACK_CTL_SHIFT)
+#define ICH_VMCR_FIQ_EN_SHIFT          3
+#define ICH_VMCR_FIQ_EN_MASK           (1 << ICH_VMCR_FIQ_EN_SHIFT)
 #define ICH_VMCR_CBPR_SHIFT            4
 #define ICH_VMCR_CBPR_MASK             (1 << ICH_VMCR_CBPR_SHIFT)
 #define ICH_VMCR_EOIM_SHIFT            9
index dc30f3d057eb0801e9ae8d22fc3ce11943d188d1..d3453ee072fc8aa859544e07598398884239d56f 100644 (file)
 #define GICC_ENABLE                    0x1
 #define GICC_INT_PRI_THRESHOLD         0xf0
 
-#define GIC_CPU_CTRL_EOImodeNS         (1 << 9)
+#define GIC_CPU_CTRL_EnableGrp0_SHIFT  0
+#define GIC_CPU_CTRL_EnableGrp0                (1 << GIC_CPU_CTRL_EnableGrp0_SHIFT)
+#define GIC_CPU_CTRL_EnableGrp1_SHIFT  1
+#define GIC_CPU_CTRL_EnableGrp1                (1 << GIC_CPU_CTRL_EnableGrp1_SHIFT)
+#define GIC_CPU_CTRL_AckCtl_SHIFT      2
+#define GIC_CPU_CTRL_AckCtl            (1 << GIC_CPU_CTRL_AckCtl_SHIFT)
+#define GIC_CPU_CTRL_FIQEn_SHIFT       3
+#define GIC_CPU_CTRL_FIQEn             (1 << GIC_CPU_CTRL_FIQEn_SHIFT)
+#define GIC_CPU_CTRL_CBPR_SHIFT                4
+#define GIC_CPU_CTRL_CBPR              (1 << GIC_CPU_CTRL_CBPR_SHIFT)
+#define GIC_CPU_CTRL_EOImodeNS_SHIFT   9
+#define GIC_CPU_CTRL_EOImodeNS         (1 << GIC_CPU_CTRL_EOImodeNS_SHIFT)
 
 #define GICC_IAR_INT_ID_MASK           0x3ff
 #define GICC_INT_SPURIOUS              1023
 #define GICH_LR_EOI                    (1 << 19)
 #define GICH_LR_HW                     (1 << 31)
 
-#define GICH_VMCR_CTRL_SHIFT           0
-#define GICH_VMCR_CTRL_MASK            (0x21f << GICH_VMCR_CTRL_SHIFT)
+#define GICH_VMCR_ENABLE_GRP0_SHIFT    0
+#define GICH_VMCR_ENABLE_GRP0_MASK     (1 << GICH_VMCR_ENABLE_GRP0_SHIFT)
+#define GICH_VMCR_ENABLE_GRP1_SHIFT    1
+#define GICH_VMCR_ENABLE_GRP1_MASK     (1 << GICH_VMCR_ENABLE_GRP1_SHIFT)
+#define GICH_VMCR_ACK_CTL_SHIFT                2
+#define GICH_VMCR_ACK_CTL_MASK         (1 << GICH_VMCR_ACK_CTL_SHIFT)
+#define GICH_VMCR_FIQ_EN_SHIFT         3
+#define GICH_VMCR_FIQ_EN_MASK          (1 << GICH_VMCR_FIQ_EN_SHIFT)
+#define GICH_VMCR_CBPR_SHIFT           4
+#define GICH_VMCR_CBPR_MASK            (1 << GICH_VMCR_CBPR_SHIFT)
+#define GICH_VMCR_EOI_MODE_SHIFT       9
+#define GICH_VMCR_EOI_MODE_MASK                (1 << GICH_VMCR_EOI_MODE_SHIFT)
+
 #define GICH_VMCR_PRIMASK_SHIFT                27
 #define GICH_VMCR_PRIMASK_MASK         (0x1f << GICH_VMCR_PRIMASK_SHIFT)
 #define GICH_VMCR_BINPOINT_SHIFT       21
index 0c9b93b0d1f7cd6133ea87515d3a83fb25f8258b..78e25aabedaf6696772347bb25b20c4cb61254db 100644 (file)
@@ -173,7 +173,6 @@ struct key {
 #ifdef KEY_DEBUGGING
        unsigned                magic;
 #define KEY_DEBUG_MAGIC                0x18273645u
-#define KEY_DEBUG_MAGIC_X      0xf8e9dacbu
 #endif
 
        unsigned long           flags;          /* status flags (change with bitops) */
index b4ee8f62ce8da82720cb79e7a9ea3ec97703b849..8e2828d48d7fcf6a7ba683bf51c8c91a1ad28ec0 100644 (file)
@@ -470,6 +470,7 @@ struct mlx4_update_qp_params {
        u16     rate_val;
 };
 
+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
                   enum mlx4_update_qp_attr attr,
                   struct mlx4_update_qp_params *params);
index 9c6f768b7d32f66db0b05e0b5500f4b71ecebde7..dda22f45fc1b2361b9b7c79d5fa39a98f09952be 100644 (file)
@@ -44,6 +44,7 @@ void inode_sub_rsv_space(struct inode *inode, qsize_t number);
 void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
 
 int dquot_initialize(struct inode *inode);
+bool dquot_initialize_needed(struct inode *inode);
 void dquot_drop(struct inode *inode);
 struct dquot *dqget(struct super_block *sb, struct kqid qid);
 static inline struct dquot *dqgrab(struct dquot *dquot)
@@ -207,6 +208,11 @@ static inline int dquot_initialize(struct inode *inode)
        return 0;
 }
 
+static inline bool dquot_initialize_needed(struct inode *inode)
+{
+       return false;
+}
+
 static inline void dquot_drop(struct inode *inode)
 {
 }
index 167ad8831aafe092a53f87b982acb45748973337..4c1d5f7e62c4f6aef8eded0ec9db4d812a4acd29 100644 (file)
@@ -172,9 +172,7 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 {
        int retval;
 
-       preempt_disable();
        retval = __srcu_read_lock(sp);
-       preempt_enable();
        rcu_lock_acquire(&(sp)->dep_map);
        return retval;
 }
index 0b1cf32edfd7ba1c456252124e23c68450d5bcc3..d9718378a8bee0b327d08c2e80a6fd3b5490b967 100644 (file)
@@ -189,8 +189,6 @@ struct platform_suspend_ops {
 struct platform_freeze_ops {
        int (*begin)(void);
        int (*prepare)(void);
-       void (*wake)(void);
-       void (*sync)(void);
        void (*restore)(void);
        void (*end)(void);
 };
@@ -430,8 +428,7 @@ extern unsigned int pm_wakeup_irq;
 
 extern bool pm_wakeup_pending(void);
 extern void pm_system_wakeup(void);
-extern void pm_system_cancel_wakeup(void);
-extern void pm_wakeup_clear(bool reset);
+extern void pm_wakeup_clear(void);
 extern void pm_system_irq_wakeup(unsigned int irq_number);
 extern bool pm_get_wakeup_count(unsigned int *count, bool block);
 extern bool pm_save_wakeup_count(unsigned int count);
@@ -481,7 +478,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
 
 static inline bool pm_wakeup_pending(void) { return false; }
 static inline void pm_system_wakeup(void) {}
-static inline void pm_wakeup_clear(bool reset) {}
+static inline void pm_wakeup_clear(void) {}
 static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
 
 static inline void lock_system_sleep(void) {}
index eb50ce54b759154b99cf333e177adf8ca229c69b..413335c8cb529a8506a2f934577c3413512d8c97 100644 (file)
@@ -29,7 +29,7 @@ struct edid;
 struct cec_adapter;
 struct cec_notifier;
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
 
 /**
  * cec_notifier_get - find or create a new cec_notifier for the given device.
index b8eb895731d561a5f0ff4ec218055581ad6e21ee..bfa88d4d67e1d6663da4952a6a097e32f5e573a8 100644 (file)
@@ -173,7 +173,7 @@ struct cec_adapter {
        bool passthrough;
        struct cec_log_addrs log_addrs;
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
        struct cec_notifier *notifier;
 #endif
 
@@ -300,7 +300,7 @@ u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
  */
 int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
 
-#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+#ifdef CONFIG_CEC_NOTIFIER
 void cec_register_cec_notifier(struct cec_adapter *adap,
                               struct cec_notifier *notifier);
 #endif
index dbf0abba33b8da21be05abf6e719f69542da80fc..3e505bbff8ca4a41f8d39fefcd59aa01b85424f4 100644 (file)
@@ -1007,6 +1007,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
  */
 extern const struct proto_ops inet6_stream_ops;
 extern const struct proto_ops inet6_dgram_ops;
+extern const struct proto_ops inet6_sockraw_ops;
 
 struct group_source_req;
 struct group_filter;
index 38a7427ae902e35973a8b7fa0e95ff602ede0e87..be6223c586fa05b3ef1dbcb96e73cf7b5dae292d 100644 (file)
@@ -924,7 +924,7 @@ struct tcp_congestion_ops {
        void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
        /* call when ack arrives (optional) */
        void (*in_ack_event)(struct sock *sk, u32 flags);
-       /* new value of cwnd after loss (optional) */
+       /* new value of cwnd after loss (required) */
        u32  (*undo_cwnd)(struct sock *sk);
        /* hook for packet ack accounting (optional) */
        void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
index a09cca829082c71b10ee9799742a97250f44d4c2..a29d3086eb56e34f295ea8fb23b27583630d9166 100644 (file)
@@ -157,7 +157,7 @@ struct osd_request {
 
        osd_req_done_fn *async_done;
        void *async_private;
-       int async_error;
+       blk_status_t async_error;
        int req_errors;
 };
 
index 4bf9f1eabffc64c6a30c4f424f1047311c8c3391..2f6c77aebe1a73d04b56611152244af00a14fdb0 100644 (file)
@@ -267,9 +267,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY    _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR       4
-#define DM_VERSION_MINOR       35
+#define DM_VERSION_MINOR       36
 #define DM_VERSION_PATCHLEVEL  0
-#define DM_VERSION_EXTRA       "-ioctl (2016-06-23)"
+#define DM_VERSION_EXTRA       "-ioctl (2017-06-09)"
 
 /* Status bits */
 #define DM_READONLY_FLAG       (1 << 0) /* In/Out */
index 201c6644b2376846f3fccf16bc64d99a5aabdfd4..ef16df06642a93462e1bd5d535ec3119af3adc22 100644 (file)
@@ -70,8 +70,8 @@ struct keyctl_dh_params {
 };
 
 struct keyctl_kdf_params {
-       char *hashname;
-       char *otherinfo;
+       char __user *hashname;
+       char __user *otherinfo;
        __u32 otherinfolen;
        __u32 __spare[8];
 };
index c8125ec1f4f2270a5a01a358832425cd57edb11a..a3960f98679c13567c3db4ecb7919cd14a70cfb0 100644 (file)
@@ -22,6 +22,7 @@ enum {
        LO_FLAGS_AUTOCLEAR      = 4,
        LO_FLAGS_PARTSCAN       = 8,
        LO_FLAGS_DIRECT_IO      = 16,
+       LO_FLAGS_BLOCKSIZE      = 32,
 };
 
 #include <asm/posix_types.h>   /* for __kernel_old_dev_t */
@@ -59,6 +60,8 @@ struct loop_info64 {
        __u64              lo_init[2];
 };
 
+#define LO_INFO_BLOCKSIZE(l) (l)->lo_init[0]
+
 /*
  * Loop filter types
  */
index 155e33f819134a3aecdccf3c139974ef0c54c68c..a50527ebf671e010715d542f614331f7c49e4294 100644 (file)
@@ -41,10 +41,14 @@ enum {
 #define NBD_FLAG_HAS_FLAGS     (1 << 0) /* nbd-server supports flags */
 #define NBD_FLAG_READ_ONLY     (1 << 1) /* device is read-only */
 #define NBD_FLAG_SEND_FLUSH    (1 << 2) /* can flush writeback cache */
+#define NBD_FLAG_SEND_FUA      (1 << 3) /* send FUA (forced unit access) */
 /* there is a gap here to match userspace */
 #define NBD_FLAG_SEND_TRIM     (1 << 5) /* send trim/discard */
 #define NBD_FLAG_CAN_MULTI_CONN        (1 << 8)        /* Server supports multiple connections per export. */
 
+/* values for cmd flags in the upper 16 bits of request type */
+#define NBD_CMD_FLAG_FUA       (1 << 16) /* FUA (forced unit access) op */
+
 /* These are client behavior specific flags. */
 #define NBD_CFLAG_DESTROY_ON_DISCONNECT        (1 << 0) /* delete the nbd device on
                                                    disconnect. */
index c3c9a0e1b3c9a474bd80b8cb10ea1049284474b0..8d4e85eae42c08481899e415075ee42c6d12f90f 100644 (file)
@@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsys_state *css)
 {
        lockdep_assert_held(&cgroup_mutex);
 
+       if (css->flags & CSS_DYING)
+               return;
+
+       css->flags |= CSS_DYING;
+
        /*
         * This must happen before css is disassociated with its cgroup.
         * See seq_css() for details.
index f6501f4f6040b5a9c21e84aeb57e20906ef1c614..ae643412948added94f05d7efb120631f7798b44 100644 (file)
@@ -176,9 +176,9 @@ typedef enum {
 } cpuset_flagbits_t;
 
 /* convenient tests for these bits */
-static inline bool is_cpuset_online(const struct cpuset *cs)
+static inline bool is_cpuset_online(struct cpuset *cs)
 {
-       return test_bit(CS_ONLINE, &cs->flags);
+       return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
 }
 
 static inline int is_cpu_exclusive(const struct cpuset *cs)
index 9ae6fbe5b5cf5d70d5869e8efb00c61f47cf66d4..cb5103413bd8df5056b3eda682a8d93b08656dd8 100644 (file)
@@ -1658,13 +1658,13 @@ static ssize_t write_cpuhp_target(struct device *dev,
        ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
        mutex_unlock(&cpuhp_state_mutex);
        if (ret)
-               return ret;
+               goto out;
 
        if (st->state < target)
                ret = do_cpu_up(dev->id, target);
        else
                ret = do_cpu_down(dev->id, target);
-
+out:
        unlock_device_hotplug();
        return ret ? ret : count;
 }
index 6e75a5c9412dee17daabc1cb131bb517ad431207..6c4e523dc1e2e6b53d44bf6a540bc168bcde1eca 100644 (file)
@@ -7316,6 +7316,21 @@ int perf_event_account_interrupt(struct perf_event *event)
        return __perf_event_account_interrupt(event, 1);
 }
 
+static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+{
+       /*
+        * Due to interrupt latency (AKA "skid"), we may enter the
+        * kernel before taking an overflow, even if the PMU is only
+        * counting user events.
+        * To avoid leaking information to userspace, we must always
+        * reject kernel samples when exclude_kernel is set.
+        */
+       if (event->attr.exclude_kernel && !user_mode(regs))
+               return false;
+
+       return true;
+}
+
 /*
  * Generic event overflow handling, sampling.
  */
@@ -7336,6 +7351,12 @@ static int __perf_event_overflow(struct perf_event *event,
 
        ret = __perf_event_account_interrupt(event, throttle);
 
+       /*
+        * For security, drop the skid kernel samples if necessary.
+        */
+       if (!sample_is_allowed(event, regs))
+               return ret;
+
        /*
         * XXX event_limit might not quite work as expected on inherited
         * events
index 78672d324a6ef95394ad72a0b0ba29c7d1155d5d..c7209f060eeb7c8672cf8f07ba9c83ac6c9460ac 100644 (file)
@@ -132,7 +132,7 @@ int freeze_processes(void)
        if (!pm_freezing)
                atomic_inc(&system_freezing_cnt);
 
-       pm_wakeup_clear(true);
+       pm_wakeup_clear();
        pr_info("Freezing user space processes ... ");
        pm_freezing = true;
        error = try_to_freeze_tasks(true);
index c0248c74d6d4cef6dbf09f485f36686862c29094..15e6baef5c73f90b6817c0b1c4e871ea40e30318 100644 (file)
@@ -72,8 +72,6 @@ static void freeze_begin(void)
 
 static void freeze_enter(void)
 {
-       trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
-
        spin_lock_irq(&suspend_freeze_lock);
        if (pm_wakeup_pending())
                goto out;
@@ -100,27 +98,6 @@ static void freeze_enter(void)
  out:
        suspend_freeze_state = FREEZE_STATE_NONE;
        spin_unlock_irq(&suspend_freeze_lock);
-
-       trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
-}
-
-static void s2idle_loop(void)
-{
-       do {
-               freeze_enter();
-
-               if (freeze_ops && freeze_ops->wake)
-                       freeze_ops->wake();
-
-               dpm_resume_noirq(PMSG_RESUME);
-               if (freeze_ops && freeze_ops->sync)
-                       freeze_ops->sync();
-
-               if (pm_wakeup_pending())
-                       break;
-
-               pm_wakeup_clear(false);
-       } while (!dpm_suspend_noirq(PMSG_SUSPEND));
 }
 
 void freeze_wake(void)
@@ -394,8 +371,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
         * all the devices are suspended.
         */
        if (state == PM_SUSPEND_FREEZE) {
-               s2idle_loop();
-               goto Platform_early_resume;
+               trace_suspend_resume(TPS("machine_suspend"), state, true);
+               freeze_enter();
+               trace_suspend_resume(TPS("machine_suspend"), state, false);
+               goto Platform_wake;
        }
 
        error = disable_nonboot_cpus();
index f80fd33639e0e5f5b4fc7b1fb53ecfd87b1eaa4d..57d22571f3068bdecf36a3ac0f8d8566d5f7c455 100644 (file)
@@ -225,14 +225,14 @@ static struct block_device *hib_resume_bdev;
 struct hib_bio_batch {
        atomic_t                count;
        wait_queue_head_t       wait;
-       int                     error;
+       blk_status_t            error;
 };
 
 static void hib_init_batch(struct hib_bio_batch *hb)
 {
        atomic_set(&hb->count, 0);
        init_waitqueue_head(&hb->wait);
-       hb->error = 0;
+       hb->error = BLK_STS_OK;
 }
 
 static void hib_end_io(struct bio *bio)
@@ -240,7 +240,7 @@ static void hib_end_io(struct bio *bio)
        struct hib_bio_batch *hb = bio->bi_private;
        struct page *page = bio->bi_io_vec[0].bv_page;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
                                imajor(bio->bi_bdev->bd_inode),
                                iminor(bio->bi_bdev->bd_inode),
@@ -253,8 +253,8 @@ static void hib_end_io(struct bio *bio)
                flush_icache_range((unsigned long)page_address(page),
                                   (unsigned long)page_address(page) + PAGE_SIZE);
 
-       if (bio->bi_error && !hb->error)
-               hb->error = bio->bi_error;
+       if (bio->bi_status && !hb->error)
+               hb->error = bio->bi_status;
        if (atomic_dec_and_test(&hb->count))
                wake_up(&hb->wait);
 
@@ -293,10 +293,10 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
        return error;
 }
 
-static int hib_wait_io(struct hib_bio_batch *hb)
+static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
 {
        wait_event(hb->wait, atomic_read(&hb->count) == 0);
-       return hb->error;
+       return blk_status_to_errno(hb->error);
 }
 
 /*
index a1aecf44ab07c70ab9f33d455646559313926344..a1db38abac5b750e8ce228b441b27900360665ec 100644 (file)
@@ -269,7 +269,6 @@ static struct console *exclusive_console;
 #define MAX_CMDLINECONSOLES 8
 
 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
-static int console_cmdline_cnt;
 
 static int preferred_console = -1;
 int console_set_on_cmdline;
@@ -1906,25 +1905,12 @@ static int __add_preferred_console(char *name, int idx, char *options,
         *      See if this tty is not yet registered, and
         *      if we have a slot free.
         */
-       for (i = 0, c = console_cmdline; i < console_cmdline_cnt; i++, c++) {
+       for (i = 0, c = console_cmdline;
+            i < MAX_CMDLINECONSOLES && c->name[0];
+            i++, c++) {
                if (strcmp(c->name, name) == 0 && c->index == idx) {
-                       if (brl_options)
-                               return 0;
-
-                       /*
-                        * Maintain an invariant that will help to find if
-                        * the matching console is preferred, see
-                        * register_console():
-                        *
-                        * The last non-braille console is always
-                        * the preferred one.
-                        */
-                       if (i != console_cmdline_cnt - 1)
-                               swap(console_cmdline[i],
-                                    console_cmdline[console_cmdline_cnt - 1]);
-
-                       preferred_console = console_cmdline_cnt - 1;
-
+                       if (!brl_options)
+                               preferred_console = i;
                        return 0;
                }
        }
@@ -1937,7 +1923,6 @@ static int __add_preferred_console(char *name, int idx, char *options,
        braille_set_options(c, brl_options);
 
        c->index = idx;
-       console_cmdline_cnt++;
        return 0;
 }
 /*
@@ -2477,23 +2462,12 @@ void register_console(struct console *newcon)
        }
 
        /*
-        * See if this console matches one we selected on the command line.
-        *
-        * There may be several entries in the console_cmdline array matching
-        * with the same console, one with newcon->match(), another by
-        * name/index:
-        *
-        *      pl011,mmio,0x87e024000000,115200 -- added from SPCR
-        *      ttyAMA0 -- added from command line
-        *
-        * Traverse the console_cmdline array in reverse order to be
-        * sure that if this console is preferred then it will be the first
-        * matching entry.  We use the invariant that is maintained in
-        * __add_preferred_console().
+        *      See if this console matches one we selected on
+        *      the command line.
         */
-       for (i = console_cmdline_cnt - 1; i >= 0; i--) {
-               c = console_cmdline + i;
-
+       for (i = 0, c = console_cmdline;
+            i < MAX_CMDLINECONSOLES && c->name[0];
+            i++, c++) {
                if (!newcon->match ||
                    newcon->match(newcon, c->name, c->index, c->options) != 0) {
                        /* default matching */
index 584d8a983883584dfa87ff0e36cf01465506c199..dea03614263fdc185fbdb3a7e3ec9ec8d1618888 100644 (file)
@@ -263,7 +263,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct.  Must be called from process context.
+ * srcu_struct.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
 int __srcu_read_lock(struct srcu_struct *sp)
@@ -271,7 +271,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
        int idx;
 
        idx = READ_ONCE(sp->completed) & 0x1;
-       __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
+       this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
        smp_mb(); /* B */  /* Avoid leaking the critical section. */
        return idx;
 }
@@ -281,7 +281,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
  * Removes the count for the old reader from the appropriate per-CPU
  * element of the srcu_struct.  Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
- * Must be called from process context.
  */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
index 36e1f82faed15cb8e3bbfb3b7705bcdf29b5da71..32798eb14853d47b9b155bb4bd3c4007dd13736e 100644 (file)
@@ -97,8 +97,9 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct.  Must be called from process context.
- * Returns an index that must be passed to the matching srcu_read_unlock().
+ * srcu_struct.  Can be invoked from irq/bh handlers, but the matching
+ * __srcu_read_unlock() must be in the same handler instance.  Returns an
+ * index that must be passed to the matching srcu_read_unlock().
  */
 int __srcu_read_lock(struct srcu_struct *sp)
 {
@@ -112,7 +113,7 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
 
 /*
  * Removes the count for the old reader from the appropriate element of
- * the srcu_struct.  Must be called from process context.
+ * the srcu_struct.
  */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
index 3ae8474557df3975398722ecd6c8793e271af4ad..157654fa436a2f2f0c9284aedf51315747d017d9 100644 (file)
@@ -357,7 +357,7 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct.  Must be called from process context.
+ * srcu_struct.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
 int __srcu_read_lock(struct srcu_struct *sp)
@@ -365,7 +365,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
        int idx;
 
        idx = READ_ONCE(sp->srcu_idx) & 0x1;
-       __this_cpu_inc(sp->sda->srcu_lock_count[idx]);
+       this_cpu_inc(sp->sda->srcu_lock_count[idx]);
        smp_mb(); /* B */  /* Avoid leaking the critical section. */
        return idx;
 }
@@ -375,7 +375,6 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
  * Removes the count for the old reader from the appropriate per-CPU
  * element of the srcu_struct.  Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
- * Must be called from process context.
  */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
index 193c5f5e3f7988e8d27eed5c4292e2345e3c5bf7..bc364f86100aa89f5d7b463d8c5465d1505766ce 100644 (file)
@@ -867,7 +867,7 @@ static void blk_add_trace_split(void *ignore,
 
                __blk_add_trace(bt, bio->bi_iter.bi_sector,
                                bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
-                               BLK_TA_SPLIT, bio->bi_error, sizeof(rpdu),
+                               BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
                                &rpdu);
        }
 }
@@ -900,7 +900,7 @@ static void blk_add_trace_bio_remap(void *ignore,
        r.sector_from = cpu_to_be64(from);
 
        __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-                       bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_error,
+                       bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
                        sizeof(r), &r);
 }
 
index 23f6d0d3470fb3a92e7932890c340055540baf55..2da71e627812ea5cee576be0bca85cce32974708 100644 (file)
@@ -45,7 +45,7 @@ void end_swap_bio_write(struct bio *bio)
 {
        struct page *page = bio->bi_io_vec[0].bv_page;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                SetPageError(page);
                /*
                 * We failed to write the page out to swap-space.
@@ -118,7 +118,7 @@ static void end_swap_bio_read(struct bio *bio)
 {
        struct page *page = bio->bi_io_vec[0].bv_page;
 
-       if (bio->bi_error) {
+       if (bio->bi_status) {
                SetPageError(page);
                ClearPageUptodate(page);
                pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
index 574f78824d8a2ae53751bbe1849e53502bc575be..32bd3ead9ba14a0b42bda3fc959f9134a8c9cc36 100644 (file)
@@ -595,7 +595,7 @@ static int br_afspec(struct net_bridge *br,
                err = 0;
                switch (nla_type(attr)) {
                case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
-                       if (!(p->flags & BR_VLAN_TUNNEL))
+                       if (!p || !(p->flags & BR_VLAN_TUNNEL))
                                return -EINVAL;
                        err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
                        if (err)
index 0db8102995a506d64ece0de48b7266ccf3839ba8..6f12a5271219f071ed2d0bacb263561cb9e0f605 100644 (file)
@@ -179,7 +179,8 @@ static void br_stp_start(struct net_bridge *br)
                br_debug(br, "using kernel STP\n");
 
                /* To start timers on any ports left in blocking */
-               mod_timer(&br->hello_timer, jiffies + br->hello_time);
+               if (br->dev->flags & IFF_UP)
+                       mod_timer(&br->hello_timer, jiffies + br->hello_time);
                br_port_state_selection(br);
        }
 
index b0b87a292e7ccac2221a2425510b3c28e1df97f7..a0adfc31a3fe854cd52600f6b2924255aee521c2 100644 (file)
@@ -1680,8 +1680,10 @@ start_again:
 
        hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
                          &devlink_nl_family, NLM_F_MULTI, cmd);
-       if (!hdr)
+       if (!hdr) {
+               nlmsg_free(skb);
                return -EMSGSIZE;
+       }
 
        if (devlink_nl_put_handle(skb, devlink))
                goto nla_put_failure;
@@ -2098,8 +2100,10 @@ start_again:
 
        hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
                          &devlink_nl_family, NLM_F_MULTI, cmd);
-       if (!hdr)
+       if (!hdr) {
+               nlmsg_free(skb);
                return -EMSGSIZE;
+       }
 
        if (devlink_nl_put_handle(skb, devlink))
                goto nla_put_failure;
index 346d3e85dfbc2eca1ded0442ecb78d31e1768523..b1be7c01efe269d2bc97be7dcd1cc5485d29fa7b 100644 (file)
@@ -3754,8 +3754,11 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 
        spin_lock_irqsave(&q->lock, flags);
        skb = __skb_dequeue(q);
-       if (skb && (skb_next = skb_peek(q)))
+       if (skb && (skb_next = skb_peek(q))) {
                icmp_next = is_icmp_err_skb(skb_next);
+               if (icmp_next)
+                       sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
+       }
        spin_unlock_irqrestore(&q->lock, flags);
 
        if (is_icmp_err_skb(skb) && !icmp_next)
index 26130ae438da53f3f99a4e9fa712a572f40ca779..90038d45a54764df55017b46258dc772b0af1c96 100644 (file)
@@ -223,6 +223,53 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+int dsa_switch_suspend(struct dsa_switch *ds)
+{
+       int i, ret = 0;
+
+       /* Suspend slave network devices */
+       for (i = 0; i < ds->num_ports; i++) {
+               if (!dsa_is_port_initialized(ds, i))
+                       continue;
+
+               ret = dsa_slave_suspend(ds->ports[i].netdev);
+               if (ret)
+                       return ret;
+       }
+
+       if (ds->ops->suspend)
+               ret = ds->ops->suspend(ds);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_suspend);
+
+int dsa_switch_resume(struct dsa_switch *ds)
+{
+       int i, ret = 0;
+
+       if (ds->ops->resume)
+               ret = ds->ops->resume(ds);
+
+       if (ret)
+               return ret;
+
+       /* Resume slave network devices */
+       for (i = 0; i < ds->num_ports; i++) {
+               if (!dsa_is_port_initialized(ds, i))
+                       continue;
+
+               ret = dsa_slave_resume(ds->ports[i].netdev);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_resume);
+#endif
+
 static struct packet_type dsa_pack_type __read_mostly = {
        .type   = cpu_to_be16(ETH_P_XDSA),
        .func   = dsa_switch_rcv,
index 033b3bfb63dc1887b15b3e08f00a00f70b706ec4..7796580e99ee2c57cdd5503130c8ec2e121d879a 100644 (file)
@@ -484,8 +484,10 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
                dsa_ds_unapply(dst, ds);
        }
 
-       if (dst->cpu_switch)
+       if (dst->cpu_switch) {
                dsa_cpu_port_ethtool_restore(dst->cpu_switch);
+               dst->cpu_switch = NULL;
+       }
 
        pr_info("DSA: tree %d unapplied\n", dst->tree);
        dst->applied = false;
index ad345c8b0b0693cc214b212ccbb402859d910134..7281098df04ecd597b7824a9c0d2ec1d7f60e2b9 100644 (file)
@@ -289,53 +289,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
        dsa_switch_unregister_notifier(ds);
 }
 
-#ifdef CONFIG_PM_SLEEP
-int dsa_switch_suspend(struct dsa_switch *ds)
-{
-       int i, ret = 0;
-
-       /* Suspend slave network devices */
-       for (i = 0; i < ds->num_ports; i++) {
-               if (!dsa_is_port_initialized(ds, i))
-                       continue;
-
-               ret = dsa_slave_suspend(ds->ports[i].netdev);
-               if (ret)
-                       return ret;
-       }
-
-       if (ds->ops->suspend)
-               ret = ds->ops->suspend(ds);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_suspend);
-
-int dsa_switch_resume(struct dsa_switch *ds)
-{
-       int i, ret = 0;
-
-       if (ds->ops->resume)
-               ret = ds->ops->resume(ds);
-
-       if (ret)
-               return ret;
-
-       /* Resume slave network devices */
-       for (i = 0; i < ds->num_ports; i++) {
-               if (!dsa_is_port_initialized(ds, i))
-                       continue;
-
-               ret = dsa_slave_resume(ds->ports[i].netdev);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(dsa_switch_resume);
-#endif
-
 /* platform driver init and cleanup *****************************************/
 static int dev_is_class(struct device *dev, void *class)
 {
index f3dad16613437c0c7ac3e9c7518a0929cddb3ca7..58925b6597de83e7d643fb9b1c7e992c9748ae1c 100644 (file)
@@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] =
                .type =       SOCK_DGRAM,
                .protocol =   IPPROTO_ICMP,
                .prot =       &ping_prot,
-               .ops =        &inet_dgram_ops,
+               .ops =        &inet_sockraw_ops,
                .flags =      INET_PROTOSW_REUSE,
        },
 
index 59792d283ff8c19048904cb790dbeebef14da73d..b5ea036ca78144b86622cb0944d0b840f7225ec5 100644 (file)
@@ -2381,9 +2381,10 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l
        return 0;
 }
 
-static int tcp_repair_options_est(struct tcp_sock *tp,
+static int tcp_repair_options_est(struct sock *sk,
                struct tcp_repair_opt __user *optbuf, unsigned int len)
 {
+       struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_repair_opt opt;
 
        while (len >= sizeof(opt)) {
@@ -2396,6 +2397,7 @@ static int tcp_repair_options_est(struct tcp_sock *tp,
                switch (opt.opt_code) {
                case TCPOPT_MSS:
                        tp->rx_opt.mss_clamp = opt.opt_val;
+                       tcp_mtup_init(sk);
                        break;
                case TCPOPT_WINDOW:
                        {
@@ -2555,7 +2557,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                if (!tp->repair)
                        err = -EINVAL;
                else if (sk->sk_state == TCP_ESTABLISHED)
-                       err = tcp_repair_options_est(tp,
+                       err = tcp_repair_options_est(sk,
                                        (struct tcp_repair_opt __user *)optval,
                                        optlen);
                else
index 6e3c512054a60715e8e2d16ffedd12cba6a3d2d9..324c9bcc5456b499b59cef40838b4e9829119e13 100644 (file)
@@ -180,6 +180,7 @@ void tcp_init_congestion_control(struct sock *sk)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
 
+       tcp_sk(sk)->prior_ssthresh = 0;
        if (icsk->icsk_ca_ops->init)
                icsk->icsk_ca_ops->init(sk);
        if (tcp_ca_needs_ecn(sk))
index 37ac9de713c69af30ae50d03e53ee472a7520b98..8d772fea1ddecd427a66c18f34d50f969186f02a 100644 (file)
@@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
        struct ipv6hdr *ip6_hdr;
        struct ipv6_opt_hdr *hop;
        unsigned char buf[CALIPSO_MAX_BUFFER];
-       int len_delta, new_end, pad;
+       int len_delta, new_end, pad, payload;
        unsigned int start, end;
 
        ip6_hdr = ipv6_hdr(skb);
@@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
        if (ret_val < 0)
                return ret_val;
 
+       ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
+
        if (len_delta) {
                if (len_delta > 0)
                        skb_push(skb, len_delta);
@@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
                        sizeof(*ip6_hdr) + start);
                skb_reset_network_header(skb);
                ip6_hdr = ipv6_hdr(skb);
+               payload = ntohs(ip6_hdr->payload_len);
+               ip6_hdr->payload_len = htons(payload + len_delta);
        }
 
        hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
index 280268f1dd7b0972d7fadbcc9e28b043ceae423d..cdb3728faca7746d91e2430f6024f060a82b24fd 100644 (file)
@@ -116,8 +116,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
 
                if (udpfrag) {
                        int err = ip6_find_1stfragopt(skb, &prevhdr);
-                       if (err < 0)
+                       if (err < 0) {
+                               kfree_skb_list(segs);
                                return ERR_PTR(err);
+                       }
                        fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
                        fptr->frag_off = htons(offset);
                        if (skb->next)
index 7ae6c503f1ca2b089388598bfceb43e4aa2d2fea..9b37f9747fc6a6fbabb0740188bc98b5c95c41c4 100644 (file)
@@ -1095,6 +1095,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
 
        if (!dst) {
 route_lookup:
+               /* add dsfield to flowlabel for route lookup */
+               fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
+
                dst = ip6_route_output(net, NULL, fl6);
 
                if (dst->error)
index 9b522fa90e6d8f4a87ebed7cf574a36ceea89c61..ac826dd338ff0825eaf0d2d74cee92d008e018bb 100644 (file)
@@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = {
        .type =      SOCK_DGRAM,
        .protocol =  IPPROTO_ICMPV6,
        .prot =      &pingv6_prot,
-       .ops =       &inet6_dgram_ops,
+       .ops =       &inet6_sockraw_ops,
        .flags =     INET_PROTOSW_REUSE,
 };
 
index 1f992d9e261d8b75226659a4cead95f8dc04dc4f..60be012fe7085cc7a199e84333cef5ee95ed1f04 100644 (file)
@@ -1338,7 +1338,7 @@ void raw6_proc_exit(void)
 #endif /* CONFIG_PROC_FS */
 
 /* Same as inet6_dgram_ops, sans udp_poll.  */
-static const struct proto_ops inet6_sockraw_ops = {
+const struct proto_ops inet6_sockraw_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
        .release           = inet6_release,
index 0e015906f9ca91e11d3e9e124c532c6a7cb04c5d..07d36573f50b9451e4c2bfee331ac2c023791a7a 100644 (file)
@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
        iph = ipv6_hdr(skb);
 
        hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+       if (hdr_len < 0)
+               return hdr_len;
        skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
        skb_set_network_header(skb, -x->props.header_len);
        skb->transport_header = skb->network_header + hdr_len;
index 7a92c0f3191250118ce3572ca91ee9116460bce8..9ad07a91708ef7a1008d469766ab39b9b882883f 100644 (file)
@@ -30,6 +30,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
        skb_set_inner_transport_header(skb, skb_transport_offset(skb));
 
        hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+       if (hdr_len < 0)
+               return hdr_len;
        skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
        skb_set_network_header(skb, -x->props.header_len);
        skb->transport_header = skb->network_header + hdr_len;
index 60e2a62f7bef2fbb014f3a40403cf498a75d429c..cf2392b2ac717972e4354346fd086ec802370de3 100644 (file)
@@ -7,7 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -741,46 +741,43 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
        ieee80211_agg_start_txq(sta, tid, true);
 }
 
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+                             struct tid_ampdu_tx *tid_tx)
 {
-       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
        struct ieee80211_local *local = sdata->local;
-       struct sta_info *sta;
-       struct tid_ampdu_tx *tid_tx;
 
-       trace_api_start_tx_ba_cb(sdata, ra, tid);
+       if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
+               return;
+
+       if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
+               ieee80211_agg_tx_operational(local, sta, tid);
+}
+
+static struct tid_ampdu_tx *
+ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
+                       const u8 *ra, u16 tid, struct sta_info **sta)
+{
+       struct tid_ampdu_tx *tid_tx;
 
        if (tid >= IEEE80211_NUM_TIDS) {
                ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
                       tid, IEEE80211_NUM_TIDS);
-               return;
+               return NULL;
        }
 
-       mutex_lock(&local->sta_mtx);
-       sta = sta_info_get_bss(sdata, ra);
-       if (!sta) {
-               mutex_unlock(&local->sta_mtx);
+       *sta = sta_info_get_bss(sdata, ra);
+       if (!*sta) {
                ht_dbg(sdata, "Could not find station: %pM\n", ra);
-               return;
+               return NULL;
        }
 
-       mutex_lock(&sta->ampdu_mlme.mtx);
-       tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+       tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
 
-       if (WARN_ON(!tid_tx)) {
+       if (WARN_ON(!tid_tx))
                ht_dbg(sdata, "addBA was not requested!\n");
-               goto unlock;
-       }
 
-       if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
-               goto unlock;
-
-       if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
-               ieee80211_agg_tx_operational(local, sta, tid);
-
- unlock:
-       mutex_unlock(&sta->ampdu_mlme.mtx);
-       mutex_unlock(&local->sta_mtx);
+       return tid_tx;
 }
 
 void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -788,19 +785,20 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
        struct ieee80211_local *local = sdata->local;
-       struct ieee80211_ra_tid *ra_tid;
-       struct sk_buff *skb = dev_alloc_skb(0);
+       struct sta_info *sta;
+       struct tid_ampdu_tx *tid_tx;
 
-       if (unlikely(!skb))
-               return;
+       trace_api_start_tx_ba_cb(sdata, ra, tid);
 
-       ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-       memcpy(&ra_tid->ra, ra, ETH_ALEN);
-       ra_tid->tid = tid;
+       rcu_read_lock();
+       tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+       if (!tid_tx)
+               goto out;
 
-       skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
-       skb_queue_tail(&sdata->skb_queue, skb);
-       ieee80211_queue_work(&local->hw, &sdata->work);
+       set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
+       ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
 
@@ -860,37 +858,18 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
 
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+                            struct tid_ampdu_tx *tid_tx)
 {
-       struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-       struct ieee80211_local *local = sdata->local;
-       struct sta_info *sta;
-       struct tid_ampdu_tx *tid_tx;
+       struct ieee80211_sub_if_data *sdata = sta->sdata;
        bool send_delba = false;
 
-       trace_api_stop_tx_ba_cb(sdata, ra, tid);
-
-       if (tid >= IEEE80211_NUM_TIDS) {
-               ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
-                      tid, IEEE80211_NUM_TIDS);
-               return;
-       }
-
-       ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
-
-       mutex_lock(&local->sta_mtx);
-
-       sta = sta_info_get_bss(sdata, ra);
-       if (!sta) {
-               ht_dbg(sdata, "Could not find station: %pM\n", ra);
-               goto unlock;
-       }
+       ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
+              sta->sta.addr, tid);
 
-       mutex_lock(&sta->ampdu_mlme.mtx);
        spin_lock_bh(&sta->lock);
-       tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
-       if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+       if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
                ht_dbg(sdata,
                       "unexpected callback to A-MPDU stop for %pM tid %d\n",
                       sta->sta.addr, tid);
@@ -906,12 +885,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
        spin_unlock_bh(&sta->lock);
 
        if (send_delba)
-               ieee80211_send_delba(sdata, ra, tid,
+               ieee80211_send_delba(sdata, sta->sta.addr, tid,
                        WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
-
-       mutex_unlock(&sta->ampdu_mlme.mtx);
- unlock:
-       mutex_unlock(&local->sta_mtx);
 }
 
 void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -919,19 +894,20 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
        struct ieee80211_local *local = sdata->local;
-       struct ieee80211_ra_tid *ra_tid;
-       struct sk_buff *skb = dev_alloc_skb(0);
+       struct sta_info *sta;
+       struct tid_ampdu_tx *tid_tx;
 
-       if (unlikely(!skb))
-               return;
+       trace_api_stop_tx_ba_cb(sdata, ra, tid);
 
-       ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
-       memcpy(&ra_tid->ra, ra, ETH_ALEN);
-       ra_tid->tid = tid;
+       rcu_read_lock();
+       tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
+       if (!tid_tx)
+               goto out;
 
-       skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
-       skb_queue_tail(&sdata->skb_queue, skb);
-       ieee80211_queue_work(&local->hw, &sdata->work);
+       set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
+       ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
+ out:
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
 
index f4a52877356349abed96d0a77d6ae75f301181e4..6ca5442b1e03b18aa81764089bf74c002d337690 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
+ * Copyright 2017      Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -289,8 +290,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 {
        int i;
 
-       cancel_work_sync(&sta->ampdu_mlme.work);
-
        for (i = 0; i <  IEEE80211_NUM_TIDS; i++) {
                __ieee80211_stop_tx_ba_session(sta, i, reason);
                __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -298,6 +297,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
                                               reason != AGG_STOP_DESTROY_STA &&
                                               reason != AGG_STOP_PEER_REQUEST);
        }
+
+       /* stopping might queue the work again - so cancel only afterwards */
+       cancel_work_sync(&sta->ampdu_mlme.work);
 }
 
 void ieee80211_ba_session_work(struct work_struct *work)
@@ -352,10 +354,16 @@ void ieee80211_ba_session_work(struct work_struct *work)
                spin_unlock_bh(&sta->lock);
 
                tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-               if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
-                                                &tid_tx->state))
+               if (!tid_tx)
+                       continue;
+
+               if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
+                       ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
+               if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
                        ___ieee80211_stop_tx_ba_session(sta, tid,
                                                        AGG_STOP_LOCAL_REQUEST);
+               if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
+                       ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
        }
        mutex_unlock(&sta->ampdu_mlme.mtx);
 }
index f8f6c148f5545feeb78c16ca6f33ebf5e02d0ab5..665501ac358f8d83630f2727fe6249dcfeeb9689 100644 (file)
@@ -1036,8 +1036,6 @@ struct ieee80211_rx_agg {
 
 enum sdata_queue_type {
        IEEE80211_SDATA_QUEUE_TYPE_FRAME        = 0,
-       IEEE80211_SDATA_QUEUE_AGG_START         = 1,
-       IEEE80211_SDATA_QUEUE_AGG_STOP          = 2,
        IEEE80211_SDATA_QUEUE_RX_AGG_START      = 3,
        IEEE80211_SDATA_QUEUE_RX_AGG_STOP       = 4,
 };
@@ -1427,12 +1425,6 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
        return local->hw.wiphy->bands[band];
 }
 
-/* this struct represents 802.11n's RA/TID combination */
-struct ieee80211_ra_tid {
-       u8 ra[ETH_ALEN];
-       u16 tid;
-};
-
 /* this struct holds the value parsing from channel switch IE  */
 struct ieee80211_csa_ie {
        struct cfg80211_chan_def chandef;
@@ -1794,8 +1786,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                   enum ieee80211_agg_stop_reason reason);
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                    enum ieee80211_agg_stop_reason reason);
-void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
-void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
+                             struct tid_ampdu_tx *tid_tx);
+void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
+                            struct tid_ampdu_tx *tid_tx);
 void ieee80211_ba_session_work(struct work_struct *work);
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
index 3bd5b81f5d81ec7d73686043c2683630e24ecde4..8fae1a72e6a7c7ea4f71ec3a3beb215b987a715f 100644 (file)
@@ -1237,7 +1237,6 @@ static void ieee80211_iface_work(struct work_struct *work)
        struct ieee80211_local *local = sdata->local;
        struct sk_buff *skb;
        struct sta_info *sta;
-       struct ieee80211_ra_tid *ra_tid;
        struct ieee80211_rx_agg *rx_agg;
 
        if (!ieee80211_sdata_running(sdata))
@@ -1253,15 +1252,7 @@ static void ieee80211_iface_work(struct work_struct *work)
        while ((skb = skb_dequeue(&sdata->skb_queue))) {
                struct ieee80211_mgmt *mgmt = (void *)skb->data;
 
-               if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) {
-                       ra_tid = (void *)&skb->cb;
-                       ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
-                                                ra_tid->tid);
-               } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
-                       ra_tid = (void *)&skb->cb;
-                       ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
-                                               ra_tid->tid);
-               } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
+               if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
                        rx_agg = (void *)&skb->cb;
                        mutex_lock(&local->sta_mtx);
                        sta = sta_info_get_bss(sdata, rx_agg->addr);
index 7cdf7a835bb01e8fade3b9d9bb6efaa19158f72b..403e3cc58b573dc511c8ba399ddefdc7e8e24ba0 100644 (file)
@@ -2155,7 +2155,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
                        struct ieee80211_sta_rx_stats *cpurxs;
 
                        cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
-                       sinfo->rx_packets += cpurxs->dropped;
+                       sinfo->rx_dropped_misc += cpurxs->dropped;
                }
        }
 
index 5609cacb20d5f31e02ba877f88a6f0290e18ce8b..ea0747d6a6da194fec9116dc14b58a543f1accf2 100644 (file)
@@ -116,6 +116,8 @@ enum ieee80211_sta_info_flags {
 #define HT_AGG_STATE_STOPPING          3
 #define HT_AGG_STATE_WANT_START                4
 #define HT_AGG_STATE_WANT_STOP         5
+#define HT_AGG_STATE_START_CB          6
+#define HT_AGG_STATE_STOP_CB           7
 
 enum ieee80211_agg_stop_reason {
        AGG_STOP_DECLINED,
index 257ec66009da2dd7010d063c0ad29dbb9a32564e..7b05fd1497ceddea47c2c7917f5ee58f6ff2d560 100644 (file)
@@ -1418,7 +1418,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
                                continue;
                        alive++;
                        nh_flags &= ~flags;
-                       WRITE_ONCE(nh->nh_flags, flags);
+                       WRITE_ONCE(nh->nh_flags, nh_flags);
                } endfor_nexthops(rt);
 
                WRITE_ONCE(rt->rt_nhn_alive, alive);
index 9799a50bc604cc630494514cc80aa49edc2def0e..a8be9b72e6cd2ca34166bba49a532f4f92e86e9e 100644 (file)
@@ -890,8 +890,13 @@ restart:
        }
 out:
        local_bh_enable();
-       if (last)
+       if (last) {
+               /* nf ct hash resize happened, now clear the leftover. */
+               if ((struct nf_conn *)cb->args[1] == last)
+                       cb->args[1] = 0;
+
                nf_ct_put(last);
+       }
 
        while (i) {
                i--;
index 13875d599a85713bbfeb2fee7b8978fa498a10ed..1c5b14a6cab369591bd13e22b284a0737ad75c2e 100644 (file)
@@ -512,16 +512,19 @@ static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
                      u8 pf, unsigned int hooknum)
 {
        const struct sctphdr *sh;
-       struct sctphdr _sctph;
        const char *logmsg;
 
-       sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
-       if (!sh) {
+       if (skb->len < dataoff + sizeof(struct sctphdr)) {
                logmsg = "nf_ct_sctp: short packet ";
                goto out_invalid;
        }
        if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
            skb->ip_summed == CHECKSUM_NONE) {
+               if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
+                       logmsg = "nf_ct_sctp: failed to read header ";
+                       goto out_invalid;
+               }
+               sh = (const struct sctphdr *)(skb->data + dataoff);
                if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
                        logmsg = "nf_ct_sctp: bad CRC ";
                        goto out_invalid;
index ef0be325a0c6368bfe29ecda39db37dcb178a6d2..6c72922d20caee83f498cb02cdce0c46899a1c22 100644 (file)
@@ -566,7 +566,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
         * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
         * will delete entry from already-freed table.
         */
-       ct->status &= ~IPS_NAT_DONE_MASK;
+       clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
        rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
                        nf_nat_bysource_params);
 
index e97e2fb53f0a107b0361322be10f16b4ab4b5d32..fbdbaa00dd5fd751f6ab8f428de987017b5bdf79 100644 (file)
@@ -116,17 +116,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                else if (d > 0)
                        p = &parent->rb_right;
                else {
-                       if (nft_set_elem_active(&rbe->ext, genmask)) {
-                               if (nft_rbtree_interval_end(rbe) &&
-                                   !nft_rbtree_interval_end(new))
-                                       p = &parent->rb_left;
-                               else if (!nft_rbtree_interval_end(rbe) &&
-                                        nft_rbtree_interval_end(new))
-                                       p = &parent->rb_right;
-                               else {
-                                       *ext = &rbe->ext;
-                                       return -EEXIST;
-                               }
+                       if (nft_rbtree_interval_end(rbe) &&
+                           !nft_rbtree_interval_end(new)) {
+                               p = &parent->rb_left;
+                       } else if (!nft_rbtree_interval_end(rbe) &&
+                                  nft_rbtree_interval_end(new)) {
+                               p = &parent->rb_right;
+                       } else if (nft_set_elem_active(&rbe->ext, genmask)) {
+                               *ext = &rbe->ext;
+                               return -EEXIST;
+                       } else {
+                               p = &parent->rb_left;
                        }
                }
        }
index ee841f00a6ec715914fb14bb31dc8405f8dd4c4e..7586d446d7dcafc5c44b43190398840b68107d1f 100644 (file)
@@ -62,6 +62,7 @@
 #include <asm/cacheflush.h>
 #include <linux/hash.h>
 #include <linux/genetlink.h>
+#include <linux/net_namespace.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -1415,7 +1416,8 @@ static void do_one_broadcast(struct sock *sk,
                goto out;
        }
        NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
-       NETLINK_CB(p->skb2).nsid_is_set = true;
+       if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
+               NETLINK_CB(p->skb2).nsid_is_set = true;
        val = netlink_broadcast_deliver(sk, p->skb2);
        if (val < 0) {
                netlink_overrun(sk);
index 6fd95f76bfaeaf4a300219ee290048c3913632a5..a7a23b5541f85a4994e0cc83d7d132e8a5681938 100644 (file)
@@ -20,6 +20,10 @@ config KEYS
 
          If you are unsure as to whether this is required, answer N.
 
+config KEYS_COMPAT
+       def_bool y
+       depends on COMPAT && KEYS
+
 config PERSISTENT_KEYRINGS
        bool "Enable register of persistent per-UID keyrings"
        depends on KEYS
@@ -89,9 +93,9 @@ config ENCRYPTED_KEYS
 config KEY_DH_OPERATIONS
        bool "Diffie-Hellman operations on retained keys"
        depends on KEYS
-       select MPILIB
        select CRYPTO
        select CRYPTO_HASH
+       select CRYPTO_DH
        help
         This option provides support for calculating Diffie-Hellman
         public keys and shared secrets using values stored as keys
index e603bd912e4c2300287e98248449a55b2e0b4ea4..4755d4b4f94544236cd2b8cca1b2169b895579b7 100644 (file)
@@ -8,34 +8,17 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/mpi.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/scatterlist.h>
 #include <linux/crypto.h>
 #include <crypto/hash.h>
+#include <crypto/kpp.h>
+#include <crypto/dh.h>
 #include <keys/user-type.h>
 #include "internal.h"
 
-/*
- * Public key or shared secret generation function [RFC2631 sec 2.1.1]
- *
- * ya = g^xa mod p;
- * or
- * ZZ = yb^xa mod p;
- *
- * where xa is the local private key, ya is the local public key, g is
- * the generator, p is the prime, yb is the remote public key, and ZZ
- * is the shared secret.
- *
- * Both are the same calculation, so g or yb are the "base" and ya or
- * ZZ are the "result".
- */
-static int do_dh(MPI result, MPI base, MPI xa, MPI p)
-{
-       return mpi_powm(result, base, xa, p);
-}
-
-static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi)
+static ssize_t dh_data_from_key(key_serial_t keyid, void **data)
 {
        struct key *key;
        key_ref_t key_ref;
@@ -56,19 +39,17 @@ static ssize_t mpi_from_key(key_serial_t keyid, size_t maxlen, MPI *mpi)
                status = key_validate(key);
                if (status == 0) {
                        const struct user_key_payload *payload;
+                       uint8_t *duplicate;
 
                        payload = user_key_payload_locked(key);
 
-                       if (maxlen == 0) {
-                               *mpi = NULL;
+                       duplicate = kmemdup(payload->data, payload->datalen,
+                                           GFP_KERNEL);
+                       if (duplicate) {
+                               *data = duplicate;
                                ret = payload->datalen;
-                       } else if (payload->datalen <= maxlen) {
-                               *mpi = mpi_read_raw_data(payload->data,
-                                                        payload->datalen);
-                               if (*mpi)
-                                       ret = payload->datalen;
                        } else {
-                               ret = -EINVAL;
+                               ret = -ENOMEM;
                        }
                }
                up_read(&key->sem);
@@ -79,6 +60,29 @@ error:
        return ret;
 }
 
+static void dh_free_data(struct dh *dh)
+{
+       kzfree(dh->key);
+       kzfree(dh->p);
+       kzfree(dh->g);
+}
+
+struct dh_completion {
+       struct completion completion;
+       int err;
+};
+
+static void dh_crypto_done(struct crypto_async_request *req, int err)
+{
+       struct dh_completion *compl = req->data;
+
+       if (err == -EINPROGRESS)
+               return;
+
+       compl->err = err;
+       complete(&compl->completion);
+}
+
 struct kdf_sdesc {
        struct shash_desc shash;
        char ctx[];
@@ -89,6 +93,7 @@ static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname)
        struct crypto_shash *tfm;
        struct kdf_sdesc *sdesc;
        int size;
+       int err;
 
        /* allocate synchronous hash */
        tfm = crypto_alloc_shash(hashname, 0, 0);
@@ -97,16 +102,25 @@ static int kdf_alloc(struct kdf_sdesc **sdesc_ret, char *hashname)
                return PTR_ERR(tfm);
        }
 
+       err = -EINVAL;
+       if (crypto_shash_digestsize(tfm) == 0)
+               goto out_free_tfm;
+
+       err = -ENOMEM;
        size = sizeof(struct shash_desc) + crypto_shash_descsize(tfm);
        sdesc = kmalloc(size, GFP_KERNEL);
        if (!sdesc)
-               return -ENOMEM;
+               goto out_free_tfm;
        sdesc->shash.tfm = tfm;
        sdesc->shash.flags = 0x0;
 
        *sdesc_ret = sdesc;
 
        return 0;
+
+out_free_tfm:
+       crypto_free_shash(tfm);
+       return err;
 }
 
 static void kdf_dealloc(struct kdf_sdesc *sdesc)
@@ -120,14 +134,6 @@ static void kdf_dealloc(struct kdf_sdesc *sdesc)
        kzfree(sdesc);
 }
 
-/* convert 32 bit integer into its string representation */
-static inline void crypto_kw_cpu_to_be32(u32 val, u8 *buf)
-{
-       __be32 *a = (__be32 *)buf;
-
-       *a = cpu_to_be32(val);
-}
-
 /*
  * Implementation of the KDF in counter mode according to SP800-108 section 5.1
  * as well as SP800-56A section 5.8.1 (Single-step KDF).
@@ -138,25 +144,39 @@ static inline void crypto_kw_cpu_to_be32(u32 val, u8 *buf)
  * 5.8.1.2).
  */
 static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
-                  u8 *dst, unsigned int dlen)
+                  u8 *dst, unsigned int dlen, unsigned int zlen)
 {
        struct shash_desc *desc = &sdesc->shash;
        unsigned int h = crypto_shash_digestsize(desc->tfm);
        int err = 0;
        u8 *dst_orig = dst;
-       u32 i = 1;
-       u8 iteration[sizeof(u32)];
+       __be32 counter = cpu_to_be32(1);
 
        while (dlen) {
                err = crypto_shash_init(desc);
                if (err)
                        goto err;
 
-               crypto_kw_cpu_to_be32(i, iteration);
-               err = crypto_shash_update(desc, iteration, sizeof(u32));
+               err = crypto_shash_update(desc, (u8 *)&counter, sizeof(__be32));
                if (err)
                        goto err;
 
+               if (zlen && h) {
+                       u8 tmpbuffer[h];
+                       size_t chunk = min_t(size_t, zlen, h);
+                       memset(tmpbuffer, 0, chunk);
+
+                       do {
+                               err = crypto_shash_update(desc, tmpbuffer,
+                                                         chunk);
+                               if (err)
+                                       goto err;
+
+                               zlen -= chunk;
+                               chunk = min_t(size_t, zlen, h);
+                       } while (zlen);
+               }
+
                if (src && slen) {
                        err = crypto_shash_update(desc, src, slen);
                        if (err)
@@ -179,7 +199,7 @@ static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
 
                        dlen -= h;
                        dst += h;
-                       i++;
+                       counter = cpu_to_be32(be32_to_cpu(counter) + 1);
                }
        }
 
@@ -192,7 +212,7 @@ err:
 
 static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
                                 char __user *buffer, size_t buflen,
-                                uint8_t *kbuf, size_t kbuflen)
+                                uint8_t *kbuf, size_t kbuflen, size_t lzero)
 {
        uint8_t *outbuf = NULL;
        int ret;
@@ -203,7 +223,7 @@ static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
                goto err;
        }
 
-       ret = kdf_ctr(sdesc, kbuf, kbuflen, outbuf, buflen);
+       ret = kdf_ctr(sdesc, kbuf, kbuflen, outbuf, buflen, lzero);
        if (ret)
                goto err;
 
@@ -221,21 +241,26 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
                         struct keyctl_kdf_params *kdfcopy)
 {
        long ret;
-       MPI base, private, prime, result;
-       unsigned nbytes;
+       ssize_t dlen;
+       int secretlen;
+       int outlen;
        struct keyctl_dh_params pcopy;
-       uint8_t *kbuf;
-       ssize_t keylen;
-       size_t resultlen;
+       struct dh dh_inputs;
+       struct scatterlist outsg;
+       struct dh_completion compl;
+       struct crypto_kpp *tfm;
+       struct kpp_request *req;
+       uint8_t *secret;
+       uint8_t *outbuf;
        struct kdf_sdesc *sdesc = NULL;
 
        if (!params || (!buffer && buflen)) {
                ret = -EINVAL;
-               goto out;
+               goto out1;
        }
        if (copy_from_user(&pcopy, params, sizeof(pcopy)) != 0) {
                ret = -EFAULT;
-               goto out;
+               goto out1;
        }
 
        if (kdfcopy) {
@@ -244,104 +269,147 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
                if (buflen > KEYCTL_KDF_MAX_OUTPUT_LEN ||
                    kdfcopy->otherinfolen > KEYCTL_KDF_MAX_OI_LEN) {
                        ret = -EMSGSIZE;
-                       goto out;
+                       goto out1;
                }
 
                /* get KDF name string */
                hashname = strndup_user(kdfcopy->hashname, CRYPTO_MAX_ALG_NAME);
                if (IS_ERR(hashname)) {
                        ret = PTR_ERR(hashname);
-                       goto out;
+                       goto out1;
                }
 
                /* allocate KDF from the kernel crypto API */
                ret = kdf_alloc(&sdesc, hashname);
                kfree(hashname);
                if (ret)
-                       goto out;
+                       goto out1;
        }
 
-       /*
-        * If the caller requests postprocessing with a KDF, allow an
-        * arbitrary output buffer size since the KDF ensures proper truncation.
-        */
-       keylen = mpi_from_key(pcopy.prime, kdfcopy ? SIZE_MAX : buflen, &prime);
-       if (keylen < 0 || !prime) {
-               /* buflen == 0 may be used to query the required buffer size,
-                * which is the prime key length.
-                */
-               ret = keylen;
-               goto out;
+       memset(&dh_inputs, 0, sizeof(dh_inputs));
+
+       dlen = dh_data_from_key(pcopy.prime, &dh_inputs.p);
+       if (dlen < 0) {
+               ret = dlen;
+               goto out1;
+       }
+       dh_inputs.p_size = dlen;
+
+       dlen = dh_data_from_key(pcopy.base, &dh_inputs.g);
+       if (dlen < 0) {
+               ret = dlen;
+               goto out2;
        }
+       dh_inputs.g_size = dlen;
 
-       /* The result is never longer than the prime */
-       resultlen = keylen;
+       dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
+       if (dlen < 0) {
+               ret = dlen;
+               goto out2;
+       }
+       dh_inputs.key_size = dlen;
 
-       keylen = mpi_from_key(pcopy.base, SIZE_MAX, &base);
-       if (keylen < 0 || !base) {
-               ret = keylen;
-               goto error1;
+       secretlen = crypto_dh_key_len(&dh_inputs);
+       secret = kmalloc(secretlen, GFP_KERNEL);
+       if (!secret) {
+               ret = -ENOMEM;
+               goto out2;
        }
+       ret = crypto_dh_encode_key(secret, secretlen, &dh_inputs);
+       if (ret)
+               goto out3;
 
-       keylen = mpi_from_key(pcopy.private, SIZE_MAX, &private);
-       if (keylen < 0 || !private) {
-               ret = keylen;
-               goto error2;
+       tfm = crypto_alloc_kpp("dh", CRYPTO_ALG_TYPE_KPP, 0);
+       if (IS_ERR(tfm)) {
+               ret = PTR_ERR(tfm);
+               goto out3;
+       }
+
+       ret = crypto_kpp_set_secret(tfm, secret, secretlen);
+       if (ret)
+               goto out4;
+
+       outlen = crypto_kpp_maxsize(tfm);
+
+       if (!kdfcopy) {
+               /*
+                * When not using a KDF, buflen 0 is used to read the
+                * required buffer length
+                */
+               if (buflen == 0) {
+                       ret = outlen;
+                       goto out4;
+               } else if (outlen > buflen) {
+                       ret = -EOVERFLOW;
+                       goto out4;
+               }
        }
 
-       result = mpi_alloc(0);
-       if (!result) {
+       outbuf = kzalloc(kdfcopy ? (outlen + kdfcopy->otherinfolen) : outlen,
+                        GFP_KERNEL);
+       if (!outbuf) {
                ret = -ENOMEM;
-               goto error3;
+               goto out4;
        }
 
-       /* allocate space for DH shared secret and SP800-56A otherinfo */
-       kbuf = kmalloc(kdfcopy ? (resultlen + kdfcopy->otherinfolen) : resultlen,
-                      GFP_KERNEL);
-       if (!kbuf) {
+       sg_init_one(&outsg, outbuf, outlen);
+
+       req = kpp_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
                ret = -ENOMEM;
-               goto error4;
+               goto out5;
        }
 
+       kpp_request_set_input(req, NULL, 0);
+       kpp_request_set_output(req, &outsg, outlen);
+       init_completion(&compl.completion);
+       kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+                                CRYPTO_TFM_REQ_MAY_SLEEP,
+                                dh_crypto_done, &compl);
+
        /*
-        * Concatenate SP800-56A otherinfo past DH shared secret -- the
-        * input to the KDF is (DH shared secret || otherinfo)
+        * For DH, generate_public_key and generate_shared_secret are
+        * the same calculation
         */
-       if (kdfcopy && kdfcopy->otherinfo &&
-           copy_from_user(kbuf + resultlen, kdfcopy->otherinfo,
-                          kdfcopy->otherinfolen) != 0) {
-               ret = -EFAULT;
-               goto error5;
+       ret = crypto_kpp_generate_public_key(req);
+       if (ret == -EINPROGRESS) {
+               wait_for_completion(&compl.completion);
+               ret = compl.err;
+               if (ret)
+                       goto out6;
        }
 
-       ret = do_dh(result, base, private, prime);
-       if (ret)
-               goto error5;
-
-       ret = mpi_read_buffer(result, kbuf, resultlen, &nbytes, NULL);
-       if (ret != 0)
-               goto error5;
-
        if (kdfcopy) {
-               ret = keyctl_dh_compute_kdf(sdesc, buffer, buflen, kbuf,
-                                           resultlen + kdfcopy->otherinfolen);
-       } else {
-               ret = nbytes;
-               if (copy_to_user(buffer, kbuf, nbytes) != 0)
+               /*
+                * Concatenate SP800-56A otherinfo past DH shared secret -- the
+                * input to the KDF is (DH shared secret || otherinfo)
+                */
+               if (copy_from_user(outbuf + req->dst_len, kdfcopy->otherinfo,
+                                  kdfcopy->otherinfolen) != 0) {
                        ret = -EFAULT;
+                       goto out6;
+               }
+
+               ret = keyctl_dh_compute_kdf(sdesc, buffer, buflen, outbuf,
+                                           req->dst_len + kdfcopy->otherinfolen,
+                                           outlen - req->dst_len);
+       } else if (copy_to_user(buffer, outbuf, req->dst_len) == 0) {
+               ret = req->dst_len;
+       } else {
+               ret = -EFAULT;
        }
 
-error5:
-       kzfree(kbuf);
-error4:
-       mpi_free(result);
-error3:
-       mpi_free(private);
-error2:
-       mpi_free(base);
-error1:
-       mpi_free(prime);
-out:
+out6:
+       kpp_request_free(req);
+out5:
+       kzfree(outbuf);
+out4:
+       crypto_free_kpp(tfm);
+out3:
+       kzfree(secret);
+out2:
+       dh_free_data(&dh_inputs);
+out1:
        kdf_dealloc(sdesc);
        return ret;
 }
index 0010955d7876c2302704020af8b2ffb6010f2c0a..bb6324d1ccec32f6dde05f520d8d2ed2e089c785 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/scatterlist.h>
 #include <linux/ctype.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
 #include <crypto/skcipher.h>
@@ -54,13 +55,7 @@ static int blksize;
 #define MAX_DATA_SIZE 4096
 #define MIN_DATA_SIZE  20
 
-struct sdesc {
-       struct shash_desc shash;
-       char ctx[];
-};
-
-static struct crypto_shash *hashalg;
-static struct crypto_shash *hmacalg;
+static struct crypto_shash *hash_tfm;
 
 enum {
        Opt_err = -1, Opt_new, Opt_load, Opt_update
@@ -141,23 +136,22 @@ static int valid_ecryptfs_desc(const char *ecryptfs_desc)
  */
 static int valid_master_desc(const char *new_desc, const char *orig_desc)
 {
-       if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) {
-               if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN)
-                       goto out;
-               if (orig_desc)
-                       if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN))
-                               goto out;
-       } else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) {
-               if (strlen(new_desc) == KEY_USER_PREFIX_LEN)
-                       goto out;
-               if (orig_desc)
-                       if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN))
-                               goto out;
-       } else
-               goto out;
+       int prefix_len;
+
+       if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN))
+               prefix_len = KEY_TRUSTED_PREFIX_LEN;
+       else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN))
+               prefix_len = KEY_USER_PREFIX_LEN;
+       else
+               return -EINVAL;
+
+       if (!new_desc[prefix_len])
+               return -EINVAL;
+
+       if (orig_desc && strncmp(new_desc, orig_desc, prefix_len))
+               return -EINVAL;
+
        return 0;
-out:
-       return -EINVAL;
 }
 
 /*
@@ -321,53 +315,38 @@ error:
        return ukey;
 }
 
-static struct sdesc *alloc_sdesc(struct crypto_shash *alg)
-{
-       struct sdesc *sdesc;
-       int size;
-
-       size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
-       sdesc = kmalloc(size, GFP_KERNEL);
-       if (!sdesc)
-               return ERR_PTR(-ENOMEM);
-       sdesc->shash.tfm = alg;
-       sdesc->shash.flags = 0x0;
-       return sdesc;
-}
-
-static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
+static int calc_hash(struct crypto_shash *tfm, u8 *digest,
                     const u8 *buf, unsigned int buflen)
 {
-       struct sdesc *sdesc;
-       int ret;
+       SHASH_DESC_ON_STACK(desc, tfm);
+       int err;
 
-       sdesc = alloc_sdesc(hmacalg);
-       if (IS_ERR(sdesc)) {
-               pr_info("encrypted_key: can't alloc %s\n", hmac_alg);
-               return PTR_ERR(sdesc);
-       }
+       desc->tfm = tfm;
+       desc->flags = 0;
 
-       ret = crypto_shash_setkey(hmacalg, key, keylen);
-       if (!ret)
-               ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
-       kfree(sdesc);
-       return ret;
+       err = crypto_shash_digest(desc, buf, buflen, digest);
+       shash_desc_zero(desc);
+       return err;
 }
 
-static int calc_hash(u8 *digest, const u8 *buf, unsigned int buflen)
+static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
+                    const u8 *buf, unsigned int buflen)
 {
-       struct sdesc *sdesc;
-       int ret;
+       struct crypto_shash *tfm;
+       int err;
 
-       sdesc = alloc_sdesc(hashalg);
-       if (IS_ERR(sdesc)) {
-               pr_info("encrypted_key: can't alloc %s\n", hash_alg);
-               return PTR_ERR(sdesc);
+       tfm = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm)) {
+               pr_err("encrypted_key: can't alloc %s transform: %ld\n",
+                      hmac_alg, PTR_ERR(tfm));
+               return PTR_ERR(tfm);
        }
 
-       ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
-       kfree(sdesc);
-       return ret;
+       err = crypto_shash_setkey(tfm, key, keylen);
+       if (!err)
+               err = calc_hash(tfm, digest, buf, buflen);
+       crypto_free_shash(tfm);
+       return err;
 }
 
 enum derived_key_type { ENC_KEY, AUTH_KEY };
@@ -385,10 +364,9 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
                derived_buf_len = HASH_SIZE;
 
        derived_buf = kzalloc(derived_buf_len, GFP_KERNEL);
-       if (!derived_buf) {
-               pr_err("encrypted_key: out of memory\n");
+       if (!derived_buf)
                return -ENOMEM;
-       }
+
        if (key_type)
                strcpy(derived_buf, "AUTH_KEY");
        else
@@ -396,8 +374,8 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
 
        memcpy(derived_buf + strlen(derived_buf) + 1, master_key,
               master_keylen);
-       ret = calc_hash(derived_key, derived_buf, derived_buf_len);
-       kfree(derived_buf);
+       ret = calc_hash(hash_tfm, derived_key, derived_buf, derived_buf_len);
+       kzfree(derived_buf);
        return ret;
 }
 
@@ -480,12 +458,9 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
        struct skcipher_request *req;
        unsigned int encrypted_datalen;
        u8 iv[AES_BLOCK_SIZE];
-       unsigned int padlen;
-       char pad[16];
        int ret;
 
        encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
-       padlen = encrypted_datalen - epayload->decrypted_datalen;
 
        req = init_skcipher_req(derived_key, derived_keylen);
        ret = PTR_ERR(req);
@@ -493,11 +468,10 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
                goto out;
        dump_decrypted_data(epayload);
 
-       memset(pad, 0, sizeof pad);
        sg_init_table(sg_in, 2);
        sg_set_buf(&sg_in[0], epayload->decrypted_data,
                   epayload->decrypted_datalen);
-       sg_set_buf(&sg_in[1], pad, padlen);
+       sg_set_page(&sg_in[1], ZERO_PAGE(0), AES_BLOCK_SIZE, 0);
 
        sg_init_table(sg_out, 1);
        sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
@@ -533,6 +507,7 @@ static int datablob_hmac_append(struct encrypted_key_payload *epayload,
        if (!ret)
                dump_hmac(NULL, digest, HASH_SIZE);
 out:
+       memzero_explicit(derived_key, sizeof(derived_key));
        return ret;
 }
 
@@ -561,8 +536,8 @@ static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
        ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len);
        if (ret < 0)
                goto out;
-       ret = memcmp(digest, epayload->format + epayload->datablob_len,
-                    sizeof digest);
+       ret = crypto_memneq(digest, epayload->format + epayload->datablob_len,
+                           sizeof(digest));
        if (ret) {
                ret = -EINVAL;
                dump_hmac("datablob",
@@ -571,6 +546,7 @@ static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
                dump_hmac("calc", digest, HASH_SIZE);
        }
 out:
+       memzero_explicit(derived_key, sizeof(derived_key));
        return ret;
 }
 
@@ -584,9 +560,14 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
        struct skcipher_request *req;
        unsigned int encrypted_datalen;
        u8 iv[AES_BLOCK_SIZE];
-       char pad[16];
+       u8 *pad;
        int ret;
 
+       /* Throwaway buffer to hold the unused zero padding at the end */
+       pad = kmalloc(AES_BLOCK_SIZE, GFP_KERNEL);
+       if (!pad)
+               return -ENOMEM;
+
        encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
        req = init_skcipher_req(derived_key, derived_keylen);
        ret = PTR_ERR(req);
@@ -594,13 +575,12 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
                goto out;
        dump_encrypted_data(epayload, encrypted_datalen);
 
-       memset(pad, 0, sizeof pad);
        sg_init_table(sg_in, 1);
        sg_init_table(sg_out, 2);
        sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
        sg_set_buf(&sg_out[0], epayload->decrypted_data,
                   epayload->decrypted_datalen);
-       sg_set_buf(&sg_out[1], pad, sizeof pad);
+       sg_set_buf(&sg_out[1], pad, AES_BLOCK_SIZE);
 
        memcpy(iv, epayload->iv, sizeof(iv));
        skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv);
@@ -612,6 +592,7 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
                goto out;
        dump_decrypted_data(epayload);
 out:
+       kfree(pad);
        return ret;
 }
 
@@ -722,6 +703,7 @@ static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
 out:
        up_read(&mkey->sem);
        key_put(mkey);
+       memzero_explicit(derived_key, sizeof(derived_key));
        return ret;
 }
 
@@ -828,13 +810,13 @@ static int encrypted_instantiate(struct key *key,
        ret = encrypted_init(epayload, key->description, format, master_desc,
                             decrypted_datalen, hex_encoded_iv);
        if (ret < 0) {
-               kfree(epayload);
+               kzfree(epayload);
                goto out;
        }
 
        rcu_assign_keypointer(key, epayload);
 out:
-       kfree(datablob);
+       kzfree(datablob);
        return ret;
 }
 
@@ -843,8 +825,7 @@ static void encrypted_rcu_free(struct rcu_head *rcu)
        struct encrypted_key_payload *epayload;
 
        epayload = container_of(rcu, struct encrypted_key_payload, rcu);
-       memset(epayload->decrypted_data, 0, epayload->decrypted_datalen);
-       kfree(epayload);
+       kzfree(epayload);
 }
 
 /*
@@ -902,7 +883,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
        rcu_assign_keypointer(key, new_epayload);
        call_rcu(&epayload->rcu, encrypted_rcu_free);
 out:
-       kfree(buf);
+       kzfree(buf);
        return ret;
 }
 
@@ -960,33 +941,26 @@ static long encrypted_read(const struct key *key, char __user *buffer,
 
        up_read(&mkey->sem);
        key_put(mkey);
+       memzero_explicit(derived_key, sizeof(derived_key));
 
        if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0)
                ret = -EFAULT;
-       kfree(ascii_buf);
+       kzfree(ascii_buf);
 
        return asciiblob_len;
 out:
        up_read(&mkey->sem);
        key_put(mkey);
+       memzero_explicit(derived_key, sizeof(derived_key));
        return ret;
 }
 
 /*
- * encrypted_destroy - before freeing the key, clear the decrypted data
- *
- * Before freeing the key, clear the memory containing the decrypted
- * key data.
+ * encrypted_destroy - clear and free the key's payload
  */
 static void encrypted_destroy(struct key *key)
 {
-       struct encrypted_key_payload *epayload = key->payload.data[0];
-
-       if (!epayload)
-               return;
-
-       memzero_explicit(epayload->decrypted_data, epayload->decrypted_datalen);
-       kfree(key->payload.data[0]);
+       kzfree(key->payload.data[0]);
 }
 
 struct key_type key_type_encrypted = {
@@ -999,47 +973,17 @@ struct key_type key_type_encrypted = {
 };
 EXPORT_SYMBOL_GPL(key_type_encrypted);
 
-static void encrypted_shash_release(void)
-{
-       if (hashalg)
-               crypto_free_shash(hashalg);
-       if (hmacalg)
-               crypto_free_shash(hmacalg);
-}
-
-static int __init encrypted_shash_alloc(void)
+static int __init init_encrypted(void)
 {
        int ret;
 
-       hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(hmacalg)) {
-               pr_info("encrypted_key: could not allocate crypto %s\n",
-                       hmac_alg);
-               return PTR_ERR(hmacalg);
-       }
-
-       hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(hashalg)) {
-               pr_info("encrypted_key: could not allocate crypto %s\n",
-                       hash_alg);
-               ret = PTR_ERR(hashalg);
-               goto hashalg_fail;
+       hash_tfm = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(hash_tfm)) {
+               pr_err("encrypted_key: can't allocate %s transform: %ld\n",
+                      hash_alg, PTR_ERR(hash_tfm));
+               return PTR_ERR(hash_tfm);
        }
 
-       return 0;
-
-hashalg_fail:
-       crypto_free_shash(hmacalg);
-       return ret;
-}
-
-static int __init init_encrypted(void)
-{
-       int ret;
-
-       ret = encrypted_shash_alloc();
-       if (ret < 0)
-               return ret;
        ret = aes_get_sizes();
        if (ret < 0)
                goto out;
@@ -1048,14 +992,14 @@ static int __init init_encrypted(void)
                goto out;
        return 0;
 out:
-       encrypted_shash_release();
+       crypto_free_shash(hash_tfm);
        return ret;
 
 }
 
 static void __exit cleanup_encrypted(void)
 {
-       encrypted_shash_release();
+       crypto_free_shash(hash_tfm);
        unregister_key_type(&key_type_encrypted);
 }
 
index 595becc6d0d259a017be1305842f6208562324bb..87cb260e4890f3ac464e8d3f3244077653510b74 100644 (file)
@@ -158,9 +158,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
 
                kfree(key->description);
 
-#ifdef KEY_DEBUGGING
-               key->magic = KEY_DEBUG_MAGIC_X;
-#endif
+               memzero_explicit(key, sizeof(*key));
                kmem_cache_free(key_jar, key);
        }
 }
index 455c04d80bbbf27a7b20433708f35fde1ad26ffb..83da68d98b40b452a1c8b37121a6ca270387d4f6 100644 (file)
@@ -660,14 +660,11 @@ not_found:
        goto error;
 
 found:
-       /* pretend it doesn't exist if it is awaiting deletion */
-       if (refcount_read(&key->usage) == 0)
-               goto not_found;
-
-       /* this races with key_put(), but that doesn't matter since key_put()
-        * doesn't actually change the key
+       /* A key is allowed to be looked up only if someone still owns a
+        * reference to it - otherwise it's awaiting the gc.
         */
-       __key_get(key);
+       if (!refcount_inc_not_zero(&key->usage))
+               goto not_found;
 
 error:
        spin_unlock(&key_serial_lock);
@@ -966,12 +963,11 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
        /* the key must be writable */
        ret = key_permission(key_ref, KEY_NEED_WRITE);
        if (ret < 0)
-               goto error;
+               return ret;
 
        /* attempt to update it if supported */
-       ret = -EOPNOTSUPP;
        if (!key->type->update)
-               goto error;
+               return -EOPNOTSUPP;
 
        memset(&prep, 0, sizeof(prep));
        prep.data = payload;
index 447a7d5cee0f56ebcb1847d9442f6407e259be98..ab0b337c84b4c02e4856719398edb94d1caf101a 100644 (file)
@@ -99,7 +99,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
        /* pull the payload in if one was supplied */
        payload = NULL;
 
-       if (_payload) {
+       if (plen) {
                ret = -ENOMEM;
                payload = kvmalloc(plen, GFP_KERNEL);
                if (!payload)
@@ -132,7 +132,10 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
 
        key_ref_put(keyring_ref);
  error3:
-       kvfree(payload);
+       if (payload) {
+               memzero_explicit(payload, plen);
+               kvfree(payload);
+       }
  error2:
        kfree(description);
  error:
@@ -324,7 +327,7 @@ long keyctl_update_key(key_serial_t id,
 
        /* pull the payload in if one was supplied */
        payload = NULL;
-       if (_payload) {
+       if (plen) {
                ret = -ENOMEM;
                payload = kmalloc(plen, GFP_KERNEL);
                if (!payload)
@@ -347,7 +350,7 @@ long keyctl_update_key(key_serial_t id,
 
        key_ref_put(key_ref);
 error2:
-       kfree(payload);
+       kzfree(payload);
 error:
        return ret;
 }
@@ -1093,7 +1096,10 @@ long keyctl_instantiate_key_common(key_serial_t id,
                keyctl_change_reqkey_auth(NULL);
 
 error2:
-       kvfree(payload);
+       if (payload) {
+               memzero_explicit(payload, plen);
+               kvfree(payload);
+       }
 error:
        return ret;
 }
index 4d1678e4586f6ae29610bfb818c33000413947bf..de81793f9920787101dec77eca28ddfbe91ebbc7 100644 (file)
@@ -706,7 +706,7 @@ descend_to_keyring:
         * Non-keyrings avoid the leftmost branch of the root entirely (root
         * slots 1-15).
         */
-       ptr = ACCESS_ONCE(keyring->keys.root);
+       ptr = READ_ONCE(keyring->keys.root);
        if (!ptr)
                goto not_this_keyring;
 
@@ -720,7 +720,7 @@ descend_to_keyring:
                if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
                        goto not_this_keyring;
 
-               ptr = ACCESS_ONCE(shortcut->next_node);
+               ptr = READ_ONCE(shortcut->next_node);
                node = assoc_array_ptr_to_node(ptr);
                goto begin_node;
        }
@@ -740,7 +740,7 @@ descend_to_node:
        if (assoc_array_ptr_is_shortcut(ptr)) {
                shortcut = assoc_array_ptr_to_shortcut(ptr);
                smp_read_barrier_depends();
-               ptr = ACCESS_ONCE(shortcut->next_node);
+               ptr = READ_ONCE(shortcut->next_node);
                BUG_ON(!assoc_array_ptr_is_node(ptr));
        }
        node = assoc_array_ptr_to_node(ptr);
@@ -752,7 +752,7 @@ begin_node:
 ascend_to_node:
        /* Go through the slots in a node */
        for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
-               ptr = ACCESS_ONCE(node->slots[slot]);
+               ptr = READ_ONCE(node->slots[slot]);
 
                if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
                        goto descend_to_node;
@@ -790,13 +790,13 @@ ascend_to_node:
        /* We've dealt with all the slots in the current node, so now we need
         * to ascend to the parent and continue processing there.
         */
-       ptr = ACCESS_ONCE(node->back_pointer);
+       ptr = READ_ONCE(node->back_pointer);
        slot = node->parent_slot;
 
        if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
                shortcut = assoc_array_ptr_to_shortcut(ptr);
                smp_read_barrier_depends();
-               ptr = ACCESS_ONCE(shortcut->back_pointer);
+               ptr = READ_ONCE(shortcut->back_pointer);
                slot = shortcut->parent_slot;
        }
        if (!ptr)
index 2217dfec7996159797abce9d2480fa249691cf28..86bced9fdbdf22eb60170584d87730e1179a2744 100644 (file)
@@ -809,15 +809,14 @@ long join_session_keyring(const char *name)
                ret = PTR_ERR(keyring);
                goto error2;
        } else if (keyring == new->session_keyring) {
-               key_put(keyring);
                ret = 0;
-               goto error2;
+               goto error3;
        }
 
        /* we've got a keyring - now to install it */
        ret = install_session_keyring_to_cred(new, keyring);
        if (ret < 0)
-               goto error2;
+               goto error3;
 
        commit_creds(new);
        mutex_unlock(&key_session_mutex);
@@ -827,6 +826,8 @@ long join_session_keyring(const char *name)
 okay:
        return ret;
 
+error3:
+       key_put(keyring);
 error2:
        mutex_unlock(&key_session_mutex);
 error:
index 2ae31c5a87de9e9084de7e5f9350678da81b49a4..435e86e1387944d723cc569f228239f94c800a81 100644 (file)
@@ -70,7 +70,7 @@ static int TSS_sha1(const unsigned char *data, unsigned int datalen,
        }
 
        ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
-       kfree(sdesc);
+       kzfree(sdesc);
        return ret;
 }
 
@@ -114,7 +114,7 @@ static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
        if (!ret)
                ret = crypto_shash_final(&sdesc->shash, digest);
 out:
-       kfree(sdesc);
+       kzfree(sdesc);
        return ret;
 }
 
@@ -165,7 +165,7 @@ static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
                                  paramdigest, TPM_NONCE_SIZE, h1,
                                  TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
 out:
-       kfree(sdesc);
+       kzfree(sdesc);
        return ret;
 }
 
@@ -246,7 +246,7 @@ static int TSS_checkhmac1(unsigned char *buffer,
        if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
                ret = -EINVAL;
 out:
-       kfree(sdesc);
+       kzfree(sdesc);
        return ret;
 }
 
@@ -347,7 +347,7 @@ static int TSS_checkhmac2(unsigned char *buffer,
        if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
                ret = -EINVAL;
 out:
-       kfree(sdesc);
+       kzfree(sdesc);
        return ret;
 }
 
@@ -564,7 +564,7 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
                *bloblen = storedsize;
        }
 out:
-       kfree(td);
+       kzfree(td);
        return ret;
 }
 
@@ -678,7 +678,7 @@ static int key_seal(struct trusted_key_payload *p,
        if (ret < 0)
                pr_info("trusted_key: srkseal failed (%d)\n", ret);
 
-       kfree(tb);
+       kzfree(tb);
        return ret;
 }
 
@@ -703,7 +703,7 @@ static int key_unseal(struct trusted_key_payload *p,
                /* pull migratable flag out of sealed key */
                p->migratable = p->key[--p->key_len];
 
-       kfree(tb);
+       kzfree(tb);
        return ret;
 }
 
@@ -1037,12 +1037,12 @@ static int trusted_instantiate(struct key *key,
        if (!ret && options->pcrlock)
                ret = pcrlock(options->pcrlock);
 out:
-       kfree(datablob);
-       kfree(options);
+       kzfree(datablob);
+       kzfree(options);
        if (!ret)
                rcu_assign_keypointer(key, payload);
        else
-               kfree(payload);
+               kzfree(payload);
        return ret;
 }
 
@@ -1051,8 +1051,7 @@ static void trusted_rcu_free(struct rcu_head *rcu)
        struct trusted_key_payload *p;
 
        p = container_of(rcu, struct trusted_key_payload, rcu);
-       memset(p->key, 0, p->key_len);
-       kfree(p);
+       kzfree(p);
 }
 
 /*
@@ -1094,13 +1093,13 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
        ret = datablob_parse(datablob, new_p, new_o);
        if (ret != Opt_update) {
                ret = -EINVAL;
-               kfree(new_p);
+               kzfree(new_p);
                goto out;
        }
 
        if (!new_o->keyhandle) {
                ret = -EINVAL;
-               kfree(new_p);
+               kzfree(new_p);
                goto out;
        }
 
@@ -1114,22 +1113,22 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
        ret = key_seal(new_p, new_o);
        if (ret < 0) {
                pr_info("trusted_key: key_seal failed (%d)\n", ret);
-               kfree(new_p);
+               kzfree(new_p);
                goto out;
        }
        if (new_o->pcrlock) {
                ret = pcrlock(new_o->pcrlock);
                if (ret < 0) {
                        pr_info("trusted_key: pcrlock failed (%d)\n", ret);
-                       kfree(new_p);
+                       kzfree(new_p);
                        goto out;
                }
        }
        rcu_assign_keypointer(key, new_p);
        call_rcu(&p->rcu, trusted_rcu_free);
 out:
-       kfree(datablob);
-       kfree(new_o);
+       kzfree(datablob);
+       kzfree(new_o);
        return ret;
 }
 
@@ -1158,24 +1157,19 @@ static long trusted_read(const struct key *key, char __user *buffer,
        for (i = 0; i < p->blob_len; i++)
                bufp = hex_byte_pack(bufp, p->blob[i]);
        if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
-               kfree(ascii_buf);
+               kzfree(ascii_buf);
                return -EFAULT;
        }
-       kfree(ascii_buf);
+       kzfree(ascii_buf);
        return 2 * p->blob_len;
 }
 
 /*
- * trusted_destroy - before freeing the key, clear the decrypted data
+ * trusted_destroy - clear and free the key's payload
  */
 static void trusted_destroy(struct key *key)
 {
-       struct trusted_key_payload *p = key->payload.data[0];
-
-       if (!p)
-               return;
-       memset(p->key, 0, p->key_len);
-       kfree(key->payload.data[0]);
+       kzfree(key->payload.data[0]);
 }
 
 struct key_type key_type_trusted = {
index 26605134f17a8a3cf7c7a5c0a0c14fd5a3052fcb..3d8c68eba5160286fa7af79c8da1ead6e6b05236 100644 (file)
@@ -86,10 +86,18 @@ EXPORT_SYMBOL_GPL(user_preparse);
  */
 void user_free_preparse(struct key_preparsed_payload *prep)
 {
-       kfree(prep->payload.data[0]);
+       kzfree(prep->payload.data[0]);
 }
 EXPORT_SYMBOL_GPL(user_free_preparse);
 
+static void user_free_payload_rcu(struct rcu_head *head)
+{
+       struct user_key_payload *payload;
+
+       payload = container_of(head, struct user_key_payload, rcu);
+       kzfree(payload);
+}
+
 /*
  * update a user defined key
  * - the key's semaphore is write-locked
@@ -112,7 +120,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
        prep->payload.data[0] = NULL;
 
        if (zap)
-               kfree_rcu(zap, rcu);
+               call_rcu(&zap->rcu, user_free_payload_rcu);
        return ret;
 }
 EXPORT_SYMBOL_GPL(user_update);
@@ -130,7 +138,7 @@ void user_revoke(struct key *key)
 
        if (upayload) {
                rcu_assign_keypointer(key, NULL);
-               kfree_rcu(upayload, rcu);
+               call_rcu(&upayload->rcu, user_free_payload_rcu);
        }
 }
 
@@ -143,7 +151,7 @@ void user_destroy(struct key *key)
 {
        struct user_key_payload *upayload = key->payload.data[0];
 
-       kfree(upayload);
+       kzfree(upayload);
 }
 
 EXPORT_SYMBOL_GPL(user_destroy);
index 2f836ca09860e83995630de7be6546731523489a..cd67d1c12cf1ca9a32daa4de797dc0a5ec7bbb86 100644 (file)
@@ -1618,6 +1618,7 @@ static int snd_timer_user_tselect(struct file *file,
        if (err < 0)
                goto __err;
 
+       tu->qhead = tu->qtail = tu->qused = 0;
        kfree(tu->queue);
        tu->queue = NULL;
        kfree(tu->tqueue);
@@ -1959,6 +1960,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
        tu = file->private_data;
        unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
+       mutex_lock(&tu->ioctl_lock);
        spin_lock_irq(&tu->qlock);
        while ((long)count - result >= unit) {
                while (!tu->qused) {
@@ -1974,7 +1976,9 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                        add_wait_queue(&tu->qchange_sleep, &wait);
 
                        spin_unlock_irq(&tu->qlock);
+                       mutex_unlock(&tu->ioctl_lock);
                        schedule();
+                       mutex_lock(&tu->ioctl_lock);
                        spin_lock_irq(&tu->qlock);
 
                        remove_wait_queue(&tu->qchange_sleep, &wait);
@@ -1994,7 +1998,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                tu->qused--;
                spin_unlock_irq(&tu->qlock);
 
-               mutex_lock(&tu->ioctl_lock);
                if (tu->tread) {
                        if (copy_to_user(buffer, &tu->tqueue[qhead],
                                         sizeof(struct snd_timer_tread)))
@@ -2004,7 +2007,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                                         sizeof(struct snd_timer_read)))
                                err = -EFAULT;
                }
-               mutex_unlock(&tu->ioctl_lock);
 
                spin_lock_irq(&tu->qlock);
                if (err < 0)
@@ -2014,6 +2016,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
        }
  _error:
        spin_unlock_irq(&tu->qlock);
+       mutex_unlock(&tu->ioctl_lock);
        return result > 0 ? result : err;
 }
 
index a57988d617e934847bff6b56f08b64b813071875..cbeebc0a9711e8283090762450525a84022db600 100644 (file)
@@ -5854,7 +5854,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
@@ -5862,13 +5866,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+       SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
-       SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
        SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
index 7ae46c2647d453bcad1176b6877fcbbae110416b..b7ef8c59b49a2bdb2895f2203e0c207170227c6f 100644 (file)
@@ -301,6 +301,14 @@ static int atmel_classd_codec_probe(struct snd_soc_codec *codec)
        return 0;
 }
 
+static int atmel_classd_codec_resume(struct snd_soc_codec *codec)
+{
+       struct snd_soc_card *card = snd_soc_codec_get_drvdata(codec);
+       struct atmel_classd *dd = snd_soc_card_get_drvdata(card);
+
+       return regcache_sync(dd->regmap);
+}
+
 static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
 {
        return dev_get_regmap(dev, NULL);
@@ -308,6 +316,7 @@ static struct regmap *atmel_classd_codec_get_remap(struct device *dev)
 
 static struct snd_soc_codec_driver soc_codec_dev_classd = {
        .probe          = atmel_classd_codec_probe,
+       .resume         = atmel_classd_codec_resume,
        .get_regmap     = atmel_classd_codec_get_remap,
        .component_driver = {
                .controls               = atmel_classd_snd_controls,
index 6dd7578f0bb8da118adfdb6da76e3247289bfc32..024d83fa6a7f78b81da8b55c8dbdec977871ef46 100644 (file)
@@ -772,7 +772,7 @@ static int da7213_dai_event(struct snd_soc_dapm_widget *w,
                                ++i;
                                msleep(50);
                        }
-               } while ((i < DA7213_SRM_CHECK_RETRIES) & (!srm_lock));
+               } while ((i < DA7213_SRM_CHECK_RETRIES) && (!srm_lock));
 
                if (!srm_lock)
                        dev_warn(codec->dev, "SRM failed to lock\n");
index 9c365a7f758dbb9f1227c726f148b19672402f7c..7899a2cdeb42f46c5d76cd051319a4b547b1ed04 100644 (file)
@@ -1108,6 +1108,13 @@ static const struct dmi_system_id force_combo_jack_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Kabylake Client platform")
                }
        },
+       {
+               .ident = "Thinkpad Helix 2nd",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix 2nd")
+               }
+       },
 
        { }
 };
index 2c9dedab5184ff74909caf163f7d0697d8b67949..bc136d2bd7cdeb68b5ca7a24db94a193ede323b3 100644 (file)
@@ -202,7 +202,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
        if (ret < 0)
                return ret;
 
-       ret = asoc_simple_card_init_mic(rtd->card, &priv->hp_jack, PREFIX);
+       ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX);
        if (ret < 0)
                return ret;
 
index 58c525096a7cbcd6ea4fd833d06e03a0127201fe..498b15345b1a657d608a3fcff773dae206e819b0 100644 (file)
@@ -413,8 +413,11 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
        u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK;
        u64 *ipc_header = (u64 *)(&header);
        struct skl_sst *skl = container_of(ipc, struct skl_sst, ipc);
+       unsigned long flags;
 
+       spin_lock_irqsave(&ipc->dsp->spinlock, flags);
        msg = skl_ipc_reply_get_msg(ipc, *ipc_header);
+       spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
        if (msg == NULL) {
                dev_dbg(ipc->dev, "ipc: rx list is empty\n");
                return;
@@ -456,8 +459,10 @@ static void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
                }
        }
 
+       spin_lock_irqsave(&ipc->dsp->spinlock, flags);
        list_del(&msg->list);
        sst_ipc_tx_msg_reply_complete(ipc, msg);
+       spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
 }
 
 irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
index 3a99712e44a80df81f7ad27d52e69501f617e948..64a0f8ed33e135eb5c0af683624afa3ebdb616b9 100644 (file)
@@ -2502,7 +2502,7 @@ static int skl_tplg_get_manifest_tkn(struct device *dev,
 
                        if (ret < 0)
                                return ret;
-                       tkn_count += ret;
+                       tkn_count = ret;
 
                        tuple_size += tkn_count *
                                sizeof(struct snd_soc_tplg_vendor_string_elem);
index 6df3b317a4768e008b539f0a619308e9545a3992..4c9b5781282bb149e8d749b32f2a1bd5c2987338 100644 (file)
@@ -410,7 +410,7 @@ static int skl_free(struct hdac_ext_bus *ebus)
        struct skl *skl  = ebus_to_skl(ebus);
        struct hdac_bus *bus = ebus_to_hbus(ebus);
 
-       skl->init_failed = 1; /* to be sure */
+       skl->init_done = 0; /* to be sure */
 
        snd_hdac_ext_stop_streams(ebus);
 
@@ -428,8 +428,10 @@ static int skl_free(struct hdac_ext_bus *ebus)
 
        snd_hdac_ext_bus_exit(ebus);
 
+       cancel_work_sync(&skl->probe_work);
        if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
                snd_hdac_i915_exit(&ebus->bus);
+
        return 0;
 }
 
@@ -566,6 +568,84 @@ static const struct hdac_bus_ops bus_core_ops = {
        .get_response = snd_hdac_bus_get_response,
 };
 
+static int skl_i915_init(struct hdac_bus *bus)
+{
+       int err;
+
+       /*
+        * The HDMI codec is in GPU so we need to ensure that it is powered
+        * up and ready for probe
+        */
+       err = snd_hdac_i915_init(bus);
+       if (err < 0)
+               return err;
+
+       err = snd_hdac_display_power(bus, true);
+       if (err < 0)
+               dev_err(bus->dev, "Cannot turn on display power on i915\n");
+
+       return err;
+}
+
+static void skl_probe_work(struct work_struct *work)
+{
+       struct skl *skl = container_of(work, struct skl, probe_work);
+       struct hdac_ext_bus *ebus = &skl->ebus;
+       struct hdac_bus *bus = ebus_to_hbus(ebus);
+       struct hdac_ext_link *hlink = NULL;
+       int err;
+
+       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+               err = skl_i915_init(bus);
+               if (err < 0)
+                       return;
+       }
+
+       err = skl_init_chip(bus, true);
+       if (err < 0) {
+               dev_err(bus->dev, "Init chip failed with err: %d\n", err);
+               goto out_err;
+       }
+
+       /* codec detection */
+       if (!bus->codec_mask)
+               dev_info(bus->dev, "no hda codecs found!\n");
+
+       /* create codec instances */
+       err = skl_codec_create(ebus);
+       if (err < 0)
+               goto out_err;
+
+       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+               err = snd_hdac_display_power(bus, false);
+               if (err < 0) {
+                       dev_err(bus->dev, "Cannot turn off display power on i915\n");
+                       return;
+               }
+       }
+
+       /* register platform dai and controls */
+       err = skl_platform_register(bus->dev);
+       if (err < 0)
+               return;
+       /*
+        * we are done probing so decrement link counts
+        */
+       list_for_each_entry(hlink, &ebus->hlink_list, list)
+               snd_hdac_ext_bus_link_put(ebus, hlink);
+
+       /* configure PM */
+       pm_runtime_put_noidle(bus->dev);
+       pm_runtime_allow(bus->dev);
+       skl->init_done = 1;
+
+       return;
+
+out_err:
+       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+               err = snd_hdac_display_power(bus, false);
+}
+
 /*
  * constructor
  */
@@ -593,6 +673,7 @@ static int skl_create(struct pci_dev *pci,
        snd_hdac_ext_bus_init(ebus, &pci->dev, &bus_core_ops, io_ops);
        ebus->bus.use_posbuf = 1;
        skl->pci = pci;
+       INIT_WORK(&skl->probe_work, skl_probe_work);
 
        ebus->bus.bdl_pos_adj = 0;
 
@@ -601,27 +682,6 @@ static int skl_create(struct pci_dev *pci,
        return 0;
 }
 
-static int skl_i915_init(struct hdac_bus *bus)
-{
-       int err;
-
-       /*
-        * The HDMI codec is in GPU so we need to ensure that it is powered
-        * up and ready for probe
-        */
-       err = snd_hdac_i915_init(bus);
-       if (err < 0)
-               return err;
-
-       err = snd_hdac_display_power(bus, true);
-       if (err < 0) {
-               dev_err(bus->dev, "Cannot turn on display power on i915\n");
-               return err;
-       }
-
-       return err;
-}
-
 static int skl_first_init(struct hdac_ext_bus *ebus)
 {
        struct skl *skl = ebus_to_skl(ebus);
@@ -684,20 +744,7 @@ static int skl_first_init(struct hdac_ext_bus *ebus)
        /* initialize chip */
        skl_init_pci(skl);
 
-       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
-               err = skl_i915_init(bus);
-               if (err < 0)
-                       return err;
-       }
-
-       skl_init_chip(bus, true);
-
-       /* codec detection */
-       if (!bus->codec_mask) {
-               dev_info(bus->dev, "no hda codecs found!\n");
-       }
-
-       return 0;
+       return skl_init_chip(bus, true);
 }
 
 static int skl_probe(struct pci_dev *pci,
@@ -706,7 +753,6 @@ static int skl_probe(struct pci_dev *pci,
        struct skl *skl;
        struct hdac_ext_bus *ebus = NULL;
        struct hdac_bus *bus = NULL;
-       struct hdac_ext_link *hlink = NULL;
        int err;
 
        /* we use ext core ops, so provide NULL for ops here */
@@ -729,7 +775,7 @@ static int skl_probe(struct pci_dev *pci,
 
        if (skl->nhlt == NULL) {
                err = -ENODEV;
-               goto out_display_power_off;
+               goto out_free;
        }
 
        err = skl_nhlt_create_sysfs(skl);
@@ -760,56 +806,24 @@ static int skl_probe(struct pci_dev *pci,
        if (bus->mlcap)
                snd_hdac_ext_bus_get_ml_capabilities(ebus);
 
+       snd_hdac_bus_stop_chip(bus);
+
        /* create device for soc dmic */
        err = skl_dmic_device_register(skl);
        if (err < 0)
                goto out_dsp_free;
 
-       /* register platform dai and controls */
-       err = skl_platform_register(bus->dev);
-       if (err < 0)
-               goto out_dmic_free;
-
-       /* create codec instances */
-       err = skl_codec_create(ebus);
-       if (err < 0)
-               goto out_unregister;
-
-       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
-               err = snd_hdac_display_power(bus, false);
-               if (err < 0) {
-                       dev_err(bus->dev, "Cannot turn off display power on i915\n");
-                       return err;
-               }
-       }
-
-       /*
-        * we are done probling so decrement link counts
-        */
-       list_for_each_entry(hlink, &ebus->hlink_list, list)
-               snd_hdac_ext_bus_link_put(ebus, hlink);
-
-       /* configure PM */
-       pm_runtime_put_noidle(bus->dev);
-       pm_runtime_allow(bus->dev);
+       schedule_work(&skl->probe_work);
 
        return 0;
 
-out_unregister:
-       skl_platform_unregister(bus->dev);
-out_dmic_free:
-       skl_dmic_device_unregister(skl);
 out_dsp_free:
        skl_free_dsp(skl);
 out_mach_free:
        skl_machine_device_unregister(skl);
 out_nhlt_free:
        skl_nhlt_free(skl->nhlt);
-out_display_power_off:
-       if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
-               snd_hdac_display_power(bus, false);
 out_free:
-       skl->init_failed = 1;
        skl_free(ebus);
 
        return err;
@@ -828,7 +842,7 @@ static void skl_shutdown(struct pci_dev *pci)
 
        skl = ebus_to_skl(ebus);
 
-       if (skl->init_failed)
+       if (!skl->init_done)
                return;
 
        snd_hdac_ext_stop_streams(ebus);
index a454f6035f3e64b3be01ea4c53153c141e3a9561..2a630fcb7f088c1d548f06de31933ca662eafca2 100644 (file)
@@ -46,7 +46,7 @@ struct skl {
        struct hdac_ext_bus ebus;
        struct pci_dev *pci;
 
-       unsigned int init_failed:1; /* delayed init failed */
+       unsigned int init_done:1; /* delayed init status */
        struct platform_device *dmic_dev;
        struct platform_device *i2s_dev;
        struct snd_soc_platform *platform;
@@ -64,6 +64,8 @@ struct skl {
        const struct firmware *tplg;
 
        int supend_active;
+
+       struct work_struct probe_work;
 };
 
 #define skl_to_ebus(s) (&(s)->ebus)
index 66203d107a11e5ff17e150adb145b15a0d27934e..d3b0dc145a560c8a35ddc4320810039f481db5aa 100644 (file)
@@ -507,7 +507,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
                                rbga = rbgx;
                                adg->rbga_rate_for_441khz = rate / div;
                                ckr |= brg_table[i] << 20;
-                               if (req_441kHz_rate)
+                               if (req_441kHz_rate &&
+                                   !(adg_mode_flags(adg) & AUDIO_OUT_48))
                                        parent_clk_name = __clk_get_name(clk);
                        }
                }
@@ -522,7 +523,8 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
                                rbgb = rbgx;
                                adg->rbgb_rate_for_48khz = rate / div;
                                ckr |= brg_table[i] << 16;
-                               if (req_48kHz_rate)
+                               if (req_48kHz_rate &&
+                                   (adg_mode_flags(adg) & AUDIO_OUT_48))
                                        parent_clk_name = __clk_get_name(clk);
                        }
                }
index 7d92a24b7cfa558afbb8331401c974c59d5f1ae5..d879c010cf03c4607ebdab3c854a582d816ddf62 100644 (file)
@@ -89,6 +89,7 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
        dev_dbg(dev, "ctu/mix path = 0x%08x", data);
 
        rsnd_mod_write(mod, CMD_ROUTE_SLCT, data);
+       rsnd_mod_write(mod, CMD_BUSIF_MODE, rsnd_get_busif_shift(io, mod) | 1);
        rsnd_mod_write(mod, CMD_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
 
        rsnd_adg_set_cmd_timsel_gen2(mod, io);
index 1744015408c38f2ad530fbcbae9a22027f5f0828..8c1f4e2e0c4fb8c3ac09a641b4b8a928defb871c 100644 (file)
@@ -343,6 +343,57 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
                return 0x76543210;
 }
 
+u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
+{
+       enum rsnd_mod_type playback_mods[] = {
+               RSND_MOD_SRC,
+               RSND_MOD_CMD,
+               RSND_MOD_SSIU,
+       };
+       enum rsnd_mod_type capture_mods[] = {
+               RSND_MOD_CMD,
+               RSND_MOD_SRC,
+               RSND_MOD_SSIU,
+       };
+       struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+       struct rsnd_mod *tmod = NULL;
+       enum rsnd_mod_type *mods =
+               rsnd_io_is_play(io) ?
+               playback_mods : capture_mods;
+       int i;
+
+       /*
+        * This is needed for 24bit data
+        * We need to shift 8bit
+        *
+        * Linux 24bit data is located as 0x00******
+        * HW    24bit data is located as 0x******00
+        *
+        */
+       switch (runtime->sample_bits) {
+       case 16:
+               return 0;
+       case 32:
+               break;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
+               tmod = rsnd_io_to_mod(io, mods[i]);
+               if (tmod)
+                       break;
+       }
+
+       if (tmod != mod)
+               return 0;
+
+       if (rsnd_io_is_play(io))
+               return  (0 << 20) | /* shift to Left */
+                       (8 << 16);  /* 8bit */
+       else
+               return  (1 << 20) | /* shift to Right */
+                       (8 << 16);  /* 8bit */
+}
+
 /*
  *     rsnd_dai functions
  */
index 63b6d3c28021024b1f06278c5c4f217a394faf8c..4b0980728e13ec75f18ac07135ab4971290b5310 100644 (file)
@@ -236,6 +236,7 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
                RSND_GEN_M_REG(SRC_ROUTE_MODE0, 0xc,    0x20),
                RSND_GEN_M_REG(SRC_CTRL,        0x10,   0x20),
                RSND_GEN_M_REG(SRC_INT_ENABLE0, 0x18,   0x20),
+               RSND_GEN_M_REG(CMD_BUSIF_MODE,  0x184,  0x20),
                RSND_GEN_M_REG(CMD_BUSIF_DALIGN,0x188,  0x20),
                RSND_GEN_M_REG(CMD_ROUTE_SLCT,  0x18c,  0x20),
                RSND_GEN_M_REG(CMD_CTRL,        0x190,  0x20),
index dbf4163427e808d62dbc37aa62f64b482e65e7c4..323af41ecfcb8ffea222f25fbe6c524968f29fd5 100644 (file)
@@ -73,6 +73,7 @@ enum rsnd_reg {
        RSND_REG_SCU_SYS_INT_EN0,
        RSND_REG_SCU_SYS_INT_EN1,
        RSND_REG_CMD_CTRL,
+       RSND_REG_CMD_BUSIF_MODE,
        RSND_REG_CMD_BUSIF_DALIGN,
        RSND_REG_CMD_ROUTE_SLCT,
        RSND_REG_CMDOUT_TIMSEL,
@@ -204,6 +205,7 @@ void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
                    u32 mask, u32 data);
 u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
 u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
+u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
 
 /*
  *     R-Car DMA
index 20b5b2ec625ea7b1e1812ea83d07d35b48b948ea..76a477a3ccb5d88e18fd8398d9ad2b2616a99f48 100644 (file)
@@ -190,11 +190,13 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
        struct device *dev = rsnd_priv_to_dev(priv);
        struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+       int is_play = rsnd_io_is_play(io);
        int use_src = 0;
        u32 fin, fout;
        u32 ifscr, fsrate, adinr;
        u32 cr, route;
        u32 bsdsr, bsisr;
+       u32 i_busif, o_busif, tmp;
        uint ratio;
 
        if (!runtime)
@@ -270,6 +272,11 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
                break;
        }
 
+       /* BUSIF_MODE */
+       tmp = rsnd_get_busif_shift(io, mod);
+       i_busif = ( is_play ? tmp : 0) | 1;
+       o_busif = (!is_play ? tmp : 0) | 1;
+
        rsnd_mod_write(mod, SRC_ROUTE_MODE0, route);
 
        rsnd_mod_write(mod, SRC_SRCIR, 1);      /* initialize */
@@ -281,8 +288,9 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
        rsnd_mod_write(mod, SRC_BSISR, bsisr);
        rsnd_mod_write(mod, SRC_SRCIR, 0);      /* cancel initialize */
 
-       rsnd_mod_write(mod, SRC_I_BUSIF_MODE, 1);
-       rsnd_mod_write(mod, SRC_O_BUSIF_MODE, 1);
+       rsnd_mod_write(mod, SRC_I_BUSIF_MODE, i_busif);
+       rsnd_mod_write(mod, SRC_O_BUSIF_MODE, o_busif);
+
        rsnd_mod_write(mod, SRC_BUSIF_DALIGN, rsnd_get_dalign(mod, io));
 
        rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout);
index 135c5669f7963bd228c9a1187bc6f5dfd13bc04e..91e5c07911b4a5b14364becf64c568d7a61cc1c4 100644 (file)
@@ -302,7 +302,7 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
         * always use 32bit system word.
         * see also rsnd_ssi_master_clk_enable()
         */
-       cr_own = FORCE | SWL_32 | PDTA;
+       cr_own = FORCE | SWL_32;
 
        if (rdai->bit_clk_inv)
                cr_own |= SCKP;
@@ -550,6 +550,13 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
                struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
                u32 *buf = (u32 *)(runtime->dma_area +
                                   rsnd_dai_pointer_offset(io, 0));
+               int shift = 0;
+
+               switch (runtime->sample_bits) {
+               case 32:
+                       shift = 8;
+                       break;
+               }
 
                /*
                 * 8/16/32 data can be assesse to TDR/RDR register
@@ -557,9 +564,9 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
                 * see rsnd_ssi_init()
                 */
                if (rsnd_io_is_play(io))
-                       rsnd_mod_write(mod, SSITDR, *buf);
+                       rsnd_mod_write(mod, SSITDR, (*buf) << shift);
                else
-                       *buf = rsnd_mod_read(mod, SSIRDR);
+                       *buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
 
                elapsed = rsnd_dai_pointer_update(io, sizeof(*buf));
        }
@@ -709,6 +716,11 @@ static int rsnd_ssi_dma_remove(struct rsnd_mod *mod,
                               struct rsnd_priv *priv)
 {
        struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+       struct rsnd_mod *ssi_parent_mod = rsnd_io_to_mod_ssip(io);
+
+       /* Do nothing for SSI parent mod */
+       if (ssi_parent_mod == mod)
+               return 0;
 
        /* PIO will request IRQ again */
        free_irq(ssi->irq, mod);
index 14fafdaf1395f9737191df18599ee58fc4f858fd..512d238b79e2895f13a4b1b7be3e145859a65280 100644 (file)
@@ -144,7 +144,8 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
                               (rsnd_io_is_play(io) ?
                                rsnd_runtime_channel_after_ctu(io) :
                                rsnd_runtime_channel_original(io)));
-               rsnd_mod_write(mod, SSI_BUSIF_MODE,  1);
+               rsnd_mod_write(mod, SSI_BUSIF_MODE,
+                              rsnd_get_busif_shift(io, mod) | 1);
                rsnd_mod_write(mod, SSI_BUSIF_DALIGN,
                               rsnd_get_dalign(mod, io));
        }
index aae099c0e50280d67f153d6769ac4237d531e169..754e3ef8d7ae1b8b188c3e52986f2c306fb7b763 100644 (file)
@@ -2286,6 +2286,9 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
        list_for_each_entry(rtd, &card->rtd_list, list)
                flush_delayed_work(&rtd->delayed_work);
 
+       /* free the ALSA card at first; this syncs with pending operations */
+       snd_card_free(card->snd_card);
+
        /* remove and free each DAI */
        soc_remove_dai_links(card);
        soc_remove_pcm_runtimes(card);
@@ -2300,9 +2303,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
        if (card->remove)
                card->remove(card);
 
-       snd_card_free(card->snd_card);
        return 0;
-
 }
 
 /* removes a socdev */
index e6c9902c6d82b0e2535059c1a915356e84856589..165c2b1d43177b7df8870d85d7911a1a615ce7e7 100644 (file)
@@ -240,9 +240,13 @@ Add a probe on schedule() function 12th line with recording cpu local variable:
  or
  ./perf probe --add='schedule:12 cpu'
 
- this will add one or more probes which has the name start with "schedule".
+Add one or more probes which has the name start with "schedule".
 
- Add probes on lines in schedule() function which calls update_rq_clock().
+ ./perf probe schedule*
+ or
+ ./perf probe --add='schedule*'
+
+Add probes on lines in schedule() function which calls update_rq_clock().
 
  ./perf probe 'schedule;update_rq_clock*'
  or
index dfbb506d2c349744399637c78148fe6446b124bc..142606c0ec9c1dcbb9329f23cb9b9bc65085c419 100644 (file)
@@ -39,7 +39,7 @@ EVENT HANDLERS
 When perf script is invoked using a trace script, a user-defined
 'handler function' is called for each event in the trace.  If there's
 no handler function defined for a given event type, the event is
-ignored (or passed to a 'trace_handled' function, see below) and the
+ignored (or passed to a 'trace_unhandled' function, see below) and the
 next event is processed.
 
 Most of the event's field values are passed as arguments to the
index 54acba22155865a8fafc7629b9e3987c061fed47..51ec2d20068ad5df7b0a00a0983b5ec08d49979a 100644 (file)
@@ -149,10 +149,8 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
                print "id=%d, args=%s\n" % \
                (id, args),
 
-def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
-               common_pid, common_comm):
-               print_header(event_name, common_cpu, common_secs, common_nsecs,
-               common_pid, common_comm)
+def trace_unhandled(event_name, context, event_fields_dict):
+               print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
 
 def print_header(event_name, cpu, secs, nsecs, pid, comm):
        print "%-20s %5u %05u.%09u %8u %-20s " % \
@@ -321,7 +319,7 @@ So those are the essential steps in writing and running a script.  The
 process can be generalized to any tracepoint or set of tracepoints
 you're interested in - basically find the tracepoint(s) you're
 interested in by looking at the list of available events shown by
-'perf list' and/or look in /sys/kernel/debug/tracing events for
+'perf list' and/or look in /sys/kernel/debug/tracing/events/ for
 detailed event and field info, record the corresponding trace data
 using 'perf record', passing it the list of interesting events,
 generate a skeleton script using 'perf script -g python' and modify the
@@ -334,7 +332,7 @@ right place, you can have your script listed alongside the other
 scripts listed by the 'perf script -l' command e.g.:
 
 ----
-root@tropicana:~# perf script -l
+# perf script -l
 List of available trace scripts:
   wakeup-latency                       system-wide min/max/avg wakeup latency
   rw-by-file <comm>                    r/w activity for a program, by file
@@ -383,8 +381,6 @@ source tree:
 
 ----
 # ls -al kernel-source/tools/perf/scripts/python
-
-root@tropicana:/home/trz/src/tip# ls -al tools/perf/scripts/python
 total 32
 drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 .
 drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 ..
@@ -399,7 +395,7 @@ otherwise your script won't show up at run-time), 'perf script -l'
 should show a new entry for your script:
 
 ----
-root@tropicana:~# perf script -l
+# perf script -l
 List of available trace scripts:
   wakeup-latency                       system-wide min/max/avg wakeup latency
   rw-by-file <comm>                    r/w activity for a program, by file
@@ -437,7 +433,7 @@ EVENT HANDLERS
 When perf script is invoked using a trace script, a user-defined
 'handler function' is called for each event in the trace.  If there's
 no handler function defined for a given event type, the event is
-ignored (or passed to a 'trace_handled' function, see below) and the
+ignored (or passed to a 'trace_unhandled' function, see below) and the
 next event is processed.
 
 Most of the event's field values are passed as arguments to the
@@ -532,7 +528,7 @@ can implement a set of optional functions:
 gives scripts a chance to do setup tasks:
 
 ----
-def trace_begin:
+def trace_begin():
     pass
 ----
 
@@ -541,7 +537,7 @@ def trace_begin:
  as display results:
 
 ----
-def trace_end:
+def trace_end():
     pass
 ----
 
@@ -550,8 +546,7 @@ def trace_end:
  of common arguments are passed into it:
 
 ----
-def trace_unhandled(event_name, context, common_cpu, common_secs,
-        common_nsecs, common_pid, common_comm):
+def trace_unhandled(event_name, context, event_fields_dict):
     pass
 ----
 
index 837067f48a4c54a88b883e8de6dc95df0dd5b3d0..6b40e9f017404f87877668645f7afa8f28649788 100644 (file)
@@ -26,6 +26,7 @@ const char *const arm64_triplets[] = {
 
 const char *const powerpc_triplets[] = {
        "powerpc-unknown-linux-gnu-",
+       "powerpc-linux-gnu-",
        "powerpc64-unknown-linux-gnu-",
        "powerpc64-linux-gnu-",
        "powerpc64le-linux-gnu-",
index a935b502373253217d6f43680e6b88a6ab143bfe..ad9324d1daf9f29a990a0d8f903563873ac91ef9 100644 (file)
@@ -1578,6 +1578,7 @@ static void print_header(int argc, const char **argv)
 static void print_footer(void)
 {
        FILE *output = stat_config.output;
+       int n;
 
        if (!null_run)
                fprintf(output, "\n");
@@ -1590,7 +1591,9 @@ static void print_footer(void)
        }
        fprintf(output, "\n\n");
 
-       if (print_free_counters_hint)
+       if (print_free_counters_hint &&
+           sysctl__read_int("kernel/nmi_watchdog", &n) >= 0 &&
+           n > 0)
                fprintf(output,
 "Some events weren't counted. Try disabling the NMI watchdog:\n"
 "      echo 0 > /proc/sys/kernel/nmi_watchdog\n"
index d014350adc526722da3a12f29421cd6d3c00f4af..4b2a5d2981970baf86e4e458ef4ced256fc98c0e 100644 (file)
@@ -681,6 +681,10 @@ static struct syscall_fmt {
        { .name     = "mlockall",   .errmsg = true,
          .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
        { .name     = "mmap",       .hexret = true,
+/* The standard mmap maps to old_mmap on s390x */
+#if defined(__s390x__)
+       .alias = "old_mmap",
+#endif
          .arg_scnprintf = { [0] = SCA_HEX,       /* addr */
                             [2] = SCA_MMAP_PROT, /* prot */
                             [3] = SCA_MMAP_FLAGS, /* flags */ }, },
index e7664fe3bd33739fd92be2579c30102e481e8f03..8ba2c4618fe90231d1157e8218bf10a2cb82f6a0 100644 (file)
@@ -288,3 +288,17 @@ int test__bp_signal(int subtest __maybe_unused)
        return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ?
                TEST_OK : TEST_FAIL;
 }
+
+bool test__bp_signal_is_supported(void)
+{
+/*
+ * The powerpc so far does not have support to even create
+ * instruction breakpoint using the perf event interface.
+ * Once it's there we can release this.
+ */
+#ifdef __powerpc__
+       return false;
+#else
+       return true;
+#endif
+}
index 9e08d297f1a905f57554bf6fd7b9555e980ad10a..3ccfd58a8c3cf3e8b16cc513a67324b8f11eae7b 100644 (file)
@@ -97,10 +97,12 @@ static struct test generic_tests[] = {
        {
                .desc = "Breakpoint overflow signal handler",
                .func = test__bp_signal,
+               .is_supported = test__bp_signal_is_supported,
        },
        {
                .desc = "Breakpoint overflow sampling",
                .func = test__bp_signal_overflow,
+               .is_supported = test__bp_signal_is_supported,
        },
        {
                .desc = "Number of exit events of a simple workload",
@@ -401,6 +403,11 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
                if (!perf_test__matches(t, curr, argc, argv))
                        continue;
 
+               if (t->is_supported && !t->is_supported()) {
+                       pr_debug("%2d: %-*s: Disabled\n", i, width, t->desc);
+                       continue;
+               }
+
                pr_info("%2d: %-*s:", i, width, t->desc);
 
                if (intlist__find(skiplist, i)) {
index 1f14e7612cbb1615e993c73758a3c9ac7024a0e1..94b7c7b02bdefbb33f2987677c11b3577ab062ec 100644 (file)
@@ -229,6 +229,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
        unsigned char buf2[BUFSZ];
        size_t ret_len;
        u64 objdump_addr;
+       const char *objdump_name;
+       char decomp_name[KMOD_DECOMP_LEN];
        int ret;
 
        pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
@@ -289,9 +291,25 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
                state->done[state->done_cnt++] = al.map->start;
        }
 
+       objdump_name = al.map->dso->long_name;
+       if (dso__needs_decompress(al.map->dso)) {
+               if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
+                                                decomp_name,
+                                                sizeof(decomp_name)) < 0) {
+                       pr_debug("decompression failed\n");
+                       return -1;
+               }
+
+               objdump_name = decomp_name;
+       }
+
        /* Read the object code using objdump */
        objdump_addr = map__rip_2objdump(al.map, al.addr);
-       ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
+       ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
+
+       if (dso__needs_decompress(al.map->dso))
+               unlink(objdump_name);
+
        if (ret > 0) {
                /*
                 * The kernel maps are inaccurate - assume objdump is right in
index 6318596294032602b2858acf42c2aa80993fdac9..577363809c9b1b54731f7e80b291278bf2764e78 100644 (file)
@@ -34,6 +34,7 @@ struct test {
                int (*get_nr)(void);
                const char *(*get_desc)(int subtest);
        } subtest;
+       bool (*is_supported)(void);
 };
 
 /* Tests */
@@ -99,6 +100,8 @@ const char *test__clang_subtest_get_desc(int subtest);
 int test__clang_subtest_get_nr(void);
 int test__unit_number__scnprint(int subtest);
 
+bool test__bp_signal_is_supported(void);
+
 #if defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
 struct thread;
index 683f8340460c1777f82f35860b3bb581a07f222a..ddbd56df91878884a4de5da2a29aae991e0689a0 100644 (file)
@@ -239,10 +239,20 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
        const char *s = strchr(ops->raw, '+');
        const char *c = strchr(ops->raw, ',');
 
-       if (c++ != NULL)
+       /*
+        * skip over possible up to 2 operands to get to address, e.g.:
+        * tbnz  w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
+        */
+       if (c++ != NULL) {
                ops->target.addr = strtoull(c, NULL, 16);
-       else
+               if (!ops->target.addr) {
+                       c = strchr(c, ',');
+                       if (c++ != NULL)
+                               ops->target.addr = strtoull(c, NULL, 16);
+               }
+       } else {
                ops->target.addr = strtoull(ops->raw, NULL, 16);
+       }
 
        if (s++ != NULL) {
                ops->target.offset = strtoull(s, NULL, 16);
@@ -257,10 +267,27 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
 static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
                           struct ins_operands *ops)
 {
+       const char *c = strchr(ops->raw, ',');
+
        if (!ops->target.addr || ops->target.offset < 0)
                return ins__raw_scnprintf(ins, bf, size, ops);
 
-       return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
+       if (c != NULL) {
+               const char *c2 = strchr(c + 1, ',');
+
+               /* check for 3-op insn */
+               if (c2 != NULL)
+                       c = c2;
+               c++;
+
+               /* mirror arch objdump's space-after-comma style */
+               if (*c == ' ')
+                       c++;
+       }
+
+       return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64,
+                        ins->name, c ? c - ops->raw : 0, ops->raw,
+                        ops->target.offset);
 }
 
 static struct ins_ops jump_ops = {
@@ -1294,6 +1321,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
        char linkname[PATH_MAX];
        char *build_id_filename;
        char *build_id_path = NULL;
+       char *pos;
 
        if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
            !dso__is_kcore(dso))
@@ -1313,7 +1341,14 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
        if (!build_id_path)
                return -1;
 
-       dirname(build_id_path);
+       /*
+        * old style build-id cache has name of XX/XXXXXXX.. while
+        * new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
+        * extract the build-id part of dirname in the new style only.
+        */
+       pos = strrchr(build_id_path, '/');
+       if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
+               dirname(build_id_path);
 
        if (dso__is_kcore(dso) ||
            readlink(build_id_path, linkname, sizeof(linkname)) < 0 ||
@@ -1396,31 +1431,10 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
                                sizeof(symfs_filename));
                }
        } else if (dso__needs_decompress(dso)) {
-               char tmp[PATH_MAX];
-               struct kmod_path m;
-               int fd;
-               bool ret;
-
-               if (kmod_path__parse_ext(&m, symfs_filename))
-                       goto out;
-
-               snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX");
-
-               fd = mkstemp(tmp);
-               if (fd < 0) {
-                       free(m.ext);
-                       goto out;
-               }
-
-               ret = decompress_to_file(m.ext, symfs_filename, fd);
-
-               if (ret)
-                       pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename);
-
-               free(m.ext);
-               close(fd);
+               char tmp[KMOD_DECOMP_LEN];
 
-               if (!ret)
+               if (dso__decompress_kmodule_path(dso, symfs_filename,
+                                                tmp, sizeof(tmp)) < 0)
                        goto out;
 
                strcpy(symfs_filename, tmp);
@@ -1429,7 +1443,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_na
        snprintf(command, sizeof(command),
                 "%s %s%s --start-address=0x%016" PRIx64
                 " --stop-address=0x%016" PRIx64
-                " -l -d %s %s -C %s 2>/dev/null|grep -v %s:|expand",
+                " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
                 objdump_path ? objdump_path : "objdump",
                 disassembler_style ? "-M " : "",
                 disassembler_style ? disassembler_style : "",
index 168cc49654e7a4b18f69d177e29373f8a13e3471..e0148b081bdfbb7cead2c118a51ca81bd238e740 100644 (file)
@@ -278,51 +278,6 @@ char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
        return bf;
 }
 
-bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size)
-{
-       char *id_name = NULL, *ch;
-       struct stat sb;
-       char sbuild_id[SBUILD_ID_SIZE];
-
-       if (!dso->has_build_id)
-               goto err;
-
-       build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
-       id_name = build_id_cache__linkname(sbuild_id, NULL, 0);
-       if (!id_name)
-               goto err;
-       if (access(id_name, F_OK))
-               goto err;
-       if (lstat(id_name, &sb) == -1)
-               goto err;
-       if ((size_t)sb.st_size > size - 1)
-               goto err;
-       if (readlink(id_name, bf, size - 1) < 0)
-               goto err;
-
-       bf[sb.st_size] = '\0';
-
-       /*
-        * link should be:
-        * ../../lib/modules/4.4.0-rc4/kernel/net/ipv4/netfilter/nf_nat_ipv4.ko/a09fe3eb3147dafa4e3b31dbd6257e4d696bdc92
-        */
-       ch = strrchr(bf, '/');
-       if (!ch)
-               goto err;
-       if (ch - 3 < bf)
-               goto err;
-
-       free(id_name);
-       return strncmp(".ko", ch - 3, 3) == 0;
-err:
-       pr_err("Invalid build id: %s\n", id_name ? :
-                                        dso->long_name ? :
-                                        dso->short_name ? :
-                                        "[unknown]");
-       free(id_name);
-       return false;
-}
-
 #define dsos__for_each_with_build_id(pos, head)        \
        list_for_each_entry(pos, head, node)    \
                if (!pos->has_build_id)         \
index 8a89b195c1fc3a5c36d7ca260dafe9c6b3fa8262..96690a55c62c40394444a8f23df1cd03b840acfe 100644 (file)
@@ -17,7 +17,6 @@ char *build_id_cache__kallsyms_path(const char *sbuild_id, char *bf,
                                    size_t size);
 
 char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
-bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size);
 
 int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
                           struct perf_sample *sample, struct perf_evsel *evsel,
index a96a99d2369f800634025bcdfa9838d1d6bc9d97..4e7ab611377acd56c1be78cbd983058e5ce5142b 100644 (file)
@@ -248,6 +248,64 @@ bool dso__needs_decompress(struct dso *dso)
                dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
 }
 
+static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf)
+{
+       int fd = -1;
+       struct kmod_path m;
+
+       if (!dso__needs_decompress(dso))
+               return -1;
+
+       if (kmod_path__parse_ext(&m, dso->long_name))
+               return -1;
+
+       if (!m.comp)
+               goto out;
+
+       fd = mkstemp(tmpbuf);
+       if (fd < 0) {
+               dso->load_errno = errno;
+               goto out;
+       }
+
+       if (!decompress_to_file(m.ext, name, fd)) {
+               dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
+               close(fd);
+               fd = -1;
+       }
+
+out:
+       free(m.ext);
+       return fd;
+}
+
+int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
+{
+       char tmpbuf[] = KMOD_DECOMP_NAME;
+       int fd;
+
+       fd = decompress_kmodule(dso, name, tmpbuf);
+       unlink(tmpbuf);
+       return fd;
+}
+
+int dso__decompress_kmodule_path(struct dso *dso, const char *name,
+                                char *pathname, size_t len)
+{
+       char tmpbuf[] = KMOD_DECOMP_NAME;
+       int fd;
+
+       fd = decompress_kmodule(dso, name, tmpbuf);
+       if (fd < 0) {
+               unlink(tmpbuf);
+               return -1;
+       }
+
+       strncpy(pathname, tmpbuf, len);
+       close(fd);
+       return 0;
+}
+
 /*
  * Parses kernel module specified in @path and updates
  * @m argument like:
@@ -335,6 +393,21 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
        return 0;
 }
 
+void dso__set_module_info(struct dso *dso, struct kmod_path *m,
+                         struct machine *machine)
+{
+       if (machine__is_host(machine))
+               dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
+       else
+               dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
+
+       /* _KMODULE_COMP should be next to _KMODULE */
+       if (m->kmod && m->comp)
+               dso->symtab_type++;
+
+       dso__set_short_name(dso, strdup(m->name), true);
+}
+
 /*
  * Global list of open DSOs and the counter.
  */
@@ -381,7 +454,7 @@ static int do_open(char *name)
 
 static int __open_dso(struct dso *dso, struct machine *machine)
 {
-       int fd;
+       int fd = -EINVAL;
        char *root_dir = (char *)"";
        char *name = malloc(PATH_MAX);
 
@@ -392,15 +465,30 @@ static int __open_dso(struct dso *dso, struct machine *machine)
                root_dir = machine->root_dir;
 
        if (dso__read_binary_type_filename(dso, dso->binary_type,
-                                           root_dir, name, PATH_MAX)) {
-               free(name);
-               return -EINVAL;
-       }
+                                           root_dir, name, PATH_MAX))
+               goto out;
 
        if (!is_regular_file(name))
-               return -EINVAL;
+               goto out;
+
+       if (dso__needs_decompress(dso)) {
+               char newpath[KMOD_DECOMP_LEN];
+               size_t len = sizeof(newpath);
+
+               if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
+                       fd = -dso->load_errno;
+                       goto out;
+               }
+
+               strcpy(name, newpath);
+       }
 
        fd = do_open(name);
+
+       if (dso__needs_decompress(dso))
+               unlink(name);
+
+out:
        free(name);
        return fd;
 }
index 12350b17172730adf0dffdf324cb9e0b4a4be2ba..bd061ba7b47cc8eab2ff05e4027dc22990d7c7ef 100644 (file)
@@ -244,6 +244,12 @@ bool is_supported_compression(const char *ext);
 bool is_kernel_module(const char *pathname, int cpumode);
 bool decompress_to_file(const char *ext, const char *filename, int output_fd);
 bool dso__needs_decompress(struct dso *dso);
+int dso__decompress_kmodule_fd(struct dso *dso, const char *name);
+int dso__decompress_kmodule_path(struct dso *dso, const char *name,
+                                char *pathname, size_t len);
+
+#define KMOD_DECOMP_NAME  "/tmp/perf-kmod-XXXXXX"
+#define KMOD_DECOMP_LEN   sizeof(KMOD_DECOMP_NAME)
 
 struct kmod_path {
        char *name;
@@ -259,6 +265,9 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
 #define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true , false)
 #define kmod_path__parse_ext(__m, __p)  __kmod_path__parse(__m, __p, false, true)
 
+void dso__set_module_info(struct dso *dso, struct kmod_path *m,
+                         struct machine *machine);
+
 /*
  * The dso__data_* external interface provides following functions:
  *   dso__data_get_fd
index 314a07151fb772377752dae62658b79ffdc87cd6..5cac8d5e009a88ff096d9e2f8026e39e8567c595 100644 (file)
@@ -1469,8 +1469,16 @@ static int __event_process_build_id(struct build_id_event *bev,
 
                dso__set_build_id(dso, &bev->build_id);
 
-               if (!is_kernel_module(filename, cpumode))
-                       dso->kernel = dso_type;
+               if (dso_type != DSO_TYPE_USER) {
+                       struct kmod_path m = { .name = NULL, };
+
+                       if (!kmod_path__parse_name(&m, filename) && m.kmod)
+                               dso__set_module_info(dso, &m, machine);
+                       else
+                               dso->kernel = dso_type;
+
+                       free(m.name);
+               }
 
                build_id__sprintf(dso->build_id, sizeof(dso->build_id),
                                  sbuild_id);
index d97e014c3df395e51e61da31f9624e12927a43b2..d7f31cb0a4cbeb41e6c02d58323a8848ff82cfc4 100644 (file)
@@ -572,16 +572,7 @@ static struct dso *machine__findnew_module_dso(struct machine *machine,
                if (dso == NULL)
                        goto out_unlock;
 
-               if (machine__is_host(machine))
-                       dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
-               else
-                       dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
-
-               /* _KMODULE_COMP should be next to _KMODULE */
-               if (m->kmod && m->comp)
-                       dso->symtab_type++;
-
-               dso__set_short_name(dso, strdup(m->name), true);
+               dso__set_module_info(dso, m, machine);
                dso__set_long_name(dso, strdup(filename), true);
        }
 
index 9d92af7d07182e662b1a6d7ad5e66c0147a78234..40de3cb40d2100fe918f26795ae29ae702d62665 100644 (file)
@@ -1219,7 +1219,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
        fprintf(ofp, "# be retrieved using Python functions of the form "
                "common_*(context).\n");
 
-       fprintf(ofp, "# See the perf-trace-python Documentation for the list "
+       fprintf(ofp, "# See the perf-script-python Documentation for the list "
                "of available functions.\n\n");
 
        fprintf(ofp, "import os\n");
index e7ee47f7377ab17bbb414be4240cf01565ae863a..502505cf236af30226b22ff82878098060f19862 100644 (file)
@@ -637,43 +637,6 @@ static int dso__swap_init(struct dso *dso, unsigned char eidata)
        return 0;
 }
 
-static int decompress_kmodule(struct dso *dso, const char *name,
-                             enum dso_binary_type type)
-{
-       int fd = -1;
-       char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
-       struct kmod_path m;
-
-       if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
-           type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
-           type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
-               return -1;
-
-       if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
-               name = dso->long_name;
-
-       if (kmod_path__parse_ext(&m, name) || !m.comp)
-               return -1;
-
-       fd = mkstemp(tmpbuf);
-       if (fd < 0) {
-               dso->load_errno = errno;
-               goto out;
-       }
-
-       if (!decompress_to_file(m.ext, name, fd)) {
-               dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
-               close(fd);
-               fd = -1;
-       }
-
-       unlink(tmpbuf);
-
-out:
-       free(m.ext);
-       return fd;
-}
-
 bool symsrc__possibly_runtime(struct symsrc *ss)
 {
        return ss->dynsym || ss->opdsec;
@@ -705,9 +668,11 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
        int fd;
 
        if (dso__needs_decompress(dso)) {
-               fd = decompress_kmodule(dso, name, type);
+               fd = dso__decompress_kmodule_fd(dso, name);
                if (fd < 0)
                        return -1;
+
+               type = dso->symtab_type;
        } else {
                fd = open(name, O_RDONLY);
                if (fd < 0) {
index 8f2b068ff7564900c989d1cdec96757c1b3ab45b..e7a98dbd2aed9133d99f38b71b3cac9fb297e0d6 100644 (file)
@@ -1562,10 +1562,6 @@ int dso__load(struct dso *dso, struct map *map)
        if (!runtime_ss && syms_ss)
                runtime_ss = syms_ss;
 
-       if (syms_ss && syms_ss->type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
-               if (dso__build_id_is_kmod(dso, name, PATH_MAX))
-                       kmod = true;
-
        if (syms_ss)
                ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
        else
index 943a06291587b064c3649569dd85b356805a7fab..da45c4be5fb3e77ee59131602667b4d675bc3a40 100644 (file)
@@ -39,6 +39,14 @@ static int __report_module(struct addr_location *al, u64 ip,
                return 0;
 
        mod = dwfl_addrmodule(ui->dwfl, ip);
+       if (mod) {
+               Dwarf_Addr s;
+
+               dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
+               if (s != al->map->start)
+                       mod = 0;
+       }
+
        if (!mod)
                mod = dwfl_report_elf(ui->dwfl, dso->short_name,
                                      dso->long_name, -1, al->map->start,
@@ -224,7 +232,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 
        err = dwfl_getthread_frames(ui->dwfl, thread->tid, frame_callback, ui);
 
-       if (err && !ui->max_stack)
+       if (err && ui->max_stack != max_stack)
                err = 0;
 
        /*
index 32c3295929b01b47d2071f372a458fa7b8e5d60d..87940364570bcbff87cfa4447cb555e96612355b 100644 (file)
@@ -22,7 +22,7 @@
 #include <asm/kvm_hyp.h>
 
 #define vtr_to_max_lr_idx(v)           ((v) & 0xf)
-#define vtr_to_nr_pre_bits(v)          (((u32)(v) >> 26) + 1)
+#define vtr_to_nr_pre_bits(v)          ((((u32)(v) >> 26) & 7) + 1)
 
 static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
 {
index a2d63247d1bbe00ca2693197c3f1e73963ad1c62..e2e5effba2a999577edb338fdc0c2cf5c9a1e63f 100644 (file)
@@ -879,6 +879,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
        pmd_t *pmd;
 
        pud = stage2_get_pud(kvm, cache, addr);
+       if (!pud)
+               return NULL;
+
        if (stage2_pud_none(*pud)) {
                if (!cache)
                        return NULL;
index 0a4283ed9aa735e55e476bdea0a19ed159952646..63e0bbdcddcc3e5e59163c08a5691dd36c416564 100644 (file)
@@ -226,7 +226,13 @@ static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
 
        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
-               val = vmcr.ctlr;
+               val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
+               val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
+               val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
+               val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
+               val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
+               val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;
+
                break;
        case GIC_CPU_PRIMASK:
                /*
@@ -267,7 +273,13 @@ static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
 
        switch (addr & 0xff) {
        case GIC_CPU_CTRL:
-               vmcr.ctlr = val;
+               vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
+               vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
+               vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
+               vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
+               vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
+               vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);
+
                break;
        case GIC_CPU_PRIMASK:
                /*
index 504b4bd0d651cf820eec843a325c649e0d1bd181..e4187e52bb26e65c87743c760fb5c651eedb5ea7 100644 (file)
@@ -177,7 +177,18 @@ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
        struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
        u32 vmcr;
 
-       vmcr  = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+       vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
+               GICH_VMCR_ENABLE_GRP0_MASK;
+       vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
+               GICH_VMCR_ENABLE_GRP1_MASK;
+       vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
+               GICH_VMCR_ACK_CTL_MASK;
+       vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
+               GICH_VMCR_FIQ_EN_MASK;
+       vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
+               GICH_VMCR_CBPR_MASK;
+       vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
+               GICH_VMCR_EOI_MODE_MASK;
        vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
                GICH_VMCR_ALIAS_BINPOINT_MASK;
        vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
@@ -195,8 +206,19 @@ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 
        vmcr = cpu_if->vgic_vmcr;
 
-       vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
-                       GICH_VMCR_CTRL_SHIFT;
+       vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
+               GICH_VMCR_ENABLE_GRP0_SHIFT;
+       vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
+               GICH_VMCR_ENABLE_GRP1_SHIFT;
+       vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
+               GICH_VMCR_ACK_CTL_SHIFT;
+       vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
+               GICH_VMCR_FIQ_EN_SHIFT;
+       vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
+               GICH_VMCR_CBPR_SHIFT;
+       vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
+               GICH_VMCR_EOI_MODE_SHIFT;
+
        vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
                        GICH_VMCR_ALIAS_BINPOINT_SHIFT;
        vmcrp->bpr  = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
index 6fe3f003636a311d055581ee6a8133d0951fd3f2..030248e669f65acd5e0155fbbef189d96e7cf7e3 100644 (file)
@@ -159,15 +159,24 @@ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
 void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+       u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u32 vmcr;
 
-       /*
-        * Ignore the FIQen bit, because GIC emulation always implies
-        * SRE=1 which means the vFIQEn bit is also RES1.
-        */
-       vmcr = ((vmcrp->ctlr >> ICC_CTLR_EL1_EOImode_SHIFT) <<
-                ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
-       vmcr |= (vmcrp->ctlr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+       if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+               vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
+                       ICH_VMCR_ACK_CTL_MASK;
+               vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
+                       ICH_VMCR_FIQ_EN_MASK;
+       } else {
+               /*
+                * When emulating GICv3 on GICv3 with SRE=1 on the
+                * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+                */
+               vmcr = ICH_VMCR_FIQ_EN_MASK;
+       }
+
+       vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
+       vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
        vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
        vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
        vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
@@ -180,17 +189,27 @@ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
 {
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+       u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u32 vmcr;
 
        vmcr = cpu_if->vgic_vmcr;
 
-       /*
-        * Ignore the FIQen bit, because GIC emulation always implies
-        * SRE=1 which means the vFIQEn bit is also RES1.
-        */
-       vmcrp->ctlr = ((vmcr >> ICH_VMCR_EOIM_SHIFT) <<
-                       ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
-       vmcrp->ctlr |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+       if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+               vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
+                       ICH_VMCR_ACK_CTL_SHIFT;
+               vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
+                       ICH_VMCR_FIQ_EN_SHIFT;
+       } else {
+               /*
+                * When emulating GICv3 on GICv3 with SRE=1 on the
+                * VFIQEn bit is RES1 and the VAckCtl bit is RES0.
+                */
+               vmcrp->fiqen = 1;
+               vmcrp->ackctl = 0;
+       }
+
+       vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
+       vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
        vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
        vmcrp->bpr  = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
        vmcrp->pmr  = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
index da83e4caa272f2621b840123e79dfbc3e5ceaead..bba7fa22a7f7c41a5d1fa88c02fb15c158992ccd 100644 (file)
@@ -111,14 +111,18 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
  * registers regardless of the hardware backed GIC used.
  */
 struct vgic_vmcr {
-       u32     ctlr;
+       u32     grpen0;
+       u32     grpen1;
+
+       u32     ackctl;
+       u32     fiqen;
+       u32     cbpr;
+       u32     eoim;
+
        u32     abpr;
        u32     bpr;
        u32     pmr;  /* Priority mask field in the GICC_PMR and
                       * ICC_PMR_EL1 priority field format */
-       /* Below member variable are valid only for GICv3 */
-       u32     grpen0;
-       u32     grpen1;
 };
 
 struct vgic_reg_attr {