git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 1 Apr 2013 15:17:09 +0000 (08:17 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 1 Apr 2013 15:17:09 +0000 (08:17 -0700)
Pull arch/tile fix from Chris Metcalf:
 "This change allows newer Tilera boot tools to work correctly with
  current (and stable) kernels by using the right filename to get the
  initramfs from the Tilera hypervisor filesystem."

* 'stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
  tile: expect new initramfs name from hypervisor file system
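
For context, the pulled tile change is essentially a name lookup: the kernel asks the Tilera hypervisor file system for the initramfs by filename, so it has to try the name newer boot tools publish and fall back to the older one. The userspace sketch below only illustrates that lookup-with-fallback shape; the two filenames and the open()-based lookup are illustrative assumptions, not the hypervisor API or the literal names used by the patch.

/* Illustrative only: "try the new initramfs name, fall back to the legacy
 * one".  Filenames here are assumptions for demonstration purposes. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int open_initramfs(void)
{
	static const char *names[] = { "initramfs", "initramfs.cpio.gz" };

	for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		int fd = open(names[i], O_RDONLY);
		if (fd >= 0) {
			printf("using %s\n", names[i]);
			return fd;
		}
	}
	return -1;	/* neither name is present */
}

int main(void)
{
	int fd = open_initramfs();

	if (fd < 0) {
		fprintf(stderr, "no initramfs found\n");
		return 1;
	}
	close(fd);
	return 0;
}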

601 files changed:
CREDITS
Documentation/hwmon/lm75
Documentation/i2c/busses/i2c-diolan-u2c
Documentation/networking/ipvs-sysctl.txt
Documentation/sound/alsa/ALSA-Configuration.txt
Documentation/sound/alsa/seq_oss.html
MAINTAINERS
Makefile
arch/arc/include/asm/dma-mapping.h
arch/arc/include/asm/elf.h
arch/arc/include/asm/entry.h
arch/arc/include/asm/kgdb.h
arch/arc/include/asm/ptrace.h
arch/arc/include/asm/syscalls.h
arch/arc/include/uapi/asm/ptrace.h
arch/arc/kernel/entry.S
arch/arc/kernel/kgdb.c
arch/arc/kernel/setup.c
arch/arc/kernel/sys.c
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/boot/dts/at91sam9x5.dtsi
arch/arm/boot/dts/exynos4.dtsi
arch/arm/boot/dts/exynos5440.dtsi
arch/arm/boot/dts/tegra20.dtsi
arch/arm/boot/dts/tegra30.dtsi
arch/arm/kernel/smp.c
arch/arm/lib/memset.S
arch/arm/mach-at91/include/mach/gpio.h
arch/arm/mach-at91/irq.c
arch/arm/mach-at91/pm.c
arch/arm/mach-davinci/dma.c
arch/arm/mach-footbridge/Kconfig
arch/arm/mach-imx/clk-imx35.c
arch/arm/mach-imx/imx25-dt.c
arch/arm/mach-mmp/gplugd.c
arch/arm/mach-mxs/mach-mxs.c
arch/arm/mach-s5pv210/clock.c
arch/arm/mach-s5pv210/mach-goni.c
arch/arm/mach-shmobile/board-marzen.c
arch/arm/net/bpf_jit_32.c
arch/arm64/Kconfig
arch/arm64/Kconfig.debug
arch/arm64/configs/defconfig
arch/arm64/include/asm/ucontext.h
arch/arm64/kernel/arm64ksyms.c
arch/arm64/kernel/signal32.c
arch/arm64/mm/mmu.c
arch/ia64/kernel/process.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/epapr_paravirt.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kvm/book3s_64_mmu_host.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/mmu_context_hash64.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/slb_low.S
arch/powerpc/mm/tlb_hash64.c
arch/powerpc/perf/power7-pmu.c
arch/powerpc/platforms/85xx/sgy_cts1000.c
arch/powerpc/platforms/Kconfig.cputype
arch/s390/include/asm/eadm.h
arch/s390/include/asm/tlbflush.h
arch/s390/kernel/entry.S
arch/s390/kernel/entry64.S
arch/s390/kernel/setup.c
arch/sparc/Kconfig
arch/sparc/include/asm/spitfire.h
arch/sparc/kernel/cpu.c
arch/sparc/kernel/head_64.S
arch/sparc/kernel/leon_pci_grpci2.c
arch/tile/configs/tilegx_defconfig
arch/tile/configs/tilepro_defconfig
arch/x86/include/asm/kprobes.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/microcode_intel_early.c
arch/x86/kvm/x86.c
arch/x86/lib/usercopy_64.c
arch/x86/xen/mmu.c
block/blk-flush.c
block/partition-generic.c
drivers/acpi/apei/cper.c
drivers/acpi/pci_root.c
drivers/acpi/sleep.c
drivers/amba/tegra-ahb.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ata_piix.c
drivers/ata/libata-acpi.c
drivers/ata/pata_samsung_cf.c
drivers/ata/sata_fsl.c
drivers/block/Kconfig
drivers/block/aoe/aoecmd.c
drivers/block/cciss.c
drivers/block/loop.c
drivers/block/mg_disk.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/nvme.c
drivers/block/rbd.c
drivers/block/rsxx/Makefile
drivers/block/rsxx/config.c
drivers/block/rsxx/core.c
drivers/block/rsxx/cregs.c
drivers/block/rsxx/dma.c
drivers/block/rsxx/rsxx.h
drivers/block/rsxx/rsxx_cfg.h
drivers/block/rsxx/rsxx_priv.h
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/clk/clk-vt8500.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/cpufreq_stats.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/compat.h
drivers/crypto/talitos.c
drivers/dma/dw_dmac.c
drivers/dma/dw_dmac_regs.h
drivers/edac/amd64_edac.c
drivers/edac/edac_mc.c
drivers/edac/edac_mc_sysfs.c
drivers/extcon/extcon-max77693.c
drivers/extcon/extcon-max8997.c
drivers/firmware/Kconfig
drivers/firmware/efivars.c
drivers/gpio/gpiolib-of.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/nouveau/core/core/object.c
drivers/gpu/drm/nouveau/core/include/subdev/therm.h
drivers/gpu/drm/nouveau/core/subdev/therm/base.c
drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
drivers/gpu/drm/nouveau/nouveau_pm.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/radeon_benchmark.c
drivers/gpu/drm/radeon/si.c
drivers/hid/hid-ids.h
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/lm75.h
drivers/i2c/Kconfig
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/muxes/i2c-mux-pca9541.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/ipath/ipath_verbs.c
drivers/infiniband/hw/qib/Kconfig
drivers/infiniband/hw/qib/qib_driver.c
drivers/infiniband/hw/qib/qib_iba6120.c
drivers/infiniband/hw/qib/qib_init.c
drivers/infiniband/hw/qib/qib_sd7220.c
drivers/infiniband/hw/qib/qib_verbs.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/input/joystick/analog.c
drivers/iommu/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/irq_remapping.c
drivers/isdn/hisax/Kconfig
drivers/md/dm-bufio.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-cache-metadata.h
drivers/md/dm-cache-policy-cleaner.c
drivers/md/dm-cache-policy-internal.h
drivers/md/dm-cache-policy-mq.c
drivers/md/dm-cache-policy.c
drivers/md/dm-cache-policy.h
drivers/md/dm-cache-target.c
drivers/md/dm-thin.c
drivers/md/dm-verity.c
drivers/md/md.c
drivers/md/md.h
drivers/md/persistent-data/dm-btree-remove.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/media/i2c/m5mols/m5mols_core.c
drivers/media/pci/bt8xx/bttv-driver.c
drivers/media/platform/exynos-gsc/gsc-core.c
drivers/media/platform/s5p-fimc/fimc-core.c
drivers/media/platform/s5p-fimc/fimc-lite-reg.c
drivers/media/platform/s5p-fimc/fimc-lite.c
drivers/media/platform/s5p-fimc/fimc-mdevice.c
drivers/media/platform/s5p-mfc/s5p_mfc.c
drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
drivers/media/rc/Kconfig
drivers/media/v4l2-core/Makefile
drivers/misc/mei/hw-me.c
drivers/misc/mei/init.c
drivers/misc/mei/mei_dev.h
drivers/misc/mei/pci-me.c
drivers/misc/vmw_vmci/vmci_datagram.c
drivers/mtd/bcm47xxpart.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_ids.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs.c
drivers/net/can/sja1000/Kconfig
drivers/net/can/sja1000/plx_pci.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/sja1000/sja1000.h
drivers/net/ethernet/atheros/atl1e/atl1e.h
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/davicom/dm9000.h
drivers/net/ethernet/dec/tulip/Kconfig
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/igb_hwmon.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/marvell/sky2.h
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/micrel/ks8851.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/netconsole.c
drivers/net/usb/Kconfig
drivers/net/usb/cdc_mbim.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/smsc75xx.c
drivers/net/wireless/ath/ath9k/ar9003_calib.c
drivers/net/wireless/ath/ath9k/link.c
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/4965-rs.c
drivers/net/wireless/iwlwifi/dvm/lib.c
drivers/net/wireless/iwlwifi/dvm/ucode.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sta_ioctl.c
drivers/net/wireless/rt2x00/Kconfig
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/usb.c
drivers/pci/rom.c
drivers/pinctrl/mvebu/pinctrl-mvebu.c
drivers/pinctrl/pinconf.c
drivers/pinctrl/pinconf.h
drivers/pinctrl/pinctrl-abx500.c
drivers/pinctrl/pinctrl-at91.c
drivers/pinctrl/pinmux.c
drivers/rtc/rtc-at91rm9200.c
drivers/rtc/rtc-at91rm9200.h
drivers/rtc/rtc-da9052.c
drivers/s390/block/scm_blk.c
drivers/s390/block/scm_blk.h
drivers/s390/block/scm_drv.c
drivers/s390/char/sclp_cmd.c
drivers/s390/cio/chsc.c
drivers/s390/cio/chsc.h
drivers/s390/cio/scm.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/staging/comedi/drivers/s626.c
drivers/staging/zcache/Kconfig
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/target_core_file.h
drivers/target/target_core_pscsi.c
drivers/target/target_core_sbc.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/thermal/dove_thermal.c
drivers/thermal/exynos_thermal.c
drivers/thermal/kirkwood_thermal.c
drivers/thermal/rcar_thermal.c
drivers/tty/serial/8250/8250_core.c [moved from drivers/tty/serial/8250/8250.c with 99% similarity]
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/Kconfig
drivers/tty/serial/8250/Makefile
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/sunsu.c
drivers/tty/serial/xilinx_uartps.c
drivers/tty/vt/vc_screen.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/hcd.c
drivers/usb/core/usb-acpi.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/f_rndis.c
drivers/usb/gadget/g_ffs.c
drivers/usb/gadget/net2272.c
drivers/usb/gadget/net2280.c
drivers/usb/gadget/u_serial.c
drivers/usb/gadget/udc-core.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci-timer.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/da8xx.c
drivers/usb/musb/musb_gadget.c
drivers/usb/phy/Kconfig
drivers/usb/serial/ark3116.c
drivers/usb/serial/ch341.c
drivers/usb/serial/cypress_m8.c
drivers/usb/serial/f81232.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/garmin_gps.c
drivers/usb/serial/io_edgeport.c
drivers/usb/serial/io_ti.c
drivers/usb/serial/mct_u232.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/oti6858.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/quatech2.c
drivers/usb/serial/spcp8x5.c
drivers/usb/serial/ssu100.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/usb-serial.c
drivers/usb/storage/unusual_devs.h
drivers/vfio/pci/vfio_pci_config.c
drivers/vfio/pci/vfio_pci_intrs.c
drivers/vhost/net.c
drivers/vhost/tcm_vhost.c
drivers/video/atmel_lcdfb.c
drivers/video/ep93xx-fb.c
drivers/video/mxsfb.c
drivers/video/omap/omapfb_main.c
drivers/video/omap2/displays/panel-tpo-td043mtea1.c
drivers/video/omap2/dss/dss_features.c
drivers/watchdog/sp5100_tco.c
drivers/watchdog/sp5100_tco.h
drivers/xen/Kconfig
drivers/xen/events.c
drivers/xen/fallback.c
drivers/xen/xen-acpi-processor.c
drivers/xen/xen-pciback/pci_stub.c
firmware/Makefile
firmware/intel/sd7220.fw.ihex [moved from firmware/qlogic/sd7220.fw.ihex with 100% similarity]
fs/btrfs/ctree.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ordered-data.c
fs/btrfs/qgroup.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/volumes.c
fs/cifs/asn1.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/netmisc.c
fs/dcache.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/extents_status.c
fs/ext4/extents_status.h
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/move_extent.c
fs/ext4/page-io.c
fs/ext4/resize.c
fs/ext4/super.c
fs/internal.h
fs/jbd2/transaction.c
fs/namespace.c
fs/nfs/blocklayout/blocklayoutdm.c
fs/nfs/idmap.c
fs/nfs/nfs4filelayout.c
fs/nfs/nfs4proc.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfsd/nfscache.c
fs/nfsd/vfs.c
fs/pnode.c
fs/pnode.h
fs/proc/inode.c
fs/proc/root.c
fs/read_write.c
fs/splice.c
fs/sysfs/dir.c
fs/sysfs/mount.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_iomap.c
include/drm/drm_pciids.h
include/linux/debug_locks.h
include/linux/edac.h
include/linux/freezer.h
include/linux/fs_struct.h
include/linux/hash.h
include/linux/irq_work.h
include/linux/kernel.h
include/linux/mfd/max77693-private.h
include/linux/mm.h
include/linux/mman.h
include/linux/mmzone.h
include/linux/mount.h
include/linux/mtd/nand.h
include/linux/mxsfb.h
include/linux/netdevice.h
include/linux/nvme.h
include/linux/printk.h
include/linux/skbuff.h
include/linux/thermal.h
include/linux/udp.h
include/linux/usb/cdc_ncm.h
include/linux/usb/hcd.h
include/linux/usb/serial.h
include/linux/usb/ulpi.h
include/linux/user_namespace.h
include/net/dst.h
include/net/flow_keys.h
include/net/inet_frag.h
include/net/ip_fib.h
include/net/ip_vs.h
include/net/ipip.h
include/uapi/linux/packet_diag.h
include/uapi/linux/unix_diag.h
include/video/atmel_lcdc.h
include/xen/interface/io/blkif.h
include/xen/interface/physdev.h
ipc/mqueue.c
kernel/events/core.c
kernel/exit.c
kernel/lockdep.c
kernel/pid_namespace.c
kernel/printk.c
kernel/sys.c
kernel/time/tick-broadcast.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_irqsoff.c
kernel/trace/trace_sched_wakeup.c
kernel/user.c
kernel/user_namespace.c
kernel/workqueue.c
lib/bust_spinlocks.c
lib/dma-debug.c
mm/fremap.c
mm/hugetlb.c
mm/memory_hotplug.c
mm/mlock.c
mm/mmap.c
net/8021q/vlan.c
net/batman-adv/bat_iv_ogm.c
net/bluetooth/sco.c
net/bridge/br_fdb.c
net/bridge/br_netlink.c
net/core/dev.c
net/core/flow.c
net/core/flow_dissector.c
net/core/rtnetlink.c
net/core/scm.c
net/ipv4/af_inet.c
net/ipv4/inet_fragment.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_options.c
net/ipv4/ipconfig.c
net/ipv4/netfilter/Kconfig
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/ip6_input.c
net/ipv6/netfilter/ip6t_NPT.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/reassembly.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/irda/af_irda.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_debugfs.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_netlink.c
net/l2tp/l2tp_ppp.c
net/mac80211/iface.c
net/mac80211/mesh.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_proto_sctp.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_udplite.c
net/netfilter/nfnetlink_queue_core.c
net/netlink/genetlink.c
net/nfc/llcp/llcp.c
net/nfc/llcp/sock.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/openvswitch/vport-netdev.c
net/openvswitch/vport.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sctp/associola.c
net/sctp/sm_statefuns.c
net/sunrpc/sched.c
net/unix/af_unix.c
net/wireless/core.c
net/wireless/core.h
net/wireless/nl80211.c
net/wireless/scan.c
net/wireless/sme.c
net/wireless/trace.h
net/wireless/wext-sme.c
net/xfrm/xfrm_replay.c
security/selinux/xfrm.c
security/yama/yama_lsm.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/usb/mixer.c
tools/lib/traceevent/Makefile
tools/perf/Makefile
tools/perf/bench/bench.h
tools/perf/builtin-record.c
tools/perf/util/hist.h
tools/perf/util/strlist.c
virt/kvm/ioapic.c

diff --git a/CREDITS b/CREDITS
index 78163cb3eb6ad9d0b2c3372ef0d6305b48e95981..afaa7cec6ea5126198d5220389f66d9678ed8949 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -1510,6 +1510,14 @@ D: Natsemi ethernet
 D: Cobalt Networks (x86) support
 D: This-and-That
 
+N: Mark M. Hoffman
+E: mhoffman@lightlink.com
+D: asb100, lm93 and smsc47b397 hardware monitoring drivers
+D: hwmon subsystem core
+D: hwmon subsystem maintainer
+D: i2c-sis96x and i2c-stub SMBus drivers
+S: USA
+
 N: Dirk Hohndel
 E: hohndel@suse.de
 D: The XFree86[tm] Project
index c91a1d15fa28ae62aca3796ab89e15de3fdd0f5a..69af1c7db6b7437ca797c61baaeb74086d264c5c 100644 (file)
@@ -23,7 +23,7 @@ Supported chips:
     Datasheet: Publicly available at the Maxim website
                http://www.maxim-ic.com/
   * Microchip (TelCom) TCN75
-    Prefix: 'lm75'
+    Prefix: 'tcn75'
     Addresses scanned: none
     Datasheet: Publicly available at the Microchip website
                http://www.microchip.com/
index 30fe4bb9a069b13f5979b7debd8a5ec0c0302eab..0d6018c316c741f9cc0cf3b0391457b880b58234 100644 (file)
@@ -5,7 +5,7 @@ Supported adapters:
     Documentation:
        http://www.diolan.com/i2c/u2c12.html
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 Description
 -----------
index f2a2488f1bf33d8290384c4823fccee2b39afe36..9573d0c48c6ea882099edd94ed2dcda94e4fe3dd 100644 (file)
@@ -15,6 +15,13 @@ amemthresh - INTEGER
         enabled and the variable is automatically set to 2, otherwise
         the strategy is disabled and the variable is  set  to 1.
 
+backup_only - BOOLEAN
+       0 - disabled (default)
+       not 0 - enabled
+
+       If set, disable the director function while the server is
+       in backup mode to avoid packet loops for DR/TUN methods.
+
 conntrack - BOOLEAN
        0 - disabled (default)
        not 0 - enabled
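
The backup_only description added above is the kind of knob that is flipped through sysctl. As a minimal sketch (assuming it is exposed at /proc/sys/net/ipv4/vs/backup_only, next to the other IPVS sysctls this file documents), enabling it from C would look like the snippet below; the equivalent command-line form would be sysctl -w net.ipv4.vs.backup_only=1.

/* Minimal sketch: enable backup_only.  The /proc path is an assumption
 * based on where the other IPVS sysctls documented here live. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/vs/backup_only", "w");

	if (!f) {
		perror("backup_only");
		return 1;
	}
	fputs("1\n", f);	/* "not 0 - enabled" */
	fclose(f);
	return 0;
}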
index ce6581c8ca26915c1a00f31a9b7605c0cb6c3413..4499bd948860cebe52e7db8aae65b2a16fd1a897 100644 (file)
@@ -912,7 +912,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
     models depending on the codec chip.  The list of available models
     is found in HD-Audio-Models.txt
 
-    The model name "genric" is treated as a special case.  When this
+    The model name "generic" is treated as a special case.  When this
     model is given, the driver uses the generic codec parser without
     "codec-patch".  It's sometimes good for testing and debugging.
 
index d9776cf60c07a9b62445174cd6eed64e1e69cb83..9663b45f6fde66ba5d2bb4abdd4b9bf9e651a32b 100644 (file)
@@ -285,7 +285,7 @@ sample data.
 <H4>
 7.2.4 Close Callback</H4>
 The <TT>close</TT> callback is called when this device is closed by the
-applicaion. If any private data was allocated in open callback, it must
+application. If any private data was allocated in open callback, it must
 be released in the close callback. The deletion of ALSA port should be
 done here, too. This callback must not be NULL.
 <H4>
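
As a hedged illustration of the open/close pairing the corrected paragraph above insists on, the stand-in callbacks below allocate per-open private data and release it on close; the structure and function names are hypothetical, not the real ALSA sequencer OSS API.

/* Hypothetical stand-in callbacks: whatever open() allocates as private
 * data must be released in close(), as the documentation above requires. */
#include <stdlib.h>

struct demo_device {
	void *private_data;	/* per-open state, owned by these callbacks */
};

static int demo_open(struct demo_device *dev)
{
	dev->private_data = malloc(64);		/* allocated on open */
	return dev->private_data ? 0 : -1;
}

static void demo_close(struct demo_device *dev)
{
	free(dev->private_data);		/* mirrored release on close */
	dev->private_data = NULL;
	/* the ALSA port deletion mentioned above would also happen here */
}

int main(void)
{
	struct demo_device dev = { 0 };

	if (demo_open(&dev) == 0)
		demo_close(&dev);
	return 0;
}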
index 50b4d735f961a1e5fe700faee710c8b9bd65c5dc..836a6183c37f6f6e2ceec36ea72497136fffde4c 100644 (file)
@@ -1338,12 +1338,6 @@ S:       Maintained
 F:     drivers/platform/x86/asus*.c
 F:     drivers/platform/x86/eeepc*.c
 
-ASUS ASB100 HARDWARE MONITOR DRIVER
-M:     "Mark M. Hoffman" <mhoffman@lightlink.com>
-L:     lm-sensors@lm-sensors.org
-S:     Maintained
-F:     drivers/hwmon/asb100.c
-
 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
 M:     Dan Williams <djbw@fb.com>
 W:     http://sourceforge.net/projects/xscaleiop
@@ -1467,6 +1461,12 @@ F:       drivers/dma/at_hdmac.c
 F:     drivers/dma/at_hdmac_regs.h
 F:     include/linux/platform_data/dma-atmel.h
 
+ATMEL I2C DRIVER
+M:     Ludovic Desroches <ludovic.desroches@atmel.com>
+L:     linux-i2c@vger.kernel.org
+S:     Supported
+F:     drivers/i2c/busses/i2c-at91.c
+
 ATMEL ISI DRIVER
 M:     Josh Wu <josh.wu@atmel.com>
 L:     linux-media@vger.kernel.org
@@ -2629,7 +2629,7 @@ F:        include/uapi/drm/
 
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 M:     Daniel Vetter <daniel.vetter@ffwll.ch>
-L:     intel-gfx@lists.freedesktop.org (subscribers-only)
+L:     intel-gfx@lists.freedesktop.org
 L:     dri-devel@lists.freedesktop.org
 T:     git git://people.freedesktop.org/~danvet/drm-intel
 S:     Supported
@@ -3242,6 +3242,12 @@ F:       Documentation/firmware_class/
 F:     drivers/base/firmware*.c
 F:     include/linux/firmware.h
 
+FLASHSYSTEM DRIVER (IBM FlashSystem 70/80 PCI SSD Flash Card)
+M:     Joshua Morris <josh.h.morris@us.ibm.com>
+M:     Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+S:     Maintained
+F:     drivers/block/rsxx/
+
 FLOPPY DRIVER
 M:     Jiri Kosina <jkosina@suse.cz>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
@@ -3851,7 +3857,7 @@ F:        drivers/i2c/busses/i2c-ismt.c
 F:     Documentation/i2c/busses/i2c-ismt
 
 I2C/SMBUS STUB DRIVER
-M:     "Mark M. Hoffman" <mhoffman@lightlink.com>
+M:     Jean Delvare <khali@linux-fr.org>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/i2c-stub.c
@@ -5059,9 +5065,8 @@ S:        Maintained
 F:     drivers/net/ethernet/marvell/sk*
 
 MARVELL LIBERTAS WIRELESS DRIVER
-M:     Dan Williams <dcbw@redhat.com>
 L:     libertas-dev@lists.infradead.org
-S:     Maintained
+S:     Orphan
 F:     drivers/net/wireless/libertas/
 
 MARVELL MV643XX ETHERNET DRIVER
@@ -5563,6 +5568,7 @@ F:        include/uapi/linux/if_*
 F:     include/uapi/linux/netdevice.h
 
 NETXEN (1/10) GbE SUPPORT
+M:     Manish Chopra <manish.chopra@qlogic.com>
 M:     Sony Chacko <sony.chacko@qlogic.com>
 M:     Rajesh Borundia <rajesh.borundia@qlogic.com>
 L:     netdev@vger.kernel.org
@@ -5647,6 +5653,14 @@ S:       Maintained
 F:     drivers/video/riva/
 F:     drivers/video/nvidia/
 
+NVM EXPRESS DRIVER
+M:     Matthew Wilcox <willy@linux.intel.com>
+L:     linux-nvme@lists.infradead.org
+T:     git git://git.infradead.org/users/willy/linux-nvme.git
+S:     Supported
+F:     drivers/block/nvme.c
+F:     include/linux/nvme.h
+
 OMAP SUPPORT
 M:     Tony Lindgren <tony@atomide.com>
 L:     linux-omap@vger.kernel.org
@@ -5675,7 +5689,7 @@ S:        Maintained
 F:     arch/arm/*omap*/*clock*
 
 OMAP POWER MANAGEMENT SUPPORT
-M:     Kevin Hilman <khilman@ti.com>
+M:     Kevin Hilman <khilman@deeprootsystems.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     arch/arm/*omap*/*pm*
@@ -5769,7 +5783,7 @@ F:        arch/arm/*omap*/usb*
 
 OMAP GPIO DRIVER
 M:     Santosh Shilimkar <santosh.shilimkar@ti.com>
-M:     Kevin Hilman <khilman@ti.com>
+M:     Kevin Hilman <khilman@deeprootsystems.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     drivers/gpio/gpio-omap.c
@@ -6201,7 +6215,7 @@ F:        include/linux/power_supply.h
 F:     drivers/power/
 
 PNP SUPPORT
-M:     Adam Belay <abelay@mit.edu>
+M:     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 M:     Bjorn Helgaas <bhelgaas@google.com>
 S:     Maintained
 F:     drivers/pnp/
@@ -6543,12 +6557,6 @@ S:       Maintained
 F:     Documentation/blockdev/ramdisk.txt
 F:     drivers/block/brd.c
 
-RAMSAM DRIVER (IBM RamSan 70/80 PCI SSD Flash Card)
-M:     Joshua Morris <josh.h.morris@us.ibm.com>
-M:     Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-S:     Maintained
-F:     drivers/block/rsxx/
-
 RANDOM NUMBER DRIVER
 M:     Theodore Ts'o" <tytso@mit.edu>
 S:     Maintained
@@ -7165,7 +7173,7 @@ F:        arch/arm/mach-s3c2410/bast-irq.c
 
 TI DAVINCI MACHINE SUPPORT
 M:     Sekhar Nori <nsekhar@ti.com>
-M:     Kevin Hilman <khilman@ti.com>
+M:     Kevin Hilman <khilman@deeprootsystems.com>
 L:     davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
 T:     git git://gitorious.org/linux-davinci/linux-davinci.git
 Q:     http://patchwork.kernel.org/project/linux-davinci/list/
@@ -7198,13 +7206,6 @@ L:       netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/sis/sis900.*
 
-SIS 96X I2C/SMBUS DRIVER
-M:     "Mark M. Hoffman" <mhoffman@lightlink.com>
-L:     linux-i2c@vger.kernel.org
-S:     Maintained
-F:     Documentation/i2c/busses/i2c-sis96x
-F:     drivers/i2c/busses/i2c-sis96x.c
-
 SIS FRAMEBUFFER DRIVER
 M:     Thomas Winischhofer <thomas@winischhofer.net>
 W:     http://www.winischhofer.net/linuxsisvga.shtml
@@ -7282,7 +7283,7 @@ F:        Documentation/hwmon/sch5627
 F:     drivers/hwmon/sch5627.c
 
 SMSC47B397 HARDWARE MONITOR DRIVER
-M:     "Mark M. Hoffman" <mhoffman@lightlink.com>
+M:     Jean Delvare <khali@linux-fr.org>
 L:     lm-sensors@lm-sensors.org
 S:     Maintained
 F:     Documentation/hwmon/smsc47b397
@@ -7705,9 +7706,10 @@ F:       include/linux/swiotlb.h
 
 SYNOPSYS ARC ARCHITECTURE
 M:     Vineet Gupta <vgupta@synopsys.com>
-L:     linux-snps-arc@vger.kernel.org
 S:     Supported
 F:     arch/arc/
+F:     Documentation/devicetree/bindings/arc/
+F:     drivers/tty/serial/arc-uart.c
 
 SYSV FILESYSTEM
 M:     Christoph Hellwig <hch@infradead.org>
index 22113a77f8ed4af82257b097d821af841f082844..58a165b02af1e27acb6d2635f3c9ab0d58c5cb00 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
index 31f77aec082381e34f5cb7ddb06added7d98d179..45b8e0cea1764d2b9e4a0b0d1f27b787eb845870 100644 (file)
@@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg,
        int i;
 
        for_each_sg(sg, s, nents, i)
-               sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+               s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
                                               s->length, dir);
 
        return nents;
index f4c8d36ebecbd2dcad76db454edc64686cfae292..a262828576839d9d48e5e2be18ae65b9fad815e5 100644 (file)
@@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *);
  */
 #define ELF_PLATFORM   (NULL)
 
-#define SET_PERSONALITY(ex) \
-       set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
-
 #endif
index 23daa326fc9b4d19c5711af313e730dae2551f27..eb2ae53187d95fe1240885bd7026d1f13c3c98a4 100644 (file)
  *-------------------------------------------------------------*/
 .macro SAVE_ALL_EXCEPTION   marker
 
-       st      \marker, [sp, 8]
+       st      \marker, [sp, 8]        /* orig_r8 */
        st      r0, [sp, 4]    /* orig_r0, needed only for sys calls */
 
        /* Restore r9 used to code the early prologue */
index f3c4934f0ca9462e1876095fa197a90dc2e4fede..4930957ca3d38c4cb312aa60f21ffa9cbb413087 100644 (file)
@@ -13,7 +13,7 @@
 
 #ifdef CONFIG_KGDB
 
-#include <asm/user.h>
+#include <asm/ptrace.h>
 
 /* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
  * register API yet */
@@ -53,9 +53,7 @@ enum arc700_linux_regnums {
 };
 
 #else
-static inline void kgdb_trap(struct pt_regs *regs, int param)
-{
-}
+#define kgdb_trap(regs, param)
 #endif
 
 #endif /* __ARC_KGDB_H__ */
index 8ae783d20a81185dab9ae04e61f5e2a9e47082b0..6179de7e07c21ca4875017adc1107c0e7b0b46c4 100644 (file)
@@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs)
 #define orig_r8_IS_SCALL               0x0001
 #define orig_r8_IS_SCALL_RESTARTED     0x0002
 #define orig_r8_IS_BRKPT               0x0004
-#define orig_r8_IS_EXCPN               0x0004
+#define orig_r8_IS_EXCPN               0x0008
 #define orig_r8_IS_IRQ1                        0x0010
 #define orig_r8_IS_IRQ2                        0x0020
 
index e53a5340ba4f2b91376f76ca7b4f5286567cd302..dd785befe7fd1eaa3b6b7d5df5ee8ef77e849587 100644 (file)
@@ -16,8 +16,6 @@
 #include <linux/types.h>
 
 int sys_clone_wrapper(int, int, int, int, int);
-int sys_fork_wrapper(void);
-int sys_vfork_wrapper(void);
 int sys_cacheflush(uint32_t, uint32_t uint32_t);
 int sys_arc_settls(void *);
 int sys_arc_gettls(void);
index 6afa4f70207529c5d9ae7d0caa69082108d154c9..30333cec0fef274365aeb559084d58a5ee023a9b 100644 (file)
 */
 struct user_regs_struct {
 
-       struct scratch {
+       struct {
                long pad;
                long bta, lp_start, lp_end, lp_count;
                long status32, ret, blink, fp, gp;
                long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
                long sp;
        } scratch;
-       struct callee {
+       struct {
                long pad;
                long r25, r24, r23, r22, r21, r20;
                long r19, r18, r17, r16, r15, r14, r13;
index ef6800ba2f03f6cce95a78e1db7b175f66d72e7c..91eeab81f52d3d66a408a215f06b4ff19022d364 100644 (file)
@@ -452,7 +452,7 @@ tracesys:
        ; using ERET won't work since next-PC has already committed
        lr  r12, [efa]
        GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r11
-       st  r12, [r11, THREAD_FAULT_ADDR]
+       st  r12, [r11, THREAD_FAULT_ADDR]       ; thread.fault_address
 
        ; PRE Sys Call Ptrace hook
        mov r0, sp                      ; pt_regs needed
@@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork
 
 ;################### Special Sys Call Wrappers ##########################
 
-; TBD: call do_fork directly from here
-ARC_ENTRY sys_fork_wrapper
-       SAVE_CALLEE_SAVED_USER
-       bl  @sys_fork
-       DISCARD_CALLEE_SAVED_USER
-
-       GET_CURR_THR_INFO_FLAGS   r10
-       btst r10, TIF_SYSCALL_TRACE
-       bnz  tracesys_exit
-
-       b ret_from_system_call
-ARC_EXIT sys_fork_wrapper
-
-ARC_ENTRY sys_vfork_wrapper
-       SAVE_CALLEE_SAVED_USER
-       bl  @sys_vfork
-       DISCARD_CALLEE_SAVED_USER
-
-       GET_CURR_THR_INFO_FLAGS   r10
-       btst r10, TIF_SYSCALL_TRACE
-       bnz  tracesys_exit
-
-       b ret_from_system_call
-ARC_EXIT sys_vfork_wrapper
-
 ARC_ENTRY sys_clone_wrapper
        SAVE_CALLEE_SAVED_USER
        bl  @sys_clone
index 2888ba5be47e4be4e796d0840abb9570253a416a..52bdc83c1495b5bf31a685998f7a5d7b9a1bd15e 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/kgdb.h>
+#include <linux/sched.h>
 #include <asm/disasm.h>
 #include <asm/cacheflush.h>
 
index dc0f968dae0aecfa96db2bc005a99c74bdabb71a..2d95ac07df7bde15169ff5058d7e53afd2f2d73e 100644 (file)
@@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 
        n += scnprintf(buf + n, len - n, "\n");
 
-#ifdef _ASM_GENERIC_UNISTD_H
        n += scnprintf(buf + n, len - n,
-                      "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
-#endif
+                      "OS ABI [v3]\t: no-legacy-syscalls\n");
 
        return buf;
 }
index f6bdd07583f3a2ed190fdc1de9ceff9e77bf2ba8..9d6c1ca26af6d8bf4c035ae5f6085d8bfb6bc0be 100644 (file)
@@ -6,8 +6,6 @@
 #include <asm/syscalls.h>
 
 #define sys_clone      sys_clone_wrapper
-#define sys_fork       sys_fork_wrapper
-#define sys_vfork      sys_vfork_wrapper
 
 #undef __SYSCALL
 #define __SYSCALL(nr, call) [nr] = (call),
index 2c3bdce151346f403ee171b2d85d601b1826b39d..13b739469c515cb93a8dff1cdf686583a3941c54 100644 (file)
@@ -49,7 +49,6 @@ config ARM
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_UID16
-       select VIRT_TO_BUS
        select KTIME_SCALAR
        select PERF_USE_VMALLOC
        select RTC_LIB
@@ -743,6 +742,7 @@ config ARCH_RPC
        select NEED_MACH_IO_H
        select NEED_MACH_MEMORY_H
        select NO_IOPORT
+       select VIRT_TO_BUS
        help
          On the Acorn Risc-PC, Linux can support the internal IDE disk and
          CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -878,6 +878,7 @@ config ARCH_SHARK
        select ISA_DMA
        select NEED_MACH_MEMORY_H
        select PCI
+       select VIRT_TO_BUS
        select ZONE_DMA
        help
          Support for the StrongARM based Digital DNARD machine, also known
@@ -1005,12 +1006,12 @@ config ARCH_MULTI_V4_V5
        bool
 
 config ARCH_MULTI_V6
-       bool "ARMv6 based platforms (ARM11, Scorpion, ...)"
+       bool "ARMv6 based platforms (ARM11)"
        select ARCH_MULTI_V6_V7
        select CPU_V6
 
 config ARCH_MULTI_V7
-       bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)"
+       bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)"
        default y
        select ARCH_MULTI_V6_V7
        select ARCH_VEXPRESS
@@ -1461,10 +1462,6 @@ config ISA_DMA
        bool
        select ISA_DMA_API
 
-config ARCH_NO_VIRT_TO_BUS
-       def_bool y
-       depends on !ARCH_RPC && !ARCH_NETWINDER && !ARCH_SHARK
-
 # Select ISA DMA interface
 config ISA_DMA_API
        bool
index ecfcdba2d17c5976c34d238b19fb39181b36883d..9b31f4311ea2717818d8a387d3e0131d335195d0 100644 (file)
@@ -495,6 +495,7 @@ config DEBUG_IMX_UART_PORT
                                                DEBUG_IMX53_UART || \
                                                DEBUG_IMX6Q_UART
        default 1
+       depends on ARCH_MXC
        help
          Choose UART port on which kernel low-level debug messages
          should be output.
index aa98e641931fa21ed76c627229da576289005d12..a98c0d50fbbe1ed80e6c29d5228716155f3399f5 100644 (file)
                                nand {
                                        pinctrl_nand: nand-0 {
                                                atmel,pins =
-                                                       <3 4 0x0 0x1    /* PD5 gpio RDY pin pull_up */
-                                                        3 5 0x0 0x1>;  /* PD4 gpio enable pin pull_up */
+                                                       <3 0 0x1 0x0    /* PD0 periph A Read Enable */
+                                                        3 1 0x1 0x0    /* PD1 periph A Write Enable */
+                                                        3 2 0x1 0x0    /* PD2 periph A Address Latch Enable */
+                                                        3 3 0x1 0x0    /* PD3 periph A Command Latch Enable */
+                                                        3 4 0x0 0x1    /* PD4 gpio Chip Enable pin pull_up */
+                                                        3 5 0x0 0x1    /* PD5 gpio RDY/BUSY pin pull_up */
+                                                        3 6 0x1 0x0    /* PD6 periph A Data bit 0 */
+                                                        3 7 0x1 0x0    /* PD7 periph A Data bit 1 */
+                                                        3 8 0x1 0x0    /* PD8 periph A Data bit 2 */
+                                                        3 9 0x1 0x0    /* PD9 periph A Data bit 3 */
+                                                        3 10 0x1 0x0   /* PD10 periph A Data bit 4 */
+                                                        3 11 0x1 0x0   /* PD11 periph A Data bit 5 */
+                                                        3 12 0x1 0x0   /* PD12 periph A Data bit 6 */
+                                                        3 13 0x1 0x0>; /* PD13 periph A Data bit 7 */
+                                       };
+
+                                       pinctrl_nand_16bits: nand_16bits-0 {
+                                               atmel,pins =
+                                                       <3 14 0x1 0x0   /* PD14 periph A Data bit 8 */
+                                                        3 15 0x1 0x0   /* PD15 periph A Data bit 9 */
+                                                        3 16 0x1 0x0   /* PD16 periph A Data bit 10 */
+                                                        3 17 0x1 0x0   /* PD17 periph A Data bit 11 */
+                                                        3 18 0x1 0x0   /* PD18 periph A Data bit 12 */
+                                                        3 19 0x1 0x0   /* PD19 periph A Data bit 13 */
+                                                        3 20 0x1 0x0   /* PD20 periph A Data bit 14 */
+                                                        3 21 0x1 0x0>; /* PD21 periph A Data bit 15 */
                                        };
                                };
 
index e1347fceb5bc44c269e62d85c97a36249a76a073..1a62bcf18aa3cb942b4808d02fd007c7b995850b 100644 (file)
                        compatible = "arm,pl330", "arm,primecell";
                        reg = <0x12680000 0x1000>;
                        interrupts = <0 35 0>;
+                       #dma-cells = <1>;
+                       #dma-channels = <8>;
+                       #dma-requests = <32>;
                };
 
                pdma1: pdma@12690000 {
                        compatible = "arm,pl330", "arm,primecell";
                        reg = <0x12690000 0x1000>;
                        interrupts = <0 36 0>;
+                       #dma-cells = <1>;
+                       #dma-channels = <8>;
+                       #dma-requests = <32>;
                };
 
                mdma1: mdma@12850000 {
                        compatible = "arm,pl330", "arm,primecell";
                        reg = <0x12850000 0x1000>;
                        interrupts = <0 34 0>;
+                       #dma-cells = <1>;
+                       #dma-channels = <8>;
+                       #dma-requests = <1>;
                };
        };
 };
index 5f3562ad67463a89a2bac80a81b1ac01c46907af..9a99755920c04e52f58b29bb5ca684606d6d55e6 100644 (file)
                        compatible = "arm,pl330", "arm,primecell";
                        reg = <0x120000 0x1000>;
                        interrupts = <0 34 0>;
+                       #dma-cells = <1>;
+                       #dma-channels = <8>;
+                       #dma-requests = <32>;
                };
 
                pdma1: pdma@121B0000 {
                        compatible = "arm,pl330", "arm,primecell";
                        reg = <0x121000 0x1000>;
                        interrupts = <0 35 0>;
+                       #dma-cells = <1>;
+                       #dma-channels = <8>;
+                       #dma-requests = <32>;
                };
        };
 
index 48d00a099ce38d75d0df6ceb513078383c8d54e5..3d3f64d2111a33fb415979c2a1b911c2f7b37a66 100644 (file)
 
        spi@7000d800 {
                compatible = "nvidia,tegra20-slink";
-               reg = <0x7000d480 0x200>;
+               reg = <0x7000d800 0x200>;
                interrupts = <0 83 0x04>;
                nvidia,dma-request-selector = <&apbdma 17>;
                #address-cells = <1>;
index 9d87a3ffe9980a140585d063dbb9be5f2fbfa482..dbf46c27256255fd35ffaf6501a314f50668af3c 100644 (file)
 
        spi@7000d800 {
                compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
-               reg = <0x7000d480 0x200>;
+               reg = <0x7000d800 0x200>;
                interrupts = <0 83 0x04>;
                nvidia,dma-request-selector = <&apbdma 17>;
                #address-cells = <1>;
index 31644f1978d56d8e6fad053804ed8d730bff9003..79078edbb9bc12d38bb144a88754b83390429c95 100644 (file)
@@ -480,7 +480,7 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
        evt->features   = CLOCK_EVT_FEAT_ONESHOT |
                          CLOCK_EVT_FEAT_PERIODIC |
                          CLOCK_EVT_FEAT_DUMMY;
-       evt->rating     = 400;
+       evt->rating     = 100;
        evt->mult       = 1;
        evt->set_mode   = broadcast_timer_set_mode;
 
index d912e7397ecc94a190a132b649f667b94774c1b9..94b0650ea98fd42492c280d20c7610382a2f4a81 100644 (file)
 
        .text
        .align  5
-       .word   0
-
-1:     subs    r2, r2, #4              @ 1 do we have enough
-       blt     5f                      @ 1 bytes to align with?
-       cmp     r3, #2                  @ 1
-       strltb  r1, [ip], #1            @ 1
-       strleb  r1, [ip], #1            @ 1
-       strb    r1, [ip], #1            @ 1
-       add     r2, r2, r3              @ 1 (r2 = r2 - (4 - r3))
-/*
- * The pointer is now aligned and the length is adjusted.  Try doing the
- * memset again.
- */
 
 ENTRY(memset)
-/*
- * Preserve the contents of r0 for the return value.
- */
-       mov     ip, r0
-       ands    r3, ip, #3              @ 1 unaligned?
-       bne     1b                      @ 1
+       ands    r3, r0, #3              @ 1 unaligned?
+       mov     ip, r0                  @ preserve r0 as return value
+       bne     6f                      @ 1
 /*
  * we know that the pointer in ip is aligned to a word boundary.
  */
-       orr     r1, r1, r1, lsl #8
+1:     orr     r1, r1, r1, lsl #8
        orr     r1, r1, r1, lsl #16
        mov     r3, r1
        cmp     r2, #16
@@ -127,4 +111,13 @@ ENTRY(memset)
        tst     r2, #1
        strneb  r1, [ip], #1
        mov     pc, lr
+
+6:     subs    r2, r2, #4              @ 1 do we have enough
+       blt     5b                      @ 1 bytes to align with?
+       cmp     r3, #2                  @ 1
+       strltb  r1, [ip], #1            @ 1
+       strleb  r1, [ip], #1            @ 1
+       strb    r1, [ip], #1            @ 1
+       add     r2, r2, r3              @ 1 (r2 = r2 - (4 - r3))
+       b       1b
 ENDPROC(memset)
index eed465ab0dd7d14c589880f4a242ce74138e7030..5fc23771c15455a4874211ba49ae2b012ffe6566 100644 (file)
@@ -209,6 +209,14 @@ extern int at91_get_gpio_value(unsigned pin);
 extern void at91_gpio_suspend(void);
 extern void at91_gpio_resume(void);
 
+#ifdef CONFIG_PINCTRL_AT91
+extern void at91_pinctrl_gpio_suspend(void);
+extern void at91_pinctrl_gpio_resume(void);
+#else
+static inline void at91_pinctrl_gpio_suspend(void) {}
+static inline void at91_pinctrl_gpio_resume(void) {}
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif
index 8e210262aeee59c3aef9ea13ab2a8d4e5d0af120..e0ca59171022fb8f2cee7a488cc2ee622c238384 100644 (file)
@@ -92,23 +92,21 @@ static int at91_aic_set_wake(struct irq_data *d, unsigned value)
 
 void at91_irq_suspend(void)
 {
-       int i = 0, bit;
+       int bit = -1;
 
        if (has_aic5()) {
                /* disable enabled irqs */
-               while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+               while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
                        at91_aic_write(AT91_AIC5_SSR,
                                       bit & AT91_AIC5_INTSEL_MSK);
                        at91_aic_write(AT91_AIC5_IDCR, 1);
-                       i = bit;
                }
                /* enable wakeup irqs */
-               i = 0;
-               while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+               bit = -1;
+               while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
                        at91_aic_write(AT91_AIC5_SSR,
                                       bit & AT91_AIC5_INTSEL_MSK);
                        at91_aic_write(AT91_AIC5_IECR, 1);
-                       i = bit;
                }
        } else {
                at91_aic_write(AT91_AIC_IDCR, *backups);
@@ -118,23 +116,21 @@ void at91_irq_suspend(void)
 
 void at91_irq_resume(void)
 {
-       int i = 0, bit;
+       int bit = -1;
 
        if (has_aic5()) {
                /* disable wakeup irqs */
-               while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+               while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
                        at91_aic_write(AT91_AIC5_SSR,
                                       bit & AT91_AIC5_INTSEL_MSK);
                        at91_aic_write(AT91_AIC5_IDCR, 1);
-                       i = bit;
                }
                /* enable irqs disabled for suspend */
-               i = 0;
-               while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+               bit = -1;
+               while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
                        at91_aic_write(AT91_AIC5_SSR,
                                       bit & AT91_AIC5_INTSEL_MSK);
                        at91_aic_write(AT91_AIC5_IECR, 1);
-                       i = bit;
                }
        } else {
                at91_aic_write(AT91_AIC_IDCR, *wakeups);
index adb6db888a1f1ebda8a088e9ae1e5780ba4b4555..73f1f250403a68575b78e02a8e8d72078ca42375 100644 (file)
@@ -201,7 +201,10 @@ extern u32 at91_slow_clock_sz;
 
 static int at91_pm_enter(suspend_state_t state)
 {
-       at91_gpio_suspend();
+       if (of_have_populated_dt())
+               at91_pinctrl_gpio_suspend();
+       else
+               at91_gpio_suspend();
        at91_irq_suspend();
 
        pr_debug("AT91: PM - wake mask %08x, pm state %d\n",
@@ -286,7 +289,10 @@ static int at91_pm_enter(suspend_state_t state)
 error:
        target_state = PM_SUSPEND_ON;
        at91_irq_resume();
-       at91_gpio_resume();
+       if (of_have_populated_dt())
+               at91_pinctrl_gpio_resume();
+       else
+               at91_gpio_resume();
        return 0;
 }
 
index a685e9706b7ba305b462b103899398b3521f2917..45b7c71d9cc1966bb514862364b5180cdd307060 100644 (file)
@@ -743,6 +743,9 @@ EXPORT_SYMBOL(edma_free_channel);
  */
 int edma_alloc_slot(unsigned ctlr, int slot)
 {
+       if (!edma_cc[ctlr])
+               return -EINVAL;
+
        if (slot >= 0)
                slot = EDMA_CHAN_SLOT(slot);
 
index abda5a18a664734a5d29a015cfabfbe7d0a5c26a..0f2111a11315853a046284b71b290890f3ae8fdd 100644 (file)
@@ -67,6 +67,7 @@ config ARCH_NETWINDER
        select ISA
        select ISA_DMA
        select PCI
+       select VIRT_TO_BUS
        help
          Say Y here if you intend to run this kernel on the Rebel.COM
          NetWinder.  Information about this machine can be found at:
index 74e3a34d78b80a25be2edeb9afcc2f027f610368..e13a8fa5e62c5aa4f04b0aba66f4e77df0ac24ce 100644 (file)
@@ -264,6 +264,7 @@ int __init mx35_clocks_init(void)
        clk_prepare_enable(clk[gpio3_gate]);
        clk_prepare_enable(clk[iim_gate]);
        clk_prepare_enable(clk[emi_gate]);
+       clk_prepare_enable(clk[max_gate]);
 
        /*
         * SCC is needed to boot via mmc after a watchdog reset. The clock code
index 03b65e5ea541613a062ea5719b32ab045b9e723f..82348391582a0944108a8d3ab06a4abb81771e20 100644 (file)
@@ -27,6 +27,11 @@ static const char * const imx25_dt_board_compat[] __initconst = {
        NULL
 };
 
+static void __init imx25_timer_init(void)
+{
+       mx25_clocks_init_dt();
+}
+
 DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
        .map_io         = mx25_map_io,
        .init_early     = imx25_init_early,
index d1e2d595e79cafd44b737d49904b07b65042d3fa..f62b68d926f4237bf70911ba43487d27c98d4e97 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/platform_device.h>
 #include <linux/gpio.h>
 
 #include <asm/mach/arch.h>
index 3218f1f2c0e05dfe3b067324d1fecefadb86ae29..e7b781d3788f2c9a9e896ef75b502f789fd15d64 100644 (file)
@@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = {
                .lower_margin   = 4,
                .hsync_len      = 1,
                .vsync_len      = 1,
-               .sync           = FB_SYNC_DATA_ENABLE_HIGH_ACT |
-                                 FB_SYNC_DOTCLK_FAILING_ACT,
        },
 };
 
@@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = {
                .lower_margin   = 10,
                .hsync_len      = 10,
                .vsync_len      = 10,
-               .sync           = FB_SYNC_DATA_ENABLE_HIGH_ACT |
-                                 FB_SYNC_DOTCLK_FAILING_ACT,
        },
 };
 
@@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = {
                .lower_margin   = 45,
                .hsync_len      = 1,
                .vsync_len      = 1,
-               .sync           = FB_SYNC_DATA_ENABLE_HIGH_ACT,
        },
 };
 
@@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = {
                .lower_margin   = 13,
                .hsync_len      = 48,
                .vsync_len      = 3,
-               .sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
-                                 FB_SYNC_DATA_ENABLE_HIGH_ACT |
-                                 FB_SYNC_DOTCLK_FAILING_ACT,
+               .sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
        },
 };
 
@@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = {
                .lower_margin = 0x15,
                .hsync_len = 64,
                .vsync_len = 4,
-               .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
-                               FB_SYNC_DATA_ENABLE_HIGH_ACT |
-                               FB_SYNC_DOTCLK_FAILING_ACT,
+               .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
        },
 };
 
@@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = {
                .lower_margin   = 2,
                .hsync_len      = 15,
                .vsync_len      = 15,
-               .sync           = FB_SYNC_DATA_ENABLE_HIGH_ACT
        },
 };
 
@@ -259,6 +249,8 @@ static void __init imx23_evk_init(void)
        mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes);
        mxsfb_pdata.default_bpp = 32;
        mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+       mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+                               MXSFB_SYNC_DOTCLK_FAILING_ACT;
 }
 
 static inline void enable_clk_enet_out(void)
@@ -278,6 +270,8 @@ static void __init imx28_evk_init(void)
        mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes);
        mxsfb_pdata.default_bpp = 32;
        mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+       mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+                               MXSFB_SYNC_DOTCLK_FAILING_ACT;
 
        mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0);
 }
@@ -297,6 +291,7 @@ static void __init m28evk_init(void)
        mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes);
        mxsfb_pdata.default_bpp = 16;
        mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
+       mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
 }
 
 static void __init sc_sps1_init(void)
@@ -322,6 +317,8 @@ static void __init apx4devkit_init(void)
        mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes);
        mxsfb_pdata.default_bpp = 32;
        mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+       mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+                               MXSFB_SYNC_DOTCLK_FAILING_ACT;
 }
 
 #define ENET0_MDC__GPIO_4_0    MXS_GPIO_NR(4, 0)
@@ -407,6 +404,7 @@ static void __init cfa10049_init(void)
        mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
        mxsfb_pdata.default_bpp = 32;
        mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
+       mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
 }
 
 static void __init cfa10037_init(void)
@@ -423,6 +421,8 @@ static void __init apf28_init(void)
        mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes);
        mxsfb_pdata.default_bpp = 16;
        mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT;
+       mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+                               MXSFB_SYNC_DOTCLK_FAILING_ACT;
 }
 
 static void __init mxs_machine_init(void)
index fcdf52dbcc49f9bb8525790bc77953db7f452b73..f051f53e35b7bcb3d2ec85af45d2e4d8071fa821 100644 (file)
@@ -214,11 +214,6 @@ static struct clk clk_pcmcdclk2 = {
        .name           = "pcmcdclk",
 };
 
-static struct clk dummy_apb_pclk = {
-       .name           = "apb_pclk",
-       .id             = -1,
-};
-
 static struct clk *clkset_vpllsrc_list[] = {
        [0] = &clk_fin_vpll,
        [1] = &clk_sclk_hdmi27m,
@@ -305,18 +300,6 @@ static struct clk_ops clk_fout_apll_ops = {
 
 static struct clk init_clocks_off[] = {
        {
-               .name           = "dma",
-               .devname        = "dma-pl330.0",
-               .parent         = &clk_hclk_psys.clk,
-               .enable         = s5pv210_clk_ip0_ctrl,
-               .ctrlbit        = (1 << 3),
-       }, {
-               .name           = "dma",
-               .devname        = "dma-pl330.1",
-               .parent         = &clk_hclk_psys.clk,
-               .enable         = s5pv210_clk_ip0_ctrl,
-               .ctrlbit        = (1 << 4),
-       }, {
                .name           = "rot",
                .parent         = &clk_hclk_dsys.clk,
                .enable         = s5pv210_clk_ip0_ctrl,
@@ -573,6 +556,20 @@ static struct clk clk_hsmmc3 = {
        .ctrlbit        = (1<<19),
 };
 
+static struct clk clk_pdma0 = {
+       .name           = "pdma0",
+       .parent         = &clk_hclk_psys.clk,
+       .enable         = s5pv210_clk_ip0_ctrl,
+       .ctrlbit        = (1 << 3),
+};
+
+static struct clk clk_pdma1 = {
+       .name           = "pdma1",
+       .parent         = &clk_hclk_psys.clk,
+       .enable         = s5pv210_clk_ip0_ctrl,
+       .ctrlbit        = (1 << 4),
+};
+
 static struct clk *clkset_uart_list[] = {
        [6] = &clk_mout_mpll.clk,
        [7] = &clk_mout_epll.clk,
@@ -1075,6 +1072,8 @@ static struct clk *clk_cdev[] = {
        &clk_hsmmc1,
        &clk_hsmmc2,
        &clk_hsmmc3,
+       &clk_pdma0,
+       &clk_pdma1,
 };
 
 /* Clock initialisation code */
@@ -1333,6 +1332,8 @@ static struct clk_lookup s5pv210_clk_lookup[] = {
        CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
        CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
        CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
+       CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0),
+       CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1),
 };
 
 void __init s5pv210_register_clocks(void)
@@ -1361,6 +1362,5 @@ void __init s5pv210_register_clocks(void)
        for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++)
                s3c_disable_clocks(clk_cdev[ptr], 1);
 
-       s3c24xx_register_clock(&dummy_apb_pclk);
        s3c_pwmclk_init();
 }
index 3a38f7b34b9400e52293456729a0b51c8cd29051..e373de44a8b6f473672e1f10cd1d0a95287182aa 100644 (file)
@@ -845,7 +845,7 @@ static struct fimc_source_info goni_camera_sensors[] = {
                .mux_id         = 0,
                .flags          = V4L2_MBUS_PCLK_SAMPLE_FALLING |
                                  V4L2_MBUS_VSYNC_ACTIVE_LOW,
-               .bus_type       = FIMC_BUS_TYPE_ITU_601,
+               .fimc_bus_type  = FIMC_BUS_TYPE_ITU_601,
                .board_info     = &noon010pc30_board_info,
                .i2c_bus_num    = 0,
                .clk_frequency  = 16000000UL,
index cdcb799e802f0e2e5bcdaeaf42e50107a18b0197..fec49ebc359a56497b1aa7cf819177edd0293728 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/smsc911x.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/sh_hspi.h>
+#include <linux/mmc/host.h>
 #include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mfd/tmio.h>
 #include <linux/usb/otg.h>
index 6828ef6ce80e69c5ce360190917abd75a3028041..a0bd8a755bdfc9616c0515116d37dc41bc16e812 100644 (file)
@@ -576,7 +576,7 @@ load_ind:
                        /* x = ((*(frame + k)) & 0xf) << 2; */
                        ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
                        /* the interpreter should deal with the negative K */
-                       if (k < 0)
+                       if ((int)k < 0)
                                return -1;
                        /* offset in r1: we might have to take the slow path */
                        emit_mov_i(r_off, k, ctx);
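
K is carried as a u32 in this JIT, so the old "if (k < 0)" compared an
unsigned value against zero and could never be true; the cast restores the
intended bail-out for negative K, which the comment above says is left to the
interpreter. A stand-alone illustration in plain userspace C (the value is
made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t k = UINT32_MAX - 11;   /* bit pattern of -12 */

        /* unsigned compare: always 0 (compilers even warn it is always false) */
        printf("k < 0      -> %d\n", k < 0);
        /* signed view: 1, so the JIT would punt this packet to the interpreter */
        printf("(int)k < 0 -> %d\n", (int32_t)k < 0);
        return 0;
}
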
index fd70a68387ebe5d3baeec3b18adf93f5523e4661..9b6d19f74078e0a2118e6fbd1c517e5f4f82203c 100644 (file)
@@ -9,7 +9,6 @@ config ARM64
        select CLONE_BACKWARDS
        select COMMON_CLK
        select GENERIC_CLOCKEVENTS
-       select GENERIC_HARDIRQS_NO_DEPRECATED
        select GENERIC_IOMAP
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
index 51493430f142d8c1cce428826bb8371d98250916..1a6bfe954d4926de3a8dcee10fdbe0b0698279b3 100644 (file)
@@ -6,17 +6,6 @@ config FRAME_POINTER
        bool
        default y
 
-config DEBUG_ERRORS
-       bool "Verbose kernel error messages"
-       depends on DEBUG_KERNEL
-       help
-         This option controls verbose debugging information which can be
-         printed when the kernel detects an internal error. This debugging
-         information is useful to kernel hackers when tracking down problems,
-         but mostly meaningless to other people. It's safe to say Y unless
-         you are concerned with the code size or don't want to see these
-         messages.
-
 config DEBUG_STACK_USAGE
        bool "Enable stack utilization instrumentation"
        depends on DEBUG_KERNEL
index 9212c7880da742e54e0bebbe3709ef941aa6282d..09bef29f3a09aa02a9441df34773670649cd19c2 100644 (file)
@@ -82,4 +82,3 @@ CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_INFO=y
 # CONFIG_FTRACE is not set
 CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_DEBUG_ERRORS=y
index bde960720892a694bda151a3288c9ecd30c8954d..42e04c87742837e1e204492a3a675dfae29fea4f 100644 (file)
@@ -22,7 +22,7 @@ struct ucontext {
        stack_t           uc_stack;
        sigset_t          uc_sigmask;
        /* glibc uses a 1024-bit sigset_t */
-       __u8              __unused[(1024 - sizeof(sigset_t)) / 8];
+       __u8              __unused[1024 / 8 - sizeof(sigset_t)];
        /* last for future expansion */
        struct sigcontext uc_mcontext;
 };
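
Worked through with the arm64 kernel sigset_t, which is 8 bytes (64 signals):
the old expression reserved (1024 - 8) / 8 = 127 bytes of padding, so
uc_sigmask plus __unused spanned 135 bytes and pushed uc_mcontext past the
offset glibc expects; the new expression reserves 1024 / 8 - 8 = 120 bytes, so
the pair covers exactly the 128 bytes (1024 bits) that glibc's sigset_t
occupies.
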
index cef3925eaf60d3367c610df3db9769d87dfe918d..aa3e948f78857bb1e64278c79e57916abbb2e5a4 100644 (file)
@@ -40,7 +40,9 @@ EXPORT_SYMBOL(__copy_to_user);
 EXPORT_SYMBOL(__clear_user);
 
        /* bitops */
+#ifdef CONFIG_SMP
 EXPORT_SYMBOL(__atomic_hash);
+#endif
 
        /* physical memory */
 EXPORT_SYMBOL(memstart_addr);
index 7f4f3673f2bc0e1ad4f1d0dd5f978cbb1b7f4255..e393174fe859721e7f6538bc377577cb69bb68aa 100644 (file)
@@ -549,7 +549,6 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
                          sigset_t *set, struct pt_regs *regs)
 {
        struct compat_rt_sigframe __user *frame;
-       compat_stack_t stack;
        int err = 0;
 
        frame = compat_get_sigframe(ka, regs, sizeof(*frame));
index 224b44ab534ee4682ffb004e50c3d92b86a39a85..70b8cd4021c46cb800cbf76bd21a001e2e73e86c 100644 (file)
@@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
 {
        unsigned long size, mask;
-       bool page64k = IS_ENABLED(ARM64_64K_PAGES);
+       bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
index e34f565f595ae2d54613309af8013b2e735e9881..6f7dc8b7b35ccc8ba435caa08ed1a8ab8e7863a5 100644 (file)
@@ -291,7 +291,6 @@ cpu_idle (void)
                }
 
                if (!need_resched()) {
-                       void (*idle)(void);
 #ifdef CONFIG_SMP
                        min_xtp();
 #endif
@@ -299,9 +298,7 @@ cpu_idle (void)
                        if (mark_idle)
                                (*mark_idle)(1);
 
-                       if (!idle)
-                               idle = default_idle;
-                       (*idle)();
+                       default_idle();
                        if (mark_idle)
                                (*mark_idle)(0);
 #ifdef CONFIG_SMP
index 80821512e9cc13327b4438d6ba96317b930097e9..ea5bb045983a19070958b003a8408841bce35a7c 100644 (file)
@@ -90,6 +90,7 @@ config GENERIC_GPIO
 config PPC
        bool
        default y
+       select BINFMT_ELF
        select OF
        select OF_EARLY_FLATTREE
        select HAVE_FTRACE_MCOUNT_RECORD
index 2fdb47a19efda298b8ddc3749f174be75e1e7a63..b59e06f507ea6c3f3af39688752e20a23096b666 100644 (file)
@@ -343,17 +343,16 @@ extern void slb_set_size(u16 size);
 /*
  * VSID allocation (256MB segment)
  *
- * We first generate a 38-bit "proto-VSID".  For kernel addresses this
- * is equal to the ESID | 1 << 37, for user addresses it is:
- *     (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
+ * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
+ * from mmu context id and effective segment id of the address.
  *
- * This splits the proto-VSID into the below range
- *  0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range
- *  2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range
- *
- * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
- * That is, we assign half of the space to user processes and half
- * to the kernel.
+ * For user processes max context id is limited to ((1ul << 19) - 5).
+ * For kernel space, we use the top 4 context ids to map addresses as below.
+ * NOTE: each context only supports 64TB for now.
+ * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
  *
  * The proto-VSIDs are then scrambled into real VSIDs with the
  * multiplicative hash:
@@ -363,41 +362,49 @@ extern void slb_set_size(u16 size);
  * VSID_MULTIPLIER is prime, so in particular it is
  * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
  * Because the modulus is 2^n-1 we can compute it efficiently without
- * a divide or extra multiply (see below).
- *
- * This scheme has several advantages over older methods:
- *
- *     - We have VSIDs allocated for every kernel address
- * (i.e. everything above 0xC000000000000000), except the very top
- * segment, which simplifies several things.
+ * a divide or extra multiply (see below). The scramble function gives
+ * robust scattering in the hash table (at least based on some initial
+ * results).
  *
- *     - We allow for USER_ESID_BITS significant bits of ESID and
- * CONTEXT_BITS  bits of context for user addresses.
- *  i.e. 64T (46 bits) of address space for up to half a million contexts.
+ * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
+ * bad address. This enables us to consolidate bad address handling in
+ * hash_page.
  *
- *     - The scramble function gives robust scattering in the hash
- * table (at least based on some initial results).  The previous
- * method was more susceptible to pathological cases giving excessive
- * hash collisions.
+ * We also need to avoid the last segment of the last context, because that
+ * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
+ * because of the modulo operation in vsid scramble. But the vmemmap
+ * (which is what uses region 0xf) will never be close to 64TB in size
+ * (it's 56 bytes per page of system memory).
  */
 
+#define CONTEXT_BITS           19
+#define ESID_BITS              18
+#define ESID_BITS_1T           6
+
+/*
+ * 256MB segment
+ * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
+ * available for user + kernel mapping. The top 4 contexts are used for
+ * kernel mapping. Each segment contains 2^28 bytes. Each
+ * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
+ * (19 == 37 + 28 - 46).
+ */
+#define MAX_USER_CONTEXT       ((ASM_CONST(1) << CONTEXT_BITS) - 5)
+
 /*
  * This should be computed such that protovsid * vsid_multiplier
  * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
  */
 #define VSID_MULTIPLIER_256M   ASM_CONST(12538073)     /* 24-bit prime */
-#define VSID_BITS_256M         38
+#define VSID_BITS_256M         (CONTEXT_BITS + ESID_BITS)
 #define VSID_MODULUS_256M      ((1UL<<VSID_BITS_256M)-1)
 
 #define VSID_MULTIPLIER_1T     ASM_CONST(12538073)     /* 24-bit prime */
-#define VSID_BITS_1T           26
+#define VSID_BITS_1T           (CONTEXT_BITS + ESID_BITS_1T)
 #define VSID_MODULUS_1T                ((1UL<<VSID_BITS_1T)-1)
 
-#define CONTEXT_BITS           19
-#define USER_ESID_BITS         18
-#define USER_ESID_BITS_1T      6
 
-#define USER_VSID_RANGE        (1UL << (USER_ESID_BITS + SID_SHIFT))
+#define USER_VSID_RANGE        (1UL << (ESID_BITS + SID_SHIFT))
 
 /*
  * This macro generates asm code to compute the VSID scramble
@@ -421,7 +428,8 @@ extern void slb_set_size(u16 size);
        srdi    rx,rt,VSID_BITS_##size;                                 \
        clrldi  rt,rt,(64-VSID_BITS_##size);                            \
        add     rt,rt,rx;               /* add high and low bits */     \
-       /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and         \
+       /* NOTE: explanation based on VSID_BITS_##size = 36             \
+        * Now, r3 == VSID (mod 2^36-1), and lies between 0 and         \
         * 2^36-1+2^28-1.  That in particular means that if r3 >=       \
         * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has     \
         * the bit clear, r3 already has the answer we want, if it      \
@@ -513,34 +521,6 @@ typedef struct {
        })
 #endif /* 1 */
 
-/*
- * This is only valid for addresses >= PAGE_OFFSET
- * The proto-VSID space is divided into two class
- * User:   0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
- * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
- *
- * With KERNEL_START at 0xc000000000000000, the proto vsid for
- * the kernel ends up with 0xc00000000 (36 bits). With 64TB
- * support we need to have kernel proto-VSID in the
- * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
- */
-static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
-{
-       unsigned long proto_vsid;
-       /*
-        * We need to make sure proto_vsid for the kernel is
-        * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
-        */
-       if (ssize == MMU_SEGSIZE_256M) {
-               proto_vsid = ea >> SID_SHIFT;
-               proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
-               return vsid_scramble(proto_vsid, 256M);
-       }
-       proto_vsid = ea >> SID_SHIFT_1T;
-       proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
-       return vsid_scramble(proto_vsid, 1T);
-}
-
 /* Returns the segment size indicator for a user address */
 static inline int user_segment_size(unsigned long addr)
 {
@@ -550,17 +530,41 @@ static inline int user_segment_size(unsigned long addr)
        return MMU_SEGSIZE_256M;
 }
 
-/* This is only valid for user addresses (which are below 2^44) */
 static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
                                     int ssize)
 {
+       /*
+        * Bad address. We return VSID 0 for that
+        */
+       if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
+               return 0;
+
        if (ssize == MMU_SEGSIZE_256M)
-               return vsid_scramble((context << USER_ESID_BITS)
+               return vsid_scramble((context << ESID_BITS)
                                     | (ea >> SID_SHIFT), 256M);
-       return vsid_scramble((context << USER_ESID_BITS_1T)
+       return vsid_scramble((context << ESID_BITS_1T)
                             | (ea >> SID_SHIFT_1T), 1T);
 }
 
+/*
+ * This is only valid for addresses >= PAGE_OFFSET
+ *
+ * For kernel space, we use the top 4 context ids to map address as below
+ * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
+ */
+static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
+{
+       unsigned long context;
+
+       /*
+        * the kernel takes the top 4 contexts from the available range
+        */
+       context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
+       return get_vsid(context, ea, ssize);
+}
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_MMU_HASH64_H_ */
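
The comments above describe two separable pieces: a divide-free reduction
modulo 2^n - 1 inside the scramble, and the use of the top four context ids
for the kernel regions 0xc-0xf. A compact stand-alone C sketch of both, for
illustration only; it borrows the constants defined above but is not the
kernel's implementation (that lives in vsid_scramble() and ASM_VSID_SCRAMBLE):

#include <stdint.h>

#define CONTEXT_BITS      19
#define ESID_BITS         18
#define SID_SHIFT         28                           /* 256MB segments */
#define VSID_BITS_256M    (CONTEXT_BITS + ESID_BITS)   /* 37 */
#define VSID_MULTIPLIER   12538073ULL                  /* 24-bit prime */
#define VSID_MODULUS      ((1ULL << VSID_BITS_256M) - 1)
#define MAX_USER_CONTEXT  ((1ULL << CONTEXT_BITS) - 5) /* 0x7fffb */

static uint64_t vsid_scramble_256m(uint64_t proto_vsid)
{
        /* product is at most 37 + 24 bits, so it cannot overflow 64 bits */
        uint64_t x = proto_vsid * VSID_MULTIPLIER;

        /* a mod (2^n - 1): fold the high bits back into the low n bits,
         * then correct with at most one subtraction, avoiding the divide.
         */
        x = (x >> VSID_BITS_256M) + (x & VSID_MODULUS);
        if (x >= VSID_MODULUS)
                x -= VSID_MODULUS;
        return x;
}

/* Kernel mapping: context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1,
 * e.g. ea = 0xd000000000000000 gives 0x7fffb + 1 + 1 = 0x7fffd, matching
 * the table above.
 */
static uint64_t kernel_vsid_256m(uint64_t ea)
{
        uint64_t context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1;
        uint64_t esid = (ea & ((1ULL << 46) - 1)) >> SID_SHIFT;

        return vsid_scramble_256m((context << ESID_BITS) | esid);
}
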
index 75a3d71b895d985e20307bdd90dda88e4449ed9c..19599ef352bc89ddb8039a65cf22419d82c25c9f 100644 (file)
@@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .cpu_features           = CPU_FTRS_PPC970,
                .cpu_user_features      = COMMON_USER_POWER4 |
                        PPC_FEATURE_HAS_ALTIVEC_COMP,
-               .mmu_features           = MMU_FTR_HPTE_TABLE,
+               .mmu_features           = MMU_FTRS_PPC970,
                .icache_bsize           = 128,
                .dcache_bsize           = 128,
                .num_pmcs               = 8,
index f3eab8594d9f873432deb94c770db354f20ffc76..d44a571e45a79dae9b7976c7e281c93021b2847e 100644 (file)
 #include <asm/code-patching.h>
 #include <asm/machdep.h>
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
 extern void epapr_ev_idle(void);
 extern u32 epapr_ev_idle_start[];
+#endif
 
 bool epapr_paravirt_enabled;
 
@@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void)
 
        for (i = 0; i < (len / 4); i++) {
                patch_instruction(epapr_hypercall_start + i, insts[i]);
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
                patch_instruction(epapr_ev_idle_start + i, insts[i]);
+#endif
        }
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
        if (of_get_property(hyper_node, "has-idle", NULL))
                ppc_md.power_save = epapr_ev_idle;
+#endif
 
        epapr_paravirt_enabled = true;
 
index 87ef8f5ee5bc52242af82aec893b274a6a8ec47d..56bd92362ce131e8742f33d56ff8613e05bcced0 100644 (file)
@@ -1066,78 +1066,6 @@ unrecov_user_slb:
 #endif /* __DISABLED__ */
 
 
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r12 contain the saved SRR1, SRR0 is still ready for return
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-_GLOBAL(slb_miss_realmode)
-       mflr    r10
-#ifdef CONFIG_RELOCATABLE
-       mtctr   r11
-#endif
-
-       stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
-       std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
-
-       bl      .slb_allocate_realmode
-
-       /* All done -- return from exception. */
-
-       ld      r10,PACA_EXSLB+EX_LR(r13)
-       ld      r3,PACA_EXSLB+EX_R3(r13)
-       lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
-
-       mtlr    r10
-
-       andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
-       beq-    2f
-
-.machine       push
-.machine       "power4"
-       mtcrf   0x80,r9
-       mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
-.machine       pop
-
-       RESTORE_PPR_PACA(PACA_EXSLB, r9)
-       ld      r9,PACA_EXSLB+EX_R9(r13)
-       ld      r10,PACA_EXSLB+EX_R10(r13)
-       ld      r11,PACA_EXSLB+EX_R11(r13)
-       ld      r12,PACA_EXSLB+EX_R12(r13)
-       ld      r13,PACA_EXSLB+EX_R13(r13)
-       rfid
-       b       .       /* prevent speculative execution */
-
-2:     mfspr   r11,SPRN_SRR0
-       ld      r10,PACAKBASE(r13)
-       LOAD_HANDLER(r10,unrecov_slb)
-       mtspr   SPRN_SRR0,r10
-       ld      r10,PACAKMSR(r13)
-       mtspr   SPRN_SRR1,r10
-       rfid
-       b       .
-
-unrecov_slb:
-       EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
-       DISABLE_INTS
-       bl      .save_nvgprs
-1:     addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      .unrecoverable_exception
-       b       1b
-
-
-#ifdef CONFIG_PPC_970_NAP
-power4_fixup_nap:
-       andc    r9,r9,r10
-       std     r9,TI_LOCAL_FLAGS(r11)
-       ld      r10,_LINK(r1)           /* make idle task do the */
-       std     r10,_NIP(r1)            /* equivalent of a blr */
-       blr
-#endif
-
        .align  7
        .globl alignment_common
 alignment_common:
@@ -1335,6 +1263,78 @@ _GLOBAL(opal_mc_secondary_handler)
 #endif /* CONFIG_PPC_POWERNV */
 
 
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r12 contain the saved SRR1, SRR0 is still ready for return
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(slb_miss_realmode)
+       mflr    r10
+#ifdef CONFIG_RELOCATABLE
+       mtctr   r11
+#endif
+
+       stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
+       std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
+
+       bl      .slb_allocate_realmode
+
+       /* All done -- return from exception. */
+
+       ld      r10,PACA_EXSLB+EX_LR(r13)
+       ld      r3,PACA_EXSLB+EX_R3(r13)
+       lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
+
+       mtlr    r10
+
+       andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
+       beq-    2f
+
+.machine       push
+.machine       "power4"
+       mtcrf   0x80,r9
+       mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
+.machine       pop
+
+       RESTORE_PPR_PACA(PACA_EXSLB, r9)
+       ld      r9,PACA_EXSLB+EX_R9(r13)
+       ld      r10,PACA_EXSLB+EX_R10(r13)
+       ld      r11,PACA_EXSLB+EX_R11(r13)
+       ld      r12,PACA_EXSLB+EX_R12(r13)
+       ld      r13,PACA_EXSLB+EX_R13(r13)
+       rfid
+       b       .       /* prevent speculative execution */
+
+2:     mfspr   r11,SPRN_SRR0
+       ld      r10,PACAKBASE(r13)
+       LOAD_HANDLER(r10,unrecov_slb)
+       mtspr   SPRN_SRR0,r10
+       ld      r10,PACAKMSR(r13)
+       mtspr   SPRN_SRR1,r10
+       rfid
+       b       .
+
+unrecov_slb:
+       EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+       DISABLE_INTS
+       bl      .save_nvgprs
+1:     addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      .unrecoverable_exception
+       b       1b
+
+
+#ifdef CONFIG_PPC_970_NAP
+power4_fixup_nap:
+       andc    r9,r9,r10
+       std     r9,TI_LOCAL_FLAGS(r11)
+       ld      r10,_LINK(r1)           /* make idle task do the */
+       std     r10,_NIP(r1)            /* equivalent of a blr */
+       blr
+#endif
+
 /*
  * Hash table stuff
  */
@@ -1452,20 +1452,36 @@ do_ste_alloc:
 _GLOBAL(do_stab_bolted)
        stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
        std     r11,PACA_EXSLB+EX_SRR0(r13)     /* save SRR0 in exc. frame */
+       mfspr   r11,SPRN_DAR                    /* ea */
 
+       /*
+        * check for bad kernel/user address
+        * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+        */
+       rldicr. r9,r11,4,(63 - 46 - 4)
+       li      r9,0    /* VSID = 0 for bad address */
+       bne-    0f
+
+       /*
+        * Calculate VSID:
+        * This is the kernel VSID; we take the context from the top of
+        * the range: context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+        * Here we know that (ea >> 60) == 0xc
+        */
+       lis     r9,(MAX_USER_CONTEXT + 1)@ha
+       addi    r9,r9,(MAX_USER_CONTEXT + 1)@l
+
+       srdi    r10,r11,SID_SHIFT
+       rldimi  r10,r9,ESID_BITS,0 /* proto vsid */
+       ASM_VSID_SCRAMBLE(r10, r9, 256M)
+       rldic   r9,r10,12,16    /* r9 = vsid << 12 */
+
+0:
        /* Hash to the primary group */
        ld      r10,PACASTABVIRT(r13)
-       mfspr   r11,SPRN_DAR
-       srdi    r11,r11,28
+       srdi    r11,r11,SID_SHIFT
        rldimi  r10,r11,7,52    /* r10 = first ste of the group */
 
-       /* Calculate VSID */
-       /* This is a kernel address, so protovsid = ESID | 1 << 37 */
-       li      r9,0x1
-       rldimi  r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
-       ASM_VSID_SCRAMBLE(r11, r9, 256M)
-       rldic   r9,r11,12,16    /* r9 = vsid << 12 */
-
        /* Search the primary group for a free entry */
 1:     ld      r11,0(r10)      /* Test valid bit of the current ste    */
        andi.   r11,r11,0x80
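
The new range check packs the C test quoted in the comment into a single
rotate: "rldicr. r9,r11,4,(63 - 46 - 4)" rotates the faulting address left by
four (parking the region nibble in the low bits) and keeps only the 14
most-significant bits of the result, i.e. the original EA bits 59..46. A
non-zero result therefore means the offset within the region is at least
2^46 bytes, the (ea & ~REGION_MASK) >= PGTABLE_RANGE condition, and the
"bne- 0f" skips the VSID computation, leaving the r9 that was pre-loaded with
zero: the VSID-0 bad-address marker that the mmu-hash64.h comments reserve.
slb_allocate_realmode in slb_low.S now opens with the same check.
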
index 7f7fb7fd991bd4e6185937faf1864499433b0eb6..13f8d168b3f1e467599248c92457c442b456b7ee 100644 (file)
@@ -2832,11 +2832,13 @@ static void unreloc_toc(void)
 {
 }
 #else
-static void __reloc_toc(void *tocstart, unsigned long offset,
-                       unsigned long nr_entries)
+static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
 {
        unsigned long i;
-       unsigned long *toc_entry = (unsigned long *)tocstart;
+       unsigned long *toc_entry;
+
+       /* Get the start of the TOC by using r2 directly. */
+       asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
 
        for (i = 0; i < nr_entries; i++) {
                *toc_entry = *toc_entry + offset;
@@ -2850,8 +2852,7 @@ static void reloc_toc(void)
        unsigned long nr_entries =
                (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
 
-       /* Need to add offset to get at __prom_init_toc_start */
-       __reloc_toc(__prom_init_toc_start + offset, offset, nr_entries);
+       __reloc_toc(offset, nr_entries);
 
        mb();
 }
@@ -2864,8 +2865,7 @@ static void unreloc_toc(void)
 
        mb();
 
-       /* __prom_init_toc_start has been relocated, no need to add offset */
-       __reloc_toc(__prom_init_toc_start, -offset, nr_entries);
+       __reloc_toc(-offset, nr_entries);
 }
 #endif
 #endif
index 245c1b6a08589d216d3389308c565668b720b597..f9b30c68ba473a7f51607936da0e124507f40362 100644 (file)
@@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
 
        brk.address = bp_info->addr & ~7UL;
        brk.type = HW_BRK_TYPE_TRANSLATE;
+       brk.len = 8;
        if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
                brk.type |= HW_BRK_TYPE_READ;
        if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
index ead58e3172945d81e863ba937cc1d88b93922558..5d7d29a313eb99d8581f7bf2e515421020628fbe 100644 (file)
@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
        vcpu3s->context_id[0] = err;
 
        vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
-                                 << USER_ESID_BITS) - 1;
-       vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+                                 << ESID_BITS) - 1;
+       vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
        vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
 
        kvmppc_mmu_hpte_init(vcpu);
index 1b6e1271719f994e1d16b21621851733658bffa5..f410c3e12c1e78c0a6c5e24ad9f41d86fdf01337 100644 (file)
@@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
                unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
                unsigned long tprot = prot;
 
+               /*
+                * If we hit a bad address return error.
+                */
+               if (!vsid)
+                       return -1;
                /* Make kernel text executable */
                if (overlaps_kernel_text(vaddr, vaddr + step))
                        tprot &= ~HPTE_R_N;
@@ -759,6 +764,8 @@ void __init early_init_mmu(void)
        /* Initialize stab / SLB management */
        if (mmu_has_feature(MMU_FTR_SLB))
                slb_initialize();
+       else
+               stab_initialize(get_paca()->stab_real);
 }
 
 #ifdef CONFIG_SMP
@@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
        DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
                ea, access, trap);
 
-       if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
-               DBG_LOW(" out of pgtable range !\n");
-               return 1;
-       }
-
        /* Get region & vsid */
        switch (REGION_ID(ea)) {
        case USER_REGION_ID:
@@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
        }
        DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
+       /* Bad address. */
+       if (!vsid) {
+               DBG_LOW("Bad address!\n");
+               return 1;
+       }
        /* Get pgdir */
        pgdir = mm->pgd;
        if (pgdir == NULL)
@@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
        /* Get VSID */
        ssize = user_segment_size(ea);
        vsid = get_vsid(mm->context.id, ea, ssize);
+       if (!vsid)
+               return;
 
        /* Hash doesn't like irqs */
        local_irq_save(flags);
@@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
        hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
+       /* Don't create HPTE entries for bad address */
+       if (!vsid)
+               return;
        ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
                                 mode, HPTE_V_BOLTED,
                                 mmu_linear_psize, mmu_kernel_ssize);
index 40bc5b0ace54354aea6944d77b6ef07e6f7f7b3d..d1d1b92c5b9964c3363f84a1719f0759507ba41d 100644 (file)
 static DEFINE_SPINLOCK(mmu_context_lock);
 static DEFINE_IDA(mmu_context_ida);
 
-/*
- * 256MB segment
- * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
- * available for user mappings. Each segment contains 2^28 bytes. Each
- * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
- * (19 == 37 + 28 - 46).
- */
-#define MAX_CONTEXT    ((1UL << CONTEXT_BITS) - 1)
-
 int __init_new_context(void)
 {
        int index;
@@ -56,7 +47,7 @@ again:
        else if (err)
                return err;
 
-       if (index > MAX_CONTEXT) {
+       if (index > MAX_USER_CONTEXT) {
                spin_lock(&mmu_context_lock);
                ida_remove(&mmu_context_ida, index);
                spin_unlock(&mmu_context_lock);
index e212a271c7a4bdadac512efd24da89de6c9ddd77..654258f165aeb9496ae96131c0f9b383927e8d20 100644 (file)
@@ -61,7 +61,7 @@
 #endif
 
 #ifdef CONFIG_PPC_STD_MMU_64
-#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
 #endif
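
For scale: with ESID_BITS = 18 and SID_SHIFT = 28 the bound is 1UL << 46,
i.e. the 64TB that a single context id can map under the scheme described in
mmu-hash64.h, so the build-time check is unchanged in effect; only the
USER_ESID_BITS name has become ESID_BITS.
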
index 1a16ca2277575fef9c0614df84f6e4f93df376f1..17aa6dfceb3498cbb0a09302485bdf1ab8049cbf 100644 (file)
  * No other registers are examined or changed.
  */
 _GLOBAL(slb_allocate_realmode)
-       /* r3 = faulting address */
+       /*
+        * check for bad kernel/user address
+        * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+        */
+       rldicr. r9,r3,4,(63 - 46 - 4)
+       bne-    8f
 
        srdi    r9,r3,60                /* get region */
-       srdi    r10,r3,28               /* get esid */
+       srdi    r10,r3,SID_SHIFT        /* get esid */
        cmpldi  cr7,r9,0xc              /* cmp PAGE_OFFSET for later use */
 
        /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode)
         */
 _GLOBAL(slb_miss_kernel_load_linear)
        li      r11,0
-       li      r9,0x1
        /*
-        * for 1T we shift 12 bits more.  slb_finish_load_1T will do
-        * the necessary adjustment
+        * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+        * r9 = region id.
         */
-       rldimi  r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+       addis   r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+       addi    r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
+
 BEGIN_FTR_SECTION
        b       slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
        _GLOBAL(slb_miss_kernel_load_io)
        li      r11,0
 6:
-       li      r9,0x1
        /*
-        * for 1T we shift 12 bits more.  slb_finish_load_1T will do
-        * the necessary adjustment
+        * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+        * r9 = region id.
         */
-       rldimi  r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+       addis   r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+       addi    r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
 BEGIN_FTR_SECTION
        b       slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
        b       slb_finish_load_1T
 
-0:     /* user address: proto-VSID = context << 15 | ESID. First check
-        * if the address is within the boundaries of the user region
-        */
-       srdi.   r9,r10,USER_ESID_BITS
-       bne-    8f                      /* invalid ea bits set */
-
-
+0:
        /* when using slices, we extract the psize off the slice bitmaps
         * and then we need to get the sllp encoding off the mmu_psize_defs
         * array.
@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
        ld      r9,PACACONTEXTID(r13)
 BEGIN_FTR_SECTION
        cmpldi  r10,0x1000
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-       rldimi  r10,r9,USER_ESID_BITS,0
-BEGIN_FTR_SECTION
        bge     slb_finish_load_1T
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
        b       slb_finish_load
 
 8:     /* invalid EA */
        li      r10,0                   /* BAD_VSID */
+       li      r9,0                    /* BAD_VSID */
        li      r11,SLB_VSID_USER       /* flags don't much matter */
        b       slb_finish_load
 
@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user)
 
        /* get context to calculate proto-VSID */
        ld      r9,PACACONTEXTID(r13)
-       rldimi  r10,r9,USER_ESID_BITS,0
-
        /* fall through slb_finish_load */
 
 #endif /* __DISABLED__ */
@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user)
 /*
  * Finish loading of an SLB entry and return
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+ * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
+       rldimi  r10,r9,ESID_BITS,0
        ASM_VSID_SCRAMBLE(r10,r9,256M)
        /*
         * bits above VSID_BITS_256M need to be ignored from r10
@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size)
 /*
  * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
+ * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
  */
 slb_finish_load_1T:
-       srdi    r10,r10,40-28           /* get 1T ESID */
+       srdi    r10,r10,(SID_SHIFT_1T - SID_SHIFT)      /* get 1T ESID */
+       rldimi  r10,r9,ESID_BITS_1T,0
        ASM_VSID_SCRAMBLE(r10,r9,1T)
        /*
         * bits above VSID_BITS_1T need to be ignored from r10
index 0d82ef50dc3faf9269042bb09001320fbdacdaad..023ec8a13f38eed82e45fe37fc0f6e4be9518b83 100644 (file)
@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
        if (!is_kernel_addr(addr)) {
                ssize = user_segment_size(addr);
                vsid = get_vsid(mm->context.id, addr, ssize);
-               WARN_ON(vsid == 0);
        } else {
                vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
                ssize = mmu_kernel_ssize;
        }
+       WARN_ON(vsid == 0);
        vpn = hpt_vpn(addr, vsid, ssize);
        rpte = __real_pte(__pte(pte), ptep);
 
index b554879bd31e4c48cc6769092695c7f32a899af4..3c475d6267c75b0db98584b0f4c7cd0379fd19d1 100644 (file)
@@ -420,7 +420,20 @@ static struct attribute_group power7_pmu_events_group = {
        .attrs = power7_events_attr,
 };
 
+PMU_FORMAT_ATTR(event, "config:0-19");
+
+static struct attribute *power7_pmu_format_attr[] = {
+       &format_attr_event.attr,
+       NULL,
+};
+
+struct attribute_group power7_pmu_format_group = {
+       .name = "format",
+       .attrs = power7_pmu_format_attr,
+};
+
 static const struct attribute_group *power7_pmu_attr_groups[] = {
+       &power7_pmu_format_group,
        &power7_pmu_events_group,
        NULL,
 };
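
PMU_FORMAT_ATTR(event, "config:0-19") publishes a format description, normally
visible as /sys/bus/event_source/devices/cpu/format/event (assuming the usual
"cpu" PMU name), stating that the raw event code occupies bits 0-19 of
perf_event_attr.config. With it exported, the perf tool can accept raw events
spelled like cpu/event=0x1e/ on the command line; the event number here is
made up, only the syntax matters.
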
index 611e92f291c428765378140dbf5dc06890c44973..7179726ba5c5fdb521afa9a78509eefca7695131 100644 (file)
@@ -69,7 +69,7 @@ static irqreturn_t gpio_halt_irq(int irq, void *__data)
         return IRQ_HANDLED;
 };
 
-static int __devinit gpio_halt_probe(struct platform_device *pdev)
+static int gpio_halt_probe(struct platform_device *pdev)
 {
        enum of_gpio_flags flags;
        struct device_node *node = pdev->dev.of_node;
@@ -128,7 +128,7 @@ static int __devinit gpio_halt_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __devexit gpio_halt_remove(struct platform_device *pdev)
+static int gpio_halt_remove(struct platform_device *pdev)
 {
        if (halt_node) {
                int gpio = of_get_gpio(halt_node, 0);
@@ -165,7 +165,7 @@ static struct platform_driver gpio_halt_driver = {
                .of_match_table = gpio_halt_match,
        },
        .probe          = gpio_halt_probe,
-       .remove         = __devexit_p(gpio_halt_remove),
+       .remove         = gpio_halt_remove,
 };
 
 module_platform_driver(gpio_halt_driver);
index cea2f09c42418d101c78b445b18c9286a714b53b..18e3b76c78d7ca3d8372ccda9808a5b122deb78d 100644 (file)
@@ -124,9 +124,8 @@ config 6xx
        select PPC_HAVE_PMU_SUPPORT
 
 config POWER3
-       bool
        depends on PPC64 && PPC_BOOK3S
-       default y if !POWER4_ONLY
+       def_bool y
 
 config POWER4
        depends on PPC64 && PPC_BOOK3S
@@ -145,8 +144,7 @@ config TUNE_CELL
          but somewhat slower on other machines. This option only changes
          the scheduling of instructions, not the selection of instructions
          itself, so the resulting kernel will keep running on all other
-         machines. When building a kernel that is supposed to run only
-         on Cell, you should also select the POWER4_ONLY option.
+         machines.
 
 # this is temp to handle compat with arch=ppc
 config 8xx
index 8d4847191ecc52121cbe2afaa04fff7e2c6d2ad8..dc9200ca32eda4d3e5a819025b176f7af5a06d60 100644 (file)
@@ -34,6 +34,8 @@ struct arsb {
        u32 reserved[4];
 } __packed;
 
+#define EQC_WR_PROHIBIT 22
+
 struct msb {
        u8 fmt:4;
        u8 oc:4;
@@ -96,11 +98,13 @@ struct scm_device {
 #define OP_STATE_TEMP_ERR      2
 #define OP_STATE_PERM_ERR      3
 
+enum scm_event {SCM_CHANGE, SCM_AVAIL};
+
 struct scm_driver {
        struct device_driver drv;
        int (*probe) (struct scm_device *scmdev);
        int (*remove) (struct scm_device *scmdev);
-       void (*notify) (struct scm_device *scmdev);
+       void (*notify) (struct scm_device *scmdev, enum scm_event event);
        void (*handler) (struct scm_device *scmdev, void *data, int error);
 };
 
index 1d8fe2b17ef6f1d0069c0e3cc8ff70c7d9cc402a..6b32af30878cc6276c57079aeefcda38ea6ea685 100644 (file)
@@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
 
 static inline void __tlb_flush_mm(struct mm_struct * mm)
 {
-       if (unlikely(cpumask_empty(mm_cpumask(mm))))
-               return;
        /*
         * If the machine has IDTE we prefer to do a per mm flush
         * on all cpus instead of doing a local flush if the mm
index 55022852326748599c4a8f4a74e77468e3d1f5a3..94feff7d613233dca1ad45702181f161bb425caf 100644 (file)
@@ -636,7 +636,8 @@ ENTRY(mcck_int_handler)
        UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
 mcck_skip:
        SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
-       mvc     __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA
+       stm     %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
        stm     %r8,%r9,__PT_PSW(%r11)
        xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
        l       %r1,BASED(.Ldo_machine_check)
index 9c837c101297a52536c726d2e6b2e2c2e9c79ce9..2e6d60c55f909259803a1b52d7fdc6d04277fd0b 100644 (file)
@@ -678,8 +678,9 @@ ENTRY(mcck_int_handler)
        UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
        LAST_BREAK %r14
 mcck_skip:
-       lghi    %r14,__LC_GPREGS_SAVE_AREA
-       mvc     __PT_R0(128,%r11),0(%r14)
+       lghi    %r14,__LC_GPREGS_SAVE_AREA+64
+       stmg    %r0,%r7,__PT_R0(%r11)
+       mvc     __PT_R8(64,%r11),0(%r14)
        stmg    %r8,%r9,__PT_PSW(%r11)
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
index a5360de85ec7e7eb190f64de6028d08ea2e2c68e..29268859d8eebc405bc013fee0a2d05ee86d71b8 100644 (file)
@@ -571,6 +571,8 @@ static void __init setup_memory_end(void)
 
        /* Split remaining virtual space between 1:1 mapping & vmemmap array */
        tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
+       /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
+       tmp = SECTION_ALIGN_UP(tmp);
        tmp = VMALLOC_START - tmp * sizeof(struct page);
        tmp &= ~((vmax >> 11) - 1);     /* align to page table level */
        tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
index 289127d5241ce7a2ee646e8b8211849ae1cc2eda..3d361f236308c13cde14286379ec418af3e52990 100644 (file)
@@ -84,12 +84,6 @@ config ARCH_DEFCONFIG
        default "arch/sparc/configs/sparc32_defconfig" if SPARC32
        default "arch/sparc/configs/sparc64_defconfig" if SPARC64
 
-# CONFIG_BITS can be used at source level to get 32/64 bits
-config BITS
-       int
-       default 32 if SPARC32
-       default 64 if SPARC64
-
 config IOMMU_HELPER
        bool
        default y if SPARC64
@@ -197,7 +191,7 @@ config RWSEM_XCHGADD_ALGORITHM
 
 config GENERIC_HWEIGHT
        bool
-       default y if !ULTRA_HAS_POPULATION_COUNT
+       default y
 
 config GENERIC_CALIBRATE_DELAY
        bool
index d06a266017534f406b41d0488e0c00eb296d0156..6b67e50fb9b4cf934dade73c9d10dfeec6afe364 100644 (file)
@@ -45,6 +45,7 @@
 #define SUN4V_CHIP_NIAGARA3    0x03
 #define SUN4V_CHIP_NIAGARA4    0x04
 #define SUN4V_CHIP_NIAGARA5    0x05
+#define SUN4V_CHIP_SPARC64X    0x8a
 #define SUN4V_CHIP_UNKNOWN     0xff
 
 #ifndef __ASSEMBLY__
index a6c94a2bf9d4b1150e41b63d3e71a81138fc1f63..5c5125895db86e4dacd9de9dee91b508628b30c2 100644 (file)
@@ -493,6 +493,12 @@ static void __init sun4v_cpu_probe(void)
                sparc_pmu_type = "niagara5";
                break;
 
+       case SUN4V_CHIP_SPARC64X:
+               sparc_cpu_type = "SPARC64-X";
+               sparc_fpu_type = "SPARC64-X integrated FPU";
+               sparc_pmu_type = "sparc64-x";
+               break;
+
        default:
                printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
                       prom_cpu_compatible);
index 2feb15c35d9e1bb0154d1f023accccca3a8ee460..26b706a1867dc6b9976b52e1e61ef8e54ce0df91 100644 (file)
@@ -134,6 +134,8 @@ prom_niagara_prefix:
        .asciz  "SUNW,UltraSPARC-T"
 prom_sparc_prefix:
        .asciz  "SPARC-"
+prom_sparc64x_prefix:
+       .asciz  "SPARC64-X"
        .align  4
 prom_root_compatible:
        .skip   64
@@ -412,7 +414,7 @@ sun4v_chip_type:
        cmp     %g2, 'T'
        be,pt   %xcc, 70f
         cmp    %g2, 'M'
-       bne,pn  %xcc, 4f
+       bne,pn  %xcc, 49f
         nop
 
 70:    ldub    [%g1 + 7], %g2
@@ -425,7 +427,7 @@ sun4v_chip_type:
        cmp     %g2, '5'
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_NIAGARA5, %g4
-       ba,pt   %xcc, 4f
+       ba,pt   %xcc, 49f
         nop
 
 91:    sethi   %hi(prom_cpu_compatible), %g1
@@ -439,6 +441,25 @@ sun4v_chip_type:
         mov    SUN4V_CHIP_NIAGARA2, %g4
        
 4:
+       /* Athena */
+       sethi   %hi(prom_cpu_compatible), %g1
+       or      %g1, %lo(prom_cpu_compatible), %g1
+       sethi   %hi(prom_sparc64x_prefix), %g7
+       or      %g7, %lo(prom_sparc64x_prefix), %g7
+       mov     9, %g3
+41:    ldub    [%g7], %g2
+       ldub    [%g1], %g4
+       cmp     %g2, %g4
+       bne,pn  %icc, 49f
+       add     %g7, 1, %g7
+       subcc   %g3, 1, %g3
+       bne,pt  %xcc, 41b
+       add     %g1, 1, %g1
+       mov     SUN4V_CHIP_SPARC64X, %g4
+       ba,pt   %xcc, 5f
+       nop
+
+49:
        mov     SUN4V_CHIP_UNKNOWN, %g4
 5:     sethi   %hi(sun4v_chip_type), %g2
        or      %g2, %lo(sun4v_chip_type), %g2
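
The added 41:/49: loop compares nine bytes of prom_cpu_compatible against the
new prom_sparc64x_prefix string before falling back to UNKNOWN. Roughly, as a
stand-alone C model (illustration only, not code from the tree):

#include <string.h>

static int classify_sun4v_chip(const char *prom_cpu_compatible)
{
        enum { SUN4V_CHIP_SPARC64X = 0x8a, SUN4V_CHIP_UNKNOWN = 0xff };

        /* "Athena" parts report a compatible string starting "SPARC64-X" */
        if (!strncmp(prom_cpu_compatible, "SPARC64-X", 9))
                return SUN4V_CHIP_SPARC64X;
        return SUN4V_CHIP_UNKNOWN;
}
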
index fc4320886a3aa5d15f817075cfca261487d21438..4d1487138d260553005af8036734ef519395dd22 100644 (file)
@@ -186,6 +186,8 @@ struct grpci2_cap_first {
 #define CAP9_IOMAP_OFS 0x20
 #define CAP9_BARSIZE_OFS 0x24
 
+#define TGT 256
+
 struct grpci2_priv {
        struct leon_pci_info    info; /* must be on top of this structure */
        struct grpci2_regs      *regs;
@@ -237,8 +239,12 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
        if (where & 0x3)
                return -EINVAL;
 
-       if (bus == 0 && PCI_SLOT(devfn) != 0)
-               devfn += (0x8 * 6);
+       if (bus == 0) {
+               devfn += (0x8 * 6); /* start at AD16=Device0 */
+       } else if (bus == TGT) {
+               bus = 0;
+               devfn = 0; /* special case: bridge controller itself */
+       }
 
        /* Select bus */
        spin_lock_irqsave(&grpci2_dev_lock, flags);
@@ -303,8 +309,12 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
        if (where & 0x3)
                return -EINVAL;
 
-       if (bus == 0 && PCI_SLOT(devfn) != 0)
-               devfn += (0x8 * 6);
+       if (bus == 0) {
+               devfn += (0x8 * 6); /* start at AD16=Device0 */
+       } else if (bus == TGT) {
+               bus = 0;
+               devfn = 0; /* special case: bridge controller itself */
+       }
 
        /* Select bus */
        spin_lock_irqsave(&grpci2_dev_lock, flags);
@@ -368,7 +378,7 @@ static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn,
        unsigned int busno = bus->number;
        int ret;
 
-       if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) {
+       if (PCI_SLOT(devfn) > 15 || busno > 255) {
                *val = ~0;
                return 0;
        }
@@ -406,7 +416,7 @@ static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn,
        struct grpci2_priv *priv = grpci2priv;
        unsigned int busno = bus->number;
 
-       if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0))
+       if (PCI_SLOT(devfn) > 15 || busno > 255)
                return 0;
 
 #ifdef GRPCI2_DEBUG_CFGACCESS
@@ -578,15 +588,15 @@ void grpci2_hw_init(struct grpci2_priv *priv)
                REGSTORE(regs->ahbmst_map[i], priv->pci_area);
 
        /* Get the GRPCI2 Host PCI ID */
-       grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid);
+       grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid);
 
        /* Get address to first (always defined) capability structure */
-       grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr);
+       grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr);
 
        /* Enable/Disable Byte twisting */
-       grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map);
+       grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map);
        io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0);
-       grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map);
+       grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map);
 
        /* Setup the Host's PCI Target BARs for other peripherals to access,
         * and do DMA to the host's memory. The target BARs can be sized and
@@ -617,17 +627,18 @@ void grpci2_hw_init(struct grpci2_priv *priv)
                                pciadr = 0;
                        }
                }
-               grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz);
-               grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
-               grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
+               grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4,
+                               bar_sz);
+               grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
+               grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
                printk(KERN_INFO "        TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n",
                        i, pciadr, ahbadr);
        }
 
        /* set as bus master and enable pci memory responses */
-       grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data);
+       grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data);
        data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
-       grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data);
+       grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data);
 
        /* Enable Error response (CPU-TRAP) on illegal memory access. */
        REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);
index 8c5eff6d6df5577ea987d01cbf79cde62cab3506..47684815e5c8a27bf51fd8854ca11ae7cb2a07f8 100644 (file)
@@ -330,7 +330,6 @@ CONFIG_MD_RAID0=m
 CONFIG_MD_RAID1=m
 CONFIG_MD_RAID10=m
 CONFIG_MD_RAID456=m
-CONFIG_MULTICORE_RAID456=y
 CONFIG_MD_FAULTY=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_DEBUG=y
index e7a3dfcbcda7094ef4c7fa818c650716d2103397..dd2b8f0c631fc4a1ffb047d322753d3b4cef95b4 100644 (file)
@@ -324,7 +324,6 @@ CONFIG_MD_RAID0=m
 CONFIG_MD_RAID1=m
 CONFIG_MD_RAID10=m
 CONFIG_MD_RAID456=m
-CONFIG_MULTICORE_RAID456=y
 CONFIG_MD_FAULTY=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_DEBUG=y
index d3ddd17405d07b1e288828a949ac9ecb363c3c2e..5a6d2873f80eb61e878ded58ff289cf59adc0341 100644 (file)
@@ -77,6 +77,7 @@ struct arch_specific_insn {
         * a post_handler or break_handler).
         */
        int boostable;
+       bool if_modifier;
 };
 
 struct arch_optimized_insn {
index 635a74d224098523c2fcdee0f26a71618f96bdd8..4979778cc7fb51a14f5fa5960eb02b36d5c235f9 100644 (file)
@@ -414,8 +414,8 @@ struct kvm_vcpu_arch {
        gpa_t time;
        struct pvclock_vcpu_time_info hv_clock;
        unsigned int hw_tsc_khz;
-       unsigned int time_offset;
-       struct page *time_page;
+       struct gfn_to_hva_cache pv_time;
+       bool pv_time_enabled;
        /* set guest stopped flag in pvclock flags field */
        bool pvclock_set_guest_stopped_request;
 
index c20d1ce62dc6a0a236280ab51b7ac8386fb1c32b..e709884d0ef9edbdd0b7228f180814bef1c0b174 100644 (file)
@@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
        return _hypercall3(int, console_io, cmd, count, str);
 }
 
-extern int __must_check HYPERVISOR_physdev_op_compat(int, void *);
+extern int __must_check xen_physdev_op_compat(int, void *);
 
 static inline int
 HYPERVISOR_physdev_op(int cmd, void *arg)
 {
        int rc = _hypercall2(int, physdev_op, cmd, arg);
        if (unlikely(rc == -ENOSYS))
-               rc = HYPERVISOR_physdev_op_compat(cmd, arg);
+               rc = xen_physdev_op_compat(cmd, arg);
        return rc;
 }
 
index 892ce40a7470515543e09d80115efae395717b0b..7a060f4b411f753ed065c389b705e7b2a010ab7b 100644 (file)
@@ -44,6 +44,7 @@
 #define SNB_C1_AUTO_UNDEMOTE           (1UL << 27)
 #define SNB_C3_AUTO_UNDEMOTE           (1UL << 28)
 
+#define MSR_PLATFORM_INFO              0x000000ce
 #define MSR_MTRRcap                    0x000000fe
 #define MSR_IA32_BBL_CR_CTL            0x00000119
 #define MSR_IA32_BBL_CR_CTL3           0x0000011e
index 529c8931fc029faffb4f4437ef74dd4fedb538f3..dab7580c47aee2e71e501afbfa94c17e2790777d 100644 (file)
@@ -101,6 +101,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+       INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
+       INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
+       INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+       INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
        INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
index 3f06e61499814ee368ddcf6ec41a690cdfbea3c5..7bfe318d3d8a4a50b07bd8c02865c9f4af3496d7 100644 (file)
@@ -375,6 +375,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
        else
                p->ainsn.boostable = -1;
 
+       /* Check whether the instruction modifies Interrupt Flag or not */
+       p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
+
        /* Also, displacement change doesn't affect the first byte */
        p->opcode = p->ainsn.insn[0];
 }
@@ -434,7 +437,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
        __this_cpu_write(current_kprobe, p);
        kcb->kprobe_saved_flags = kcb->kprobe_old_flags
                = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
-       if (is_IF_modifier(p->ainsn.insn))
+       if (p->ainsn.if_modifier)
                kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
 }
 
index 7890bc8389524d4a1e934e23b041ab1010abf4f7..d893e8ed8ac96559b2175b00c3d68d71658a4105 100644 (file)
@@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
        struct microcode_intel ***mc_saved;
 
        mc_saved = (struct microcode_intel ***)
-                  __pa_symbol(&mc_saved_data->mc_saved);
+                  __pa_nodebug(&mc_saved_data->mc_saved);
        for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
                struct microcode_intel *p;
 
                p = *(struct microcode_intel **)
-                       __pa(mc_saved_data->mc_saved + i);
-               mc_saved_tmp[i] = (struct microcode_intel *)__pa(p);
+                       __pa_nodebug(mc_saved_data->mc_saved + i);
+               mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
        }
 }
 #endif
@@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end,
        struct cpio_data cd;
        long offset = 0;
 #ifdef CONFIG_X86_32
-       char *p = (char *)__pa_symbol(ucode_name);
+       char *p = (char *)__pa_nodebug(ucode_name);
 #else
        char *p = ucode_name;
 #endif
@@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
        if (mc_intel == NULL)
                return;
 
-       delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info);
-       current_mc_date_p = (int *)__pa_symbol(&current_mc_date);
+       delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
+       current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
 
        *delay_ucode_info_p = 1;
        *current_mc_date_p = mc_intel->hdr.date;
@@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
 }
 #endif
 
-static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
-                                struct ucode_cpu_info *uci)
+static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
+                                          struct ucode_cpu_info *uci)
 {
        struct microcode_intel *mc_intel;
        unsigned int val[2];
@@ -741,15 +741,15 @@ load_ucode_intel_bsp(void)
 #ifdef CONFIG_X86_32
        struct boot_params *boot_params_p;
 
-       boot_params_p = (struct boot_params *)__pa_symbol(&boot_params);
+       boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params);
        ramdisk_image = boot_params_p->hdr.ramdisk_image;
        ramdisk_size  = boot_params_p->hdr.ramdisk_size;
        initrd_start_early = ramdisk_image;
        initrd_end_early = initrd_start_early + ramdisk_size;
 
        _load_ucode_intel_bsp(
-               (struct mc_saved_data *)__pa_symbol(&mc_saved_data),
-               (unsigned long *)__pa_symbol(&mc_saved_in_initrd),
+               (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+               (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
                initrd_start_early, initrd_end_early, &uci);
 #else
        ramdisk_image = boot_params.hdr.ramdisk_image;
@@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void)
        unsigned long *initrd_start_p;
 
        mc_saved_in_initrd_p =
-               (unsigned long *)__pa_symbol(mc_saved_in_initrd);
-       mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data);
-       initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start);
-       initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p);
+               (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
+       mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
+       initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+       initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
 #else
        mc_saved_data_p = &mc_saved_data;
        mc_saved_in_initrd_p = mc_saved_in_initrd;
index f71500af1f813245bb12092665ac7dea3ba5f24f..f19ac0aca60d9379ea5c625e8e6f35024d7bd239 100644 (file)
@@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        unsigned long flags, this_tsc_khz;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        struct kvm_arch *ka = &v->kvm->arch;
-       void *shared_kaddr;
        s64 kernel_ns, max_kernel_ns;
        u64 tsc_timestamp, host_tsc;
-       struct pvclock_vcpu_time_info *guest_hv_clock;
+       struct pvclock_vcpu_time_info guest_hv_clock;
        u8 pvclock_flags;
        bool use_master_clock;
 
        kernel_ns = 0;
        host_tsc = 0;
 
-       /* Keep irq disabled to prevent changes to the clock */
-       local_irq_save(flags);
-       this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
-       if (unlikely(this_tsc_khz == 0)) {
-               local_irq_restore(flags);
-               kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
-               return 1;
-       }
-
        /*
         * If the host uses TSC clock, then passthrough TSC as stable
         * to the guest.
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                kernel_ns = ka->master_kernel_ns;
        }
        spin_unlock(&ka->pvclock_gtod_sync_lock);
+
+       /* Keep irq disabled to prevent changes to the clock */
+       local_irq_save(flags);
+       this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+       if (unlikely(this_tsc_khz == 0)) {
+               local_irq_restore(flags);
+               kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
+               return 1;
+       }
        if (!use_master_clock) {
                host_tsc = native_read_tsc();
                kernel_ns = get_kernel_ns();
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
        local_irq_restore(flags);
 
-       if (!vcpu->time_page)
+       if (!vcpu->pv_time_enabled)
                return 0;
 
        /*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
         */
        vcpu->hv_clock.version += 2;
 
-       shared_kaddr = kmap_atomic(vcpu->time_page);
-
-       guest_hv_clock = shared_kaddr + vcpu->time_offset;
+       if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+               &guest_hv_clock, sizeof(guest_hv_clock))))
+               return 0;
 
        /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
-       pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+       pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
 
        if (vcpu->pvclock_set_guest_stopped_request) {
                pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
        vcpu->hv_clock.flags = pvclock_flags;
 
-       memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
-              sizeof(vcpu->hv_clock));
-
-       kunmap_atomic(shared_kaddr);
-
-       mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+                               &vcpu->hv_clock,
+                               sizeof(vcpu->hv_clock));
        return 0;
 }
 
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.time_page) {
-               kvm_release_page_dirty(vcpu->arch.time_page);
-               vcpu->arch.time_page = NULL;
-       }
+       vcpu->arch.pv_time_enabled = false;
 }
 
 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_KVM_SYSTEM_TIME_NEW:
        case MSR_KVM_SYSTEM_TIME: {
+               u64 gpa_offset;
                kvmclock_reset(vcpu);
 
                vcpu->arch.time = data;
@@ -1956,14 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (!(data & 1))
                        break;
 
-               /* ...but clean it before doing the actual write */
-               vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+               gpa_offset = data & ~(PAGE_MASK | 1);
 
-               vcpu->arch.time_page =
-                               gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+               /* Check that the address is 32-byte aligned. */
+               if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
+                       break;
 
-               if (is_error_page(vcpu->arch.time_page))
-                       vcpu->arch.time_page = NULL;
+               if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+                    &vcpu->arch.pv_time, data & ~1ULL))
+                       vcpu->arch.pv_time_enabled = false;
+               else
+                       vcpu->arch.pv_time_enabled = true;
 
                break;
        }
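The masking arithmetic behind the new alignment check is easiest to see in a stand-alone user-space sketch (not part of the patch; PAGE_SIZE and the 32-byte size of struct pvclock_vcpu_time_info are assumptions here): bit 0 of the MSR value is the enable flag, the low page bits are the offset of the pvclock record, and the check rejects any offset that is not a multiple of the record size.

/* Illustrative only -- mirrors the gpa_offset handling above. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE    4096ULL
#define PAGE_MASK    (~(PAGE_SIZE - 1))
#define PVCLOCK_SIZE 32ULL   /* assumed sizeof(struct pvclock_vcpu_time_info) */

int main(void)
{
        uint64_t data = 0x12345040ULL | 1;                /* bit 0 = clock enabled */
        uint64_t gpa_offset = data & ~(PAGE_MASK | 1ULL); /* in-page offset, bit 0 cleared */

        printf("offset in page: 0x%llx\n", (unsigned long long)gpa_offset);
        if (gpa_offset & (PVCLOCK_SIZE - 1))
                printf("rejected: pvclock record would be misaligned\n");
        else
                printf("accepted: record is naturally aligned within the page\n");
        return 0;
}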
@@ -2967,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
  */
 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->arch.time_page)
+       if (!vcpu->arch.pv_time_enabled)
                return -EINVAL;
        vcpu->arch.pvclock_set_guest_stopped_request = true;
        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6718,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                goto fail_free_wbinvd_dirty_mask;
 
        vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+       vcpu->arch.pv_time_enabled = false;
        kvm_async_pf_hash_reset(vcpu);
        kvm_pmu_init(vcpu);
 
index 05928aae911e0addaaaf1bd0fce5002ba3c9f96e..906fea3157919dcffabfe2bc4b23a1becb98aa7c 100644 (file)
@@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
        char c;
        unsigned zero_len;
 
-       for (; len; --len) {
+       for (; len; --len, to++) {
                if (__get_user_nocheck(c, from++, sizeof(char)))
                        break;
-               if (__put_user_nocheck(c, to++, sizeof(char)))
+               if (__put_user_nocheck(c, to, sizeof(char)))
                        break;
        }
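The off-by-one being fixed here is easiest to see in a user-space analogue (illustrative only, not the kernel routine): if the destination pointer is advanced inside the failing store, the byte that was never written gets skipped by the subsequent zeroing. put_byte() below is a hypothetical stand-in that fails at one chosen offset.

/* Illustrative analogue of the patched loop; put_byte() simulates a fault. */
#include <stdio.h>
#include <string.h>

static unsigned fail_at = 3;                    /* pretend the 4th store faults */

static int put_byte(char *dst, char c, unsigned idx)
{
        if (idx == fail_at)
                return -1;                      /* simulated fault */
        *dst = c;
        return 0;
}

static unsigned tail_copy(char *to, const char *from, unsigned len)
{
        unsigned i = 0;

        for (; len; --len, to++, i++)           /* 'to' advances only after a good store */
                if (put_byte(to, from[i], i))
                        break;

        memset(to, 0, len);                     /* zero everything not copied */
        return len;
}

int main(void)
{
        char src[8] = "ABCDEFG";
        char dst[8];
        unsigned i, left;

        memset(dst, 'x', sizeof(dst));
        left = tail_copy(dst, src, 7);
        printf("uncopied: %u, dst:", left);
        for (i = 0; i < 7; i++)
                printf(" %02x", (unsigned char)dst[i]);
        printf("\n");                           /* 41 42 43 00 00 00 00 */
        /* With the old 'to++' inside the put call, dst[3] would keep its
         * stale 'x' and the zeroing would start one byte too late. */
        return 0;
}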
 
index e8e34938c57d86be6ba133abb033fbd7616d8f9a..6afbb2ca9a0ac450f79c6628b7cd9cd7c2ceeccf 100644 (file)
@@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3)
        __xen_write_cr3(true, cr3);
 
        xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
-
-       pv_mmu_ops.write_cr3 = &xen_write_cr3;
 }
 #endif
 
@@ -2122,6 +2120,7 @@ static void __init xen_post_allocator_init(void)
 #endif
 
 #ifdef CONFIG_X86_64
+       pv_mmu_ops.write_cr3 = &xen_write_cr3;
        SetPagePinned(virt_to_page(level3_user_vsyscall));
 #endif
        xen_mark_init_mm_pinned();
index db8f1b5078570fe98afb8dd748636060511ba683..cc2b827a853cdea378f1c54edeadd65546b33651 100644 (file)
@@ -444,7 +444,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
         * copied from blk_rq_pos(rq).
         */
        if (error_sector)
-               *error_sector = bio->bi_sector;
+               *error_sector = bio->bi_sector;
 
        if (!bio_flagged(bio, BIO_UPTODATE))
                ret = -EIO;
index 789cdea05893bb8e3420ede5d1aa65e7af408e1b..ae95ee6a58aad6d005affa10bb270bdf1008e986 100644 (file)
@@ -257,6 +257,7 @@ void delete_partition(struct gendisk *disk, int partno)
 
        hd_struct_put(part);
 }
+EXPORT_SYMBOL(delete_partition);
 
 static ssize_t whole_disk_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
index 1e5d8a40101e274f5d6ce2bfe1a24a486a671af7..fefc2ca7cc3e0199b5f98b707107d424dd40b8eb 100644 (file)
@@ -405,7 +405,7 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
                return rc;
        data_len = estatus->data_length;
        gdata = (struct acpi_hest_generic_data *)(estatus + 1);
-       while (data_len > sizeof(*gdata)) {
+       while (data_len >= sizeof(*gdata)) {
                gedata_len = gdata->error_data_length;
                if (gedata_len > data_len - sizeof(*gdata))
                        return -EINVAL;
index 0ac546d5e53f5b0f079792c71419dd07001fe892..5ff17306612771d04dd684020041fe61056906ab 100644 (file)
@@ -646,6 +646,7 @@ static void handle_root_bridge_insertion(acpi_handle handle)
 
 static void handle_root_bridge_removal(struct acpi_device *device)
 {
+       acpi_status status;
        struct acpi_eject_event *ej_event;
 
        ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
@@ -661,7 +662,9 @@ static void handle_root_bridge_removal(struct acpi_device *device)
        ej_event->device = device;
        ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
 
-       acpi_bus_hot_remove_device(ej_event);
+       status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
+       if (ACPI_FAILURE(status))
+               kfree(ej_event);
 }
 
 static void _handle_hotplug_event_root(struct work_struct *work)
@@ -676,8 +679,9 @@ static void _handle_hotplug_event_root(struct work_struct *work)
        handle = hp_work->handle;
        type = hp_work->type;
 
-       root = acpi_pci_find_root(handle);
+       acpi_scan_lock_acquire();
 
+       root = acpi_pci_find_root(handle);
        acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
 
        switch (type) {
@@ -711,6 +715,7 @@ static void _handle_hotplug_event_root(struct work_struct *work)
                break;
        }
 
+       acpi_scan_lock_release();
        kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
        kfree(buffer.pointer);
 }
index 24213033fbae2aee5f033c2a776d2b37084fc88e..9c1a435d10e69c8228516346832a8d2d2d242890 100644 (file)
@@ -193,6 +193,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
        },
        {
        .callback = init_nvs_nosave,
+       .ident = "Sony Vaio VGN-FW21M",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
+               },
+       },
+       {
+       .callback = init_nvs_nosave,
        .ident = "Sony Vaio VPCEB17FX",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
index 093c435549631db21748b17827a433d8428ad862..1f44e56cc65d3c0262f4217c0040bd7c82c47569 100644 (file)
@@ -158,7 +158,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
 EXPORT_SYMBOL(tegra_ahb_enable_smmu);
 #endif
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
 static int tegra_ahb_suspend(struct device *dev)
 {
        int i;
index 3e751b74615eac73bbf1835aa404337cd0cd6431..a5a3ebcbdd2cf6295028f62d0fe1d3bb21397633 100644 (file)
@@ -59,15 +59,16 @@ config ATA_ACPI
          option libata.noacpi=1
 
 config SATA_ZPODD
-       bool "SATA Zero Power ODD Support"
+       bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
        depends on ATA_ACPI
        default n
        help
-         This option adds support for SATA ZPODD. It requires both
-         ODD and the platform support, and if enabled, will automatically
-         power on/off the ODD when certain condition is satisfied. This
-         does not impact user's experience of the ODD, only power is saved
-         when ODD is not in use(i.e. no disc inside).
+         This option adds support for SATA Zero Power Optical Disc
+         Drive (ZPODD). It requires both the ODD and the platform
+         support, and if enabled, will automatically power on/off the
+         ODD when certain condition is satisfied. This does not impact
+         end user's experience of the ODD, only power is saved when
+         the ODD is not in use (i.e. no disc inside).
 
          If unsure, say N.
 
index a99112cfd8b1e8524558ce659754d5c501e7c0fe..6a67b07de49439a1bbe3da827ef4750bd74269a8 100644 (file)
@@ -281,6 +281,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
        { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
        { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
+       { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
+       { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
        { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
        { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
        { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
index d2ba439cfe54f33f3dd6ad040b8ea1e909602a9f..ffdd32d22602296875ff395a1fdc08e32509c393 100644 (file)
@@ -1547,6 +1547,10 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev)
 
 static int prefer_ms_hyperv = 1;
 module_param(prefer_ms_hyperv, int, 0);
+MODULE_PARM_DESC(prefer_ms_hyperv,
+       "Prefer Hyper-V paravirtualization drivers instead of ATA, "
+       "0 - Use ATA drivers, "
+       "1 (Default) - Use the paravirtualization drivers.");
 
 static void piix_ignore_devices_quirk(struct ata_host *host)
 {
index beea3115577e31a687532cbc1cd3319d900e34a6..8a52dab412e2a8013f36bd547460924c79c996aa 100644 (file)
@@ -1027,7 +1027,7 @@ static void ata_acpi_register_power_resource(struct ata_device *dev)
 
        handle = ata_dev_acpi_handle(dev);
        if (handle)
-               acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev);
+               acpi_dev_pm_add_dependent(handle, &sdev->sdev_gendev);
 }
 
 static void ata_acpi_unregister_power_resource(struct ata_device *dev)
index 70b0e01372b3da34564f53352aa075fa1ab39bf1..6ef27e98c50894190e74f396e19e13a44713157c 100644 (file)
@@ -661,18 +661,7 @@ static struct platform_driver pata_s3c_driver = {
        },
 };
 
-static int __init pata_s3c_init(void)
-{
-       return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe);
-}
-
-static void __exit pata_s3c_exit(void)
-{
-       platform_driver_unregister(&pata_s3c_driver);
-}
-
-module_init(pata_s3c_init);
-module_exit(pata_s3c_exit);
+module_platform_driver_probe(pata_s3c_driver, pata_s3c_probe);
 
 MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>");
 MODULE_DESCRIPTION("low-level driver for Samsung PATA controller");
index 124b2c1d9c0b020255870dec161d41626562fcca..608f82fed632be1738c019f6fcfac6117b657a8d 100644 (file)
@@ -1511,8 +1511,7 @@ error_exit_with_cleanup:
 
        if (hcr_base)
                iounmap(hcr_base);
-       if (host_priv)
-               kfree(host_priv);
+       kfree(host_priv);
 
        return retval;
 }
index 5dc0daed8fac523f5d631c3cd4a022f6fb4c5aca..b81ddfea1da075ad3c1d0cbeb9ab7bf854593436 100644 (file)
@@ -532,11 +532,11 @@ config BLK_DEV_RBD
          If unsure, say N.
 
 config BLK_DEV_RSXX
-       tristate "RamSam PCIe Flash SSD Device Driver"
+       tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver"
        depends on PCI
        help
          Device driver for IBM's high speed PCIe SSD
-         storage devices: RamSan-70 and RamSan-80.
+         storage devices: FlashSystem-70 and FlashSystem-80.
 
          To compile this driver as a module, choose M here: the
          module will be called rsxx.
index 25ef5c014fcaa4255e0e3009991b6a9ac1ff18c7..92b6d7c51e39590b3780f17c88737009b7becbbf 100644 (file)
@@ -51,8 +51,9 @@ new_skb(ulong len)
 {
        struct sk_buff *skb;
 
-       skb = alloc_skb(len, GFP_ATOMIC);
+       skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
        if (skb) {
+               skb_reserve(skb, MAX_HEADER);
                skb_reset_mac_header(skb);
                skb_reset_network_header(skb);
                skb->protocol = __constant_htons(ETH_P_AOE);
index ade58bc8f3c4dee67f69edde4ce20b1ef18d7cc8..1c1b8e544aa250d38710b47c19353c9ef5773c6c 100644 (file)
@@ -4206,7 +4206,7 @@ static int cciss_find_cfgtables(ctlr_info_t *h)
        if (rc)
                return rc;
        h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
-               cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable));
+               cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
        if (!h->cfgtable)
                return -ENOMEM;
        rc = write_driver_ver_to_cfgtable(h->cfgtable);
index 747bb2af69dcc55fec530466a9f55d06914ba8f7..fe5f6403417f892d744a034ea0bdda9a21596fda 100644 (file)
@@ -1044,12 +1044,29 @@ static int loop_clr_fd(struct loop_device *lo)
        lo->lo_state = Lo_unbound;
        /* This is safe: open() is still holding a reference. */
        module_put(THIS_MODULE);
-       if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
-               ioctl_by_bdev(bdev, BLKRRPART, 0);
        lo->lo_flags = 0;
        if (!part_shift)
                lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
        mutex_unlock(&lo->lo_ctl_mutex);
+
+       /*
+        * Remove all partitions, since BLKRRPART won't remove user
+        * added partitions when max_part=0
+        */
+       if (bdev) {
+               struct disk_part_iter piter;
+               struct hd_struct *part;
+
+               mutex_lock_nested(&bdev->bd_mutex, 1);
+               invalidate_partition(bdev->bd_disk, 0);
+               disk_part_iter_init(&piter, bdev->bd_disk,
+                                       DISK_PITER_INCL_EMPTY);
+               while ((part = disk_part_iter_next(&piter)))
+                       delete_partition(bdev->bd_disk, part->partno);
+               disk_part_iter_exit(&piter);
+               mutex_unlock(&bdev->bd_mutex);
+       }
+
        /*
         * Need not hold lo_ctl_mutex to fput backing file.
         * Calling fput holding lo_ctl_mutex triggers a circular
@@ -1623,6 +1640,7 @@ static int loop_add(struct loop_device **l, int i)
                goto out_free_dev;
        i = err;
 
+       err = -ENOMEM;
        lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
        if (!lo->lo_queue)
                goto out_free_dev;
index 1788f491e0fb50e13cefb2f81c286d41721e75ef..076ae7f1b781e0b1586631b2708471ce98f978c1 100644 (file)
@@ -890,8 +890,10 @@ static int mg_probe(struct platform_device *plat_dev)
        gpio_direction_output(host->rst, 1);
 
        /* reset out pin */
-       if (!(prv_data->dev_attr & MG_DEV_MASK))
+       if (!(prv_data->dev_attr & MG_DEV_MASK)) {
+               err = -EINVAL;
                goto probe_err_3a;
+       }
 
        if (prv_data->dev_attr != MG_BOOT_DEV) {
                rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
index 11cc9522cdd439ff3db29825037c4f866bc28abb..92250af84e7d151a8da57ed2cf6d22d8e3313e9d 100644 (file)
@@ -4224,6 +4224,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
        dd->isr_workq = create_workqueue(dd->workq_name);
        if (!dd->isr_workq) {
                dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
+               rv = -ENOMEM;
                goto block_initialize_err;
        }
 
@@ -4282,7 +4283,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
        INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
 
        pci_set_master(pdev);
-       if (pci_enable_msi(pdev)) {
+       rv = pci_enable_msi(pdev);
+       if (rv) {
                dev_warn(&pdev->dev,
                        "Unable to enable MSI interrupt.\n");
                goto block_initialize_err;
index 07fb2dfaae13f02588128ea0d73372a8859b63ba..9dcefe40380bf23ca7c1675b4286c5f97a480d79 100644 (file)
@@ -135,6 +135,7 @@ static inline void _nvme_check_size(void)
        BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
        BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
        BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
 }
 
 typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
@@ -237,7 +238,8 @@ static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
                *fn = special_completion;
                return CMD_CTX_INVALID;
        }
-       *fn = info[cmdid].fn;
+       if (fn)
+               *fn = info[cmdid].fn;
        ctx = info[cmdid].ctx;
        info[cmdid].fn = special_completion;
        info[cmdid].ctx = CMD_CTX_COMPLETED;
@@ -335,6 +337,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
                iod->offset = offsetof(struct nvme_iod, sg[nseg]);
                iod->npages = -1;
                iod->length = nbytes;
+               iod->nents = 0;
        }
 
        return iod;
@@ -375,7 +378,8 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
        struct bio *bio = iod->private;
        u16 status = le16_to_cpup(&cqe->status) >> 1;
 
-       dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+       if (iod->nents)
+               dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
                        bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        nvme_free_iod(dev, iod);
        if (status) {
@@ -589,7 +593,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 
        result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
        if (result < 0)
-               goto free_iod;
+               goto free_cmdid;
        length = result;
 
        cmnd->rw.command_id = cmdid;
@@ -609,6 +613,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 
        return 0;
 
+ free_cmdid:
+       free_cmdid(nvmeq, cmdid, NULL);
  free_iod:
        nvme_free_iod(nvmeq->dev, iod);
  nomem:
@@ -835,8 +841,8 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
        return nvme_submit_admin_cmd(dev, &c, NULL);
 }
 
-static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
-                               unsigned nsid, dma_addr_t dma_addr)
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+                                       dma_addr_t dma_addr, u32 *result)
 {
        struct nvme_command c;
 
@@ -846,7 +852,7 @@ static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
        c.features.prp1 = cpu_to_le64(dma_addr);
        c.features.fid = cpu_to_le32(fid);
 
-       return nvme_submit_admin_cmd(dev, &c, NULL);
+       return nvme_submit_admin_cmd(dev, &c, result);
 }
 
 static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
@@ -906,6 +912,10 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
 
        spin_lock_irq(&nvmeq->q_lock);
        nvme_cancel_ios(nvmeq, false);
+       while (bio_list_peek(&nvmeq->sq_cong)) {
+               struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+               bio_endio(bio, -EIO);
+       }
        spin_unlock_irq(&nvmeq->q_lock);
 
        irq_set_affinity_hint(vector, NULL);
@@ -1230,12 +1240,17 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
        if (length != cmd.data_len)
                status = -ENOMEM;
        else
-               status = nvme_submit_admin_cmd(dev, &c, NULL);
+               status = nvme_submit_admin_cmd(dev, &c, &cmd.result);
 
        if (cmd.data_len) {
                nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
                nvme_free_iod(dev, iod);
        }
+
+       if (!status && copy_to_user(&ucmd->result, &cmd.result,
+                                                       sizeof(cmd.result)))
+               status = -EFAULT;
+
        return status;
 }
 
@@ -1523,9 +1538,9 @@ static int nvme_dev_add(struct nvme_dev *dev)
                        continue;
 
                res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
-                                                       dma_addr + 4096);
+                                                       dma_addr + 4096, NULL);
                if (res)
-                       continue;
+                       memset(mem + 4096, 0, 4096);
 
                ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
                if (ns)
index 6c81a4c040b987e75d4a8b0bb5e4ff65e2108dba..f556f8a8b3f9b476949c6133f39778c49502e380 100644 (file)
@@ -1264,6 +1264,32 @@ static bool obj_request_done_test(struct rbd_obj_request *obj_request)
        return atomic_read(&obj_request->done) != 0;
 }
 
+static void
+rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
+{
+       dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
+               obj_request, obj_request->img_request, obj_request->result,
+               obj_request->xferred, obj_request->length);
+       /*
+        * ENOENT means a hole in the image.  We zero-fill the
+        * entire length of the request.  A short read also implies
+        * zero-fill to the end of the request.  Either way we
+        * update the xferred count to indicate the whole request
+        * was satisfied.
+        */
+       BUG_ON(obj_request->type != OBJ_REQUEST_BIO);
+       if (obj_request->result == -ENOENT) {
+               zero_bio_chain(obj_request->bio_list, 0);
+               obj_request->result = 0;
+               obj_request->xferred = obj_request->length;
+       } else if (obj_request->xferred < obj_request->length &&
+                       !obj_request->result) {
+               zero_bio_chain(obj_request->bio_list, obj_request->xferred);
+               obj_request->xferred = obj_request->length;
+       }
+       obj_request_done_set(obj_request);
+}
+
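The hole/short-read handling documented in the comment above can be exercised outside the driver; a minimal sketch (not rbd code, with -ENOENT hard-coded as -2 and a small fake request) showing the two zero-fill branches and the xferred fix-up:

/* Illustrative only: the zero-fill decision from the callback above. */
#include <stdio.h>
#include <string.h>

#define FAKE_ENOENT 2

struct fake_req {
        int result;                     /* 0 or -FAKE_ENOENT */
        unsigned long long xferred;
        unsigned long long length;
        unsigned char buf[16];
};

static void read_callback(struct fake_req *r)
{
        if (r->result == -FAKE_ENOENT) {                /* whole object is a hole */
                memset(r->buf, 0, r->length);
                r->result = 0;
                r->xferred = r->length;
        } else if (r->xferred < r->length && !r->result) {
                memset(r->buf + r->xferred, 0, r->length - r->xferred);
                r->xferred = r->length;                 /* short read: zero the tail */
        }
}

int main(void)
{
        struct fake_req r = { .result = 0, .xferred = 10, .length = 16 };

        memset(r.buf, 0xaa, sizeof(r.buf));
        read_callback(&r);
        printf("result=%d xferred=%llu last byte=%02x\n",
               r.result, r.xferred, r.buf[15]);         /* last byte == 00 */
        return 0;
}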
 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
 {
        dout("%s: obj %p cb %p\n", __func__, obj_request,
@@ -1284,23 +1310,10 @@ static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
 {
        dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
                obj_request->result, obj_request->xferred, obj_request->length);
-       /*
-        * ENOENT means a hole in the object.  We zero-fill the
-        * entire length of the request.  A short read also implies
-        * zero-fill to the end of the request.  Either way we
-        * update the xferred count to indicate the whole request
-        * was satisfied.
-        */
-       if (obj_request->result == -ENOENT) {
-               zero_bio_chain(obj_request->bio_list, 0);
-               obj_request->result = 0;
-               obj_request->xferred = obj_request->length;
-       } else if (obj_request->xferred < obj_request->length &&
-                       !obj_request->result) {
-               zero_bio_chain(obj_request->bio_list, obj_request->xferred);
-               obj_request->xferred = obj_request->length;
-       }
-       obj_request_done_set(obj_request);
+       if (obj_request->img_request)
+               rbd_img_obj_request_read_callback(obj_request);
+       else
+               obj_request_done_set(obj_request);
 }
 
 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
index f35cd0b71f7b5802ae521d046c8b582386e6c6f3..b1c53c0aa450c47ddd38bf2138bb0505f341fc61 100644 (file)
@@ -1,2 +1,2 @@
 obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
-rsxx-y := config.o core.o cregs.o dev.o dma.o
+rsxx-objs := config.o core.o cregs.o dev.o dma.o
index a295e7e9ee41c98613ded6756ff725a615b5bafb..10cd530d3e1026fcb270e1b402a3a900363a7765 100644 (file)
 #include "rsxx_priv.h"
 #include "rsxx_cfg.h"
 
-static void initialize_config(void *config)
+static void initialize_config(struct rsxx_card_cfg *cfg)
 {
-       struct rsxx_card_cfg *cfg = config;
-
        cfg->hdr.version = RSXX_CFG_VERSION;
 
        cfg->data.block_size        = RSXX_HW_BLK_SIZE;
        cfg->data.stripe_size       = RSXX_HW_BLK_SIZE;
-       cfg->data.vendor_id         = RSXX_VENDOR_ID_TMS_IBM;
+       cfg->data.vendor_id         = RSXX_VENDOR_ID_IBM;
        cfg->data.cache_order       = (-1);
        cfg->data.intr_coal.mode    = RSXX_INTR_COAL_DISABLED;
        cfg->data.intr_coal.count   = 0;
@@ -181,7 +179,7 @@ int rsxx_load_config(struct rsxx_cardinfo *card)
        } else {
                dev_info(CARD_TO_DEV(card),
                        "Initializing card configuration.\n");
-               initialize_config(card);
+               initialize_config(&card->config);
                st = rsxx_save_config(card);
                if (st)
                        return st;
index e5162487686aef6f429b86655e22e27c9a14cddd..5af21f2db29cd1246397e3b4eed8627ba314adf2 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/bitops.h>
+#include <linux/delay.h>
 
 #include <linux/genhd.h>
 #include <linux/idr.h>
@@ -39,8 +40,8 @@
 
 #define NO_LEGACY 0
 
-MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
-MODULE_AUTHOR("IBM <support@ramsan.com>");
+MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver");
+MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRIVER_VERSION);
 
@@ -52,6 +53,13 @@ static DEFINE_IDA(rsxx_disk_ida);
 static DEFINE_SPINLOCK(rsxx_ida_lock);
 
 /*----------------- Interrupt Control & Handling -------------------*/
+
+static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
+{
+       card->isr_mask = 0;
+       card->ier_mask = 0;
+}
+
 static void __enable_intr(unsigned int *mask, unsigned int intr)
 {
        *mask |= intr;
@@ -71,7 +79,8 @@ static void __disable_intr(unsigned int *mask, unsigned int intr)
  */
 void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 {
-       if (unlikely(card->halt))
+       if (unlikely(card->halt) ||
+           unlikely(card->eeh_state))
                return;
 
        __enable_intr(&card->ier_mask, intr);
@@ -80,6 +89,9 @@ void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 
 void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 {
+       if (unlikely(card->eeh_state))
+               return;
+
        __disable_intr(&card->ier_mask, intr);
        iowrite32(card->ier_mask, card->regmap + IER);
 }
@@ -87,7 +99,8 @@ void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
                                 unsigned int intr)
 {
-       if (unlikely(card->halt))
+       if (unlikely(card->halt) ||
+           unlikely(card->eeh_state))
                return;
 
        __enable_intr(&card->isr_mask, intr);
@@ -97,6 +110,9 @@ void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
 void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
                                  unsigned int intr)
 {
+       if (unlikely(card->eeh_state))
+               return;
+
        __disable_intr(&card->isr_mask, intr);
        __disable_intr(&card->ier_mask, intr);
        iowrite32(card->ier_mask, card->regmap + IER);
@@ -115,6 +131,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
        do {
                reread_isr = 0;
 
+               if (unlikely(card->eeh_state))
+                       break;
+
                isr = ioread32(card->regmap + ISR);
                if (isr == 0xffffffff) {
                        /*
@@ -161,9 +180,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
 }
 
 /*----------------- Card Event Handler -------------------*/
-static char *rsxx_card_state_to_str(unsigned int state)
+static const char * const rsxx_card_state_to_str(unsigned int state)
 {
-       static char *state_strings[] = {
+       static const char * const state_strings[] = {
                "Unknown", "Shutdown", "Starting", "Formatting",
                "Uninitialized", "Good", "Shutting Down",
                "Fault", "Read Only Fault", "dStroying"
@@ -304,6 +323,192 @@ static int card_shutdown(struct rsxx_cardinfo *card)
        return 0;
 }
 
+static int rsxx_eeh_frozen(struct pci_dev *dev)
+{
+       struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+       int i;
+       int st;
+
+       dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n");
+
+       card->eeh_state = 1;
+       rsxx_mask_interrupts(card);
+
+       /*
+        * We need to guarantee that the write for eeh_state and masking
+        * interrupts does not become reordered. This will prevent a possible
+        * race condition with the EEH code.
+        */
+       wmb();
+
+       pci_disable_device(dev);
+
+       st = rsxx_eeh_save_issued_dmas(card);
+       if (st)
+               return st;
+
+       rsxx_eeh_save_issued_creg(card);
+
+       for (i = 0; i < card->n_targets; i++) {
+               if (card->ctrl[i].status.buf)
+                       pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
+                                           card->ctrl[i].status.buf,
+                                           card->ctrl[i].status.dma_addr);
+               if (card->ctrl[i].cmd.buf)
+                       pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
+                                           card->ctrl[i].cmd.buf,
+                                           card->ctrl[i].cmd.dma_addr);
+       }
+
+       return 0;
+}
+
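The ordering requirement stated in the comment above (the eeh_state write and the mask writes must be visible before anything that follows) has a user-space counterpart in C11 fences; a rough sketch, not driver code, with the flag, mask, and later teardown stores modeled as atomics:

/* Illustrative only: release/acquire fences standing in for wmb(). */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

static atomic_int state_flag;           /* analogue of card->eeh_state       */
static atomic_int irq_mask = 0xff;      /* analogue of the interrupt masks   */
static atomic_int teardown;             /* analogue of the later device work */

static void *publisher(void *arg)
{
        (void)arg;
        atomic_store_explicit(&state_flag, 1, memory_order_relaxed);
        atomic_store_explicit(&irq_mask, 0, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* like wmb(): order the stores */
        atomic_store_explicit(&teardown, 1, memory_order_relaxed);
        return NULL;
}

static void *observer(void *arg)
{
        (void)arg;
        while (!atomic_load_explicit(&teardown, memory_order_relaxed))
                ;                                       /* spin until teardown is seen */
        atomic_thread_fence(memory_order_acquire);
        printf("state_flag=%d irq_mask=%d\n",           /* guaranteed 1 and 0 here */
               atomic_load_explicit(&state_flag, memory_order_relaxed),
               atomic_load_explicit(&irq_mask, memory_order_relaxed));
        return NULL;
}

int main(void)
{
        pthread_t p, o;

        pthread_create(&o, NULL, observer, NULL);
        pthread_create(&p, NULL, publisher, NULL);
        pthread_join(p, NULL);
        pthread_join(o, NULL);
        return 0;
}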
+static void rsxx_eeh_failure(struct pci_dev *dev)
+{
+       struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+       int i;
+
+       dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n");
+
+       card->eeh_state = 1;
+
+       for (i = 0; i < card->n_targets; i++)
+               del_timer_sync(&card->ctrl[i].activity_timer);
+
+       rsxx_eeh_cancel_dmas(card);
+}
+
+static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
+{
+       unsigned int status;
+       int iter = 0;
+
+       /* We need to wait for the hardware to reset */
+       while (iter++ < 10) {
+               status = ioread32(card->regmap + PCI_RECONFIG);
+
+               if (status & RSXX_FLUSH_BUSY) {
+                       ssleep(1);
+                       continue;
+               }
+
+               if (status & RSXX_FLUSH_TIMEOUT)
+                       dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
+               return 0;
+       }
+
+       /* Hardware failed resetting itself. */
+       return -1;
+}
+
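The bounded busy/retry loop above follows a common poll-until-ready shape; a user-space sketch (illustrative only, with read_status() standing in for the PCI_RECONFIG register read and made to report busy for the first few tries):

/* Illustrative only: bounded polling with a per-iteration sleep. */
#include <stdio.h>
#include <unistd.h>

#define FLUSH_BUSY    0x1
#define FLUSH_TIMEOUT 0x2

static unsigned read_status(int iter)
{
        return (iter < 3) ? FLUSH_BUSY : 0;     /* pretend HW finishes on the 4th read */
}

static int wait_for_flush(void)
{
        int iter;

        for (iter = 0; iter < 10; iter++) {
                unsigned status = read_status(iter);

                if (status & FLUSH_BUSY) {
                        sleep(1);               /* counterpart of ssleep(1) */
                        continue;
                }
                if (status & FLUSH_TIMEOUT)
                        fprintf(stderr, "flush finished with timeout flag set\n");
                return 0;                       /* hardware came back */
        }
        return -1;                              /* gave up: hardware never reset */
}

int main(void)
{
        printf("wait_for_flush() = %d\n", wait_for_flush());
        return 0;
}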
+static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
+                                           enum pci_channel_state error)
+{
+       int st;
+
+       if (dev->revision < RSXX_EEH_SUPPORT)
+               return PCI_ERS_RESULT_NONE;
+
+       if (error == pci_channel_io_perm_failure) {
+               rsxx_eeh_failure(dev);
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       st = rsxx_eeh_frozen(dev);
+       if (st) {
+               dev_err(&dev->dev, "Slot reset setup failed\n");
+               rsxx_eeh_failure(dev);
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
+{
+       struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+       unsigned long flags;
+       int i;
+       int st;
+
+       dev_warn(&dev->dev,
+               "IBM FlashSystem PCI: recovering from slot reset.\n");
+
+       st = pci_enable_device(dev);
+       if (st)
+               goto failed_hw_setup;
+
+       pci_set_master(dev);
+
+       st = rsxx_eeh_fifo_flush_poll(card);
+       if (st)
+               goto failed_hw_setup;
+
+       rsxx_dma_queue_reset(card);
+
+       for (i = 0; i < card->n_targets; i++) {
+               st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
+               if (st)
+                       goto failed_hw_buffers_init;
+       }
+
+       if (card->config_valid)
+               rsxx_dma_configure(card);
+
+       /* Clears the ISR register from spurious interrupts */
+       st = ioread32(card->regmap + ISR);
+
+       card->eeh_state = 0;
+
+       st = rsxx_eeh_remap_dmas(card);
+       if (st)
+               goto failed_remap_dmas;
+
+       spin_lock_irqsave(&card->irq_lock, flags);
+       if (card->n_targets & RSXX_MAX_TARGETS)
+               rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
+       else
+               rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
+       spin_unlock_irqrestore(&card->irq_lock, flags);
+
+       rsxx_kick_creg_queue(card);
+
+       for (i = 0; i < card->n_targets; i++) {
+               spin_lock(&card->ctrl[i].queue_lock);
+               if (list_empty(&card->ctrl[i].queue)) {
+                       spin_unlock(&card->ctrl[i].queue_lock);
+                       continue;
+               }
+               spin_unlock(&card->ctrl[i].queue_lock);
+
+               queue_work(card->ctrl[i].issue_wq,
+                               &card->ctrl[i].issue_dma_work);
+       }
+
+       dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n");
+
+       return PCI_ERS_RESULT_RECOVERED;
+
+failed_hw_buffers_init:
+failed_remap_dmas:
+       for (i = 0; i < card->n_targets; i++) {
+               if (card->ctrl[i].status.buf)
+                       pci_free_consistent(card->dev,
+                                       STATUS_BUFFER_SIZE8,
+                                       card->ctrl[i].status.buf,
+                                       card->ctrl[i].status.dma_addr);
+               if (card->ctrl[i].cmd.buf)
+                       pci_free_consistent(card->dev,
+                                       COMMAND_BUFFER_SIZE8,
+                                       card->ctrl[i].cmd.buf,
+                                       card->ctrl[i].cmd.dma_addr);
+       }
+failed_hw_setup:
+       rsxx_eeh_failure(dev);
+       return PCI_ERS_RESULT_DISCONNECT;
+
+}
+
 /*----------------- Driver Initialization & Setup -------------------*/
 /* Returns:   0 if the driver is compatible with the device
             -1 if the driver is NOT compatible with the device */
@@ -383,6 +588,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 
        spin_lock_init(&card->irq_lock);
        card->halt = 0;
+       card->eeh_state = 0;
 
        spin_lock_irq(&card->irq_lock);
        rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
@@ -538,9 +744,6 @@ static void rsxx_pci_remove(struct pci_dev *dev)
        rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
        spin_unlock_irqrestore(&card->irq_lock, flags);
 
-       /* Prevent work_structs from re-queuing themselves. */
-       card->halt = 1;
-
        cancel_work_sync(&card->event_work);
 
        rsxx_destroy_dev(card);
@@ -549,6 +752,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
        spin_lock_irqsave(&card->irq_lock, flags);
        rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
        spin_unlock_irqrestore(&card->irq_lock, flags);
+
+       /* Prevent work_structs from re-queuing themselves. */
+       card->halt = 1;
+
        free_irq(dev->irq, card);
 
        if (!force_legacy)
@@ -592,11 +799,14 @@ static void rsxx_pci_shutdown(struct pci_dev *dev)
        card_shutdown(card);
 }
 
+static const struct pci_error_handlers rsxx_err_handler = {
+       .error_detected = rsxx_error_detected,
+       .slot_reset     = rsxx_slot_reset,
+};
+
 static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
-       {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
-       {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
-       {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
-       {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
+       {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
+       {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
        {0,},
 };
 
@@ -609,6 +819,7 @@ static struct pci_driver rsxx_pci_driver = {
        .remove         = rsxx_pci_remove,
        .suspend        = rsxx_pci_suspend,
        .shutdown       = rsxx_pci_shutdown,
+       .err_handler    = &rsxx_err_handler,
 };
 
 static int __init rsxx_core_init(void)
index 80bbe639fccd72d002d269f6486e58ace210aab9..4b5c020a0a65ad8831d75bf3407dd81b2d82b306 100644 (file)
@@ -58,7 +58,7 @@ static struct kmem_cache *creg_cmd_pool;
 #error Unknown endianess!!! Aborting...
 #endif
 
-static void copy_to_creg_data(struct rsxx_cardinfo *card,
+static int copy_to_creg_data(struct rsxx_cardinfo *card,
                              int cnt8,
                              void *buf,
                              unsigned int stream)
@@ -66,6 +66,9 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
        int i = 0;
        u32 *data = buf;
 
+       if (unlikely(card->eeh_state))
+               return -EIO;
+
        for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
                /*
                 * Firmware implementation makes it necessary to byte swap on
@@ -76,10 +79,12 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
                else
                        iowrite32(data[i], card->regmap + CREG_DATA(i));
        }
+
+       return 0;
 }
 
 
-static void copy_from_creg_data(struct rsxx_cardinfo *card,
+static int copy_from_creg_data(struct rsxx_cardinfo *card,
                                int cnt8,
                                void *buf,
                                unsigned int stream)
@@ -87,6 +92,9 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
        int i = 0;
        u32 *data = buf;
 
+       if (unlikely(card->eeh_state))
+               return -EIO;
+
        for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
                /*
                 * Firmware implementation makes it necessary to byte swap on
@@ -97,41 +105,31 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
                else
                        data[i] = ioread32(card->regmap + CREG_DATA(i));
        }
-}
-
-static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
-{
-       struct creg_cmd *cmd;
 
-       /*
-        * Spin lock is needed because this can be called in atomic/interrupt
-        * context.
-        */
-       spin_lock_bh(&card->creg_ctrl.lock);
-       cmd = card->creg_ctrl.active_cmd;
-       card->creg_ctrl.active_cmd = NULL;
-       spin_unlock_bh(&card->creg_ctrl.lock);
-
-       return cmd;
+       return 0;
 }
 
 static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
 {
+       int st;
+
+       if (unlikely(card->eeh_state))
+               return;
+
        iowrite32(cmd->addr, card->regmap + CREG_ADD);
        iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
 
        if (cmd->op == CREG_OP_WRITE) {
-               if (cmd->buf)
-                       copy_to_creg_data(card, cmd->cnt8,
-                                         cmd->buf, cmd->stream);
+               if (cmd->buf) {
+                       st = copy_to_creg_data(card, cmd->cnt8,
+                                              cmd->buf, cmd->stream);
+                       if (st)
+                               return;
+               }
        }
 
-       /*
-        * Data copy must complete before initiating the command. This is
-        * needed for weakly ordered processors (i.e. PowerPC), so that all
-        * neccessary registers are written before we kick the hardware.
-        */
-       wmb();
+       if (unlikely(card->eeh_state))
+               return;
 
        /* Setting the valid bit will kick off the command. */
        iowrite32(cmd->op, card->regmap + CREG_CMD);
@@ -196,11 +194,11 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card,
        cmd->cb_private = cb_private;
        cmd->status     = 0;
 
-       spin_lock(&card->creg_ctrl.lock);
+       spin_lock_bh(&card->creg_ctrl.lock);
        list_add_tail(&cmd->list, &card->creg_ctrl.queue);
        card->creg_ctrl.q_depth++;
        creg_kick_queue(card);
-       spin_unlock(&card->creg_ctrl.lock);
+       spin_unlock_bh(&card->creg_ctrl.lock);
 
        return 0;
 }
@@ -210,7 +208,11 @@ static void creg_cmd_timed_out(unsigned long data)
        struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
        struct creg_cmd *cmd;
 
-       cmd = pop_active_cmd(card);
+       spin_lock(&card->creg_ctrl.lock);
+       cmd = card->creg_ctrl.active_cmd;
+       card->creg_ctrl.active_cmd = NULL;
+       spin_unlock(&card->creg_ctrl.lock);
+
        if (cmd == NULL) {
                card->creg_ctrl.creg_stats.creg_timeout++;
                dev_warn(CARD_TO_DEV(card),
@@ -247,7 +249,11 @@ static void creg_cmd_done(struct work_struct *work)
        if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
                card->creg_ctrl.creg_stats.failed_cancel_timer++;
 
-       cmd = pop_active_cmd(card);
+       spin_lock_bh(&card->creg_ctrl.lock);
+       cmd = card->creg_ctrl.active_cmd;
+       card->creg_ctrl.active_cmd = NULL;
+       spin_unlock_bh(&card->creg_ctrl.lock);
+
        if (cmd == NULL) {
                dev_err(CARD_TO_DEV(card),
                        "Spurious creg interrupt!\n");
@@ -287,7 +293,7 @@ static void creg_cmd_done(struct work_struct *work)
                        goto creg_done;
                }
 
-               copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
+               st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
        }
 
 creg_done:
@@ -296,10 +302,10 @@ creg_done:
 
        kmem_cache_free(creg_cmd_pool, cmd);
 
-       spin_lock(&card->creg_ctrl.lock);
+       spin_lock_bh(&card->creg_ctrl.lock);
        card->creg_ctrl.active = 0;
        creg_kick_queue(card);
-       spin_unlock(&card->creg_ctrl.lock);
+       spin_unlock_bh(&card->creg_ctrl.lock);
 }
 
 static void creg_reset(struct rsxx_cardinfo *card)
@@ -324,7 +330,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
                "Resetting creg interface for recovery\n");
 
        /* Cancel outstanding commands */
-       spin_lock(&card->creg_ctrl.lock);
+       spin_lock_bh(&card->creg_ctrl.lock);
        list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
                list_del(&cmd->list);
                card->creg_ctrl.q_depth--;
@@ -345,7 +351,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
 
                card->creg_ctrl.active = 0;
        }
-       spin_unlock(&card->creg_ctrl.lock);
+       spin_unlock_bh(&card->creg_ctrl.lock);
 
        card->creg_ctrl.reset = 0;
        spin_lock_irqsave(&card->irq_lock, flags);
@@ -399,12 +405,12 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card,
                return st;
 
        /*
-        * This timeout is neccessary for unresponsive hardware. The additional
+        * This timeout is necessary for unresponsive hardware. The additional
         * 20 seconds to used to guarantee that each cregs requests has time to
         * complete.
         */
-       timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
-                               card->creg_ctrl.q_depth) + 20000);
+       timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
+                                  card->creg_ctrl.q_depth + 20000);
 
        /*
         * The creg interface is guaranteed to complete. It has a timeout
@@ -690,6 +696,32 @@ int rsxx_reg_access(struct rsxx_cardinfo *card,
        return 0;
 }
 
+void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
+{
+       struct creg_cmd *cmd = NULL;
+
+       cmd = card->creg_ctrl.active_cmd;
+       card->creg_ctrl.active_cmd = NULL;
+
+       if (cmd) {
+               del_timer_sync(&card->creg_ctrl.cmd_timer);
+
+               spin_lock_bh(&card->creg_ctrl.lock);
+               list_add(&cmd->list, &card->creg_ctrl.queue);
+               card->creg_ctrl.q_depth++;
+               card->creg_ctrl.active = 0;
+               spin_unlock_bh(&card->creg_ctrl.lock);
+       }
+}
+
+void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
+{
+       spin_lock_bh(&card->creg_ctrl.lock);
+       if (!list_empty(&card->creg_ctrl.queue))
+               creg_kick_queue(card);
+       spin_unlock_bh(&card->creg_ctrl.lock);
+}
+
 /*------------ Initialization & Setup --------------*/
 int rsxx_creg_setup(struct rsxx_cardinfo *card)
 {
@@ -712,7 +744,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
        int cnt = 0;
 
        /* Cancel outstanding commands */
-       spin_lock(&card->creg_ctrl.lock);
+       spin_lock_bh(&card->creg_ctrl.lock);
        list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
                list_del(&cmd->list);
                if (cmd->cb)
@@ -737,7 +769,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
                        "Canceled active creg command\n");
                kmem_cache_free(creg_cmd_pool, cmd);
        }
-       spin_unlock(&card->creg_ctrl.lock);
+       spin_unlock_bh(&card->creg_ctrl.lock);
 
        cancel_work_sync(&card->creg_ctrl.done_work);
 }
index 63176e67662f5f48d2b358c39d93d86735c21c72..0607513cfb41fa4b2ff88bac32be0665c645a2bb 100644 (file)
@@ -28,7 +28,7 @@
 struct rsxx_dma {
        struct list_head         list;
        u8                       cmd;
-       unsigned int             laddr;     /* Logical address on the ramsan */
+       unsigned int             laddr;     /* Logical address */
        struct {
                u32              off;
                u32              cnt;
@@ -81,9 +81,6 @@ enum rsxx_hw_status {
        HW_STATUS_FAULT         = 0x08,
 };
 
-#define STATUS_BUFFER_SIZE8     4096
-#define COMMAND_BUFFER_SIZE8    4096
-
 static struct kmem_cache *rsxx_dma_pool;
 
 struct dma_tracker {
@@ -122,7 +119,7 @@ static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
        return tgt;
 }
 
-static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
+void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
 {
        /* Reset all DMA Command/Status Queues */
        iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
@@ -210,7 +207,8 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
        u32 q_depth = 0;
        u32 intr_coal;
 
-       if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
+       if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
+           unlikely(card->eeh_state))
                return;
 
        for (i = 0; i < card->n_targets; i++)
@@ -223,31 +221,26 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 }
 
 /*----------------- RSXX DMA Handling -------------------*/
-static void rsxx_complete_dma(struct rsxx_cardinfo *card,
+static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
                                  struct rsxx_dma *dma,
                                  unsigned int status)
 {
        if (status & DMA_SW_ERR)
-               printk_ratelimited(KERN_ERR
-                                  "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
-                                  dma->cmd, dma->laddr);
+               ctrl->stats.dma_sw_err++;
        if (status & DMA_HW_FAULT)
-               printk_ratelimited(KERN_ERR
-                                  "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
-                                  dma->cmd, dma->laddr);
+               ctrl->stats.dma_hw_fault++;
        if (status & DMA_CANCELLED)
-               printk_ratelimited(KERN_ERR
-                                  "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
-                                  dma->cmd, dma->laddr);
+               ctrl->stats.dma_cancelled++;
 
        if (dma->dma_addr)
-               pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
+               pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+                              get_dma_size(dma),
                               dma->cmd == HW_CMD_BLK_WRITE ?
                                           PCI_DMA_TODEVICE :
                                           PCI_DMA_FROMDEVICE);
 
        if (dma->cb)
-               dma->cb(card, dma->cb_data, status ? 1 : 0);
+               dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
 
        kmem_cache_free(rsxx_dma_pool, dma);
 }
@@ -330,14 +323,15 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
        if (requeue_cmd)
                rsxx_requeue_dma(ctrl, dma);
        else
-               rsxx_complete_dma(ctrl->card, dma, status);
+               rsxx_complete_dma(ctrl, dma, status);
 }
 
 static void dma_engine_stalled(unsigned long data)
 {
        struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
 
-       if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
+       if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
+           unlikely(ctrl->card->eeh_state))
                return;
 
        if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
@@ -369,7 +363,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
        ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
        hw_cmd_buf = ctrl->cmd.buf;
 
-       if (unlikely(ctrl->card->halt))
+       if (unlikely(ctrl->card->halt) ||
+           unlikely(ctrl->card->eeh_state))
                return;
 
        while (1) {
@@ -397,7 +392,7 @@ static void rsxx_issue_dmas(struct work_struct *work)
                 */
                if (unlikely(ctrl->card->dma_fault)) {
                        push_tracker(ctrl->trackers, tag);
-                       rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
+                       rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
                        continue;
                }
 
@@ -432,19 +427,15 @@ static void rsxx_issue_dmas(struct work_struct *work)
 
        /* Let HW know we've queued commands. */
        if (cmds_pending) {
-               /*
-                * We must guarantee that the CPU writes to 'ctrl->cmd.buf'
-                * (which is in PCI-consistent system-memory) from the loop
-                * above make it into the coherency domain before the
-                * following PIO "trigger" updating the cmd.idx.  A WMB is
-                * sufficient. We need not explicitly CPU cache-flush since
-                * the memory is a PCI-consistent (ie; coherent) mapping.
-                */
-               wmb();
-
                atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
                mod_timer(&ctrl->activity_timer,
                          jiffies + DMA_ACTIVITY_TIMEOUT);
+
+               if (unlikely(ctrl->card->eeh_state)) {
+                       del_timer_sync(&ctrl->activity_timer);
+                       return;
+               }
+
                iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
        }
 }
@@ -463,7 +454,8 @@ static void rsxx_dma_done(struct work_struct *work)
        hw_st_buf = ctrl->status.buf;
 
        if (unlikely(ctrl->card->halt) ||
-           unlikely(ctrl->card->dma_fault))
+           unlikely(ctrl->card->dma_fault) ||
+           unlikely(ctrl->card->eeh_state))
                return;
 
        count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
@@ -508,7 +500,7 @@ static void rsxx_dma_done(struct work_struct *work)
                if (status)
                        rsxx_handle_dma_error(ctrl, dma, status);
                else
-                       rsxx_complete_dma(ctrl->card, dma, 0);
+                       rsxx_complete_dma(ctrl, dma, 0);
 
                push_tracker(ctrl->trackers, tag);
 
@@ -727,20 +719,54 @@ bvec_err:
 
 
 /*----------------- DMA Engine Initialization & Setup -------------------*/
+int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
+{
+       ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
+                               &ctrl->status.dma_addr);
+       ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
+                               &ctrl->cmd.dma_addr);
+       if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
+               return -ENOMEM;
+
+       memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
+       iowrite32(lower_32_bits(ctrl->status.dma_addr),
+               ctrl->regmap + SB_ADD_LO);
+       iowrite32(upper_32_bits(ctrl->status.dma_addr),
+               ctrl->regmap + SB_ADD_HI);
+
+       memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
+       iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
+       iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
+
+       ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
+       if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+               dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
+                       ctrl->status.idx);
+               return -EINVAL;
+       }
+       iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
+       iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
+
+       ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
+       if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+               dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
+                       ctrl->status.idx);
+               return -EINVAL;
+       }
+       iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
+       iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+
+       return 0;
+}
+
 static int rsxx_dma_ctrl_init(struct pci_dev *dev,
                                  struct rsxx_dma_ctrl *ctrl)
 {
        int i;
+       int st;
 
        memset(&ctrl->stats, 0, sizeof(ctrl->stats));
 
-       ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
-                                               &ctrl->status.dma_addr);
-       ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
-                                            &ctrl->cmd.dma_addr);
-       if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
-               return -ENOMEM;
-
        ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
        if (!ctrl->trackers)
                return -ENOMEM;
@@ -770,35 +796,9 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
        INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
        INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
 
-       memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
-       iowrite32(lower_32_bits(ctrl->status.dma_addr),
-                 ctrl->regmap + SB_ADD_LO);
-       iowrite32(upper_32_bits(ctrl->status.dma_addr),
-                 ctrl->regmap + SB_ADD_HI);
-
-       memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
-       iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
-       iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
-
-       ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
-       if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
-               dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
-                        ctrl->status.idx);
-               return -EINVAL;
-       }
-       iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
-       iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
-
-       ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
-       if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
-               dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
-                        ctrl->status.idx);
-               return -EINVAL;
-       }
-       iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
-       iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
-
-       wmb();
+       st = rsxx_hw_buffers_init(dev, ctrl);
+       if (st)
+               return st;
 
        return 0;
 }
@@ -834,7 +834,7 @@ static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
        return 0;
 }
 
-static int rsxx_dma_configure(struct rsxx_cardinfo *card)
+int rsxx_dma_configure(struct rsxx_cardinfo *card)
 {
        u32 intr_coal;
 
@@ -980,6 +980,103 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
        }
 }
 
+int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
+{
+       int i;
+       int j;
+       int cnt;
+       struct rsxx_dma *dma;
+       struct list_head *issued_dmas;
+
+       issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
+                             GFP_KERNEL);
+       if (!issued_dmas)
+               return -ENOMEM;
+
+       for (i = 0; i < card->n_targets; i++) {
+               INIT_LIST_HEAD(&issued_dmas[i]);
+               cnt = 0;
+               for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
+                       dma = get_tracker_dma(card->ctrl[i].trackers, j);
+                       if (dma == NULL)
+                               continue;
+
+                       if (dma->cmd == HW_CMD_BLK_WRITE)
+                               card->ctrl[i].stats.writes_issued--;
+                       else if (dma->cmd == HW_CMD_BLK_DISCARD)
+                               card->ctrl[i].stats.discards_issued--;
+                       else
+                               card->ctrl[i].stats.reads_issued--;
+
+                       list_add_tail(&dma->list, &issued_dmas[i]);
+                       push_tracker(card->ctrl[i].trackers, j);
+                       cnt++;
+               }
+
+               spin_lock(&card->ctrl[i].queue_lock);
+               list_splice(&issued_dmas[i], &card->ctrl[i].queue);
+
+               atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
+               card->ctrl[i].stats.sw_q_depth += cnt;
+               card->ctrl[i].e_cnt = 0;
+
+               list_for_each_entry(dma, &card->ctrl[i].queue, list) {
+                       if (dma->dma_addr)
+                               pci_unmap_page(card->dev, dma->dma_addr,
+                                              get_dma_size(dma),
+                                              dma->cmd == HW_CMD_BLK_WRITE ?
+                                              PCI_DMA_TODEVICE :
+                                              PCI_DMA_FROMDEVICE);
+               }
+               spin_unlock(&card->ctrl[i].queue_lock);
+       }
+
+       kfree(issued_dmas);
+
+       return 0;
+}
+
+void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
+{
+       struct rsxx_dma *dma;
+       struct rsxx_dma *tmp;
+       int i;
+
+       for (i = 0; i < card->n_targets; i++) {
+               spin_lock(&card->ctrl[i].queue_lock);
+               list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
+                       list_del(&dma->list);
+
+                       rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
+               }
+               spin_unlock(&card->ctrl[i].queue_lock);
+       }
+}
+
+int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
+{
+       struct rsxx_dma *dma;
+       int i;
+
+       for (i = 0; i < card->n_targets; i++) {
+               spin_lock(&card->ctrl[i].queue_lock);
+               list_for_each_entry(dma, &card->ctrl[i].queue, list) {
+                       dma->dma_addr = pci_map_page(card->dev, dma->page,
+                                       dma->pg_off, get_dma_size(dma),
+                                       dma->cmd == HW_CMD_BLK_WRITE ?
+                                       PCI_DMA_TODEVICE :
+                                       PCI_DMA_FROMDEVICE);
+                       if (!dma->dma_addr) {
+                               spin_unlock(&card->ctrl[i].queue_lock);
+                               kmem_cache_free(rsxx_dma_pool, dma);
+                               return -ENOMEM;
+                       }
+               }
+               spin_unlock(&card->ctrl[i].queue_lock);
+       }
+
+       return 0;
+}
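
The three helpers above only move DMA state around; the PCI error-handler hunks in core.c that tie them together are not part of this section. The following is therefore only a sketch of the assumed EEH recovery flow: the handler names, return values and control flow are assumptions for illustration, while rsxx_eeh_save_issued_dmas(), rsxx_eeh_cancel_dmas(), rsxx_eeh_remap_dmas(), rsxx_dma_configure() and card->eeh_state are taken from the diff itself.

static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
					    enum pci_channel_state error)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);

	/* Freeze: pull every DMA the hardware still owns back onto the
	 * software queues and unmap it from the failed bus. */
	card->eeh_state = 1;
	if (rsxx_eeh_save_issued_dmas(card))
		return PCI_ERS_RESULT_DISCONNECT;

	if (error == pci_channel_io_perm_failure) {
		/* Link is permanently gone: complete the queued DMAs
		 * with DMA_CANCELLED and give up. */
		rsxx_eeh_cancel_dmas(card);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);

	/* Thaw: re-map the saved DMAs and reprogram the DMA engines
	 * before the queues are allowed to run again. */
	if (rsxx_eeh_remap_dmas(card) || rsxx_dma_configure(card))
		return PCI_ERS_RESULT_DISCONNECT;

	card->eeh_state = 0;
	return PCI_ERS_RESULT_RECOVERED;
}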
 
 int rsxx_dma_init(void)
 {
index 2e50b65902b71e564f9a25cba8a22cc09d893805..24ba3642bd89aeaabc113407eb2de6d92f13602a 100644 (file)
 
 /*----------------- IOCTL Definitions -------------------*/
 
+#define RSXX_MAX_DATA 8
+
 struct rsxx_reg_access {
        __u32 addr;
        __u32 cnt;
        __u32 stat;
        __u32 stream;
-       __u32 data[8];
+       __u32 data[RSXX_MAX_DATA];
 };
 
-#define RSXX_MAX_REG_CNT       (8 * (sizeof(__u32)))
+#define RSXX_MAX_REG_CNT       (RSXX_MAX_DATA * (sizeof(__u32)))
 
 #define RSXX_IOC_MAGIC 'r'
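Note that the rename is purely cosmetic arithmetic-wise: RSXX_MAX_REG_CNT still evaluates to RSXX_MAX_DATA * sizeof(__u32) = 8 * 4 = 32 bytes, matching the eight-element data[] array in struct rsxx_reg_access above.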
 
index c025fe5fdb703af4d1a94ed491814cded627a17d..f384c943846de35a2dab6d5f173df7b15c4bd5f2 100644 (file)
@@ -58,7 +58,7 @@ struct rsxx_card_cfg {
 };
 
 /* Vendor ID Values */
-#define RSXX_VENDOR_ID_TMS_IBM         0
+#define RSXX_VENDOR_ID_IBM             0
 #define RSXX_VENDOR_ID_DSI             1
 #define RSXX_VENDOR_COUNT              2
 
index a1ac907d8f4c3ee60aabb11bdfb439ea3418183c..382e8bf5c03b73dae35fd1ae8865110cc1a85168 100644 (file)
 
 struct proc_cmd;
 
-#define PCI_VENDOR_ID_TMS_IBM          0x15B6
-#define PCI_DEVICE_ID_RS70_FLASH       0x0019
-#define PCI_DEVICE_ID_RS70D_FLASH      0x001A
-#define PCI_DEVICE_ID_RS80_FLASH       0x001C
-#define PCI_DEVICE_ID_RS81_FLASH       0x001E
+#define PCI_DEVICE_ID_FS70_FLASH       0x04A9
+#define PCI_DEVICE_ID_FS80_FLASH       0x04AA
 
 #define RS70_PCI_REV_SUPPORTED 4
 
 #define DRIVER_NAME "rsxx"
-#define DRIVER_VERSION "3.7"
+#define DRIVER_VERSION "4.0"
 
 /* Block size is 4096 */
 #define RSXX_HW_BLK_SHIFT              12
@@ -67,6 +64,9 @@ struct proc_cmd;
 #define RSXX_MAX_OUTSTANDING_CMDS      255
 #define RSXX_CS_IDX_MASK               0xff
 
+#define STATUS_BUFFER_SIZE8     4096
+#define COMMAND_BUFFER_SIZE8    4096
+
 #define RSXX_MAX_TARGETS       8
 
 struct dma_tracker_list;
@@ -91,6 +91,9 @@ struct rsxx_dma_stats {
        u32 discards_failed;
        u32 done_rescheduled;
        u32 issue_rescheduled;
+       u32 dma_sw_err;
+       u32 dma_hw_fault;
+       u32 dma_cancelled;
        u32 sw_q_depth;         /* Number of DMAs on the SW queue. */
        atomic_t hw_q_depth;    /* Number of DMAs queued to HW. */
 };
@@ -116,6 +119,7 @@ struct rsxx_dma_ctrl {
 struct rsxx_cardinfo {
        struct pci_dev          *dev;
        unsigned int            halt;
+       unsigned int            eeh_state;
 
        void                    __iomem *regmap;
        spinlock_t              irq_lock;
@@ -224,6 +228,7 @@ enum rsxx_pci_regmap {
        PERF_RD512_HI   = 0xac,
        PERF_WR512_LO   = 0xb0,
        PERF_WR512_HI   = 0xb4,
+       PCI_RECONFIG    = 0xb8,
 };
 
 enum rsxx_intr {
@@ -237,6 +242,8 @@ enum rsxx_intr {
        CR_INTR_DMA5    = 0x00000080,
        CR_INTR_DMA6    = 0x00000100,
        CR_INTR_DMA7    = 0x00000200,
+       CR_INTR_ALL_C   = 0x0000003f,
+       CR_INTR_ALL_G   = 0x000003ff,
        CR_INTR_DMA_ALL = 0x000003f5,
        CR_INTR_ALL     = 0xffffffff,
 };
@@ -253,8 +260,14 @@ enum rsxx_pci_reset {
        DMA_QUEUE_RESET         = 0x00000001,
 };
 
+enum rsxx_hw_fifo_flush {
+       RSXX_FLUSH_BUSY         = 0x00000002,
+       RSXX_FLUSH_TIMEOUT      = 0x00000004,
+};
+
 enum rsxx_pci_revision {
        RSXX_DISCARD_SUPPORT = 2,
+       RSXX_EEH_SUPPORT     = 3,
 };
 
 enum rsxx_creg_cmd {
@@ -360,11 +373,17 @@ int rsxx_dma_setup(struct rsxx_cardinfo *card);
 void rsxx_dma_destroy(struct rsxx_cardinfo *card);
 int rsxx_dma_init(void);
 void rsxx_dma_cleanup(void);
+void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
+int rsxx_dma_configure(struct rsxx_cardinfo *card);
 int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                           struct bio *bio,
                           atomic_t *n_dmas,
                           rsxx_dma_cb cb,
                           void *cb_data);
+int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
+int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
+void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card);
+int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
 
 /***** cregs.c *****/
 int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
@@ -389,10 +408,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card);
 void rsxx_creg_destroy(struct rsxx_cardinfo *card);
 int rsxx_creg_init(void);
 void rsxx_creg_cleanup(void);
-
 int rsxx_reg_access(struct rsxx_cardinfo *card,
                        struct rsxx_reg_access __user *ucmd,
                        int read);
+void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card);
+void rsxx_kick_creg_queue(struct rsxx_cardinfo *card);
 
 
 
index de1f319f7bd7e0118a960b5bd23fd286a6ba343e..dd5b2fed97e9ae2b7665f13ec3c42db6ee6fb462 100644 (file)
@@ -164,7 +164,7 @@ static void make_response(struct xen_blkif *blkif, u64 id,
 
 #define foreach_grant_safe(pos, n, rbtree, node) \
        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
-            (n) = rb_next(&(pos)->node); \
+            (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
             &(pos)->node != NULL; \
             (pos) = container_of(n, typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
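
A hedged usage sketch of this iterator (the blkif->persistent_gnts field below is illustrative, not taken from this hunk): the second cursor n is advanced before the loop body runs, so the current entry may be erased and freed inside the loop, and the added NULL check keeps rb_next() from being called on a NULL node when the tree is empty.

	struct persistent_gnt *pos;
	struct rb_node *n;

	foreach_grant_safe(pos, n, &blkif->persistent_gnts, node) {
		/* Safe to remove and free 'pos' here: 'n' already points
		 * at the next node in the tree. */
		rb_erase(&pos->node, &blkif->persistent_gnts);
		kfree(pos);
	}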
@@ -381,8 +381,8 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
 
 static void print_stats(struct xen_blkif *blkif)
 {
-       pr_info("xen-blkback (%s): oo %3d  |  rd %4d  |  wr %4d  |  f %4d"
-                "  |  ds %4d\n",
+       pr_info("xen-blkback (%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
+                "  |  ds %4llu\n",
                 current->comm, blkif->st_oo_req,
                 blkif->st_rd_req, blkif->st_wr_req,
                 blkif->st_f_req, blkif->st_ds_req);
@@ -442,7 +442,7 @@ int xen_blkif_schedule(void *arg)
 }
 
 struct seg_buf {
-       unsigned long buf;
+       unsigned int offset;
        unsigned int nsec;
 };
 /*
@@ -621,30 +621,21 @@ static int xen_blkbk_map(struct blkif_request *req,
                                 * If this is a new persistent grant
                                 * save the handler
                                 */
-                               persistent_gnts[i]->handle = map[j].handle;
-                               persistent_gnts[i]->dev_bus_addr =
-                                       map[j++].dev_bus_addr;
+                               persistent_gnts[i]->handle = map[j++].handle;
                        }
                        pending_handle(pending_req, i) =
                                persistent_gnts[i]->handle;
 
                        if (ret)
                                continue;
-
-                       seg[i].buf = persistent_gnts[i]->dev_bus_addr |
-                               (req->u.rw.seg[i].first_sect << 9);
                } else {
-                       pending_handle(pending_req, i) = map[j].handle;
+                       pending_handle(pending_req, i) = map[j++].handle;
                        bitmap_set(pending_req->unmap_seg, i, 1);
 
-                       if (ret) {
-                               j++;
+                       if (ret)
                                continue;
-                       }
-
-                       seg[i].buf = map[j++].dev_bus_addr |
-                               (req->u.rw.seg[i].first_sect << 9);
                }
+               seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
        }
        return ret;
 }
@@ -679,6 +670,16 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
        return err;
 }
 
+static int dispatch_other_io(struct xen_blkif *blkif,
+                            struct blkif_request *req,
+                            struct pending_req *pending_req)
+{
+       free_req(pending_req);
+       make_response(blkif, req->u.other.id, req->operation,
+                     BLKIF_RSP_EOPNOTSUPP);
+       return -EIO;
+}
+
 static void xen_blk_drain_io(struct xen_blkif *blkif)
 {
        atomic_set(&blkif->drain, 1);
@@ -800,17 +801,30 @@ __do_block_io_op(struct xen_blkif *blkif)
 
                /* Apply all sanity checks to /private copy/ of request. */
                barrier();
-               if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
+
+               switch (req.operation) {
+               case BLKIF_OP_READ:
+               case BLKIF_OP_WRITE:
+               case BLKIF_OP_WRITE_BARRIER:
+               case BLKIF_OP_FLUSH_DISKCACHE:
+                       if (dispatch_rw_block_io(blkif, &req, pending_req))
+                               goto done;
+                       break;
+               case BLKIF_OP_DISCARD:
                        free_req(pending_req);
                        if (dispatch_discard_io(blkif, &req))
-                               break;
-               } else if (dispatch_rw_block_io(blkif, &req, pending_req))
+                               goto done;
                        break;
+               default:
+                       if (dispatch_other_io(blkif, &req, pending_req))
+                               goto done;
+                       break;
+               }
 
                /* Yield point for this unbounded loop. */
                cond_resched();
        }
-
+done:
        return more_to_do;
 }
 
@@ -904,7 +918,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
                         operation == READ ? "read" : "write",
                         preq.sector_number,
-                        preq.sector_number + preq.nr_sects, preq.dev);
+                        preq.sector_number + preq.nr_sects,
+                        blkif->vbd.pdevice);
                goto fail_response;
        }
 
@@ -947,7 +962,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                       (bio_add_page(bio,
                                     pages[i],
                                     seg[i].nsec << 9,
-                                    seg[i].buf & ~PAGE_MASK) == 0)) {
+                                    seg[i].offset) == 0)) {
 
                        bio = bio_alloc(GFP_KERNEL, nseg-i);
                        if (unlikely(bio == NULL))
@@ -977,13 +992,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
                bio->bi_end_io  = end_block_io_op;
        }
 
-       /*
-        * We set it one so that the last submit_bio does not have to call
-        * atomic_inc.
-        */
        atomic_set(&pending_req->pendcnt, nbio);
-
-       /* Get a reference count for the disk queue and start sending I/O */
        blk_start_plug(&plug);
 
        for (i = 0; i < nbio; i++)
@@ -1011,6 +1020,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
  fail_put_bio:
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
+       atomic_set(&pending_req->pendcnt, 1);
        __end_block_io_op(pending_req, -EINVAL);
        msleep(1); /* back off a bit */
        return -EIO;
index 6072390c7f57ef724bee4476502eb78d63941c7c..60103e2517ba28ac5c946f32e1bd9e0e6eb7f668 100644 (file)
@@ -77,11 +77,18 @@ struct blkif_x86_32_request_discard {
        uint64_t       nr_sectors;
 } __attribute__((__packed__));
 
+struct blkif_x86_32_request_other {
+       uint8_t        _pad1;
+       blkif_vdev_t   _pad2;
+       uint64_t       id;           /* private guest value, echoed in resp  */
+} __attribute__((__packed__));
+
 struct blkif_x86_32_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_32_request_rw rw;
                struct blkif_x86_32_request_discard discard;
+               struct blkif_x86_32_request_other other;
        } u;
 } __attribute__((__packed__));
 
@@ -113,11 +120,19 @@ struct blkif_x86_64_request_discard {
        uint64_t       nr_sectors;
 } __attribute__((__packed__));
 
+struct blkif_x86_64_request_other {
+       uint8_t        _pad1;
+       blkif_vdev_t   _pad2;
+       uint32_t       _pad3;        /* offsetof(blkif_..,u.discard.id)==8   */
+       uint64_t       id;           /* private guest value, echoed in resp  */
+} __attribute__((__packed__));
+
 struct blkif_x86_64_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_x86_64_request_rw rw;
                struct blkif_x86_64_request_discard discard;
+               struct blkif_x86_64_request_other other;
        } u;
 } __attribute__((__packed__));
 
@@ -172,7 +187,6 @@ struct persistent_gnt {
        struct page *page;
        grant_ref_t gnt;
        grant_handle_t handle;
-       uint64_t dev_bus_addr;
        struct rb_node node;
 };
 
@@ -208,13 +222,13 @@ struct xen_blkif {
 
        /* statistics */
        unsigned long           st_print;
-       int                     st_rd_req;
-       int                     st_wr_req;
-       int                     st_oo_req;
-       int                     st_f_req;
-       int                     st_ds_req;
-       int                     st_rd_sect;
-       int                     st_wr_sect;
+       unsigned long long                      st_rd_req;
+       unsigned long long                      st_wr_req;
+       unsigned long long                      st_oo_req;
+       unsigned long long                      st_f_req;
+       unsigned long long                      st_ds_req;
+       unsigned long long                      st_rd_sect;
+       unsigned long long                      st_wr_sect;
 
        wait_queue_head_t       waiting_to_free;
 };
@@ -278,6 +292,11 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        default:
+               /*
+                * Don't know how to translate this op. Only get the
+                * ID so failure can be reported to the frontend.
+                */
+               dst->u.other.id = src->u.other.id;
                break;
        }
 }
@@ -309,6 +328,11 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
                dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
                break;
        default:
+               /*
+                * Don't know how to translate this op. Only get the
+                * ID so failure can be reported to the frontend.
+                */
+               dst->u.other.id = src->u.other.id;
                break;
        }
 }
index 5e237f630c47f2b2299749e067744dc89adba1a8..8bfd1bcf95ec0c168f565ac769479cb3be6098d3 100644 (file)
@@ -230,13 +230,13 @@ int __init xen_blkif_interface_init(void)
        }                                                               \
        static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
 
-VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
-VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
-VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
-VBD_SHOW(f_req,  "%d\n", be->blkif->st_f_req);
-VBD_SHOW(ds_req,  "%d\n", be->blkif->st_ds_req);
-VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
-VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
+VBD_SHOW(oo_req,  "%llu\n", be->blkif->st_oo_req);
+VBD_SHOW(rd_req,  "%llu\n", be->blkif->st_rd_req);
+VBD_SHOW(wr_req,  "%llu\n", be->blkif->st_wr_req);
+VBD_SHOW(f_req,  "%llu\n", be->blkif->st_f_req);
+VBD_SHOW(ds_req,  "%llu\n", be->blkif->st_ds_req);
+VBD_SHOW(rd_sect, "%llu\n", be->blkif->st_rd_sect);
+VBD_SHOW(wr_sect, "%llu\n", be->blkif->st_wr_sect);
 
 static struct attribute *xen_vbdstat_attrs[] = {
        &dev_attr_oo_req.attr,
index c3dae2e0f290e8ad64b4f3e6c869c2fca14cd1a1..a894f88762d8d3a1f72315e805f9494cdad8294d 100644 (file)
@@ -44,7 +44,7 @@
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/bitmap.h>
-#include <linux/llist.h>
+#include <linux/list.h>
 
 #include <xen/xen.h>
 #include <xen/xenbus.h>
@@ -68,13 +68,12 @@ enum blkif_state {
 struct grant {
        grant_ref_t gref;
        unsigned long pfn;
-       struct llist_node node;
+       struct list_head node;
 };
 
 struct blk_shadow {
        struct blkif_request req;
        struct request *request;
-       unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
@@ -105,7 +104,7 @@ struct blkfront_info
        struct work_struct work;
        struct gnttab_free_callback callback;
        struct blk_shadow shadow[BLK_RING_SIZE];
-       struct llist_head persistent_gnts;
+       struct list_head persistent_gnts;
        unsigned int persistent_gnts_c;
        unsigned long shadow_free;
        unsigned int feature_flush;
@@ -165,6 +164,69 @@ static int add_id_to_freelist(struct blkfront_info *info,
        return 0;
 }
 
+static int fill_grant_buffer(struct blkfront_info *info, int num)
+{
+       struct page *granted_page;
+       struct grant *gnt_list_entry, *n;
+       int i = 0;
+
+       while (i < num) {
+               gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
+               if (!gnt_list_entry)
+                       goto out_of_memory;
+
+               granted_page = alloc_page(GFP_NOIO);
+               if (!granted_page) {
+                       kfree(gnt_list_entry);
+                       goto out_of_memory;
+               }
+
+               gnt_list_entry->pfn = page_to_pfn(granted_page);
+               gnt_list_entry->gref = GRANT_INVALID_REF;
+               list_add(&gnt_list_entry->node, &info->persistent_gnts);
+               i++;
+       }
+
+       return 0;
+
+out_of_memory:
+       list_for_each_entry_safe(gnt_list_entry, n,
+                                &info->persistent_gnts, node) {
+               list_del(&gnt_list_entry->node);
+               __free_page(pfn_to_page(gnt_list_entry->pfn));
+               kfree(gnt_list_entry);
+               i--;
+       }
+       BUG_ON(i != 0);
+       return -ENOMEM;
+}
+
+static struct grant *get_grant(grant_ref_t *gref_head,
+                               struct blkfront_info *info)
+{
+       struct grant *gnt_list_entry;
+       unsigned long buffer_mfn;
+
+       BUG_ON(list_empty(&info->persistent_gnts));
+       gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant,
+                                         node);
+       list_del(&gnt_list_entry->node);
+
+       if (gnt_list_entry->gref != GRANT_INVALID_REF) {
+               info->persistent_gnts_c--;
+               return gnt_list_entry;
+       }
+
+       /* Assign a gref to this page */
+       gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
+       BUG_ON(gnt_list_entry->gref == -ENOSPC);
+       buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
+       gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
+                                       info->xbdev->otherend_id,
+                                       buffer_mfn, 0);
+       return gnt_list_entry;
+}
+
 static const char *op_name(int op)
 {
        static const char *const names[] = {
@@ -293,7 +355,6 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 static int blkif_queue_request(struct request *req)
 {
        struct blkfront_info *info = req->rq_disk->private_data;
-       unsigned long buffer_mfn;
        struct blkif_request *ring_req;
        unsigned long id;
        unsigned int fsect, lsect;
@@ -306,7 +367,6 @@ static int blkif_queue_request(struct request *req)
         */
        bool new_persistent_gnts;
        grant_ref_t gref_head;
-       struct page *granted_page;
        struct grant *gnt_list_entry = NULL;
        struct scatterlist *sg;
 
@@ -370,41 +430,8 @@ static int blkif_queue_request(struct request *req)
                        fsect = sg->offset >> 9;
                        lsect = fsect + (sg->length >> 9) - 1;
 
-                       if (info->persistent_gnts_c) {
-                               BUG_ON(llist_empty(&info->persistent_gnts));
-                               gnt_list_entry = llist_entry(
-                                       llist_del_first(&info->persistent_gnts),
-                                       struct grant, node);
-
-                               ref = gnt_list_entry->gref;
-                               buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
-                               info->persistent_gnts_c--;
-                       } else {
-                               ref = gnttab_claim_grant_reference(&gref_head);
-                               BUG_ON(ref == -ENOSPC);
-
-                               gnt_list_entry =
-                                       kmalloc(sizeof(struct grant),
-                                                        GFP_ATOMIC);
-                               if (!gnt_list_entry)
-                                       return -ENOMEM;
-
-                               granted_page = alloc_page(GFP_ATOMIC);
-                               if (!granted_page) {
-                                       kfree(gnt_list_entry);
-                                       return -ENOMEM;
-                               }
-
-                               gnt_list_entry->pfn =
-                                       page_to_pfn(granted_page);
-                               gnt_list_entry->gref = ref;
-
-                               buffer_mfn = pfn_to_mfn(page_to_pfn(
-                                                               granted_page));
-                               gnttab_grant_foreign_access_ref(ref,
-                                       info->xbdev->otherend_id,
-                                       buffer_mfn, 0);
-                       }
+                       gnt_list_entry = get_grant(&gref_head, info);
+                       ref = gnt_list_entry->gref;
 
                        info->shadow[id].grants_used[i] = gnt_list_entry;
 
@@ -435,7 +462,6 @@ static int blkif_queue_request(struct request *req)
                                kunmap_atomic(shared_data);
                        }
 
-                       info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
                        ring_req->u.rw.seg[i] =
                                        (struct blkif_request_segment) {
                                                .gref       = ref,
@@ -790,9 +816,8 @@ static void blkif_restart_queue(struct work_struct *work)
 
 static void blkif_free(struct blkfront_info *info, int suspend)
 {
-       struct llist_node *all_gnts;
-       struct grant *persistent_gnt, *tmp;
-       struct llist_node *n;
+       struct grant *persistent_gnt;
+       struct grant *n;
 
        /* Prevent new requests being issued until we fix things up. */
        spin_lock_irq(&info->io_lock);
@@ -803,22 +828,20 @@ static void blkif_free(struct blkfront_info *info, int suspend)
                blk_stop_queue(info->rq);
 
        /* Remove all persistent grants */
-       if (info->persistent_gnts_c) {
-               all_gnts = llist_del_all(&info->persistent_gnts);
-               persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node);
-               while (persistent_gnt) {
-                       gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
+       if (!list_empty(&info->persistent_gnts)) {
+               list_for_each_entry_safe(persistent_gnt, n,
+                                        &info->persistent_gnts, node) {
+                       list_del(&persistent_gnt->node);
+                       if (persistent_gnt->gref != GRANT_INVALID_REF) {
+                               gnttab_end_foreign_access(persistent_gnt->gref,
+                                                         0, 0UL);
+                               info->persistent_gnts_c--;
+                       }
                        __free_page(pfn_to_page(persistent_gnt->pfn));
-                       tmp = persistent_gnt;
-                       n = persistent_gnt->node.next;
-                       if (n)
-                               persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node);
-                       else
-                               persistent_gnt = NULL;
-                       kfree(tmp);
+                       kfree(persistent_gnt);
                }
-               info->persistent_gnts_c = 0;
        }
+       BUG_ON(info->persistent_gnts_c != 0);
 
        /* No more gnttab callback work. */
        gnttab_cancel_free_callback(&info->callback);
@@ -875,7 +898,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
        }
        /* Add the persistent grant into the list of free grants */
        for (i = 0; i < s->req.u.rw.nr_segments; i++) {
-               llist_add(&s->grants_used[i]->node, &info->persistent_gnts);
+               list_add(&s->grants_used[i]->node, &info->persistent_gnts);
                info->persistent_gnts_c++;
        }
 }
@@ -1013,6 +1036,12 @@ static int setup_blkring(struct xenbus_device *dev,
 
        sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
+       /* Allocate memory for grants */
+       err = fill_grant_buffer(info, BLK_RING_SIZE *
+                                     BLKIF_MAX_SEGMENTS_PER_REQUEST);
+       if (err)
+               goto fail;
+
        err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
        if (err < 0) {
                free_page((unsigned long)sring);
@@ -1171,7 +1200,7 @@ static int blkfront_probe(struct xenbus_device *dev,
        spin_lock_init(&info->io_lock);
        info->xbdev = dev;
        info->vdevice = vdevice;
-       init_llist_head(&info->persistent_gnts);
+       INIT_LIST_HEAD(&info->persistent_gnts);
        info->persistent_gnts_c = 0;
        info->connected = BLKIF_STATE_DISCONNECTED;
        INIT_WORK(&info->work, blkif_restart_queue);
@@ -1203,11 +1232,10 @@ static int blkif_recover(struct blkfront_info *info)
        int j;
 
        /* Stage 1: Make a safe copy of the shadow state. */
-       copy = kmalloc(sizeof(info->shadow),
+       copy = kmemdup(info->shadow, sizeof(info->shadow),
                       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
        if (!copy)
                return -ENOMEM;
-       memcpy(copy, info->shadow, sizeof(info->shadow));
 
        /* Stage 2: Set up free list. */
        memset(&info->shadow, 0, sizeof(info->shadow));
@@ -1236,7 +1264,7 @@ static int blkif_recover(struct blkfront_info *info)
                                gnttab_grant_foreign_access_ref(
                                        req->u.rw.seg[j].gref,
                                        info->xbdev->otherend_id,
-                                       pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
+                                       pfn_to_mfn(copy[i].grants_used[j]->pfn),
                                        0);
                }
                info->shadow[req->u.rw.id].req = *req;
index a8a41e07a221695684fd78770980749b25ff2b5a..6aab00ef4379a5b9e4dfeb290461d3c177726f07 100644 (file)
@@ -73,9 +73,13 @@ static struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x03F0, 0x311D) },
 
        /* Atheros AR3012 with sflash firmware*/
+       { USB_DEVICE(0x0CF3, 0x0036) },
        { USB_DEVICE(0x0CF3, 0x3004) },
+       { USB_DEVICE(0x0CF3, 0x3008) },
        { USB_DEVICE(0x0CF3, 0x311D) },
+       { USB_DEVICE(0x0CF3, 0x817a) },
        { USB_DEVICE(0x13d3, 0x3375) },
+       { USB_DEVICE(0x04CA, 0x3004) },
        { USB_DEVICE(0x04CA, 0x3005) },
        { USB_DEVICE(0x04CA, 0x3006) },
        { USB_DEVICE(0x04CA, 0x3008) },
@@ -105,9 +109,13 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
 static struct usb_device_id ath3k_blist_tbl[] = {
 
        /* Atheros AR3012 with sflash firmware*/
+       { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
index 7e351e345476c1bcc2141f95e41fe43a3e65e612..2cc5f774a29c47140739361420564b3f7c15b095 100644 (file)
@@ -131,9 +131,13 @@ static struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
 
        /* Atheros 3012 with sflash firmware */
+       { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
index b5538bba7a10b84581c70d51357a8e360fc81545..09c63315e57979f8d726f627279f0e5e88e94c3f 100644 (file)
@@ -157,7 +157,7 @@ static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
        divisor =  parent_rate / rate;
 
        /* If prate / rate would be decimal, incr the divisor */
-       if (rate * divisor < *prate)
+       if (rate * divisor < parent_rate)
                divisor++;
 
        if (divisor == cdev->div_mask + 1)
index 937bc286591f9349b5366295fa210412b8ffb756..57a8774f0b4ee907ab67d9c9f8fefd1e3d18fdbd 100644 (file)
@@ -730,7 +730,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
            policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
                cpumask_copy(policy->cpus, perf->shared_cpu_map);
        }
-       cpumask_copy(policy->related_cpus, perf->shared_cpu_map);
 
 #ifdef CONFIG_SMP
        dmi_check_system(sw_any_bug_dmi_table);
@@ -742,7 +741,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
                cpumask_clear(policy->cpus);
                cpumask_set_cpu(cpu, policy->cpus);
-               cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
                policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
                pr_info_once(PFX "overriding BIOS provided _PSD data\n");
        }
index 2fd779eb1ed1f9fcdb7cb70ab54ed319fe1e4160..bfd6273fd873531d864f3b4e104d37018a5d8dda 100644 (file)
@@ -180,15 +180,19 @@ static void cpufreq_stats_free_sysfs(unsigned int cpu)
 {
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 
-       if (!cpufreq_frequency_get_table(cpu))
+       if (!policy)
                return;
 
-       if (policy && !policy_is_shared(policy)) {
+       if (!cpufreq_frequency_get_table(cpu))
+               goto put_ref;
+
+       if (!policy_is_shared(policy)) {
                pr_debug("%s: Free sysfs stat\n", __func__);
                sysfs_remove_group(&policy->kobj, &stats_attr_group);
        }
-       if (policy)
-               cpufreq_cpu_put(policy);
+
+put_ref:
+       cpufreq_cpu_put(policy);
 }
 
 static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
index f6dd1e7611293bf5a66531f2acb285713439430c..ad72922919ed79b181a83467dea455216676d56c 100644 (file)
@@ -358,14 +358,14 @@ static void intel_pstate_sysfs_expose_params(void)
 static int intel_pstate_min_pstate(void)
 {
        u64 value;
-       rdmsrl(0xCE, value);
+       rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 40) & 0xFF;
 }
 
 static int intel_pstate_max_pstate(void)
 {
        u64 value;
-       rdmsrl(0xCE, value);
+       rdmsrl(MSR_PLATFORM_INFO, value);
        return (value >> 8) & 0xFF;
 }
 
@@ -373,7 +373,7 @@ static int intel_pstate_turbo_pstate(void)
 {
        u64 value;
        int nont, ret;
-       rdmsrl(0x1AD, value);
+       rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
        nont = intel_pstate_max_pstate();
        ret = ((value) & 255);
        if (ret <= nont)
@@ -454,7 +454,7 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                        sample->idletime_us * 100,
                                        sample->duration_us);
        core_pct = div64_u64(sample->aperf * 100, sample->mperf);
-       sample->freq = cpu->pstate.turbo_pstate * core_pct * 1000;
+       sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
 
        sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
                                        100);
@@ -752,6 +752,29 @@ static struct cpufreq_driver intel_pstate_driver = {
 
 static int __initdata no_load;
 
+static int intel_pstate_msrs_not_valid(void)
+{
+       /* Check that all the MSRs we are using are valid. */
+       u64 aperf, mperf, tmp;
+
+       rdmsrl(MSR_IA32_APERF, aperf);
+       rdmsrl(MSR_IA32_MPERF, mperf);
+
+       if (!intel_pstate_min_pstate() ||
+               !intel_pstate_max_pstate() ||
+               !intel_pstate_turbo_pstate())
+               return -ENODEV;
+
+       rdmsrl(MSR_IA32_APERF, tmp);
+       if (!(tmp - aperf))
+               return -ENODEV;
+
+       rdmsrl(MSR_IA32_MPERF, tmp);
+       if (!(tmp - mperf))
+               return -ENODEV;
+
+       return 0;
+}
 static int __init intel_pstate_init(void)
 {
        int cpu, rc = 0;
@@ -764,6 +787,9 @@ static int __init intel_pstate_init(void)
        if (!id)
                return -ENODEV;
 
+       if (intel_pstate_msrs_not_valid())
+               return -ENODEV;
+
        pr_info("Intel P-state driver initializing.\n");
 
        all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
index b2a0a0726a5405caf7c0f7d252745d9554e34643..cf268b14ae9a393890ac37597f29008ee2c93126 100644 (file)
@@ -1650,11 +1650,7 @@ struct caam_alg_template {
 };
 
 static struct caam_alg_template driver_algs[] = {
-       /*
-        * single-pass ipsec_esp descriptor
-        * authencesn(*,*) is also registered, although not present
-        * explicitly here.
-        */
+       /* single-pass ipsec_esp descriptor */
        {
                .name = "authenc(hmac(md5),cbc(aes))",
                .driver_name = "authenc-hmac-md5-cbc-aes-caam",
@@ -2217,9 +2213,7 @@ static int __init caam_algapi_init(void)
        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                /* TODO: check if h/w supports alg */
                struct caam_crypto_alg *t_alg;
-               bool done = false;
 
-authencesn:
                t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
                if (IS_ERR(t_alg)) {
                        err = PTR_ERR(t_alg);
@@ -2233,25 +2227,8 @@ authencesn:
                        dev_warn(ctrldev, "%s alg registration failed\n",
                                t_alg->crypto_alg.cra_driver_name);
                        kfree(t_alg);
-               } else {
+               } else
                        list_add_tail(&t_alg->entry, &priv->alg_list);
-                       if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD &&
-                           !memcmp(driver_algs[i].name, "authenc", 7) &&
-                           !done) {
-                               char *name;
-
-                               name = driver_algs[i].name;
-                               memmove(name + 10, name + 7, strlen(name) - 7);
-                               memcpy(name + 7, "esn", 3);
-
-                               name = driver_algs[i].driver_name;
-                               memmove(name + 10, name + 7, strlen(name) - 7);
-                               memcpy(name + 7, "esn", 3);
-
-                               done = true;
-                               goto authencesn;
-                       }
-               }
        }
        if (!list_empty(&priv->alg_list))
                dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
index cf15e7813801a6cb8e1d631663a2fb4421cb5a7a..762aeff626ac6f7a980c8fe3019fcd72252ecd7c 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/types.h>
 #include <linux/debugfs.h>
 #include <linux/circ_buf.h>
-#include <linux/string.h>
 #include <net/xfrm.h>
 
 #include <crypto/algapi.h>
index 09b184adf31b73902a98651843805e0c129c15b9..5b2b5e61e4f9d0516e4b66090ba56ef79e941632 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/spinlock.h>
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
-#include <linux/string.h>
 
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
@@ -1974,11 +1973,7 @@ struct talitos_alg_template {
 };
 
 static struct talitos_alg_template driver_algs[] = {
-       /*
-        * AEAD algorithms. These use a single-pass ipsec_esp descriptor.
-        * authencesn(*,*) is also registered, although not present
-        * explicitly here.
-        */
+       /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
        {       .type = CRYPTO_ALG_TYPE_AEAD,
                .alg.crypto = {
                        .cra_name = "authenc(hmac(sha1),cbc(aes))",
@@ -2820,9 +2815,7 @@ static int talitos_probe(struct platform_device *ofdev)
                if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
                        struct talitos_crypto_alg *t_alg;
                        char *name = NULL;
-                       bool authenc = false;
 
-authencesn:
                        t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
                        if (IS_ERR(t_alg)) {
                                err = PTR_ERR(t_alg);
@@ -2837,8 +2830,6 @@ authencesn:
                                err = crypto_register_alg(
                                                &t_alg->algt.alg.crypto);
                                name = t_alg->algt.alg.crypto.cra_driver_name;
-                               authenc = authenc ? !authenc :
-                                         !(bool)memcmp(name, "authenc", 7);
                                break;
                        case CRYPTO_ALG_TYPE_AHASH:
                                err = crypto_register_ahash(
@@ -2851,25 +2842,8 @@ authencesn:
                                dev_err(dev, "%s alg registration failed\n",
                                        name);
                                kfree(t_alg);
-                       } else {
+                       } else
                                list_add_tail(&t_alg->entry, &priv->alg_list);
-                               if (authenc) {
-                                       struct crypto_alg *alg =
-                                               &driver_algs[i].alg.crypto;
-
-                                       name = alg->cra_name;
-                                       memmove(name + 10, name + 7,
-                                               strlen(name) - 7);
-                                       memcpy(name + 7, "esn", 3);
-
-                                       name = alg->cra_driver_name;
-                                       memmove(name + 10, name + 7,
-                                               strlen(name) - 7);
-                                       memcpy(name + 7, "esn", 3);
-
-                                       goto authencesn;
-                               }
-                       }
                }
        }
        if (!list_empty(&priv->alg_list))
index c599558faedaf29128a5a10bb15eaed7dbc5d41c..43a5329d44837c4042687d6f93436b3caf0627c1 100644 (file)
@@ -1001,6 +1001,13 @@ static inline void convert_burst(u32 *maxburst)
                *maxburst = 0;
 }
 
+static inline void convert_slave_id(struct dw_dma_chan *dwc)
+{
+       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+
+       dwc->dma_sconfig.slave_id -= dw->request_line_base;
+}
+
 static int
 set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 {
@@ -1015,6 +1022,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
 
        convert_burst(&dwc->dma_sconfig.src_maxburst);
        convert_burst(&dwc->dma_sconfig.dst_maxburst);
+       convert_slave_id(dwc);
 
        return 0;
 }
@@ -1276,9 +1284,9 @@ static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
        if (dma_spec->args_count != 3)
                return NULL;
 
-       fargs.req = be32_to_cpup(dma_spec->args+0);
-       fargs.src = be32_to_cpup(dma_spec->args+1);
-       fargs.dst = be32_to_cpup(dma_spec->args+2);
+       fargs.req = dma_spec->args[0];
+       fargs.src = dma_spec->args[1];
+       fargs.dst = dma_spec->args[2];
 
        if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
                    fargs.src >= dw->nr_masters ||
@@ -1628,6 +1636,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
 
 static int dw_probe(struct platform_device *pdev)
 {
+       const struct platform_device_id *match;
        struct dw_dma_platform_data *pdata;
        struct resource         *io;
        struct dw_dma           *dw;
@@ -1711,6 +1720,11 @@ static int dw_probe(struct platform_device *pdev)
                memcpy(dw->data_width, pdata->data_width, 4);
        }
 
+       /* Get the base request line if set */
+       match = platform_get_device_id(pdev);
+       if (match)
+               dw->request_line_base = (unsigned int)match->driver_data;
+
        /* Calculate all channel mask before DMA setup */
        dw->all_chan_mask = (1 << nr_channels) - 1;
 
@@ -1906,7 +1920,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table);
 #endif
 
 static const struct platform_device_id dw_dma_ids[] = {
-       { "INTL9C60", 0 },
+       /* Name,        Request Line Base */
+       { "INTL9C60",   (kernel_ulong_t)16 },
        { }
 };
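As a worked example of the new request-line handling (the slave_id value here is purely illustrative): dw_probe() copies the driver_data of the matched entry, 16 for INTL9C60 above, into dw->request_line_base, and a channel configured with dma_sconfig.slave_id = 17 is then rewritten by convert_slave_id() in set_runtime_config() to hardware request line 17 - 16 = 1.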
 
index cf0ce5c77d609d52e024f3426ab521bdb86e7545..4d02c3669b75bcf023a7f03916d02d3749ec2e1e 100644 (file)
@@ -247,6 +247,7 @@ struct dw_dma {
        /* hardware configuration */
        unsigned char           nr_masters;
        unsigned char           data_width[4];
+       unsigned int            request_line_base;
 
        struct dw_dma_chan      chan[0];
 };
index 910b0116c12872aaebb5e72707ddc920d0722ce1..e1d13c463c9094e4f1b996311b1767725619f90f 100644 (file)
@@ -2048,12 +2048,18 @@ static int init_csrows(struct mem_ctl_info *mci)
                edac_dbg(1, "MC node: %d, csrow: %d\n",
                            pvt->mc_node_id, i);
 
-               if (row_dct0)
+               if (row_dct0) {
                        nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
+                       csrow->channels[0]->dimm->nr_pages = nr_pages;
+               }
 
                /* K8 has only one DCT */
-               if (boot_cpu_data.x86 != 0xf && row_dct1)
-                       nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
+               if (boot_cpu_data.x86 != 0xf && row_dct1) {
+                       int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i);
+
+                       csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
+                       nr_pages += row_dct1_pages;
+               }
 
                mtype = amd64_determine_memory_type(pvt, i);
 
@@ -2072,9 +2078,7 @@ static int init_csrows(struct mem_ctl_info *mci)
                        dimm = csrow->channels[j]->dimm;
                        dimm->mtype = mtype;
                        dimm->edac_mode = edac_mode;
-                       dimm->nr_pages = nr_pages;
                }
-               csrow->nr_pages = nr_pages;
        }
 
        return empty;
@@ -2419,7 +2423,6 @@ static int amd64_init_one_instance(struct pci_dev *F2)
 
        mci->pvt_info = pvt;
        mci->pdev = &pvt->F2->dev;
-       mci->csbased = 1;
 
        setup_mci_misc_attrs(mci, fam_type);
 
index cdb81aa73ab7aeb595be53f77430488486947e66..27e86d93826280a62744a559332bf0cd9bc03b3b 100644 (file)
@@ -86,7 +86,7 @@ static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
        edac_dimm_info_location(dimm, location, sizeof(location));
 
        edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
-                dimm->mci->mem_is_per_rank ? "rank" : "dimm",
+                dimm->mci->csbased ? "rank" : "dimm",
                 number, location, dimm->csrow, dimm->cschannel);
        edac_dbg(4, "  dimm = %p\n", dimm);
        edac_dbg(4, "  dimm->label = '%s'\n", dimm->label);
@@ -341,7 +341,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
        memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
        mci->nr_csrows = tot_csrows;
        mci->num_cschannel = tot_channels;
-       mci->mem_is_per_rank = per_rank;
+       mci->csbased = per_rank;
 
        /*
         * Alocate and fill the csrow/channels structs
@@ -1235,7 +1235,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
                         * incrementing the compat API counters
                         */
                        edac_dbg(4, "%s csrows map: (%d,%d)\n",
-                                mci->mem_is_per_rank ? "rank" : "dimm",
+                                mci->csbased ? "rank" : "dimm",
                                 dimm->csrow, dimm->cschannel);
                        if (row == -1)
                                row = dimm->csrow;
index 4f4b6137d74e6c4ebd894e104cdeaf18cd565969..5899a76eec3bd9086d1edfc24fa1b4aca1ce7969 100644 (file)
@@ -143,7 +143,7 @@ static const char *edac_caps[] = {
  * and the per-dimm/per-rank one
  */
 #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
-       struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
+       static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
 
 struct dev_ch_attribute {
        struct device_attribute attr;
@@ -180,9 +180,6 @@ static ssize_t csrow_size_show(struct device *dev,
        int i;
        u32 nr_pages = 0;
 
-       if (csrow->mci->csbased)
-               return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
-
        for (i = 0; i < csrow->nr_channels; i++)
                nr_pages += csrow->channels[i]->dimm->nr_pages;
        return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
@@ -612,7 +609,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
        device_initialize(&dimm->dev);
 
        dimm->dev.parent = &mci->dev;
-       if (mci->mem_is_per_rank)
+       if (mci->csbased)
                dev_set_name(&dimm->dev, "rank%d", index);
        else
                dev_set_name(&dimm->dev, "dimm%d", index);
@@ -778,14 +775,10 @@ static ssize_t mci_size_mb_show(struct device *dev,
        for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
                struct csrow_info *csrow = mci->csrows[csrow_idx];
 
-               if (csrow->mci->csbased) {
-                       total_pages += csrow->nr_pages;
-               } else {
-                       for (j = 0; j < csrow->nr_channels; j++) {
-                               struct dimm_info *dimm = csrow->channels[j]->dimm;
+               for (j = 0; j < csrow->nr_channels; j++) {
+                       struct dimm_info *dimm = csrow->channels[j]->dimm;
 
-                               total_pages += dimm->nr_pages;
-                       }
+                       total_pages += dimm->nr_pages;
                }
        }
 
index b70e3815c45932a3b76f7f30fff98d69907a8483..8f3c947b0029ab4a864d25dcd4a5ccaf4dfce90e 100644 (file)
 #define        DEV_NAME                        "max77693-muic"
 #define        DELAY_MS_DEFAULT                20000           /* unit: millisecond */
 
+/*
+ * Default values of the MAX77693 registers needed to bring up the MUIC device.
+ * If the user does not provide initial values for the MUIC device through
+ * platform data, the extcon-max77693 driver uses 'default_init_data' to bring
+ * up basic operation of the MAX77693 MUIC device.
+ */
+struct max77693_reg_data default_init_data[] = {
+       {
+               /* STATUS2 - [3]ChgDetRun */
+               .addr = MAX77693_MUIC_REG_STATUS2,
+               .data = STATUS2_CHGDETRUN_MASK,
+       }, {
+               /* INTMASK1 - Unmask [3]ADC1KM,[0]ADCM */
+               .addr = MAX77693_MUIC_REG_INTMASK1,
+               .data = INTMASK1_ADC1K_MASK
+                       | INTMASK1_ADC_MASK,
+       }, {
+               /* INTMASK2 - Unmask [0]ChgTypM */
+               .addr = MAX77693_MUIC_REG_INTMASK2,
+               .data = INTMASK2_CHGTYP_MASK,
+       }, {
+               /* INTMASK3 - Mask all of interrupts */
+               .addr = MAX77693_MUIC_REG_INTMASK3,
+               .data = 0x0,
+       }, {
+               /* CDETCTRL2 */
+               .addr = MAX77693_MUIC_REG_CDETCTRL2,
+               .data = CDETCTRL2_VIDRMEN_MASK
+                       | CDETCTRL2_DXOVPEN_MASK,
+       },
+};
+
 enum max77693_muic_adc_debounce_time {
        ADC_DEBOUNCE_TIME_5MS = 0,
        ADC_DEBOUNCE_TIME_10MS,
@@ -1045,8 +1077,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
 {
        struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent);
        struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
-       struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
        struct max77693_muic_info *info;
+       struct max77693_reg_data *init_data;
+       int num_init_data;
        int delay_jiffies;
        int ret;
        int i;
@@ -1145,15 +1178,25 @@ static int max77693_muic_probe(struct platform_device *pdev)
                goto err_irq;
        }
 
-       /* Initialize MUIC register by using platform data */
-       for (i = 0 ; i < muic_pdata->num_init_data ; i++) {
-               enum max77693_irq_source irq_src = MAX77693_IRQ_GROUP_NR;
+
+       /* Initialize MUIC register by using platform data or default data */
+       if (pdata->muic_data) {
+               init_data = pdata->muic_data->init_data;
+               num_init_data = pdata->muic_data->num_init_data;
+       } else {
+               init_data = default_init_data;
+               num_init_data = ARRAY_SIZE(default_init_data);
+       }
+
+       for (i = 0 ; i < num_init_data ; i++) {
+               enum max77693_irq_source irq_src
+                               = MAX77693_IRQ_GROUP_NR;
 
                max77693_write_reg(info->max77693->regmap_muic,
-                               muic_pdata->init_data[i].addr,
-                               muic_pdata->init_data[i].data);
+                               init_data[i].addr,
+                               init_data[i].data);
 
-               switch (muic_pdata->init_data[i].addr) {
+               switch (init_data[i].addr) {
                case MAX77693_MUIC_REG_INTMASK1:
                        irq_src = MUIC_INT1;
                        break;
@@ -1167,22 +1210,40 @@ static int max77693_muic_probe(struct platform_device *pdev)
 
                if (irq_src < MAX77693_IRQ_GROUP_NR)
                        info->max77693->irq_masks_cur[irq_src]
-                               = muic_pdata->init_data[i].data;
+                               = init_data[i].data;
        }
 
-       /*
-        * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
-        * h/w path of COMP2/COMN1 on CONTROL1 register.
-        */
-       if (muic_pdata->path_uart)
-               info->path_uart = muic_pdata->path_uart;
-       else
-               info->path_uart = CONTROL1_SW_UART;
+       if (pdata->muic_data) {
+               struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
 
-       if (muic_pdata->path_usb)
-               info->path_usb = muic_pdata->path_usb;
-       else
+               /*
+                * Set the default USB/UART path (UART/USB or AUX_UART/AUX_USB)
+                * for the COMP2/COMN1 h/w switches on the CONTROL1 register.
+                */
+               if (muic_pdata->path_uart)
+                       info->path_uart = muic_pdata->path_uart;
+               else
+                       info->path_uart = CONTROL1_SW_UART;
+
+               if (muic_pdata->path_usb)
+                       info->path_usb = muic_pdata->path_usb;
+               else
+                       info->path_usb = CONTROL1_SW_USB;
+
+               /*
+                * Default delay time used before detecting the cable state
+                * after the driver is probed.
+                */
+               if (muic_pdata->detcable_delay_ms)
+                       delay_jiffies =
+                               msecs_to_jiffies(muic_pdata->detcable_delay_ms);
+               else
+                       delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+       } else {
                info->path_usb = CONTROL1_SW_USB;
+               info->path_uart = CONTROL1_SW_UART;
+               delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+       }
 
        /* Set initial path for UART */
         max77693_muic_set_path(info, info->path_uart, true);
@@ -1208,10 +1269,6 @@ static int max77693_muic_probe(struct platform_device *pdev)
         * driver should notify cable state to upper layer.
         */
        INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq);
-       if (muic_pdata->detcable_delay_ms)
-               delay_jiffies = msecs_to_jiffies(muic_pdata->detcable_delay_ms);
-       else
-               delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
        schedule_delayed_work(&info->wq_detcable, delay_jiffies);
 
        return ret;
index e636d950ad6c15d11d279474d3b55d89b572e257..69641bcae325a8f73e226ee66d9f418730c2ddbd 100644 (file)
@@ -712,29 +712,45 @@ static int max8997_muic_probe(struct platform_device *pdev)
                goto err_irq;
        }
 
-       /* Initialize registers according to platform data */
        if (pdata->muic_pdata) {
-               struct max8997_muic_platform_data *mdata = info->muic_pdata;
-
-               for (i = 0; i < mdata->num_init_data; i++) {
-                       max8997_write_reg(info->muic, mdata->init_data[i].addr,
-                                       mdata->init_data[i].data);
+               struct max8997_muic_platform_data *muic_pdata
+                       = pdata->muic_pdata;
+
+               /* Initialize registers according to platform data */
+               for (i = 0; i < muic_pdata->num_init_data; i++) {
+                       max8997_write_reg(info->muic,
+                                       muic_pdata->init_data[i].addr,
+                                       muic_pdata->init_data[i].data);
                }
-       }
 
-       /*
-        * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
-        * h/w path of COMP2/COMN1 on CONTROL1 register.
-        */
-       if (pdata->muic_pdata->path_uart)
-               info->path_uart = pdata->muic_pdata->path_uart;
-       else
-               info->path_uart = CONTROL1_SW_UART;
+               /*
+                * Set the default USB/UART path (UART/USB or AUX_UART/AUX_USB)
+                * for the COMP2/COMN1 h/w switches on the CONTROL1 register.
+                */
+               if (muic_pdata->path_uart)
+                       info->path_uart = muic_pdata->path_uart;
+               else
+                       info->path_uart = CONTROL1_SW_UART;
 
-       if (pdata->muic_pdata->path_usb)
-               info->path_usb = pdata->muic_pdata->path_usb;
-       else
+               if (muic_pdata->path_usb)
+                       info->path_usb = muic_pdata->path_usb;
+               else
+                       info->path_usb = CONTROL1_SW_USB;
+
+               /*
+                * Default delay, in milliseconds, before the cable state
+                * is detected.
+                */
+               if (muic_pdata->detcable_delay_ms)
+                       delay_jiffies =
+                               msecs_to_jiffies(muic_pdata->detcable_delay_ms);
+               else
+                       delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+       } else {
+               info->path_uart = CONTROL1_SW_UART;
                info->path_usb = CONTROL1_SW_USB;
+               delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+       }
 
        /* Set initial path for UART */
         max8997_muic_set_path(info, info->path_uart, true);
@@ -751,10 +767,6 @@ static int max8997_muic_probe(struct platform_device *pdev)
         * driver should notify cable state to upper layer.
         */
        INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
-       if (pdata->muic_pdata->detcable_delay_ms)
-               delay_jiffies = msecs_to_jiffies(pdata->muic_pdata->detcable_delay_ms);
-       else
-               delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
        schedule_delayed_work(&info->wq_detcable, delay_jiffies);
 
        return 0;
index 9b00072a020fb5141060c5ee89cfe31edbd651be..42c759a4d047f6754ddf8676a390d8d0dcdffafb 100644 (file)
@@ -53,6 +53,24 @@ config EFI_VARS
          Subsequent efibootmgr releases may be found at:
          <http://linux.dell.com/efibootmgr>
 
+config EFI_VARS_PSTORE
+       bool "Register efivars backend for pstore"
+       depends on EFI_VARS && PSTORE
+       default y
+       help
+         Say Y here to enable the use of efivars as a backend to pstore. This
+         will allow writing console messages, crash dumps, or anything
+         else supported by pstore to EFI variables.
+
+config EFI_VARS_PSTORE_DEFAULT_DISABLE
+       bool "Disable using efivars as a pstore backend by default"
+       depends on EFI_VARS_PSTORE
+       default n
+       help
+         Saying Y here will disable the use of efivars as a storage
+         backend for pstore by default. This setting can be overridden
+         using the efivars module's pstore_disable parameter.
+
 config EFI_PCDP
        bool "Console device selection via EFI PCDP or HCDP table"
        depends on ACPI && EFI && IA64
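
The build-time default above only seeds a runtime knob: the efivars.c hunk further down reads IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE) into efivars_pstore_disable and exposes it as the pstore_disable module parameter with mode 0644, so the choice can presumably still be flipped at boot with efivars.pstore_disable=0/1 or later through /sys/module/efivars/parameters/pstore_disable.
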
index fe62aa3922398ebc52e7f61b586a71814a4b9dfe..7acafb80fd4c79b19b1c53551bb5e7e8612af707 100644 (file)
@@ -103,6 +103,11 @@ MODULE_VERSION(EFIVARS_VERSION);
  */
 #define GUID_LEN 36
 
+static bool efivars_pstore_disable =
+       IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
+
+module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
+
 /*
  * The maximum size of VariableName + Data = 1024
  * Therefore, it's reasonable to save that much
@@ -165,6 +170,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
 
 static void efivar_update_sysfs_entries(struct work_struct *);
 static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
+static bool efivar_wq_enabled = true;
 
 /* Return the number of unicode characters in data */
 static unsigned long
@@ -1309,9 +1315,7 @@ static const struct inode_operations efivarfs_dir_inode_operations = {
        .create = efivarfs_create,
 };
 
-static struct pstore_info efi_pstore_info;
-
-#ifdef CONFIG_PSTORE
+#ifdef CONFIG_EFI_VARS_PSTORE
 
 static int efi_pstore_open(struct pstore_info *psi)
 {
@@ -1441,7 +1445,7 @@ static int efi_pstore_write(enum pstore_type_id type,
 
        spin_unlock_irqrestore(&efivars->lock, flags);
 
-       if (reason == KMSG_DUMP_OOPS)
+       if (reason == KMSG_DUMP_OOPS && efivar_wq_enabled)
                schedule_work(&efivar_work);
 
        *id = part;
@@ -1514,38 +1518,6 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
 
        return 0;
 }
-#else
-static int efi_pstore_open(struct pstore_info *psi)
-{
-       return 0;
-}
-
-static int efi_pstore_close(struct pstore_info *psi)
-{
-       return 0;
-}
-
-static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count,
-                              struct timespec *timespec,
-                              char **buf, struct pstore_info *psi)
-{
-       return -1;
-}
-
-static int efi_pstore_write(enum pstore_type_id type,
-               enum kmsg_dump_reason reason, u64 *id,
-               unsigned int part, int count, size_t size,
-               struct pstore_info *psi)
-{
-       return 0;
-}
-
-static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
-                           struct timespec time, struct pstore_info *psi)
-{
-       return 0;
-}
-#endif
 
 static struct pstore_info efi_pstore_info = {
        .owner          = THIS_MODULE,
@@ -1557,6 +1529,24 @@ static struct pstore_info efi_pstore_info = {
        .erase          = efi_pstore_erase,
 };
 
+static void efivar_pstore_register(struct efivars *efivars)
+{
+       efivars->efi_pstore_info = efi_pstore_info;
+       efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
+       if (efivars->efi_pstore_info.buf) {
+               efivars->efi_pstore_info.bufsize = 1024;
+               efivars->efi_pstore_info.data = efivars;
+               spin_lock_init(&efivars->efi_pstore_info.buf_lock);
+               pstore_register(&efivars->efi_pstore_info);
+       }
+}
+#else
+static void efivar_pstore_register(struct efivars *efivars)
+{
+       return;
+}
+#endif
+
 static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
                             struct bin_attribute *bin_attr,
                             char *buf, loff_t pos, size_t count)
@@ -1716,6 +1706,31 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
        return found;
 }
 
+/*
+ * Returns the size of variable_name, in bytes, including the
+ * terminating NULL character, or variable_name_size if no NULL
+ * character is found among the first variable_name_size bytes.
+ */
+static unsigned long var_name_strnsize(efi_char16_t *variable_name,
+                                      unsigned long variable_name_size)
+{
+       unsigned long len;
+       efi_char16_t c;
+
+       /*
+        * The variable name is, by definition, a NULL-terminated
+        * string, so make absolutely sure that variable_name_size is
+        * the value we expect it to be. If not, return the real size.
+        */
+       for (len = 2; len <= variable_name_size; len += sizeof(c)) {
+               c = variable_name[(len / sizeof(c)) - 1];
+               if (!c)
+                       break;
+       }
+
+       return min(len, variable_name_size);
+}
+
 static void efivar_update_sysfs_entries(struct work_struct *work)
 {
        struct efivars *efivars = &__efivars;
@@ -1756,10 +1771,13 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
                if (!found) {
                        kfree(variable_name);
                        break;
-               } else
+               } else {
+                       variable_name_size = var_name_strnsize(variable_name,
+                                                              variable_name_size);
                        efivar_create_sysfs_entry(efivars,
                                                  variable_name_size,
                                                  variable_name, &vendor);
+               }
        }
 }
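
A minimal standalone sketch of the scan performed by the var_name_strnsize() helper added above (userspace C; efi_char16_t is assumed to be a plain 16-bit type, and min() is replaced by a ternary):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint16_t efi_char16_t;

    static unsigned long var_name_strnsize(efi_char16_t *variable_name,
                                           unsigned long variable_name_size)
    {
            unsigned long len;
            efi_char16_t c;

            /* walk the UCS-2 name two bytes at a time, stop at the NUL */
            for (len = 2; len <= variable_name_size; len += sizeof(c)) {
                    c = variable_name[(len / sizeof(c)) - 1];
                    if (!c)
                            break;
            }
            return len < variable_name_size ? len : variable_name_size;
    }

    int main(void)
    {
            /* "Boot" in UCS-2 plus its terminator: 5 * 2 = 10 bytes */
            efi_char16_t name[] = { 'B', 'o', 'o', 't', 0 };

            /* firmware over-reports the size: the helper trims it to 10 */
            printf("%lu\n", var_name_strnsize(name, 1024));  /* 10 */
            /* no NUL within the reported size: the reported size is kept */
            printf("%lu\n", var_name_strnsize(name, 6));     /* 6 */
            return 0;
    }
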
 
@@ -1958,6 +1976,35 @@ void unregister_efivars(struct efivars *efivars)
 }
 EXPORT_SYMBOL_GPL(unregister_efivars);
 
+/*
+ * Print a warning when duplicate EFI variables are encountered and
+ * disable the sysfs workqueue since the firmware is buggy.
+ */
+static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
+                            unsigned long len16)
+{
+       size_t i, len8 = len16 / sizeof(efi_char16_t);
+       char *s8;
+
+       /*
+        * Disable the workqueue since the algorithm it uses for
+        * detecting new variables won't work with this buggy
+        * implementation of GetNextVariableName().
+        */
+       efivar_wq_enabled = false;
+
+       s8 = kzalloc(len8, GFP_KERNEL);
+       if (!s8)
+               return;
+
+       for (i = 0; i < len8; i++)
+               s8[i] = s16[i];
+
+       printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
+              s8, vendor_guid);
+       kfree(s8);
+}
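
A standalone sketch of the UCS-2 to ASCII narrowing used for the warning above; since len16 counts bytes including the terminating NUL, len8 slots are enough for the narrowed string and its terminator:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef uint16_t efi_char16_t;

    int main(void)
    {
            efi_char16_t s16[] = { 'B', 'o', 'o', 't', '0', '0', '0', '0', 0 };
            size_t len16 = sizeof(s16);                      /* 18 bytes */
            size_t i, len8 = len16 / sizeof(efi_char16_t);   /* 9 chars incl. NUL */
            char *s8 = calloc(len8, 1);

            if (!s8)
                    return 1;
            /* narrow each UCS-2 code unit to a single byte */
            for (i = 0; i < len8; i++)
                    s8[i] = s16[i];
            printf("duplicate variable: %s\n", s8);          /* Boot0000 */
            free(s8);
            return 0;
    }
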
+
 int register_efivars(struct efivars *efivars,
                     const struct efivar_operations *ops,
                     struct kobject *parent_kobj)
@@ -2006,6 +2053,24 @@ int register_efivars(struct efivars *efivars,
                                                &vendor_guid);
                switch (status) {
                case EFI_SUCCESS:
+                       variable_name_size = var_name_strnsize(variable_name,
+                                                              variable_name_size);
+
+                       /*
+                        * Some firmware implementations return the
+                        * same variable name on multiple calls to
+                        * get_next_variable(). Terminate the loop
+                        * immediately as there is no guarantee that
+                        * we'll ever see a different variable name,
+                        * and we may end up looping here forever.
+                        */
+                       if (variable_is_present(variable_name, &vendor_guid)) {
+                               dup_variable_bug(variable_name, &vendor_guid,
+                                                variable_name_size);
+                               status = EFI_NOT_FOUND;
+                               break;
+                       }
+
                        efivar_create_sysfs_entry(efivars,
                                                  variable_name_size,
                                                  variable_name,
@@ -2025,15 +2090,8 @@ int register_efivars(struct efivars *efivars,
        if (error)
                unregister_efivars(efivars);
 
-       efivars->efi_pstore_info = efi_pstore_info;
-
-       efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
-       if (efivars->efi_pstore_info.buf) {
-               efivars->efi_pstore_info.bufsize = 1024;
-               efivars->efi_pstore_info.data = efivars;
-               spin_lock_init(&efivars->efi_pstore_info.buf_lock);
-               pstore_register(&efivars->efi_pstore_info);
-       }
+       if (!efivars_pstore_disable)
+               efivar_pstore_register(efivars);
 
        register_filesystem(&efivarfs_type);
 
index a71a54a3e3f783a4151ed5919c01290ade8ffd5d..5150df6cba0815623c9fc7c86e96ff2ad099f0ed 100644 (file)
@@ -193,7 +193,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
        if (!np)
                return;
 
-       do {
+       for (;; index++) {
                ret = of_parse_phandle_with_args(np, "gpio-ranges",
                                "#gpio-range-cells", index, &pinspec);
                if (ret)
@@ -222,8 +222,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
 
                if (ret)
                        break;
-
-       } while (index++);
+       }
 }
 
 #else
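
The rewrite above replaces a loop that could never advance: with index starting at 0, the do/while condition evaluates to 0 after the first pass, so only the first gpio-range was ever parsed. A standalone sketch of that behaviour:

    #include <stdio.h>

    int main(void)
    {
            int index = 0, passes = 0;

            do {
                    passes++;
            } while (index++);           /* 0 -> false: exits after one pass */

            printf("passes=%d\n", passes);   /* 1 */
            return 0;
    }
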
index c194f4e680ad17cf7d2b665a277b10ad2f2edc6a..e2acfdbf7d3cc7f60104ba5b6c3fce3142e538b8 100644 (file)
@@ -1634,7 +1634,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
        unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
        unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
        unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
-       unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+       unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
        unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
 
        /* ignore tiny modes */
@@ -1715,6 +1715,7 @@ set_size:
        }
 
        mode->type = DRM_MODE_TYPE_DRIVER;
+       mode->vrefresh = drm_mode_vrefresh(mode);
        drm_mode_set_name(mode);
 
        return mode;
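
The vsync_offset fix above assumes the usual EDID detailed-timing packing: the top two bits of the 6-bit vertical sync offset sit in bits 3:2 of the combined hi byte and the low four bits in the upper nibble of the lo byte, so the hi bits must be shifted left, not right. A standalone sketch with made-up byte values:

    #include <stdio.h>

    int main(void)
    {
            unsigned hi = 0x0c;   /* bits 3:2 set -> upper offset bits = 3 */
            unsigned lo = 0x5f;   /* upper nibble -> lower offset bits = 5 */

            unsigned old   = (hi & 0xc) >> 2 | lo >> 4;   /* 3 | 5 = 7 (wrong) */
            unsigned fixed = (hi & 0xc) << 2 | lo >> 4;   /* 48 | 5 = 53       */

            printf("old=%u fixed=%u\n", old, fixed);
            return 0;
    }
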
index 36493ce71f9a8c217ebe8af960598b299f1a95bb..98cc14725ba94c4d1dcd0d44242bb26d92eb75fa 100644 (file)
 /* position control register for hardware window 0, 2 ~ 4.*/
 #define VIDOSD_A(win)          (VIDOSD_BASE + 0x00 + (win) * 16)
 #define VIDOSD_B(win)          (VIDOSD_BASE + 0x04 + (win) * 16)
-/* size control register for hardware window 0. */
-#define VIDOSD_C_SIZE_W0       (VIDOSD_BASE + 0x08)
-/* alpha control register for hardware window 1 ~ 4. */
-#define VIDOSD_C(win)          (VIDOSD_BASE + 0x18 + (win) * 16)
-/* size control register for hardware window 1 ~ 4. */
+/*
+ * size control register for hardware window 0 and alpha control register
+ * for hardware windows 1 ~ 4
+ */
+#define VIDOSD_C(win)          (VIDOSD_BASE + 0x08 + (win) * 16)
+/* size control register for hardware windows 1 ~ 2. */
 #define VIDOSD_D(win)          (VIDOSD_BASE + 0x0C + (win) * 16)
 
 #define VIDWx_BUF_START(win, buf)      (VIDW_BUF_START(buf) + (win) * 8)
@@ -50,9 +51,9 @@
 #define VIDWx_BUF_SIZE(win, buf)       (VIDW_BUF_SIZE(buf) + (win) * 4)
 
 /* color key control register for hardware window 1 ~ 4. */
-#define WKEYCON0_BASE(x)               ((WKEYCON0 + 0x140) + (x * 8))
+#define WKEYCON0_BASE(x)               ((WKEYCON0 + 0x140) + ((x - 1) * 8))
 /* color key value register for hardware window 1 ~ 4. */
-#define WKEYCON1_BASE(x)               ((WKEYCON1 + 0x140) + (x * 8))
+#define WKEYCON1_BASE(x)               ((WKEYCON1 + 0x140) + ((x - 1) * 8))
 
 /* FIMD has totally five hardware windows. */
 #define WINDOWS_NR     5
@@ -109,9 +110,9 @@ struct fimd_context {
 
 #ifdef CONFIG_OF
 static const struct of_device_id fimd_driver_dt_match[] = {
-       { .compatible = "samsung,exynos4-fimd",
+       { .compatible = "samsung,exynos4210-fimd",
          .data = &exynos4_fimd_driver_data },
-       { .compatible = "samsung,exynos5-fimd",
+       { .compatible = "samsung,exynos5250-fimd",
          .data = &exynos5_fimd_driver_data },
        {},
 };
@@ -581,7 +582,7 @@ static void fimd_win_commit(struct device *dev, int zpos)
        if (win != 3 && win != 4) {
                u32 offset = VIDOSD_D(win);
                if (win == 0)
-                       offset = VIDOSD_C_SIZE_W0;
+                       offset = VIDOSD_C(win);
                val = win_data->ovl_width * win_data->ovl_height;
                writel(val, ctx->regs + offset);
 
index 3b0da0378acf509976361d85322dc1fbc5c6779f..47a493c8a71f2629b7acf0de109bcd3eefc15bfd 100644 (file)
 
 /* registers for base address */
 #define G2D_SRC_BASE_ADDR              0x0304
+#define G2D_SRC_COLOR_MODE             0x030C
+#define G2D_SRC_LEFT_TOP               0x0310
+#define G2D_SRC_RIGHT_BOTTOM           0x0314
 #define G2D_SRC_PLANE2_BASE_ADDR       0x0318
 #define G2D_DST_BASE_ADDR              0x0404
+#define G2D_DST_COLOR_MODE             0x040C
+#define G2D_DST_LEFT_TOP               0x0410
+#define G2D_DST_RIGHT_BOTTOM           0x0414
 #define G2D_DST_PLANE2_BASE_ADDR       0x0418
 #define G2D_PAT_BASE_ADDR              0x0500
 #define G2D_MSK_BASE_ADDR              0x0520
@@ -82,7 +88,7 @@
 #define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
 
 /* G2D_DMA_HOLD_CMD */
-#define G2D_USET_HOLD                  (1 << 2)
+#define G2D_USER_HOLD                  (1 << 2)
 #define G2D_LIST_HOLD                  (1 << 1)
 #define G2D_BITBLT_HOLD                        (1 << 0)
 
 #define G2D_START_NHOLT                        (1 << 1)
 #define G2D_START_BITBLT               (1 << 0)
 
+/* buffer color format */
+#define G2D_FMT_XRGB8888               0
+#define G2D_FMT_ARGB8888               1
+#define G2D_FMT_RGB565                 2
+#define G2D_FMT_XRGB1555               3
+#define G2D_FMT_ARGB1555               4
+#define G2D_FMT_XRGB4444               5
+#define G2D_FMT_ARGB4444               6
+#define G2D_FMT_PACKED_RGB888          7
+#define G2D_FMT_A8                     11
+#define G2D_FMT_L8                     12
+
+/* buffer valid length */
+#define G2D_LEN_MIN                    1
+#define G2D_LEN_MAX                    8000
+
 #define G2D_CMDLIST_SIZE               (PAGE_SIZE / 4)
 #define G2D_CMDLIST_NUM                        64
 #define G2D_CMDLIST_POOL_SIZE          (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
 #define G2D_CMDLIST_DATA_NUM           (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
 
-#define MAX_BUF_ADDR_NR                        6
-
 /* maximum buffer pool size of userptr is 64MB as default */
 #define MAX_POOL               (64 * 1024 * 1024)
 
@@ -106,6 +126,17 @@ enum {
        BUF_TYPE_USERPTR,
 };
 
+enum g2d_reg_type {
+       REG_TYPE_NONE = -1,
+       REG_TYPE_SRC,
+       REG_TYPE_SRC_PLANE2,
+       REG_TYPE_DST,
+       REG_TYPE_DST_PLANE2,
+       REG_TYPE_PAT,
+       REG_TYPE_MSK,
+       MAX_REG_TYPE_NR
+};
+
 /* cmdlist data structure */
 struct g2d_cmdlist {
        u32             head;
@@ -113,6 +144,42 @@ struct g2d_cmdlist {
        u32             last;   /* last data offset */
 };
 
+/*
+ * A structure of buffer description
+ *
+ * @format: color format
+ * @left_x: the x coordinate of the top-left corner
+ * @top_y: the y coordinate of the top-left corner
+ * @right_x: the x coordinate of the bottom-right corner
+ * @bottom_y: the y coordinate of the bottom-right corner
+ *
+ */
+struct g2d_buf_desc {
+       unsigned int    format;
+       unsigned int    left_x;
+       unsigned int    top_y;
+       unsigned int    right_x;
+       unsigned int    bottom_y;
+};
+
+/*
+ * A structure of buffer information
+ *
+ * @map_nr: manages the number of mapped buffers
+ * @reg_types: stores register type in the order of requested command
+ * @handles: stores buffer handle in its reg_type position
+ * @types: stores buffer type in its reg_type position
+ * @descs: stores buffer description in its reg_type position
+ *
+ */
+struct g2d_buf_info {
+       unsigned int            map_nr;
+       enum g2d_reg_type       reg_types[MAX_REG_TYPE_NR];
+       unsigned long           handles[MAX_REG_TYPE_NR];
+       unsigned int            types[MAX_REG_TYPE_NR];
+       struct g2d_buf_desc     descs[MAX_REG_TYPE_NR];
+};
+
 struct drm_exynos_pending_g2d_event {
        struct drm_pending_event        base;
        struct drm_exynos_g2d_event     event;
@@ -131,14 +198,11 @@ struct g2d_cmdlist_userptr {
        bool                    in_pool;
        bool                    out_of_list;
 };
-
 struct g2d_cmdlist_node {
        struct list_head        list;
        struct g2d_cmdlist      *cmdlist;
-       unsigned int            map_nr;
-       unsigned long           handles[MAX_BUF_ADDR_NR];
-       unsigned int            obj_type[MAX_BUF_ADDR_NR];
        dma_addr_t              dma_addr;
+       struct g2d_buf_info     buf_info;
 
        struct drm_exynos_pending_g2d_event     *event;
 };
@@ -188,6 +252,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
        struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
        int nr;
        int ret;
+       struct g2d_buf_info *buf_info;
 
        init_dma_attrs(&g2d->cmdlist_dma_attrs);
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
@@ -209,11 +274,17 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
        }
 
        for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
+               unsigned int i;
+
                node[nr].cmdlist =
                        g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
                node[nr].dma_addr =
                        g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
 
+               buf_info = &node[nr].buf_info;
+               for (i = 0; i < MAX_REG_TYPE_NR; i++)
+                       buf_info->reg_types[i] = REG_TYPE_NONE;
+
                list_add_tail(&node[nr].list, &g2d->free_cmdlist);
        }
 
@@ -450,7 +521,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
                                                DMA_BIDIRECTIONAL);
        if (ret < 0) {
                DRM_ERROR("failed to map sgt with dma region.\n");
-               goto err_free_sgt;
+               goto err_sg_free_table;
        }
 
        g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
@@ -467,8 +538,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 
        return &g2d_userptr->dma_addr;
 
-err_free_sgt:
+err_sg_free_table:
        sg_free_table(sgt);
+
+err_free_sgt:
        kfree(sgt);
        sgt = NULL;
 
@@ -506,36 +579,172 @@ static void g2d_userptr_free_all(struct drm_device *drm_dev,
        g2d->current_pool = 0;
 }
 
+static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
+{
+       enum g2d_reg_type reg_type;
+
+       switch (reg_offset) {
+       case G2D_SRC_BASE_ADDR:
+       case G2D_SRC_COLOR_MODE:
+       case G2D_SRC_LEFT_TOP:
+       case G2D_SRC_RIGHT_BOTTOM:
+               reg_type = REG_TYPE_SRC;
+               break;
+       case G2D_SRC_PLANE2_BASE_ADDR:
+               reg_type = REG_TYPE_SRC_PLANE2;
+               break;
+       case G2D_DST_BASE_ADDR:
+       case G2D_DST_COLOR_MODE:
+       case G2D_DST_LEFT_TOP:
+       case G2D_DST_RIGHT_BOTTOM:
+               reg_type = REG_TYPE_DST;
+               break;
+       case G2D_DST_PLANE2_BASE_ADDR:
+               reg_type = REG_TYPE_DST_PLANE2;
+               break;
+       case G2D_PAT_BASE_ADDR:
+               reg_type = REG_TYPE_PAT;
+               break;
+       case G2D_MSK_BASE_ADDR:
+               reg_type = REG_TYPE_MSK;
+               break;
+       default:
+               reg_type = REG_TYPE_NONE;
+               DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
+               break;
+       };
+
+       return reg_type;
+}
+
+static unsigned long g2d_get_buf_bpp(unsigned int format)
+{
+       unsigned long bpp;
+
+       switch (format) {
+       case G2D_FMT_XRGB8888:
+       case G2D_FMT_ARGB8888:
+               bpp = 4;
+               break;
+       case G2D_FMT_RGB565:
+       case G2D_FMT_XRGB1555:
+       case G2D_FMT_ARGB1555:
+       case G2D_FMT_XRGB4444:
+       case G2D_FMT_ARGB4444:
+               bpp = 2;
+               break;
+       case G2D_FMT_PACKED_RGB888:
+               bpp = 3;
+               break;
+       default:
+               bpp = 1;
+               break;
+       }
+
+       return bpp;
+}
+
+static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
+                                               enum g2d_reg_type reg_type,
+                                               unsigned long size)
+{
+       unsigned int width, height;
+       unsigned long area;
+
+       /*
+        * Check source and destination buffers only;
+        * the others are always valid.
+        */
+       if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
+               return true;
+
+       width = buf_desc->right_x - buf_desc->left_x;
+       if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
+               DRM_ERROR("width[%u] is out of range!\n", width);
+               return false;
+       }
+
+       height = buf_desc->bottom_y - buf_desc->top_y;
+       if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
+               DRM_ERROR("height[%u] is out of range!\n", height);
+               return false;
+       }
+
+       area = (unsigned long)width * (unsigned long)height *
+                                       g2d_get_buf_bpp(buf_desc->format);
+       if (area > size) {
+               DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size);
+               return false;
+       }
+
+       return true;
+}
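
A standalone arithmetic sketch of the size check above; the 1920x1080 XRGB8888 geometry and the 8 MiB/4 MiB buffer sizes are made up for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int left_x = 0, top_y = 0, right_x = 1920, bottom_y = 1080;
            unsigned long bpp = 4;                    /* G2D_FMT_XRGB8888 */
            unsigned long area = (unsigned long)(right_x - left_x) *
                                 (bottom_y - top_y) * bpp;

            /* 1920 * 1080 * 4 = 8294400 bytes */
            printf("area=%lu fits_8MiB=%d fits_4MiB=%d\n",
                   area, area <= (8UL << 20), area <= (4UL << 20));
            return 0;
    }
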
+
 static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
                                struct g2d_cmdlist_node *node,
                                struct drm_device *drm_dev,
                                struct drm_file *file)
 {
        struct g2d_cmdlist *cmdlist = node->cmdlist;
+       struct g2d_buf_info *buf_info = &node->buf_info;
        int offset;
+       int ret;
        int i;
 
-       for (i = 0; i < node->map_nr; i++) {
+       for (i = 0; i < buf_info->map_nr; i++) {
+               struct g2d_buf_desc *buf_desc;
+               enum g2d_reg_type reg_type;
+               int reg_pos;
                unsigned long handle;
                dma_addr_t *addr;
 
-               offset = cmdlist->last - (i * 2 + 1);
-               handle = cmdlist->data[offset];
+               reg_pos = cmdlist->last - 2 * (i + 1);
+
+               offset = cmdlist->data[reg_pos];
+               handle = cmdlist->data[reg_pos + 1];
+
+               reg_type = g2d_get_reg_type(offset);
+               if (reg_type == REG_TYPE_NONE) {
+                       ret = -EFAULT;
+                       goto err;
+               }
+
+               buf_desc = &buf_info->descs[reg_type];
+
+               if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
+                       unsigned long size;
+
+                       size = exynos_drm_gem_get_size(drm_dev, handle, file);
+                       if (!size) {
+                               ret = -EFAULT;
+                               goto err;
+                       }
+
+                       if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
+                                                                       size)) {
+                               ret = -EFAULT;
+                               goto err;
+                       }
 
-               if (node->obj_type[i] == BUF_TYPE_GEM) {
                        addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
                                                                file);
                        if (IS_ERR(addr)) {
-                               node->map_nr = i;
-                               return -EFAULT;
+                               ret = -EFAULT;
+                               goto err;
                        }
                } else {
                        struct drm_exynos_g2d_userptr g2d_userptr;
 
                        if (copy_from_user(&g2d_userptr, (void __user *)handle,
                                sizeof(struct drm_exynos_g2d_userptr))) {
-                               node->map_nr = i;
-                               return -EFAULT;
+                               ret = -EFAULT;
+                               goto err;
+                       }
+
+                       if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
+                                                       g2d_userptr.size)) {
+                               ret = -EFAULT;
+                               goto err;
                        }
 
                        addr = g2d_userptr_get_dma_addr(drm_dev,
@@ -544,16 +753,21 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
                                                        file,
                                                        &handle);
                        if (IS_ERR(addr)) {
-                               node->map_nr = i;
-                               return -EFAULT;
+                               ret = -EFAULT;
+                               goto err;
                        }
                }
 
-               cmdlist->data[offset] = *addr;
-               node->handles[i] = handle;
+               cmdlist->data[reg_pos + 1] = *addr;
+               buf_info->reg_types[i] = reg_type;
+               buf_info->handles[reg_type] = handle;
        }
 
        return 0;
+
+err:
+       buf_info->map_nr = i;
+       return ret;
 }
 
 static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
@@ -561,22 +775,33 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
                                  struct drm_file *filp)
 {
        struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+       struct g2d_buf_info *buf_info = &node->buf_info;
        int i;
 
-       for (i = 0; i < node->map_nr; i++) {
-               unsigned long handle = node->handles[i];
+       for (i = 0; i < buf_info->map_nr; i++) {
+               struct g2d_buf_desc *buf_desc;
+               enum g2d_reg_type reg_type;
+               unsigned long handle;
+
+               reg_type = buf_info->reg_types[i];
+
+               buf_desc = &buf_info->descs[reg_type];
+               handle = buf_info->handles[reg_type];
 
-               if (node->obj_type[i] == BUF_TYPE_GEM)
+               if (buf_info->types[reg_type] == BUF_TYPE_GEM)
                        exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
                                                        filp);
                else
                        g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
                                                        false);
 
-               node->handles[i] = 0;
+               buf_info->reg_types[i] = REG_TYPE_NONE;
+               buf_info->handles[reg_type] = 0;
+               buf_info->types[reg_type] = 0;
+               memset(buf_desc, 0x00, sizeof(*buf_desc));
        }
 
-       node->map_nr = 0;
+       buf_info->map_nr = 0;
 }
 
 static void g2d_dma_start(struct g2d_data *g2d,
@@ -589,10 +814,6 @@ static void g2d_dma_start(struct g2d_data *g2d,
        pm_runtime_get_sync(g2d->dev);
        clk_enable(g2d->gate_clk);
 
-       /* interrupt enable */
-       writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
-                       g2d->regs + G2D_INTEN);
-
        writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
        writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
 }
@@ -643,7 +864,6 @@ static void g2d_runqueue_worker(struct work_struct *work)
        struct g2d_data *g2d = container_of(work, struct g2d_data,
                                            runqueue_work);
 
-
        mutex_lock(&g2d->runqueue_mutex);
        clk_disable(g2d->gate_clk);
        pm_runtime_put_sync(g2d->dev);
@@ -724,20 +944,14 @@ static int g2d_check_reg_offset(struct device *dev,
        int i;
 
        for (i = 0; i < nr; i++) {
-               index = cmdlist->last - 2 * (i + 1);
+               struct g2d_buf_info *buf_info = &node->buf_info;
+               struct g2d_buf_desc *buf_desc;
+               enum g2d_reg_type reg_type;
+               unsigned long value;
 
-               if (for_addr) {
-                       /* check userptr buffer type. */
-                       reg_offset = (cmdlist->data[index] &
-                                       ~0x7fffffff) >> 31;
-                       if (reg_offset) {
-                               node->obj_type[i] = BUF_TYPE_USERPTR;
-                               cmdlist->data[index] &= ~G2D_BUF_USERPTR;
-                       }
-               }
+               index = cmdlist->last - 2 * (i + 1);
 
                reg_offset = cmdlist->data[index] & ~0xfffff000;
-
                if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
                        goto err;
                if (reg_offset % 4)
@@ -753,8 +967,60 @@ static int g2d_check_reg_offset(struct device *dev,
                        if (!for_addr)
                                goto err;
 
-                       if (node->obj_type[i] != BUF_TYPE_USERPTR)
-                               node->obj_type[i] = BUF_TYPE_GEM;
+                       reg_type = g2d_get_reg_type(reg_offset);
+                       if (reg_type == REG_TYPE_NONE)
+                               goto err;
+
+                       /* check userptr buffer type. */
+                       if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
+                               buf_info->types[reg_type] = BUF_TYPE_USERPTR;
+                               cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+                       } else
+                               buf_info->types[reg_type] = BUF_TYPE_GEM;
+                       break;
+               case G2D_SRC_COLOR_MODE:
+               case G2D_DST_COLOR_MODE:
+                       if (for_addr)
+                               goto err;
+
+                       reg_type = g2d_get_reg_type(reg_offset);
+                       if (reg_type == REG_TYPE_NONE)
+                               goto err;
+
+                       buf_desc = &buf_info->descs[reg_type];
+                       value = cmdlist->data[index + 1];
+
+                       buf_desc->format = value & 0xf;
+                       break;
+               case G2D_SRC_LEFT_TOP:
+               case G2D_DST_LEFT_TOP:
+                       if (for_addr)
+                               goto err;
+
+                       reg_type = g2d_get_reg_type(reg_offset);
+                       if (reg_type == REG_TYPE_NONE)
+                               goto err;
+
+                       buf_desc = &buf_info->descs[reg_type];
+                       value = cmdlist->data[index + 1];
+
+                       buf_desc->left_x = value & 0x1fff;
+                       buf_desc->top_y = (value & 0x1fff0000) >> 16;
+                       break;
+               case G2D_SRC_RIGHT_BOTTOM:
+               case G2D_DST_RIGHT_BOTTOM:
+                       if (for_addr)
+                               goto err;
+
+                       reg_type = g2d_get_reg_type(reg_offset);
+                       if (reg_type == REG_TYPE_NONE)
+                               goto err;
+
+                       buf_desc = &buf_info->descs[reg_type];
+                       value = cmdlist->data[index + 1];
+
+                       buf_desc->right_x = value & 0x1fff;
+                       buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
                        break;
                default:
                        if (for_addr)
@@ -860,9 +1126,23 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
        cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
        cmdlist->data[cmdlist->last++] = 0;
 
+       /*
+        * 'LIST_HOLD' command should be set to the DMA_HOLD_CMD_REG
+        * and GCF bit should be set to INTEN register if user wants
+        * G2D interrupt event once current command list execution is
+        * finished.
+        * Otherwise only ACF bit should be set to INTEN register so
+        * that one interrupt occurs after all command lists
+        * have been completed.
+        */
        if (node->event) {
+               cmdlist->data[cmdlist->last++] = G2D_INTEN;
+               cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
                cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
                cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
+       } else {
+               cmdlist->data[cmdlist->last++] = G2D_INTEN;
+               cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
        }
 
        /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
@@ -887,7 +1167,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
        if (ret < 0)
                goto err_free_event;
 
-       node->map_nr = req->cmd_buf_nr;
+       node->buf_info.map_nr = req->cmd_buf_nr;
        if (req->cmd_buf_nr) {
                struct drm_exynos_g2d_cmd *cmd_buf;
 
index 67e17ce112b69eb5ce795358e1471fd9db2ad841..0e6fe000578c08c78be0df9fa00e2205429e8576 100644 (file)
@@ -164,6 +164,27 @@ out:
        exynos_gem_obj = NULL;
 }
 
+unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
+                                               unsigned int gem_handle,
+                                               struct drm_file *file_priv)
+{
+       struct exynos_drm_gem_obj *exynos_gem_obj;
+       struct drm_gem_object *obj;
+
+       obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+       if (!obj) {
+               DRM_ERROR("failed to lookup gem object.\n");
+               return 0;
+       }
+
+       exynos_gem_obj = to_exynos_gem_obj(obj);
+
+       drm_gem_object_unreference_unlocked(obj);
+
+       return exynos_gem_obj->buffer->size;
+}
+
+
 struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                      unsigned long size)
 {
index 35ebac47dc2bacfec928d54001b6065aa9e75564..468766bee450837eefe19f37db7e7fdcc00133c6 100644 (file)
@@ -130,6 +130,11 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv);
 
+/* get buffer size to gem handle. */
+unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
+                                               unsigned int gem_handle,
+                                               struct drm_file *file_priv);
+
 /* initialize gem object. */
 int exynos_drm_gem_init_object(struct drm_gem_object *obj);
 
index 13ccbd4bcfaa8c863f27d24870101c32166ecb47..9504b0cd825a4dc9d4b81c2be674eb0897eb137d 100644 (file)
@@ -117,13 +117,12 @@ static struct edid *vidi_get_edid(struct device *dev,
        }
 
        edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
-       edid = kzalloc(edid_len, GFP_KERNEL);
+       edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
        if (!edid) {
                DRM_DEBUG_KMS("failed to allocate edid\n");
                return ERR_PTR(-ENOMEM);
        }
 
-       memcpy(edid, ctx->raw_edid, edid_len);
        return edid;
 }
 
@@ -563,12 +562,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
                        return -EINVAL;
                }
                edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
-               ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
+               ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
                if (!ctx->raw_edid) {
                        DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
                        return -ENOMEM;
                }
-               memcpy(ctx->raw_edid, raw_edid, edid_len);
        } else {
                /*
                 * with connection = 0, free raw_edid
index e919aba29b3dfde038979f5c417966bc8bf147c1..2f4f72f070475a62f77ce15451540962eb011153 100644 (file)
@@ -818,7 +818,7 @@ static void mixer_win_disable(void *ctx, int win)
        mixer_ctx->win_data[win].enabled = false;
 }
 
-int mixer_check_timing(void *ctx, struct fb_videomode *timing)
+static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
 {
        struct mixer_context *mixer_ctx = ctx;
        u32 w, h;
index aae31489c89304f8d79ad2319bbb0ecb97897cef..7299ea45dd03dec217944f31422f5f1a62bcc6eb 100644 (file)
@@ -103,7 +103,7 @@ static const char *cache_level_str(int type)
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-       seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
+       seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
index 0a8eceb7590244af233d153845a91c79e6662420..e9b57893db2b1ebba1348a71e37c845f98c3286b 100644 (file)
@@ -125,6 +125,11 @@ MODULE_PARM_DESC(preliminary_hw_support,
                "Enable Haswell and ValleyView Support. "
                "(default: false)");
 
+int i915_disable_power_well __read_mostly = 0;
+module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
+MODULE_PARM_DESC(disable_power_well,
+                "Disable the power well when possible (default: false)");
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
index e95337c974594d34c936d071975f5040c78754d1..01769e2a99538e9e99cd191b328d7746439d328b 100644 (file)
@@ -1398,6 +1398,7 @@ extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 extern int i915_enable_ppgtt __read_mostly;
 extern unsigned int i915_preliminary_hw_support __read_mostly;
+extern int i915_disable_power_well __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
index 2f2daebd0eefd581c38415489164ed080e0d141a..3b11ab0fbc960ab1ff58fd842a2d51ff10ca15db 100644 (file)
@@ -732,6 +732,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
                   int count)
 {
        int i;
+       int relocs_total = 0;
+       int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 
        for (i = 0; i < count; i++) {
                char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
@@ -740,10 +742,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
                if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
                        return -EINVAL;
 
-               /* First check for malicious input causing overflow */
-               if (exec[i].relocation_count >
-                   INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+               /* First check for malicious input causing overflow in
+                * the worst case where we need to allocate the entire
+                * relocation tree as a single array.
+                */
+               if (exec[i].relocation_count > relocs_max - relocs_total)
                        return -EINVAL;
+               relocs_total += exec[i].relocation_count;
 
                length = exec[i].relocation_count *
                        sizeof(struct drm_i915_gem_relocation_entry);
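
A standalone sketch of the running-total guard above; the 32-byte relocation-entry size is an assumption used only for illustration:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
            int relocs_max = INT_MAX / 32;           /* assumed sizeof(entry) */
            int relocs_total = 0;
            int counts[] = { relocs_max - 10, 20 };  /* each one fits alone   */
            int i;

            for (i = 0; i < 2; i++) {
                    /* the per-buffer check passes, the summed check does not */
                    if (counts[i] > relocs_max - relocs_total) {
                            printf("buffer %d rejected\n", i);
                            return 0;
                    }
                    relocs_total += counts[i];
            }
            printf("accepted\n");
            return 0;
    }
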
index 287b42c9d1a8250e56d8a72d1b527cbb7dc1ad38..b20d50192fcc8f3dda5c7efbe098d1a560e95f71 100644 (file)
@@ -5771,6 +5771,11 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
                num_connectors++;
        }
 
+       if (is_cpu_edp)
+               intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+       else
+               intel_crtc->cpu_transcoder = pipe;
+
        /* We are not sure yet this won't happen. */
        WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
             INTEL_PCH_TYPE(dev));
@@ -5837,11 +5842,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        int pipe = intel_crtc->pipe;
        int ret;
 
-       if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
-               intel_crtc->cpu_transcoder = TRANSCODER_EDP;
-       else
-               intel_crtc->cpu_transcoder = pipe;
-
        drm_vblank_pre_modeset(dev, pipe);
 
        ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
index 6f728e5ee79391d3bf726f971eb7b96af307df0d..d7d4afe013417f489934dbab8e5bafbf48614aa9 100644 (file)
@@ -820,6 +820,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
        struct intel_link_m_n m_n;
        int pipe = intel_crtc->pipe;
        enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+       int target_clock;
 
        /*
         * Find the lane count in the intel_encoder private
@@ -835,13 +836,22 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
                }
        }
 
+       target_clock = mode->clock;
+       for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+               if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+                       target_clock = intel_edp_target_clock(intel_encoder,
+                                                             mode);
+                       break;
+               }
+       }
+
        /*
         * Compute the GMCH and Link ratios. The '3' here is
         * the number of bytes_per_pixel post-LUT, which we always
         * set up for 8-bits of R/G/B, or 3 bytes total.
         */
        intel_link_compute_m_n(intel_crtc->bpp, lane_count,
-                              mode->clock, adjusted_mode->clock, &m_n);
+                              target_clock, adjusted_mode->clock, &m_n);
 
        if (IS_HASWELL(dev)) {
                I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
@@ -1930,7 +1940,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                for (i = 0; i < intel_dp->lane_count; i++)
                        if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
                                break;
-               if (i == intel_dp->lane_count && voltage_tries == 5) {
+               if (i == intel_dp->lane_count) {
                        ++loop_tries;
                        if (loop_tries == 5) {
                                DRM_DEBUG_KMS("too many full retries, give up\n");
index acf8aec9ada7162543954e4adf7c7b28d1bb4770..ef4744e1bf0bc44be8424a2cb936c26ff716da28 100644 (file)
@@ -203,7 +203,13 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
        algo->data = bus;
 }
 
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4)
+/*
+ * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
+ * mode. This results in spurious interrupt warnings if the legacy irq no. is
+ * shared with another device. The kernel then disables that interrupt source
+ * and so prevents the other device from working properly.
+ */
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
 static int
 gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
                     u32 gmbus2_status,
@@ -214,6 +220,9 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
        u32 gmbus2 = 0;
        DEFINE_WAIT(wait);
 
+       if (!HAS_GMBUS_IRQ(dev_priv->dev))
+               gmbus4_irq_en = 0;
+
        /* Important: The hw handles only the first bit, so set only one! Since
         * we also need to check for NAKs besides the hw ready/idle signal, we
         * need to wake up periodically and check that ourselves. */
index a3730e0289e5eefa15afb9f04a54c65c817f7886..bee8cb6108a7d44883286ef4b35bb6d7f8713a0f 100644 (file)
@@ -321,9 +321,6 @@ void intel_panel_enable_backlight(struct drm_device *dev,
        if (dev_priv->backlight_level == 0)
                dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
 
-       dev_priv->backlight_enabled = true;
-       intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
-
        if (INTEL_INFO(dev)->gen >= 4) {
                uint32_t reg, tmp;
 
@@ -359,12 +356,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
        }
 
 set_level:
-       /* Check the current backlight level and try to set again if it's zero.
-        * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically
-        * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written.
+       /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
+        * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
+        * registers are set.
         */
-       if (!intel_panel_get_backlight(dev))
-               intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
+       dev_priv->backlight_enabled = true;
+       intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
 }
 
 static void intel_panel_init_backlight(struct drm_device *dev)
index a1794c6df1bf1b80917f60e2145c39ae741aa478..adca00783e61b304723d3d05197b53f47b6b33db 100644 (file)
@@ -4079,6 +4079,9 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
        if (!IS_HASWELL(dev))
                return;
 
+       if (!i915_disable_power_well && !enable)
+               return;
+
        tmp = I915_READ(HSW_PWR_WELL_DRIVER);
        is_enabled = tmp & HSW_PWR_WELL_STATE;
        enable_requested = tmp & HSW_PWR_WELL_ENABLE;
index a274b9906ef892334460f061e542839a752a591f..fe22bb780e1d048efc76e92d7296ab98f94294db 100644 (file)
@@ -382,19 +382,19 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
        m = n = p = 0;
        vcomax = 800000;
        vcomin = 400000;
-       pllreffreq = 3333;
+       pllreffreq = 33333;
 
        delta = 0xffffffff;
        permitteddelta = clock * 5 / 1000;
 
-       for (testp = 16; testp > 0; testp--) {
+       for (testp = 16; testp > 0; testp >>= 1) {
                if (clock * testp > vcomax)
                        continue;
                if (clock * testp < vcomin)
                        continue;
 
                for (testm = 1; testm < 33; testm++) {
-                       for (testn = 1; testn < 257; testn++) {
+                       for (testn = 17; testn < 257; testn++) {
                                computed = (pllreffreq * testn) /
                                        (testm * testp);
                                if (computed > clock)
@@ -404,11 +404,11 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
                                if (tmpdelta < delta) {
                                        delta = tmpdelta;
                                        n = testn - 1;
-                                       m = (testm - 1) | ((n >> 1) & 0x80);
+                                       m = (testm - 1);
                                        p = testp - 1;
                                }
                                if ((clock * testp) >= 600000)
-                                       p |= 80;
+                                       p |= 0x80;
                        }
                }
        }
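
A standalone sketch of one candidate from the corrected loop above; the divider choice (n=89, m=5, p=4) and the 148500 kHz target are made up for illustration:

    #include <stdio.h>

    int main(void)
    {
            long clock = 148500;                             /* kHz target   */
            long testn = 89, testm = 5, testp = 4;
            long fixed = (33333 * testn) / (testm * testp);  /* ~148331 kHz  */
            long old   = (3333  * testn) / (testm * testp);  /* ~14831 kHz   */

            /* permitted delta is 0.5% of the target clock */
            printf("permitted=%ld fixed_delta=%ld old_delta=%ld\n",
                   clock * 5 / 1000, clock - fixed, clock - old);
            return 0;
    }
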
index 0daab62ea14c68a13f9fa1e58b681f4d39d760e0..3b2e7b6304d39862ac60159416018d4417f33ec6 100644 (file)
@@ -278,7 +278,6 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
        struct nouveau_object *parent = NULL;
        struct nouveau_object *namedb = NULL;
        struct nouveau_handle *handle = NULL;
-       int ret = -EINVAL;
 
        parent = nouveau_handle_ref(client, _parent);
        if (!parent)
@@ -295,7 +294,7 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
        }
 
        nouveau_object_ref(NULL, &parent);
-       return ret;
+       return handle ? 0 : -EINVAL;
 }
 
 int
index 6b17b614629f64172a6ace41ed5a1593889c94ad..0b20fc0d19c16861f969f5acdda2938559243be1 100644 (file)
@@ -4,7 +4,7 @@
 #include <core/device.h>
 #include <core/subdev.h>
 
-enum nouveau_therm_mode {
+enum nouveau_therm_fan_mode {
        NOUVEAU_THERM_CTRL_NONE = 0,
        NOUVEAU_THERM_CTRL_MANUAL = 1,
        NOUVEAU_THERM_CTRL_AUTO = 2,
index f794dc89a3b2a1ddaf68dc686b5ef13c0f9bbbcb..a00a5a76e2d62b82d5717923e98e85541bb7597c 100644 (file)
@@ -134,7 +134,7 @@ nouveau_therm_alarm(struct nouveau_alarm *alarm)
 }
 
 int
-nouveau_therm_mode(struct nouveau_therm *therm, int mode)
+nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode)
 {
        struct nouveau_therm_priv *priv = (void *)therm;
        struct nouveau_device *device = nv_device(therm);
@@ -149,10 +149,15 @@ nouveau_therm_mode(struct nouveau_therm *therm, int mode)
            (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0))
                return -EINVAL;
 
+       /* do not allow automatic fan management if the thermal sensor is
+        * not available */
+       if (priv->mode == 2 && therm->temp_get(therm) < 0)
+               return -EINVAL;
+
        if (priv->mode == mode)
                return 0;
 
-       nv_info(therm, "Thermal management: %s\n", name[mode]);
+       nv_info(therm, "fan management: %s\n", name[mode]);
        nouveau_therm_update(therm, mode);
        return 0;
 }
@@ -213,7 +218,7 @@ nouveau_therm_attr_set(struct nouveau_therm *therm,
                priv->fan->bios.max_duty = value;
                return 0;
        case NOUVEAU_THERM_ATTR_FAN_MODE:
-               return nouveau_therm_mode(therm, value);
+               return nouveau_therm_fan_mode(therm, value);
        case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
                priv->bios_sensor.thrs_fan_boost.temp = value;
                priv->sensor.program_alarms(therm);
@@ -263,7 +268,7 @@ _nouveau_therm_init(struct nouveau_object *object)
                return ret;
 
        if (priv->suspend >= 0)
-               nouveau_therm_mode(therm, priv->mode);
+               nouveau_therm_fan_mode(therm, priv->mode);
        priv->sensor.program_alarms(therm);
        return 0;
 }
@@ -313,11 +318,12 @@ nouveau_therm_create_(struct nouveau_object *parent,
 int
 nouveau_therm_preinit(struct nouveau_therm *therm)
 {
-       nouveau_therm_ic_ctor(therm);
        nouveau_therm_sensor_ctor(therm);
+       nouveau_therm_ic_ctor(therm);
        nouveau_therm_fan_ctor(therm);
 
-       nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_NONE);
+       nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE);
+       nouveau_therm_sensor_preinit(therm);
        return 0;
 }
 
index e24090bac195687c34f9175103b34a9e7e0cb8d5..8b3adec5fbb19e8a34cdb53fa83420f7b57f21b2 100644 (file)
@@ -32,6 +32,7 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
                        struct i2c_board_info *info)
 {
        struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c);
+       struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
        struct i2c_client *client;
 
        request_module("%s%s", I2C_MODULE_PREFIX, info->type);
@@ -46,8 +47,9 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
        }
 
        nv_info(priv,
-               "Found an %s at address 0x%x (controlled by lm_sensors)\n",
-               info->type, info->addr);
+               "Found an %s at address 0x%x (controlled by lm_sensors, "
+               "temp offset %+i C)\n",
+               info->type, info->addr, sensor->offset_constant);
        priv->ic = client;
 
        return true;
index 0f5363edb96435ec8fb992abab3479a462bcdf88..a70d1b7e397b4b3ffe280b1f0ad6152679f6f15b 100644 (file)
@@ -29,54 +29,83 @@ struct nv40_therm_priv {
        struct nouveau_therm_priv base;
 };
 
+enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 };
+
+static enum nv40_sensor_style
+nv40_sensor_style(struct nouveau_therm *therm)
+{
+       struct nouveau_device *device = nv_device(therm);
+
+       switch (device->chipset) {
+       case 0x43:
+       case 0x44:
+       case 0x4a:
+       case 0x47:
+               return OLD_STYLE;
+
+       case 0x46:
+       case 0x49:
+       case 0x4b:
+       case 0x4e:
+       case 0x4c:
+       case 0x67:
+       case 0x68:
+       case 0x63:
+               return NEW_STYLE;
+       default:
+               return INVALID_STYLE;
+       }
+}
+
 static int
 nv40_sensor_setup(struct nouveau_therm *therm)
 {
-       struct nouveau_device *device = nv_device(therm);
+       enum nv40_sensor_style style = nv40_sensor_style(therm);
 
        /* enable ADC readout and disable the ALARM threshold */
-       if (device->chipset >= 0x46) {
+       if (style == NEW_STYLE) {
                nv_mask(therm, 0x15b8, 0x80000000, 0);
                nv_wr32(therm, 0x15b0, 0x80003fff);
-               mdelay(10); /* wait for the temperature to stabilize */
+               mdelay(20); /* wait for the temperature to stabilize */
                return nv_rd32(therm, 0x15b4) & 0x3fff;
-       } else {
+       } else if (style == OLD_STYLE) {
                nv_wr32(therm, 0x15b0, 0xff);
+               mdelay(20); /* wait for the temperature to stabilize */
                return nv_rd32(therm, 0x15b4) & 0xff;
-       }
+       } else
+               return -ENODEV;
 }
 
 static int
 nv40_temp_get(struct nouveau_therm *therm)
 {
        struct nouveau_therm_priv *priv = (void *)therm;
-       struct nouveau_device *device = nv_device(therm);
        struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
+       enum nv40_sensor_style style = nv40_sensor_style(therm);
        int core_temp;
 
-       if (device->chipset >= 0x46) {
+       if (style == NEW_STYLE) {
                nv_wr32(therm, 0x15b0, 0x80003fff);
                core_temp = nv_rd32(therm, 0x15b4) & 0x3fff;
-       } else {
+       } else if (style == OLD_STYLE) {
                nv_wr32(therm, 0x15b0, 0xff);
                core_temp = nv_rd32(therm, 0x15b4) & 0xff;
-       }
-
-       /* Setup the sensor if the temperature is 0 */
-       if (core_temp == 0)
-               core_temp = nv40_sensor_setup(therm);
+       } else
+               return -ENODEV;
 
-       if (sensor->slope_div == 0)
-               sensor->slope_div = 1;
-       if (sensor->offset_den == 0)
-               sensor->offset_den = 1;
-       if (sensor->slope_mult < 1)
-               sensor->slope_mult = 1;
+       /* if the slope or the offset is unset, do not use the sensor */
+       if (!sensor->slope_div || !sensor->slope_mult ||
+           !sensor->offset_num || !sensor->offset_den)
+           return -ENODEV;
 
        core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
        core_temp = core_temp + sensor->offset_num / sensor->offset_den;
        core_temp = core_temp + sensor->offset_constant - 8;
 
+       /* reserve negative temperatures for errors */
+       if (core_temp < 0)
+               core_temp = 0;
+
        return core_temp;
 }
 
index 06b98706b3fc163460aa807af2a699ec62a8af98..438d9824b7741c5733bf2aa0f8ec306784569efe 100644 (file)
@@ -102,7 +102,7 @@ struct nouveau_therm_priv {
        struct i2c_client *ic;
 };
 
-int nouveau_therm_mode(struct nouveau_therm *therm, int mode);
+int nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode);
 int nouveau_therm_attr_get(struct nouveau_therm *therm,
                       enum nouveau_therm_attr_type type);
 int nouveau_therm_attr_set(struct nouveau_therm *therm,
@@ -122,6 +122,7 @@ int nouveau_therm_fan_sense(struct nouveau_therm *therm);
 
 int nouveau_therm_preinit(struct nouveau_therm *);
 
+void nouveau_therm_sensor_preinit(struct nouveau_therm *);
 void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
                                             enum nouveau_therm_thrs thrs,
                                             enum nouveau_therm_thrs_state st);
index b37624af82977543142f7035a2f2ba7b8a89cc63..470f6a47b6565bfe4d3ad257fa5d7f038ca25755 100644 (file)
@@ -34,10 +34,6 @@ nouveau_therm_temp_set_defaults(struct nouveau_therm *therm)
 {
        struct nouveau_therm_priv *priv = (void *)therm;
 
-       priv->bios_sensor.slope_mult = 1;
-       priv->bios_sensor.slope_div = 1;
-       priv->bios_sensor.offset_num = 0;
-       priv->bios_sensor.offset_den = 1;
        priv->bios_sensor.offset_constant = 0;
 
        priv->bios_sensor.thrs_fan_boost.temp = 90;
@@ -60,11 +56,6 @@ nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
        struct nouveau_therm_priv *priv = (void *)therm;
        struct nvbios_therm_sensor *s = &priv->bios_sensor;
 
-       if (!priv->bios_sensor.slope_div)
-               priv->bios_sensor.slope_div = 1;
-       if (!priv->bios_sensor.offset_den)
-               priv->bios_sensor.offset_den = 1;
-
        /* enforce a minimum hysteresis on thresholds */
        s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
        s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2);
@@ -106,16 +97,16 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
        const char *thresolds[] = {
                "fanboost", "downclock", "critical", "shutdown"
        };
-       uint8_t temperature = therm->temp_get(therm);
+       int temperature = therm->temp_get(therm);
 
        if (thrs < 0 || thrs > 3)
                return;
 
        if (dir == NOUVEAU_THERM_THRS_FALLING)
-               nv_info(therm, "temperature (%u C) went below the '%s' threshold\n",
+               nv_info(therm, "temperature (%i C) went below the '%s' threshold\n",
                        temperature, thresolds[thrs]);
        else
-               nv_info(therm, "temperature (%u C) hit the '%s' threshold\n",
+               nv_info(therm, "temperature (%i C) hit the '%s' threshold\n",
                        temperature, thresolds[thrs]);
 
        active = (dir == NOUVEAU_THERM_THRS_RISING);
@@ -123,7 +114,7 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
        case NOUVEAU_THERM_THRS_FANBOOST:
                if (active) {
                        nouveau_therm_fan_set(therm, true, 100);
-                       nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
+                       nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
                }
                break;
        case NOUVEAU_THERM_THRS_DOWNCLOCK:
@@ -202,7 +193,7 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
                                             NOUVEAU_THERM_THRS_SHUTDOWN);
 
        /* schedule the next poll in one second */
-       if (list_empty(&alarm->head))
+       if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
                ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
 
        spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
@@ -225,6 +216,17 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
        alarm_timer_callback(&priv->sensor.therm_poll_alarm);
 }
 
+void
+nouveau_therm_sensor_preinit(struct nouveau_therm *therm)
+{
+       const char *sensor_avail = "yes";
+
+       if (therm->temp_get(therm) < 0)
+               sensor_avail = "no";
+
+       nv_info(therm, "internal sensor: %s\n", sensor_avail);
+}
+
 int
 nouveau_therm_sensor_ctor(struct nouveau_therm *therm)
 {
index bb54098c6d970bd4c0b45582220490fb11b15426..936b442a6ab7f3f2e09ecea341e3105df51ed630 100644 (file)
@@ -402,8 +402,12 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
        struct drm_device *dev = dev_get_drvdata(d);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_therm *therm = nouveau_therm(drm->device);
+       int temp = therm->temp_get(therm);
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", therm->temp_get(therm) * 1000);
+       if (temp < 0)
+               return temp;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000);
 }
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
                                                  NULL, 0);
@@ -871,7 +875,12 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
                          nouveau_hwmon_get_pwm1_max,
                          nouveau_hwmon_set_pwm1_max, 0);
 
-static struct attribute *hwmon_attributes[] = {
+static struct attribute *hwmon_default_attributes[] = {
+       &sensor_dev_attr_name.dev_attr.attr,
+       &sensor_dev_attr_update_rate.dev_attr.attr,
+       NULL
+};
+static struct attribute *hwmon_temp_attributes[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
        &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
@@ -882,8 +891,6 @@ static struct attribute *hwmon_attributes[] = {
        &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
        &sensor_dev_attr_temp1_emergency.dev_attr.attr,
        &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
-       &sensor_dev_attr_name.dev_attr.attr,
-       &sensor_dev_attr_update_rate.dev_attr.attr,
        NULL
 };
 static struct attribute *hwmon_fan_rpm_attributes[] = {
@@ -898,8 +905,11 @@ static struct attribute *hwmon_pwm_fan_attributes[] = {
        NULL
 };
 
-static const struct attribute_group hwmon_attrgroup = {
-       .attrs = hwmon_attributes,
+static const struct attribute_group hwmon_default_attrgroup = {
+       .attrs = hwmon_default_attributes,
+};
+static const struct attribute_group hwmon_temp_attrgroup = {
+       .attrs = hwmon_temp_attributes,
 };
 static const struct attribute_group hwmon_fan_rpm_attrgroup = {
        .attrs = hwmon_fan_rpm_attributes,
@@ -931,13 +941,22 @@ nouveau_hwmon_init(struct drm_device *dev)
        }
        dev_set_drvdata(hwmon_dev, dev);
 
-       /* default sysfs entries */
-       ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup);
+       /* set the default attributes */
+       ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup);
        if (ret) {
                if (ret)
                        goto error;
        }
 
+       /* if the card has a working thermal sensor */
+       if (therm->temp_get(therm) >= 0) {
+               ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
+               if (ret) {
+                       if (ret)
+                               goto error;
+               }
+       }
+
        /* if the card has a pwm fan */
        /*XXX: incorrect, need better detection for this, some boards have
         *     the gpio entries for pwm fan control even when there's no
@@ -979,11 +998,10 @@ nouveau_hwmon_fini(struct drm_device *dev)
        struct nouveau_pm *pm = nouveau_pm(dev);
 
        if (pm->hwmon) {
-               sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
-               sysfs_remove_group(&pm->hwmon->kobj,
-                                  &hwmon_pwm_fan_attrgroup);
-               sysfs_remove_group(&pm->hwmon->kobj,
-                                  &hwmon_fan_rpm_attrgroup);
+               sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup);
+               sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup);
+               sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
+               sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
 
                hwmon_device_unregister(pm->hwmon);
        }
index 2db57990f65c3c7c6a3aca012b30384e36942876..7f0e6c3f37d1fae4983bd6d8270364e52f00fc30 100644 (file)
@@ -524,6 +524,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        swap_interval <<= 4;
        if (swap_interval == 0)
                swap_interval |= 0x100;
+       if (chan == NULL)
+               evo_sync(crtc->dev);
 
        push = evo_wait(sync, 128);
        if (unlikely(push == NULL))
@@ -586,8 +588,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                sync->addr ^= 0x10;
                sync->data++;
                FIRE_RING (chan);
-       } else {
-               evo_sync(crtc->dev);
        }
 
        /* queue the flip */
index d4c633e12863bb9c546d6eae81d3fb7f835303e9..27769e724b6de1d238fc402b844d056ed69bb4b8 100644 (file)
@@ -468,13 +468,19 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                    (rdev->pdev->device == 0x9907) ||
                    (rdev->pdev->device == 0x9908) ||
                    (rdev->pdev->device == 0x9909) ||
+                   (rdev->pdev->device == 0x990B) ||
+                   (rdev->pdev->device == 0x990C) ||
+                   (rdev->pdev->device == 0x990F) ||
                    (rdev->pdev->device == 0x9910) ||
-                   (rdev->pdev->device == 0x9917)) {
+                   (rdev->pdev->device == 0x9917) ||
+                   (rdev->pdev->device == 0x9999)) {
                        rdev->config.cayman.max_simds_per_se = 6;
                        rdev->config.cayman.max_backends_per_se = 2;
                } else if ((rdev->pdev->device == 0x9903) ||
                           (rdev->pdev->device == 0x9904) ||
                           (rdev->pdev->device == 0x990A) ||
+                          (rdev->pdev->device == 0x990D) ||
+                          (rdev->pdev->device == 0x990E) ||
                           (rdev->pdev->device == 0x9913) ||
                           (rdev->pdev->device == 0x9918)) {
                        rdev->config.cayman.max_simds_per_se = 4;
@@ -483,6 +489,9 @@ static void cayman_gpu_init(struct radeon_device *rdev)
                           (rdev->pdev->device == 0x9990) ||
                           (rdev->pdev->device == 0x9991) ||
                           (rdev->pdev->device == 0x9994) ||
+                          (rdev->pdev->device == 0x9995) ||
+                          (rdev->pdev->device == 0x9996) ||
+                          (rdev->pdev->device == 0x999A) ||
                           (rdev->pdev->device == 0x99A0)) {
                        rdev->config.cayman.max_simds_per_se = 3;
                        rdev->config.cayman.max_backends_per_se = 1;
@@ -616,11 +625,22 @@ static void cayman_gpu_init(struct radeon_device *rdev)
        WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
        WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
 
-       tmp = gb_addr_config & NUM_PIPES_MASK;
-       tmp = r6xx_remap_render_backend(rdev, tmp,
-                                       rdev->config.cayman.max_backends_per_se *
-                                       rdev->config.cayman.max_shader_engines,
-                                       CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+       if ((rdev->config.cayman.max_backends_per_se == 1) &&
+           (rdev->flags & RADEON_IS_IGP)) {
+               if ((disabled_rb_mask & 3) == 1) {
+                       /* RB0 disabled, RB1 enabled */
+                       tmp = 0x11111111;
+               } else {
+                       /* RB1 disabled, RB0 enabled */
+                       tmp = 0x00000000;
+               }
+       } else {
+               tmp = gb_addr_config & NUM_PIPES_MASK;
+               tmp = r6xx_remap_render_backend(rdev, tmp,
+                                               rdev->config.cayman.max_backends_per_se *
+                                               rdev->config.cayman.max_shader_engines,
+                                               CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+       }
        WREG32(GB_BACKEND_MAP, tmp);
 
        cgts_tcc_disable = 0xffff0000;
@@ -1771,6 +1791,7 @@ int cayman_resume(struct radeon_device *rdev)
 int cayman_suspend(struct radeon_device *rdev)
 {
        r600_audio_fini(rdev);
+       radeon_vm_manager_fini(rdev);
        cayman_cp_enable(rdev, false);
        cayman_dma_stop(rdev);
        evergreen_irq_suspend(rdev);
index bedda9caadd9fe7d43bca2539bf51fd5df669f1f..6e05a2e75a46c211bcd5b4236e7c094b1636c9a1 100644 (file)
@@ -122,10 +122,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
                goto out_cleanup;
        }
 
-       /* r100 doesn't have dma engine so skip the test */
-       /* also, VRAM-to-VRAM test doesn't make much sense for DMA */
-       /* skip it as well if domains are the same */
-       if ((rdev->asic->copy.dma) && (sdomain != ddomain)) {
+       if (rdev->asic->copy.dma) {
                time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
                                                RADEON_BENCHMARK_COPY_DMA, n);
                if (time < 0)
@@ -135,13 +132,15 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
                                                     sdomain, ddomain, "dma");
        }
 
-       time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
-                                       RADEON_BENCHMARK_COPY_BLIT, n);
-       if (time < 0)
-               goto out_cleanup;
-       if (time > 0)
-               radeon_benchmark_log_results(n, size, time,
-                                            sdomain, ddomain, "blit");
+       if (rdev->asic->copy.blit) {
+               time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
+                                               RADEON_BENCHMARK_COPY_BLIT, n);
+               if (time < 0)
+                       goto out_cleanup;
+               if (time > 0)
+                       radeon_benchmark_log_results(n, size, time,
+                                                    sdomain, ddomain, "blit");
+       }
 
 out_cleanup:
        if (sobj) {
index 9128120da04441621ec1050b8cb0ff236704b7cf..bafbe3216952510dc4736c823c08aa4e4de0a8c6 100644 (file)
@@ -4469,6 +4469,7 @@ int si_resume(struct radeon_device *rdev)
 
 int si_suspend(struct radeon_device *rdev)
 {
+       radeon_vm_manager_fini(rdev);
        si_cp_enable(rdev, false);
        cayman_dma_stop(rdev);
        si_irq_suspend(rdev);
index 92e47e5c956426878e0e69d80811831d9b92e483..c4388776f4e470ae527470fb47357c220bbf9624 100644 (file)
 #define USB_VENDOR_ID_MONTEREY         0x0566
 #define USB_DEVICE_ID_GENIUS_KB29E     0x3004
 
+#define USB_VENDOR_ID_MSI              0x1770
+#define USB_DEVICE_ID_MSI_GX680R_LED_PANEL     0xff00
+
 #define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400
 #define USB_DEVICE_ID_N_S_HARMONY      0xc359
 
 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001                0x3001
 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008                0x3008
 
+#define USB_VENDOR_ID_REALTEK          0x0bda
+#define USB_DEVICE_ID_REALTEK_READER   0x0152
+
 #define USB_VENDOR_ID_ROCCAT           0x1e7d
 #define USB_DEVICE_ID_ROCCAT_ARVO      0x30d4
 #define USB_DEVICE_ID_ROCCAT_ISKU      0x319c
index 7a1ebb867cf499596e9c4fe5b1a43860b86d4351..82e9211b3ca97cfec8f316d33870f22f6fac0617 100644 (file)
@@ -621,6 +621,7 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
 {
        struct mt_device *td = hid_get_drvdata(hid);
        __s32 quirks = td->mtclass.quirks;
+       struct input_dev *input = field->hidinput->input;
 
        if (hid->claimed & HID_CLAIMED_INPUT) {
                switch (usage->hid) {
@@ -670,13 +671,16 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
                        break;
 
                default:
+                       if (usage->type)
+                               input_event(input, usage->type, usage->code,
+                                               value);
                        return;
                }
 
                if (usage->usage_index + 1 == field->report_count) {
                        /* we only take into account the last report. */
                        if (usage->hid == td->last_slot_field)
-                               mt_complete_slot(td, field->hidinput->input);
+                               mt_complete_slot(td, input);
 
                        if (field->index == td->last_field_index
                                && td->num_received >= td->num_expected)
index e0e6abf1cd3bd9b3cfa3d7cc73a2247c84c31be4..19b8360f2330ddec85856e09605a1ce69e32d880 100644 (file)
@@ -73,6 +73,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
@@ -80,6 +81,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
index 668ff4721323a22842b94823d17505da6bf68b27..5cde94e56f17cf3105b9d84d5b5dd0359661821a 100644 (file)
@@ -25,7 +25,7 @@
     which contains this code, we don't worry about the wasted space.
 */
 
-#include <linux/hwmon.h>
+#include <linux/kernel.h>
 
 /* straight from the datasheet */
 #define LM75_TEMP_MIN (-55000)
index 46cde098c11c2fe993dea722f1dd37d0ec3c7498..e380c6eef3af6500edc94b3a8db668bb59dfa258 100644 (file)
@@ -4,7 +4,6 @@
 
 menuconfig I2C
        tristate "I2C support"
-       depends on !S390
        select RT_MUTEXES
        ---help---
          I2C (pronounce: I-squared-C) is a slow serial bus protocol used in
@@ -76,6 +75,7 @@ config I2C_HELPER_AUTO
 
 config I2C_SMBUS
        tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO
+       depends on GENERIC_HARDIRQS
        help
          Say Y here if you want support for SMBus extensions to the I2C
          specification. At the moment, the only supported extension is
index a3725de923848baba3c55f3067588f42759b5227..adfee98486b159843b38ff107c8ea9dd25189ed5 100644 (file)
@@ -114,7 +114,7 @@ config I2C_I801
 
 config I2C_ISCH
        tristate "Intel SCH SMBus 1.0"
-       depends on PCI
+       depends on PCI && GENERIC_HARDIRQS
        select LPC_SCH
        help
          Say Y here if you want to use SMBus controller on the Intel SCH
@@ -543,6 +543,7 @@ config I2C_NUC900
 
 config I2C_OCORES
        tristate "OpenCores I2C Controller"
+       depends on GENERIC_HARDIRQS
        help
          If you say yes to this option, support will be included for the
          OpenCores I2C controller. For details see
@@ -777,7 +778,7 @@ config I2C_DIOLAN_U2C
 
 config I2C_PARPORT
        tristate "Parallel port adapter"
-       depends on PARPORT
+       depends on PARPORT && GENERIC_HARDIRQS
        select I2C_ALGOBIT
        select I2C_SMBUS
        help
@@ -802,6 +803,7 @@ config I2C_PARPORT
 
 config I2C_PARPORT_LIGHT
        tristate "Parallel port adapter (light)"
+       depends on GENERIC_HARDIRQS
        select I2C_ALGOBIT
        select I2C_SMBUS
        help
index e9205ee8cf94cb5a822635346df8451e5d5155bc..130f02cc9d94a233eacce7857214d34278141939 100644 (file)
@@ -80,6 +80,7 @@
 /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
 #define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
 #define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
+#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
 
 #define ISMT_DESC_ENTRIES      32      /* number of descriptor entries */
 #define ISMT_MAX_RETRIES       3       /* number of SMBus retries to attempt */
@@ -185,6 +186,7 @@ struct ismt_priv {
 static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
        { 0, }
 };
 
index 36704e3ab3fa9a87cdd9ca53e825f80792c28732..b714776b6ddd1828258ee1a81d33728d1aba3c75 100644 (file)
@@ -411,7 +411,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
        int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE;
        u32 clk_divisor;
 
-       tegra_i2c_clock_enable(i2c_dev);
+       err = tegra_i2c_clock_enable(i2c_dev);
+       if (err < 0) {
+               dev_err(i2c_dev->dev, "Clock enable failed %d\n", err);
+               return err;
+       }
 
        tegra_periph_reset_assert(i2c_dev->div_clk);
        udelay(2);
@@ -628,7 +632,12 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
        if (i2c_dev->is_suspended)
                return -EBUSY;
 
-       tegra_i2c_clock_enable(i2c_dev);
+       ret = tegra_i2c_clock_enable(i2c_dev);
+       if (ret < 0) {
+               dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret);
+               return ret;
+       }
+
        for (i = 0; i < num; i++) {
                enum msg_end_type end_type = MSG_END_STOP;
                if (i < (num - 1)) {
index f3b8f9a6a89b65eb29050c591e181b908d0358f3..966a18a5d12d2397d2a43f25bcd22a65420cc5e4 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright (c) 2010 Ericsson AB.
  *
- * Author: Guenter Roeck <guenter.roeck@ericsson.com>
+ * Author: Guenter Roeck <linux@roeck-us.net>
  *
  * Derived from:
  *  pca954x.c
index 565bfb161c1a7d34098cb1c6ef7919aebe38f382..a3fde52840ca29b5c046e11533a86b2dcdad67b3 100644 (file)
@@ -1575,6 +1575,12 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 
        neigh = dst_neigh_lookup(ep->dst,
                        &ep->com.cm_id->remote_addr.sin_addr.s_addr);
+       if (!neigh) {
+               pr_err("%s - cannot alloc neigh.\n", __func__);
+               err = -ENOMEM;
+               goto fail4;
+       }
+
        /* get a l2t entry */
        if (neigh->dev->flags & IFF_LOOPBACK) {
                PDBG("%s LOOPBACK\n", __func__);
@@ -3053,6 +3059,12 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
        dst = &rt->dst;
        neigh = dst_neigh_lookup_skb(dst, skb);
 
+       if (!neigh) {
+               pr_err("%s - failed to allocate neigh!\n",
+                      __func__);
+               goto free_dst;
+       }
+
        if (neigh->dev->flags & IFF_LOOPBACK) {
                pdev = ip_dev_find(&init_net, iph->daddr);
                e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
index 17ba4f8bc12d0b19ec7d380dc6677796d79f12de..70b1808a08f4d12bf9d1668cb5bae06d1cd2e202 100644 (file)
@@ -186,8 +186,10 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
                                          wq->rq.memsize, &(wq->rq.dma_addr),
                                          GFP_KERNEL);
-       if (!wq->rq.queue)
+       if (!wq->rq.queue) {
+               ret = -ENOMEM;
                goto free_sq;
+       }
        PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
                __func__, wq->sq.queue,
                (unsigned long long)virt_to_phys(wq->sq.queue),
index 439c35d4a669978aba8820151f2f6c98f5a47558..ea93870266eb7fda30ddf502b5d233fb1116423b 100644 (file)
@@ -620,7 +620,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
                goto bail;
        }
 
-       opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
+       opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
        dev->opstats[opcode].n_bytes += tlen;
        dev->opstats[opcode].n_packets++;
 
index 8349f9c5064c042d568cf56619b76dd391410402..1e603a375069022bfed295f071f8c729b4ad826b 100644 (file)
@@ -1,7 +1,7 @@
 config INFINIBAND_QIB
-       tristate "QLogic PCIe HCA support"
+       tristate "Intel PCIe HCA support"
        depends on 64BIT
        ---help---
-       This is a low-level driver for QLogic PCIe QLE InfiniBand host
-       channel adapters.  This driver does not support the QLogic
+       This is a low-level driver for Intel PCIe QLE InfiniBand host
+       channel adapters.  This driver does not support the Intel
        HyperTransport card (model QHT7140).
index 5423edcab51f4404905e47674957cb32e5f33b9f..216092477dfcf7eb4a51187ffe825110c5eb3286 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
  * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -63,8 +64,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
                 "Attempt pre-IBTA 1.2 DDR speed negotiation");
 
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("QLogic <support@qlogic.com>");
-MODULE_DESCRIPTION("QLogic IB driver");
+MODULE_AUTHOR("Intel <ibsupport@intel.com>");
+MODULE_DESCRIPTION("Intel IB driver");
 MODULE_VERSION(QIB_DRIVER_VERSION);
 
 /*
index a099ac171e226f1073f4a70ccb680e669b2ff048..0232ae56b1fa2b185e1a551e074e5d256f15c087 100644 (file)
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
  * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
  * All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
@@ -51,7 +52,7 @@ static u32 qib_6120_iblink_state(u64);
 
 /*
  * This file contains all the chip-specific register information and
- * access functions for the QLogic QLogic_IB PCI-Express chip.
+ * access functions for the Intel Intel_IB PCI-Express chip.
  *
  */
 
index 50e33aa0b4e39628c5de95e314d57de68bbc4be5..173f805790da4522f3f4a8881b8b2df5883b9b4b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -1138,7 +1138,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
 static void qib_remove_one(struct pci_dev *);
 static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
 
-#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
+#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
 #define PFX QIB_DRV_NAME ": "
 
 static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
@@ -1355,7 +1355,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                dd = qib_init_iba6120_funcs(pdev, ent);
 #else
                qib_early_err(&pdev->dev,
-                       "QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
+                       "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
                        ent->device);
                dd = ERR_PTR(-ENODEV);
 #endif
@@ -1371,7 +1371,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        default:
                qib_early_err(&pdev->dev,
-                       "Failing on unknown QLogic deviceid 0x%x\n",
+                       "Failing on unknown Intel deviceid 0x%x\n",
                        ent->device);
                ret = -ENODEV;
        }
index 50a8a0d4fe676f5467c677d66a4985170900059d..08a6c6d39e5666228cd842b1597fc5d9dcf48052 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2013 Intel Corporation. All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -44,7 +44,7 @@
 #include "qib.h"
 #include "qib_7220.h"
 
-#define SD7220_FW_NAME "qlogic/sd7220.fw"
+#define SD7220_FW_NAME "intel/sd7220.fw"
 MODULE_FIRMWARE(SD7220_FW_NAME);
 
 /*
index ba51a4715a1dcdd5963261d84497dc1e6974c070..7c0ab16a2fe230fc31b44fd43b04f3e92d2a26b8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -2224,7 +2224,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
        ibdev->dma_ops = &qib_dma_mapping_ops;
 
        snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
-                "QLogic Infiniband HCA %s", init_utsname()->nodename);
+                "Intel Infiniband HCA %s", init_utsname()->nodename);
 
        ret = ib_register_device(ibdev, qib_create_port_files);
        if (ret)
index 67b0c1d23678d26565981cf9815993027c0a1b5e..1ef880de3a41d97fe328951069f3b6c30066daf1 100644 (file)
@@ -758,9 +758,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                if (++priv->tx_outstanding == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                                  tx->qp->qp_num);
-                       if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
-                               ipoib_warn(priv, "request notify on send CQ failed\n");
                        netif_stop_queue(dev);
+                       rc = ib_req_notify_cq(priv->send_cq,
+                               IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
+                       if (rc < 0)
+                               ipoib_warn(priv, "request notify on send CQ failed\n");
+                       else if (rc)
+                               ipoib_send_comp_handler(priv->send_cq, dev);
                }
        }
 }
index 7cd74e29cbc87a6495277ecd74d7135ebcd75ad3..9135606c86496511919a5238898aa98cf1c74996 100644 (file)
@@ -158,14 +158,10 @@ static unsigned int get_time_pit(void)
 #define GET_TIME(x)    rdtscl(x)
 #define DELTA(x,y)     ((y)-(x))
 #define TIME_NAME      "TSC"
-#elif defined(__alpha__)
+#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_TILE)
 #define GET_TIME(x)    do { x = get_cycles(); } while (0)
 #define DELTA(x,y)     ((y)-(x))
-#define TIME_NAME      "PCC"
-#elif defined(CONFIG_MN10300) || defined(CONFIG_TILE)
-#define GET_TIME(x)    do { x = get_cycles(); } while (0)
-#define DELTA(x, y)    ((x) - (y))
-#define TIME_NAME      "TSC"
+#define TIME_NAME      "get_cycles"
 #else
 #define FAKE_TIME
 static unsigned long analog_faketime = 0;
index 5c514d0711d1b57669b6acee551400bf88d44265..c332fb98480d28e42739e9b674b7b99ce8be427d 100644 (file)
@@ -130,7 +130,7 @@ config IRQ_REMAP
 # OMAP IOMMU support
 config OMAP_IOMMU
        bool "OMAP IOMMU Support"
-       depends on ARCH_OMAP
+       depends on ARCH_OMAP2PLUS
        select IOMMU_API
 
 config OMAP_IOVMM
index 98f555dafb55c8edc32393919ee48140d024b184..b287ca33833df792350baad6f82fefbe1494e038 100644 (file)
@@ -2466,18 +2466,16 @@ static int device_change_notifier(struct notifier_block *nb,
 
                /* allocate a protection domain if a device is added */
                dma_domain = find_protection_domain(devid);
-               if (dma_domain)
-                       goto out;
-               dma_domain = dma_ops_domain_alloc();
-               if (!dma_domain)
-                       goto out;
-               dma_domain->target_dev = devid;
-
-               spin_lock_irqsave(&iommu_pd_list_lock, flags);
-               list_add_tail(&dma_domain->list, &iommu_pd_list);
-               spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
-
-               dev_data = get_dev_data(dev);
+               if (!dma_domain) {
+                       dma_domain = dma_ops_domain_alloc();
+                       if (!dma_domain)
+                               goto out;
+                       dma_domain->target_dev = devid;
+
+                       spin_lock_irqsave(&iommu_pd_list_lock, flags);
+                       list_add_tail(&dma_domain->list, &iommu_pd_list);
+                       spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+               }
 
                dev->archdata.dma_ops = &amd_iommu_dma_ops;
 
index b6ecddb63cd0fc9397ccf24e0951dbf13ea9a719..e3c2d74b7684596790fd0293461dc74922a850ba 100644 (file)
@@ -980,7 +980,7 @@ static void __init free_iommu_all(void)
  *     BIOS should disable L2B micellaneous clock gating by setting
  *     L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
  */
-static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
+static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
 {
        u32 value;
 
index d56f8c17c5fe4dcc2e4db23085d4914b977c0fd0..7c11ff368d07f88dc357dd3f6e22c9fbe2ed6cc1 100644 (file)
@@ -2,7 +2,6 @@
 #include <linux/cpumask.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
-#include <linux/cpumask.h>
 #include <linux/errno.h>
 #include <linux/msi.h>
 #include <linux/irq.h>
index 5313c9ea44dc906acc7ce2069693bbc94c5be260..d9edcc94c2a878d2636f49ed7e7b682838749266 100644 (file)
@@ -237,7 +237,8 @@ config HISAX_MIC
 
 config HISAX_NETJET
        bool "NETjet card"
-       depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
+       depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
+       depends on VIRT_TO_BUS
        help
          This enables HiSax support for the NetJet from Traverse
          Technologies.
@@ -248,7 +249,8 @@ config HISAX_NETJET
 
 config HISAX_NETJET_U
        bool "NETspider U card"
-       depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
+       depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN)))
+       depends on VIRT_TO_BUS
        help
          This enables HiSax support for the Netspider U interface ISDN card
          from Traverse Technologies.
index 3c955e10a618c2487422efa8063d1f9924a483cc..c6083132c4b8ccaf21c7addb61cf8596165bf915 100644 (file)
@@ -1025,6 +1025,8 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
 {
        struct blk_plug plug;
 
+       BUG_ON(dm_bufio_in_request());
+
        blk_start_plug(&plug);
        dm_bufio_lock(c);
 
index fbd3625f27480f2611d81eed181d13f11b3ab7a1..83e995fece88c1330335c85a23ef42b2ed54a1f4 100644 (file)
@@ -83,6 +83,8 @@ struct cache_disk_superblock {
        __le32 read_misses;
        __le32 write_hits;
        __le32 write_misses;
+
+       __le32 policy_version[CACHE_POLICY_VERSION_SIZE];
 } __packed;
 
 struct dm_cache_metadata {
@@ -109,6 +111,7 @@ struct dm_cache_metadata {
        bool clean_when_opened:1;
 
        char policy_name[CACHE_POLICY_NAME_SIZE];
+       unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
        size_t policy_hint_size;
        struct dm_cache_statistics stats;
 };
@@ -268,7 +271,8 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
        memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
        disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
        disk_super->version = cpu_to_le32(CACHE_VERSION);
-       memset(disk_super->policy_name, 0, CACHE_POLICY_NAME_SIZE);
+       memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
+       memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
        disk_super->policy_hint_size = 0;
 
        r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
@@ -284,7 +288,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
        disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
        disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
        disk_super->cache_blocks = cpu_to_le32(0);
-       memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
 
        disk_super->read_hits = cpu_to_le32(0);
        disk_super->read_misses = cpu_to_le32(0);
@@ -478,6 +481,9 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd,
        cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
        cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
        strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
+       cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
+       cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
+       cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
        cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
 
        cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
@@ -572,6 +578,9 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
        disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
        disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
        strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
+       disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
+       disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
+       disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
 
        disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
        disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
@@ -854,18 +863,43 @@ struct thunk {
        bool hints_valid;
 };
 
+static bool policy_unchanged(struct dm_cache_metadata *cmd,
+                            struct dm_cache_policy *policy)
+{
+       const char *policy_name = dm_cache_policy_get_name(policy);
+       const unsigned *policy_version = dm_cache_policy_get_version(policy);
+       size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
+
+       /*
+        * Ensure policy names match.
+        */
+       if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
+               return false;
+
+       /*
+        * Ensure policy major versions match.
+        */
+       if (cmd->policy_version[0] != policy_version[0])
+               return false;
+
+       /*
+        * Ensure policy hint sizes match.
+        */
+       if (cmd->policy_hint_size != policy_hint_size)
+               return false;
+
+       return true;
+}
+
 static bool hints_array_initialized(struct dm_cache_metadata *cmd)
 {
        return cmd->hint_root && cmd->policy_hint_size;
 }
 
 static bool hints_array_available(struct dm_cache_metadata *cmd,
-                                 const char *policy_name)
+                                 struct dm_cache_policy *policy)
 {
-       bool policy_names_match = !strncmp(cmd->policy_name, policy_name,
-                                          sizeof(cmd->policy_name));
-
-       return cmd->clean_when_opened && policy_names_match &&
+       return cmd->clean_when_opened && policy_unchanged(cmd, policy) &&
                hints_array_initialized(cmd);
 }
 
@@ -899,7 +933,8 @@ static int __load_mapping(void *context, uint64_t cblock, void *leaf)
        return r;
 }
 
-static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+static int __load_mappings(struct dm_cache_metadata *cmd,
+                          struct dm_cache_policy *policy,
                           load_mapping_fn fn, void *context)
 {
        struct thunk thunk;
@@ -909,18 +944,19 @@ static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_nam
 
        thunk.cmd = cmd;
        thunk.respect_dirty_flags = cmd->clean_when_opened;
-       thunk.hints_valid = hints_array_available(cmd, policy_name);
+       thunk.hints_valid = hints_array_available(cmd, policy);
 
        return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk);
 }
 
-int dm_cache_load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
+                          struct dm_cache_policy *policy,
                           load_mapping_fn fn, void *context)
 {
        int r;
 
        down_read(&cmd->root_lock);
-       r = __load_mappings(cmd, policy_name, fn, context);
+       r = __load_mappings(cmd, policy, fn, context);
        up_read(&cmd->root_lock);
 
        return r;
@@ -979,7 +1015,7 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty
                /* nothing to be done */
                return 0;
 
-       value = pack_value(oblock, flags | (dirty ? M_DIRTY : 0));
+       value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
        __dm_bless_for_disk(&value);
 
        r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
@@ -1070,13 +1106,15 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
        __le32 value;
        size_t hint_size;
        const char *policy_name = dm_cache_policy_get_name(policy);
+       const unsigned *policy_version = dm_cache_policy_get_version(policy);
 
        if (!policy_name[0] ||
            (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
                return -EINVAL;
 
-       if (strcmp(cmd->policy_name, policy_name)) {
+       if (!policy_unchanged(cmd, policy)) {
                strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
+               memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
 
                hint_size = dm_cache_policy_get_hint_size(policy);
                if (!hint_size)
index 135864ea0eee18343853d950e68a96a227a53622..f45cef21f3d0dac7f5437aac6edf5c2551bd0183 100644 (file)
@@ -89,7 +89,7 @@ typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
                               dm_cblock_t cblock, bool dirty,
                               uint32_t hint, bool hint_valid);
 int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
-                          const char *policy_name,
+                          struct dm_cache_policy *policy,
                           load_mapping_fn fn,
                           void *context);
 
index cc05d70b3cb89a4f8fdcb8cf1887baadca365323..b04d1f904d0763c43870e38c857fcd25baad4b7e 100644 (file)
@@ -17,7 +17,6 @@
 /*----------------------------------------------------------------*/
 
 #define DM_MSG_PREFIX "cache cleaner"
-#define CLEANER_VERSION "1.0.0"
 
 /* Cache entry struct. */
 struct wb_cache_entry {
@@ -434,6 +433,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
 
 static struct dm_cache_policy_type wb_policy_type = {
        .name = "cleaner",
+       .version = {1, 0, 0},
        .hint_size = 0,
        .owner = THIS_MODULE,
        .create = wb_create
@@ -446,7 +446,10 @@ static int __init wb_init(void)
        if (r < 0)
                DMERR("register failed %d", r);
        else
-               DMINFO("version " CLEANER_VERSION " loaded");
+               DMINFO("version %u.%u.%u loaded",
+                      wb_policy_type.version[0],
+                      wb_policy_type.version[1],
+                      wb_policy_type.version[2]);
 
        return r;
 }
index 52a75beeced59a4ac50a27a7b646bce3d194e668..0928abdc49f023de5a696dd082e160bc65d37cdd 100644 (file)
@@ -117,6 +117,8 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p);
  */
 const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
 
+const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p);
+
 size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
 
 /*----------------------------------------------------------------*/
index 964153255076bad8a1a24765ec9b7f884af4cd4b..dc112a7137fe9280fca348908ed99b77f36f9417 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/vmalloc.h>
 
 #define DM_MSG_PREFIX "cache-policy-mq"
-#define MQ_VERSION     "1.0.0"
 
 static struct kmem_cache *mq_entry_cache;
 
@@ -1133,6 +1132,7 @@ bad_cache_alloc:
 
 static struct dm_cache_policy_type mq_policy_type = {
        .name = "mq",
+       .version = {1, 0, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = mq_create
@@ -1140,6 +1140,7 @@ static struct dm_cache_policy_type mq_policy_type = {
 
 static struct dm_cache_policy_type default_policy_type = {
        .name = "default",
+       .version = {1, 0, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = mq_create
@@ -1164,7 +1165,10 @@ static int __init mq_init(void)
 
        r = dm_cache_policy_register(&default_policy_type);
        if (!r) {
-               DMINFO("version " MQ_VERSION " loaded");
+               DMINFO("version %u.%u.%u loaded",
+                      mq_policy_type.version[0],
+                      mq_policy_type.version[1],
+                      mq_policy_type.version[2]);
                return 0;
        }
 
index 2cbf5fdaac52e8573eed195fe17330c456243bd6..21c03c570c06b7d2bac4bf1abc1d41147d7cd168 100644 (file)
@@ -150,6 +150,14 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
 }
 EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
 
+const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p)
+{
+       struct dm_cache_policy_type *t = p->private;
+
+       return t->version;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_get_version);
+
 size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p)
 {
        struct dm_cache_policy_type *t = p->private;
index f0f51b260544decc116a193fc2a2a45ede286a78..558bdfdabf5f2da39e5bef487dc5008a2399b659 100644 (file)
@@ -196,6 +196,7 @@ struct dm_cache_policy {
  * We maintain a little register of the different policy types.
  */
 #define CACHE_POLICY_NAME_SIZE 16
+#define CACHE_POLICY_VERSION_SIZE 3
 
 struct dm_cache_policy_type {
        /* For use by the register code only. */
@@ -206,6 +207,7 @@ struct dm_cache_policy_type {
         * what gets passed on the target line to select your policy.
         */
        char name[CACHE_POLICY_NAME_SIZE];
+       unsigned version[CACHE_POLICY_VERSION_SIZE];
 
        /*
         * Policies may store a hint for each each cache block.
index 0f4e84b15c303c0de380dbeb6a683f693b1b0592..66120bd46d15639613cdeaf09b4474db48550845 100644 (file)
@@ -142,6 +142,7 @@ struct cache {
        spinlock_t lock;
        struct bio_list deferred_bios;
        struct bio_list deferred_flush_bios;
+       struct bio_list deferred_writethrough_bios;
        struct list_head quiesced_migrations;
        struct list_head completed_migrations;
        struct list_head need_commit_migrations;
@@ -158,7 +159,7 @@ struct cache {
        /*
         * origin_blocks entries, discarded if set.
         */
-       sector_t discard_block_size; /* a power of 2 times sectors per block */
+       uint32_t discard_block_size; /* a power of 2 times sectors per block */
        dm_dblock_t discard_nr_blocks;
        unsigned long *discard_bitset;
 
@@ -199,6 +200,11 @@ struct per_bio_data {
        bool tick:1;
        unsigned req_nr:2;
        struct dm_deferred_entry *all_io_entry;
+
+       /* writethrough fields */
+       struct cache *cache;
+       dm_cblock_t cblock;
+       bio_end_io_t *saved_bi_end_io;
 };
 
 struct dm_cache_migration {
@@ -412,17 +418,24 @@ static bool block_size_is_power_of_two(struct cache *cache)
        return cache->sectors_per_block_shift >= 0;
 }
 
+static dm_block_t block_div(dm_block_t b, uint32_t n)
+{
+       do_div(b, n);
+
+       return b;
+}
+
 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
 {
-       sector_t discard_blocks = cache->discard_block_size;
+       uint32_t discard_blocks = cache->discard_block_size;
        dm_block_t b = from_oblock(oblock);
 
        if (!block_size_is_power_of_two(cache))
-               (void) sector_div(discard_blocks, cache->sectors_per_block);
+               discard_blocks = discard_blocks / cache->sectors_per_block;
        else
                discard_blocks >>= cache->sectors_per_block_shift;
 
-       (void) sector_div(b, discard_blocks);
+       b = block_div(b, discard_blocks);
 
        return to_dblock(b);
 }
@@ -609,6 +622,56 @@ static void issue(struct cache *cache, struct bio *bio)
        spin_unlock_irqrestore(&cache->lock, flags);
 }
 
+static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&cache->lock, flags);
+       bio_list_add(&cache->deferred_writethrough_bios, bio);
+       spin_unlock_irqrestore(&cache->lock, flags);
+
+       wake_worker(cache);
+}
+
+static void writethrough_endio(struct bio *bio, int err)
+{
+       struct per_bio_data *pb = get_per_bio_data(bio);
+       bio->bi_end_io = pb->saved_bi_end_io;
+
+       if (err) {
+               bio_endio(bio, err);
+               return;
+       }
+
+       remap_to_cache(pb->cache, bio, pb->cblock);
+
+       /*
+        * We can't issue this bio directly, since we're in interrupt
+        * context.  So it gets put on a bio list for processing by the
+        * worker thread.
+        */
+       defer_writethrough_bio(pb->cache, bio);
+}
+
+/*
+ * When running in writethrough mode we need to send writes to clean blocks
+ * to both the cache and origin devices.  In future we'd like to clone the
+ * bio and send them in parallel, but for now we're doing them in
+ * series as this is easier.
+ */
+static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
+                                      dm_oblock_t oblock, dm_cblock_t cblock)
+{
+       struct per_bio_data *pb = get_per_bio_data(bio);
+
+       pb->cache = cache;
+       pb->cblock = cblock;
+       pb->saved_bi_end_io = bio->bi_end_io;
+       bio->bi_end_io = writethrough_endio;
+
+       remap_to_origin_clear_discard(pb->cache, bio, oblock);
+}
+
 /*----------------------------------------------------------------
  * Migration processing
  *
@@ -1002,7 +1065,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio)
        dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
        dm_block_t b;
 
-       (void) sector_div(end_block, cache->discard_block_size);
+       end_block = block_div(end_block, cache->discard_block_size);
 
        for (b = start_block; b < end_block; b++)
                set_discard(cache, to_dblock(b));
@@ -1070,14 +1133,9 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
                inc_hit_counter(cache, bio);
                pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 
-               if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
-                       /*
-                        * No need to mark anything dirty in write through mode.
-                        */
-                       pb->req_nr == 0 ?
-                               remap_to_cache(cache, bio, lookup_result.cblock) :
-                               remap_to_origin_clear_discard(cache, bio, block);
-               } else
+               if (is_writethrough_io(cache, bio, lookup_result.cblock))
+                       remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+               else
                        remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
 
                issue(cache, bio);
@@ -1086,17 +1144,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
        case POLICY_MISS:
                inc_miss_counter(cache, bio);
                pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
-
-               if (pb->req_nr != 0) {
-                       /*
-                        * This is a duplicate writethrough io that is no
-                        * longer needed because the block has been demoted.
-                        */
-                       bio_endio(bio, 0);
-               } else {
-                       remap_to_origin_clear_discard(cache, bio, block);
-                       issue(cache, bio);
-               }
+               remap_to_origin_clear_discard(cache, bio, block);
+               issue(cache, bio);
                break;
 
        case POLICY_NEW:
@@ -1217,6 +1266,23 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
                submit_bios ? generic_make_request(bio) : bio_io_error(bio);
 }
 
+static void process_deferred_writethrough_bios(struct cache *cache)
+{
+       unsigned long flags;
+       struct bio_list bios;
+       struct bio *bio;
+
+       bio_list_init(&bios);
+
+       spin_lock_irqsave(&cache->lock, flags);
+       bio_list_merge(&bios, &cache->deferred_writethrough_bios);
+       bio_list_init(&cache->deferred_writethrough_bios);
+       spin_unlock_irqrestore(&cache->lock, flags);
+
+       while ((bio = bio_list_pop(&bios)))
+               generic_make_request(bio);
+}
+
 static void writeback_some_dirty_blocks(struct cache *cache)
 {
        int r = 0;
@@ -1313,6 +1379,7 @@ static int more_work(struct cache *cache)
        else
                return !bio_list_empty(&cache->deferred_bios) ||
                        !bio_list_empty(&cache->deferred_flush_bios) ||
+                       !bio_list_empty(&cache->deferred_writethrough_bios) ||
                        !list_empty(&cache->quiesced_migrations) ||
                        !list_empty(&cache->completed_migrations) ||
                        !list_empty(&cache->need_commit_migrations);
@@ -1331,6 +1398,8 @@ static void do_worker(struct work_struct *ws)
 
                writeback_some_dirty_blocks(cache);
 
+               process_deferred_writethrough_bios(cache);
+
                if (commit_if_needed(cache)) {
                        process_deferred_flush_bios(cache, false);
 
@@ -1756,8 +1825,11 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
        }
 
        r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
-       if (r)
+       if (r) {
+               *error = "Error setting cache policy's config values";
                dm_cache_policy_destroy(cache->policy);
+               cache->policy = NULL;
+       }
 
        return r;
 }
@@ -1793,8 +1865,6 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
 
 #define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
 
-static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio);
-
 static int cache_create(struct cache_args *ca, struct cache **result)
 {
        int r = 0;
@@ -1821,9 +1891,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
        memcpy(&cache->features, &ca->features, sizeof(cache->features));
 
-       if (cache->features.write_through)
-               ti->num_write_bios = cache_num_write_bios;
-
        cache->callbacks.congested_fn = cache_is_congested;
        dm_table_add_target_callbacks(ti->table, &cache->callbacks);
 
@@ -1835,7 +1902,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
        /* FIXME: factor out this whole section */
        origin_blocks = cache->origin_sectors = ca->origin_sectors;
-       (void) sector_div(origin_blocks, ca->block_size);
+       origin_blocks = block_div(origin_blocks, ca->block_size);
        cache->origin_blocks = to_oblock(origin_blocks);
 
        cache->sectors_per_block = ca->block_size;
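
sector_div() divides its first argument in place and returns the remainder, which is why the old calls had to cast the result to void. block_div() is presumably a small wrapper introduced elsewhere in this patch that hands back the quotient, so the assignments above read naturally; a sketch of such a wrapper:

/*
 * Sketch of the assumed block_div() helper: divide in place with
 * sector_div(), discard the remainder, return the quotient.
 */
static dm_block_t block_div(dm_block_t b, uint32_t n)
{
	(void) sector_div(b, n);	/* b now holds b / n */

	return b;
}
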
@@ -1848,7 +1915,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
                dm_block_t cache_size = ca->cache_sectors;
 
                cache->sectors_per_block_shift = -1;
-               (void) sector_div(cache_size, ca->block_size);
+               cache_size = block_div(cache_size, ca->block_size);
                cache->cache_size = to_cblock(cache_size);
        } else {
                cache->sectors_per_block_shift = __ffs(ca->block_size);
@@ -1873,6 +1940,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        spin_lock_init(&cache->lock);
        bio_list_init(&cache->deferred_bios);
        bio_list_init(&cache->deferred_flush_bios);
+       bio_list_init(&cache->deferred_writethrough_bios);
        INIT_LIST_HEAD(&cache->quiesced_migrations);
        INIT_LIST_HEAD(&cache->completed_migrations);
        INIT_LIST_HEAD(&cache->need_commit_migrations);
@@ -2002,6 +2070,8 @@ static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
                goto out;
 
        r = cache_create(ca, &cache);
+       if (r)
+               goto out;
 
        r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
        if (r) {
@@ -2016,20 +2086,6 @@ out:
        return r;
 }
 
-static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio)
-{
-       int r;
-       struct cache *cache = ti->private;
-       dm_oblock_t block = get_bio_block(cache, bio);
-       dm_cblock_t cblock;
-
-       r = policy_lookup(cache->policy, block, &cblock);
-       if (r < 0)
-               return 2;       /* assume the worst */
-
-       return (!r && !is_dirty(cache, cblock)) ? 2 : 1;
-}
-
 static int cache_map(struct dm_target *ti, struct bio *bio)
 {
        struct cache *cache = ti->private;
@@ -2097,18 +2153,12 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
                inc_hit_counter(cache, bio);
                pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 
-               if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
-                       /*
-                        * No need to mark anything dirty in write through mode.
-                        */
-                       pb->req_nr == 0 ?
-                               remap_to_cache(cache, bio, lookup_result.cblock) :
-                               remap_to_origin_clear_discard(cache, bio, block);
-                       cell_defer(cache, cell, false);
-               } else {
+               if (is_writethrough_io(cache, bio, lookup_result.cblock))
+                       remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
+               else
                        remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
-                       cell_defer(cache, cell, false);
-               }
+
+               cell_defer(cache, cell, false);
                break;
 
        case POLICY_MISS:
@@ -2319,8 +2369,7 @@ static int cache_preresume(struct dm_target *ti)
        }
 
        if (!cache->loaded_mappings) {
-               r = dm_cache_load_mappings(cache->cmd,
-                                          dm_cache_policy_get_name(cache->policy),
+               r = dm_cache_load_mappings(cache->cmd, cache->policy,
                                           load_mapping, cache);
                if (r) {
                        DMERR("could not load cache mappings");
@@ -2535,7 +2584,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type cache_target = {
        .name = "cache",
-       .version = {1, 0, 0},
+       .version = {1, 1, 0},
        .module = THIS_MODULE,
        .ctr = cache_ctr,
        .dtr = cache_dtr,
index 009339d628287e9ab124d664677ffa69864732f1..004ad1652b73477dae0194b957842434cf48c38d 100644 (file)
@@ -1577,6 +1577,11 @@ static bool data_dev_supports_discard(struct pool_c *pt)
        return q && blk_queue_discard(q);
 }
 
+static bool is_factor(sector_t block_size, uint32_t n)
+{
+       return !sector_div(block_size, n);
+}
+
 /*
  * If discard_passdown was enabled verify that the data device
  * supports discards.  Disable discard_passdown if not.
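
is_factor() replaces a bitmask test that silently assumed the discard granularity is a power of two. A worked example, with values chosen purely for illustration:

/*
 * A 24-sector granularity divides a 48-sector block evenly, but 24 is not
 * a power of two, so the old test rejected it:
 *
 *   48 & (24 - 1) == 48 & 23 == 16   (non-zero: wrongly "not a factor")
 *   48 mod 24     == 0               (is_factor(): correctly a factor)
 */
static void is_factor_example(void)
{
	sector_t block_size = 48;
	uint32_t granularity = 24;

	bool old_rejects = block_size & (granularity - 1);	/* true, wrongly  */
	bool new_accepts = is_factor(block_size, granularity);	/* true, rightly  */

	(void) old_rejects;
	(void) new_accepts;
}
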
@@ -1602,7 +1607,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
        else if (data_limits->discard_granularity > block_size)
                reason = "discard granularity larger than a block";
 
-       else if (block_size & (data_limits->discard_granularity - 1))
+       else if (!is_factor(block_size, data_limits->discard_granularity))
                reason = "discard granularity not a factor of block size";
 
        if (reason) {
@@ -2544,7 +2549,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 6, 1},
+       .version = {1, 7, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
@@ -2831,7 +2836,7 @@ static int thin_iterate_devices(struct dm_target *ti,
 
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 7, 1},
+       .version = {1, 8, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
index 6ad538375c3c82ba0d345d2ccab3955b31ab3f30..a746f1d21c661bec7b809c9a688fddf44284581f 100644 (file)
@@ -93,6 +93,13 @@ struct dm_verity_io {
         */
 };
 
+struct dm_verity_prefetch_work {
+       struct work_struct work;
+       struct dm_verity *v;
+       sector_t block;
+       unsigned n_blocks;
+};
+
 static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io)
 {
        return (struct shash_desc *)(io + 1);
@@ -424,15 +431,18 @@ static void verity_end_io(struct bio *bio, int error)
  * The root buffer is not prefetched, it is assumed that it will be cached
  * all the time.
  */
-static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io)
+static void verity_prefetch_io(struct work_struct *work)
 {
+       struct dm_verity_prefetch_work *pw =
+               container_of(work, struct dm_verity_prefetch_work, work);
+       struct dm_verity *v = pw->v;
        int i;
 
        for (i = v->levels - 2; i >= 0; i--) {
                sector_t hash_block_start;
                sector_t hash_block_end;
-               verity_hash_at_level(v, io->block, i, &hash_block_start, NULL);
-               verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL);
+               verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
+               verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
                if (!i) {
                        unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);
 
@@ -452,6 +462,25 @@ no_prefetch_cluster:
                dm_bufio_prefetch(v->bufio, hash_block_start,
                                  hash_block_end - hash_block_start + 1);
        }
+
+       kfree(pw);
+}
+
+static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
+{
+       struct dm_verity_prefetch_work *pw;
+
+       pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
+               GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+
+       if (!pw)
+               return;
+
+       INIT_WORK(&pw->work, verity_prefetch_io);
+       pw->v = v;
+       pw->block = io->block;
+       pw->n_blocks = io->n_blocks;
+       queue_work(v->verify_wq, &pw->work);
 }
 
 /*
@@ -498,7 +527,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
        memcpy(io->io_vec, bio_iovec(bio),
               io->io_vec_size * sizeof(struct bio_vec));
 
-       verity_prefetch_io(v, io);
+       verity_submit_prefetch(v, io);
 
        generic_make_request(bio);
 
@@ -858,7 +887,7 @@ bad:
 
 static struct target_type verity_target = {
        .name           = "verity",
-       .version        = {1, 1, 1},
+       .version        = {1, 2, 0},
        .module         = THIS_MODULE,
        .ctr            = verity_ctr,
        .dtr            = verity_dtr,
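
The kmalloc() in verity_submit_prefetch() above encodes the best-effort nature of the prefetch in its flag combination; the same call, annotated (a restatement for clarity, not new behaviour):

	pw = kmalloc(sizeof(*pw),
		     GFP_NOIO		/* reclaim must not recurse into block I/O      */
		     | __GFP_NORETRY	/* fail fast instead of looping in the allocator */
		     | __GFP_NOMEMALLOC	/* never dip into emergency reserves             */
		     | __GFP_NOWARN);	/* a skipped prefetch is not worth a warning     */
	if (!pw)
		return;			/* prefetch is only an optimisation; just skip it */
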
index fcb878f88796c8ab884da058b36ea5f287fe2eab..aeceedfc530b90b5da5b45e12ffdc9e4d665e16d 100644 (file)
@@ -7663,10 +7663,8 @@ static int remove_and_add_spares(struct mddev *mddev)
                                removed++;
                        }
                }
-       if (removed)
-               sysfs_notify(&mddev->kobj, NULL,
-                            "degraded");
-
+       if (removed && mddev->kobj.sd)
+               sysfs_notify(&mddev->kobj, NULL, "degraded");
 
        rdev_for_each(rdev, mddev) {
                if (rdev->raid_disk >= 0 &&
index eca59c3074ef65bee75301f92411d9968adbaf9d..d90fb1a879e1902200c63117cc9465781a5eb871 100644 (file)
@@ -506,7 +506,7 @@ static inline char * mdname (struct mddev * mddev)
 static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
 {
        char nm[20];
-       if (!test_bit(Replacement, &rdev->flags)) {
+       if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
                sprintf(nm, "rd%d", rdev->raid_disk);
                return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
        } else
@@ -516,7 +516,7 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
 static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
 {
        char nm[20];
-       if (!test_bit(Replacement, &rdev->flags)) {
+       if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
                sprintf(nm, "rd%d", rdev->raid_disk);
                sysfs_remove_link(&mddev->kobj, nm);
        }
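
Both helpers now test mddev->kobj.sd, which is only non-NULL while the mddev kobject is actually registered in sysfs; arrays driven without that registration (presumably the dm-raid case) would otherwise issue sysfs operations on an uninitialised kobject. The same guard pattern as used in md.c above:

	/* Only touch sysfs if the mddev kobject has a sysfs dirent. */
	if (mddev->kobj.sd)
		sysfs_notify(&mddev->kobj, NULL, "degraded");
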
index c4f28133ef829340d966393f7e6d9eba6857111d..b88757cd0d1d93728d27d1c1366edf0c45479daf 100644 (file)
@@ -139,15 +139,8 @@ struct child {
        struct btree_node *n;
 };
 
-static struct dm_btree_value_type le64_type = {
-       .context = NULL,
-       .size = sizeof(__le64),
-       .inc = NULL,
-       .dec = NULL,
-       .equal = NULL
-};
-
-static int init_child(struct dm_btree_info *info, struct btree_node *parent,
+static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
+                     struct btree_node *parent,
                      unsigned index, struct child *result)
 {
        int r, inc;
@@ -164,7 +157,7 @@ static int init_child(struct dm_btree_info *info, struct btree_node *parent,
        result->n = dm_block_data(result->block);
 
        if (inc)
-               inc_children(info->tm, result->n, &le64_type);
+               inc_children(info->tm, result->n, vt);
 
        *((__le64 *) value_ptr(parent, index)) =
                cpu_to_le64(dm_block_location(result->block));
@@ -236,7 +229,7 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
 }
 
 static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
-                     unsigned left_index)
+                     struct dm_btree_value_type *vt, unsigned left_index)
 {
        int r;
        struct btree_node *parent;
@@ -244,11 +237,11 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
 
        parent = dm_block_data(shadow_current(s));
 
-       r = init_child(info, parent, left_index, &left);
+       r = init_child(info, vt, parent, left_index, &left);
        if (r)
                return r;
 
-       r = init_child(info, parent, left_index + 1, &right);
+       r = init_child(info, vt, parent, left_index + 1, &right);
        if (r) {
                exit_child(info, &left);
                return r;
@@ -368,7 +361,7 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
 }
 
 static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
-                     unsigned left_index)
+                     struct dm_btree_value_type *vt, unsigned left_index)
 {
        int r;
        struct btree_node *parent = dm_block_data(shadow_current(s));
@@ -377,17 +370,17 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
        /*
         * FIXME: fill out an array?
         */
-       r = init_child(info, parent, left_index, &left);
+       r = init_child(info, vt, parent, left_index, &left);
        if (r)
                return r;
 
-       r = init_child(info, parent, left_index + 1, &center);
+       r = init_child(info, vt, parent, left_index + 1, &center);
        if (r) {
                exit_child(info, &left);
                return r;
        }
 
-       r = init_child(info, parent, left_index + 2, &right);
+       r = init_child(info, vt, parent, left_index + 2, &right);
        if (r) {
                exit_child(info, &left);
                exit_child(info, &center);
@@ -434,7 +427,8 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
 }
 
 static int rebalance_children(struct shadow_spine *s,
-                             struct dm_btree_info *info, uint64_t key)
+                             struct dm_btree_info *info,
+                             struct dm_btree_value_type *vt, uint64_t key)
 {
        int i, r, has_left_sibling, has_right_sibling;
        uint32_t child_entries;
@@ -472,13 +466,13 @@ static int rebalance_children(struct shadow_spine *s,
        has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);
 
        if (!has_left_sibling)
-               r = rebalance2(s, info, i);
+               r = rebalance2(s, info, vt, i);
 
        else if (!has_right_sibling)
-               r = rebalance2(s, info, i - 1);
+               r = rebalance2(s, info, vt, i - 1);
 
        else
-               r = rebalance3(s, info, i - 1);
+               r = rebalance3(s, info, vt, i - 1);
 
        return r;
 }
@@ -529,7 +523,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
                if (le32_to_cpu(n->header.flags) & LEAF_NODE)
                        return do_leaf(n, key, index);
 
-               r = rebalance_children(s, info, key);
+               r = rebalance_children(s, info, vt, key);
                if (r)
                        break;
 
@@ -550,6 +544,14 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
        return r;
 }
 
+static struct dm_btree_value_type le64_type = {
+       .context = NULL,
+       .size = sizeof(__le64),
+       .inc = NULL,
+       .dec = NULL,
+       .equal = NULL
+};
+
 int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
                    uint64_t *keys, dm_block_t *new_root)
 {
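
Passing the caller's dm_btree_value_type down to init_child() matters because inc_children() must take extra references on the entries of a shadowed, shared child, and for leaf values only the caller knows how; le64_type, moved below so it sits next to dm_btree_remove() which still uses it, has no inc callback, which is only correct for internal nodes. A sketch of the kind of value type a btree user might register, with illustrative names and without dm-thin's actual value packing:

/*
 * Illustrative only: a value type whose inc callback forwards to a space
 * map, so that shadowing a shared leaf takes an extra reference on every
 * data block it points at.
 */
static void data_block_inc(void *context, const void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v;

	memcpy(&v, value_le, sizeof(v));
	dm_sm_inc_block(sm, le64_to_cpu(v));
}

static struct dm_btree_value_type data_vt = {
	.context = NULL,	/* set to the data space map at runtime */
	.size	 = sizeof(__le64),
	.inc	 = data_block_inc,
	.dec	 = NULL,	/* matching dec callback omitted from this sketch */
	.equal	 = NULL,
};
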
index 3ee2912889e7110274acabc28df3c6664abcb0f7..24909eb13fec1b0bf22e40caa4ff7a76967f80e4 100644 (file)
@@ -671,9 +671,11 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        bi->bi_next = NULL;
                        if (rrdev)
                                set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
-                       trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
-                                             bi, disk_devt(conf->mddev->gendisk),
-                                             sh->dev[i].sector);
+
+                       if (conf->mddev->gendisk)
+                               trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+                                                     bi, disk_devt(conf->mddev->gendisk),
+                                                     sh->dev[i].sector);
                        generic_make_request(bi);
                }
                if (rrdev) {
@@ -701,9 +703,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        rbi->bi_io_vec[0].bv_offset = 0;
                        rbi->bi_size = STRIPE_SIZE;
                        rbi->bi_next = NULL;
-                       trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
-                                             rbi, disk_devt(conf->mddev->gendisk),
-                                             sh->dev[i].sector);
+                       if (conf->mddev->gendisk)
+                               trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+                                                     rbi, disk_devt(conf->mddev->gendisk),
+                                                     sh->dev[i].sector);
                        generic_make_request(rbi);
                }
                if (!rdev && !rrdev) {
@@ -2280,17 +2283,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
        int level = conf->level;
 
        if (rcw) {
-               /* if we are not expanding this is a proper write request, and
-                * there will be bios with new data to be drained into the
-                * stripe cache
-                */
-               if (!expand) {
-                       sh->reconstruct_state = reconstruct_state_drain_run;
-                       set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
-               } else
-                       sh->reconstruct_state = reconstruct_state_run;
-
-               set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
 
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
@@ -2303,6 +2295,21 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
                                s->locked++;
                        }
                }
+               /* if we are not expanding this is a proper write request, and
+                * there will be bios with new data to be drained into the
+                * stripe cache
+                */
+               if (!expand) {
+                       if (!s->locked)
+                               /* False alarm, nothing to do */
+                               return;
+                       sh->reconstruct_state = reconstruct_state_drain_run;
+                       set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+               } else
+                       sh->reconstruct_state = reconstruct_state_run;
+
+               set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
+
                if (s->locked + conf->max_degraded == disks)
                        if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
                                atomic_inc(&conf->pending_full_writes);
@@ -2311,11 +2318,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
                BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
                        test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
 
-               sh->reconstruct_state = reconstruct_state_prexor_drain_run;
-               set_bit(STRIPE_OP_PREXOR, &s->ops_request);
-               set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
-               set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
-
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
                        if (i == pd_idx)
@@ -2330,6 +2332,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
                                s->locked++;
                        }
                }
+               if (!s->locked)
+                       /* False alarm - nothing to do */
+                       return;
+               sh->reconstruct_state = reconstruct_state_prexor_drain_run;
+               set_bit(STRIPE_OP_PREXOR, &s->ops_request);
+               set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+               set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
        }
 
        /* keep the parity disk(s) locked while asynchronous operations
@@ -2564,6 +2573,8 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
        int i;
 
        clear_bit(STRIPE_SYNCING, &sh->state);
+       if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
+               wake_up(&conf->wait_for_overlap);
        s->syncing = 0;
        s->replacing = 0;
        /* There is nothing more to do for sync/check/repair.
@@ -2737,6 +2748,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
 {
        int i;
        struct r5dev *dev;
+       int discard_pending = 0;
 
        for (i = disks; i--; )
                if (sh->dev[i].written) {
@@ -2765,9 +2777,23 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                                                STRIPE_SECTORS,
                                         !test_bit(STRIPE_DEGRADED, &sh->state),
                                                0);
-                       }
-               } else if (test_bit(R5_Discard, &sh->dev[i].flags))
-                       clear_bit(R5_Discard, &sh->dev[i].flags);
+                       } else if (test_bit(R5_Discard, &dev->flags))
+                               discard_pending = 1;
+               }
+       if (!discard_pending &&
+           test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
+               clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
+               clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
+               if (sh->qd_idx >= 0) {
+                       clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
+                       clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
+               }
+               /* now that discard is done we can proceed with any sync */
+               clear_bit(STRIPE_DISCARD, &sh->state);
+               if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
+                       set_bit(STRIPE_HANDLE, &sh->state);
+
+       }
 
        if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
                if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -2826,8 +2852,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
        set_bit(STRIPE_HANDLE, &sh->state);
        if (rmw < rcw && rmw > 0) {
                /* prefer read-modify-write, but need to get some data */
-               blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
-                                 (unsigned long long)sh->sector, rmw);
+               if (conf->mddev->queue)
+                       blk_add_trace_msg(conf->mddev->queue,
+                                         "raid5 rmw %llu %d",
+                                         (unsigned long long)sh->sector, rmw);
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
                        if ((dev->towrite || i == sh->pd_idx) &&
@@ -2877,7 +2905,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
                                }
                        }
                }
-               if (rcw)
+               if (rcw && conf->mddev->queue)
                        blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
                                          (unsigned long long)sh->sector,
                                          rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
@@ -3417,9 +3445,15 @@ static void handle_stripe(struct stripe_head *sh)
                return;
        }
 
-       if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
-               set_bit(STRIPE_SYNCING, &sh->state);
-               clear_bit(STRIPE_INSYNC, &sh->state);
+       if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
+               spin_lock(&sh->stripe_lock);
+               /* Cannot process 'sync' concurrently with 'discard' */
+               if (!test_bit(STRIPE_DISCARD, &sh->state) &&
+                   test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
+                       set_bit(STRIPE_SYNCING, &sh->state);
+                       clear_bit(STRIPE_INSYNC, &sh->state);
+               }
+               spin_unlock(&sh->stripe_lock);
        }
        clear_bit(STRIPE_DELAYED, &sh->state);
 
@@ -3579,6 +3613,8 @@ static void handle_stripe(struct stripe_head *sh)
            test_bit(STRIPE_INSYNC, &sh->state)) {
                md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
                clear_bit(STRIPE_SYNCING, &sh->state);
+               if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
+                       wake_up(&conf->wait_for_overlap);
        }
 
        /* If the failed drives are just a ReadError, then we might need
@@ -3982,9 +4018,10 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                atomic_inc(&conf->active_aligned_reads);
                spin_unlock_irq(&conf->device_lock);
 
-               trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
-                                     align_bi, disk_devt(mddev->gendisk),
-                                     raid_bio->bi_sector);
+               if (mddev->gendisk)
+                       trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+                                             align_bi, disk_devt(mddev->gendisk),
+                                             raid_bio->bi_sector);
                generic_make_request(align_bi);
                return 1;
        } else {
@@ -4078,7 +4115,8 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
                }
                spin_unlock_irq(&conf->device_lock);
        }
-       trace_block_unplug(mddev->queue, cnt, !from_schedule);
+       if (mddev->queue)
+               trace_block_unplug(mddev->queue, cnt, !from_schedule);
        kfree(cb);
 }
 
@@ -4141,6 +4179,13 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
                sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
                prepare_to_wait(&conf->wait_for_overlap, &w,
                                TASK_UNINTERRUPTIBLE);
+               set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
+               if (test_bit(STRIPE_SYNCING, &sh->state)) {
+                       release_stripe(sh);
+                       schedule();
+                       goto again;
+               }
+               clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
                spin_lock_irq(&sh->stripe_lock);
                for (d = 0; d < conf->raid_disks; d++) {
                        if (d == sh->pd_idx || d == sh->qd_idx)
@@ -4153,6 +4198,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
                                goto again;
                        }
                }
+               set_bit(STRIPE_DISCARD, &sh->state);
                finish_wait(&conf->wait_for_overlap, &w);
                for (d = 0; d < conf->raid_disks; d++) {
                        if (d == sh->pd_idx || d == sh->qd_idx)
index 18b2c4a8a1fdf55af4af03ccd5ca71d2e84db1cb..b0b663b119a8f324ba7fa3b58953b42c81030fa4 100644 (file)
@@ -221,10 +221,6 @@ struct stripe_head {
        struct stripe_operations {
                int                  target, target2;
                enum sum_check_flags zero_sum_result;
-               #ifdef CONFIG_MULTICORE_RAID456
-               unsigned long        request;
-               wait_queue_head_t    wait_for_ops;
-               #endif
        } ops;
        struct r5dev {
                /* rreq and rvec are used for the replacement device when
@@ -323,6 +319,7 @@ enum {
        STRIPE_COMPUTE_RUN,
        STRIPE_OPS_REQ_PENDING,
        STRIPE_ON_UNPLUG_LIST,
+       STRIPE_DISCARD,
 };
 
 /*
index d4e7567b367c04d25b9a21ba4c50d0db314596aa..0b899cb6cda1904b6f07cdb924f94be3d76ccb56 100644 (file)
@@ -724,7 +724,7 @@ static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
        if (enable) {
                if (is_code(code, M5MOLS_RESTYPE_MONITOR))
                        ret = m5mols_start_monitor(info);
-               if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
+               else if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
                        ret = m5mols_start_capture(info);
                else
                        ret = -EINVAL;
index ccd18e4ee7892e62034a0d9b1e351107fb787749..54579e4c740b4da7f8590ad345145553d63d8062 100644 (file)
@@ -250,17 +250,19 @@ static u8 SRAM_Table[][60] =
    vdelay      start of active video in 2 * field lines relative to
                trailing edge of /VRESET pulse (VDELAY register).
    sheight     height of active video in 2 * field lines.
+   extraheight Added to sheight for cropcap.bounds.height only
    videostart0 ITU-R frame line number of the line corresponding
                to vdelay in the first field. */
 #define CROPCAP(minhdelayx1, hdelayx1, swidth, totalwidth, sqwidth,     \
-               vdelay, sheight, videostart0)                            \
+               vdelay, sheight, extraheight, videostart0)               \
        .cropcap.bounds.left = minhdelayx1,                              \
        /* * 2 because vertically we count field lines times two, */     \
        /* e.g. 23 * 2 to 23 * 2 + 576 in PAL-BGHI defrect. */           \
        .cropcap.bounds.top = (videostart0) * 2 - (vdelay) + MIN_VDELAY, \
        /* 4 is a safety margin at the end of the line. */               \
        .cropcap.bounds.width = (totalwidth) - (minhdelayx1) - 4,        \
-       .cropcap.bounds.height = (sheight) + (vdelay) - MIN_VDELAY,      \
+       .cropcap.bounds.height = (sheight) + (extraheight) + (vdelay) -  \
+                                MIN_VDELAY,                             \
        .cropcap.defrect.left = hdelayx1,                                \
        .cropcap.defrect.top = (videostart0) * 2,                        \
        .cropcap.defrect.width = swidth,                                 \
@@ -301,9 +303,10 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* totalwidth */ 1135,
                        /* sqwidth */ 944,
                        /* vdelay */ 0x20,
-               /* bt878 (and bt848?) can capture another
-                  line below active video. */
-                       /* sheight */ (576 + 2) + 0x20 - 2,
+                       /* sheight */ 576,
+                       /* bt878 (and bt848?) can capture another
+                          line below active video. */
+                       /* extraheight */ 2,
                        /* videostart0 */ 23)
        },{
                .v4l2_id        = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR,
@@ -330,6 +333,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 780,
                        /* vdelay */ 0x1a,
                        /* sheight */ 480,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        },{
                .v4l2_id        = V4L2_STD_SECAM,
@@ -355,6 +359,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 944,
                        /* vdelay */ 0x20,
                        /* sheight */ 576,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        },{
                .v4l2_id        = V4L2_STD_PAL_Nc,
@@ -380,6 +385,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 780,
                        /* vdelay */ 0x1a,
                        /* sheight */ 576,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        },{
                .v4l2_id        = V4L2_STD_PAL_M,
@@ -405,6 +411,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 780,
                        /* vdelay */ 0x1a,
                        /* sheight */ 480,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        },{
                .v4l2_id        = V4L2_STD_PAL_N,
@@ -430,6 +437,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 944,
                        /* vdelay */ 0x20,
                        /* sheight */ 576,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        },{
                .v4l2_id        = V4L2_STD_NTSC_M_JP,
@@ -455,6 +463,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 780,
                        /* vdelay */ 0x16,
                        /* sheight */ 480,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        },{
                /* that one hopefully works with the strange timing
@@ -484,6 +493,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
                        /* sqwidth */ 944,
                        /* vdelay */ 0x1a,
                        /* sheight */ 480,
+                       /* extraheight */ 0,
                        /* videostart0 */ 23)
        }
 };
index 82d9f6ac12f3a271481e106df7f75017b3f962c1..33b5ffc8d66dfe136e2cd0df4129963b6c60ad79 100644 (file)
@@ -1054,16 +1054,18 @@ static int gsc_m2m_suspend(struct gsc_dev *gsc)
 
 static int gsc_m2m_resume(struct gsc_dev *gsc)
 {
+       struct gsc_ctx *ctx;
        unsigned long flags;
 
        spin_lock_irqsave(&gsc->slock, flags);
        /* Clear for full H/W setup in first run after resume */
+       ctx = gsc->m2m.ctx;
        gsc->m2m.ctx = NULL;
        spin_unlock_irqrestore(&gsc->slock, flags);
 
        if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
-               gsc_m2m_job_finish(gsc->m2m.ctx,
-                                   VB2_BUF_STATE_ERROR);
+               gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
        return 0;
 }
 
@@ -1204,7 +1206,7 @@ static int gsc_resume(struct device *dev)
        /* Do not resume if the device was idle before system suspend */
        spin_lock_irqsave(&gsc->slock, flags);
        if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) ||
-           !gsc_m2m_active(gsc)) {
+           !gsc_m2m_opened(gsc)) {
                spin_unlock_irqrestore(&gsc->slock, flags);
                return 0;
        }
index e3916bde45cf13a4cb92b8bcd83715b1c1a07ce0..0f513dd19f86de105f36913a6d5e6e59839fd91a 100644 (file)
@@ -850,16 +850,18 @@ static int fimc_m2m_suspend(struct fimc_dev *fimc)
 
 static int fimc_m2m_resume(struct fimc_dev *fimc)
 {
+       struct fimc_ctx *ctx;
        unsigned long flags;
 
        spin_lock_irqsave(&fimc->slock, flags);
        /* Clear for full H/W setup in first run after resume */
+       ctx = fimc->m2m.ctx;
        fimc->m2m.ctx = NULL;
        spin_unlock_irqrestore(&fimc->slock, flags);
 
        if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state))
-               fimc_m2m_job_finish(fimc->m2m.ctx,
-                                   VB2_BUF_STATE_ERROR);
+               fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
+
        return 0;
 }
 
index f0af0754a7b46a04cee5f41552ede4d8bc2e96ed..ac9663ce2a4927a0a3249bde33107fb1a17a9783 100644 (file)
@@ -128,10 +128,10 @@ static const u32 src_pixfmt_map[8][3] = {
 void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
 {
        enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code;
-       unsigned int i = ARRAY_SIZE(src_pixfmt_map);
+       int i = ARRAY_SIZE(src_pixfmt_map);
        u32 cfg;
 
-       while (i-- >= 0) {
+       while (--i >= 0) {
                if (src_pixfmt_map[i][0] == pixelcode)
                        break;
        }
@@ -224,9 +224,9 @@ static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
                { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
        };
        u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
-       unsigned int i = ARRAY_SIZE(pixcode);
+       int i = ARRAY_SIZE(pixcode);
 
-       while (i-- >= 0)
+       while (--i >= 0)
                if (pixcode[i][0] == dev->fmt->mbus_code)
                        break;
        cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;
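
Both loops above had an unsigned index, which makes the old condition a tautology: the post-decrement wraps to UINT_MAX instead of going negative, so the walk runs past element 0 and indexes far outside the table. A minimal sketch of the corrected idiom (generic table shape, not the driver's exact arrays):

/*
 * Signed index so that --i >= 0 terminates; i == -1 afterwards doubles as
 * the "not found" marker.  With an unsigned i, "i-- >= 0" is always true.
 */
static int find_code(const u32 (*tbl)[2], int n, u32 code)
{
	int i = n;

	while (--i >= 0)
		if (tbl[i][0] == code)
			break;

	return i;
}
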
index bfc4206935c85a93fa75b6893e3afdcc8efb7c92..bbc35de7db278fe053c6de4d02be7fc7c40d4321 100644 (file)
@@ -1408,6 +1408,7 @@ static const struct v4l2_ctrl_config fimc_lite_ctrl = {
        .id     = V4L2_CTRL_CLASS_USER | 0x1001,
        .type   = V4L2_CTRL_TYPE_BOOLEAN,
        .name   = "Test Pattern 640x480",
+       .step   = 1,
 };
 
 static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)
index a17fcb2d5d413ca66ed9a16a31a34045a83a0d6b..cd38d708ab584ef4487d4d1960aabe06d6b7138b 100644 (file)
@@ -827,7 +827,7 @@ static int fimc_md_link_notify(struct media_pad *source,
        struct fimc_pipeline *pipeline;
        struct v4l2_subdev *sd;
        struct mutex *lock;
-       int ret = 0;
+       int i, ret = 0;
        int ref_count;
 
        if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
@@ -854,29 +854,28 @@ static int fimc_md_link_notify(struct media_pad *source,
                return 0;
        }
 
+       mutex_lock(lock);
+       ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
+
        if (!(flags & MEDIA_LNK_FL_ENABLED)) {
-               int i;
-               mutex_lock(lock);
-               ret = __fimc_pipeline_close(pipeline);
+               if (ref_count > 0) {
+                       ret = __fimc_pipeline_close(pipeline);
+                       if (!ret && fimc)
+                               fimc_ctrls_delete(fimc->vid_cap.ctx);
+               }
                for (i = 0; i < IDX_MAX; i++)
                        pipeline->subdevs[i] = NULL;
-               if (fimc)
-                       fimc_ctrls_delete(fimc->vid_cap.ctx);
-               mutex_unlock(lock);
-               return ret;
+       } else if (ref_count > 0) {
+               /*
+                * Link activation. Enable power of pipeline elements only if
+                * the pipeline is already in use, i.e. its video node is open.
+                * Recreate the controls destroyed during the link deactivation.
+                */
+               ret = __fimc_pipeline_open(pipeline,
+                                          source->entity, true);
+               if (!ret && fimc)
+                       ret = fimc_capture_ctrls_create(fimc);
        }
-       /*
-        * Link activation. Enable power of pipeline elements only if the
-        * pipeline is already in use, i.e. its video node is opened.
-        * Recreate the controls destroyed during the link deactivation.
-        */
-       mutex_lock(lock);
-
-       ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
-       if (ref_count > 0)
-               ret = __fimc_pipeline_open(pipeline, source->entity, true);
-       if (!ret && fimc)
-               ret = fimc_capture_ctrls_create(fimc);
 
        mutex_unlock(lock);
        return ret ? -EPIPE : ret;
index e84703c314ce0d50a9861cf674244b9a3342615f..1cb6d57987c6f2dd06e0ff8d8965d0c02179039e 100644 (file)
@@ -276,7 +276,7 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
        unsigned int frame_type;
 
        dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
-       frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
+       frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_disp_frame_type, ctx);
 
        /* If frame is same as previous then skip and do not dequeue */
        if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
index 2356fd52a169abf2680dd22ebb9e17453c4ac52d..4f6b553c4b2de90f6686f2da38cf5221f68c352d 100644 (file)
@@ -232,6 +232,7 @@ static struct mfc_control controls[] = {
                .minimum = 0,
                .maximum = 1,
                .default_value = 0,
+               .step = 1,
                .menu_skip_mask = 0,
        },
        {
index 19f3563c61da651673b5ea7884b43b8e4c2145e1..5a79c333d45e66308e5b0f75cbc91ed9d60ce329 100644 (file)
@@ -291,7 +291,7 @@ config IR_TTUSBIR
 
 config IR_RX51
        tristate "Nokia N900 IR transmitter diode"
-       depends on OMAP_DM_TIMER && LIRC && !ARCH_MULTIPLATFORM
+       depends on OMAP_DM_TIMER && ARCH_OMAP2PLUS && LIRC && !ARCH_MULTIPLATFORM
        ---help---
           Say Y or M here if you want to enable support for the IR
           transmitter diode built in the Nokia N900 (RX51) device.
index a9d355230e8eb2efb52fb3fbdff2a5b8297a7a73..768aaf62d5dc300a7b2d7e150265e27035ef15cb 100644 (file)
@@ -10,7 +10,7 @@ ifeq ($(CONFIG_COMPAT),y)
   videodev-objs += v4l2-compat-ioctl32.o
 endif
 
-obj-$(CONFIG_VIDEO_DEV) += videodev.o
+obj-$(CONFIG_VIDEO_V4L2) += videodev.o
 obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
 obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
 
index 45ea7185c003d5b5deee66ad1a274c23212ed77f..642c6223fa6cd177f22c66e40b3d3531a6d841a4 100644 (file)
@@ -151,6 +151,20 @@ static void mei_me_intr_disable(struct mei_device *dev)
        mei_hcsr_set(hw, hcsr);
 }
 
+/**
+ * mei_me_hw_reset_release - release device from the reset
+ *
+ * @dev: the device structure
+ */
+static void mei_me_hw_reset_release(struct mei_device *dev)
+{
+       struct mei_me_hw *hw = to_me_hw(dev);
+       u32 hcsr = mei_hcsr_read(hw);
+
+       hcsr |= H_IG;
+       hcsr &= ~H_RST;
+       mei_hcsr_set(hw, hcsr);
+}
 /**
  * mei_me_hw_reset - resets fw via mei csr register.
  *
@@ -169,18 +183,14 @@ static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
        if (intr_enable)
                hcsr |= H_IE;
        else
-               hcsr &= ~H_IE;
-
-       mei_hcsr_set(hw, hcsr);
-
-       hcsr = mei_hcsr_read(hw) | H_IG;
-       hcsr &= ~H_RST;
+               hcsr |= ~H_IE;
 
        mei_hcsr_set(hw, hcsr);
 
-       hcsr = mei_hcsr_read(hw);
+       if (dev->dev_state == MEI_DEV_POWER_DOWN)
+               mei_me_hw_reset_release(dev);
 
-       dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr);
+       dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", mei_hcsr_read(hw));
 }
 
 /**
@@ -466,7 +476,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
                        mutex_unlock(&dev->device_lock);
                        return IRQ_HANDLED;
                } else {
-                       dev_dbg(&dev->pdev->dev, "FW not ready.\n");
+                       dev_dbg(&dev->pdev->dev, "Reset Completed.\n");
+                       mei_me_hw_reset_release(dev);
                        mutex_unlock(&dev->device_lock);
                        return IRQ_HANDLED;
                }
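
One aside on the reset hunk above: the added line "hcsr |= ~H_IE;" ORs with the complement, which leaves H_IE exactly as it was and forces every other bit on, whereas the conventional way to clear a single flag is the AND form the removed line used. Generic illustration with made-up values (reg = 0x7, FLAG = 0x2), not the real register layout:

	u32 reg = 0x7;
	const u32 FLAG = 0x2;

	reg &= ~FLAG;	/* 0x7 & 0xfffffffd = 0x00000005: only FLAG cleared        */
	reg  = 0x7;
	reg |= ~FLAG;	/* 0x7 | 0xfffffffd = 0xffffffff: FLAG untouched, rest set */
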
index 6ec530168afbbe2268da06d1c2508fd044cb943d..356179991a2e9ff87f7e34efb452b5ad4a547aa6 100644 (file)
@@ -183,6 +183,24 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
        mei_cl_all_write_clear(dev);
 }
 
+void mei_stop(struct mei_device *dev)
+{
+       dev_dbg(&dev->pdev->dev, "stopping the device.\n");
+
+       mutex_lock(&dev->device_lock);
+
+       cancel_delayed_work(&dev->timer_work);
+
+       mei_wd_stop(dev);
+
+       dev->dev_state = MEI_DEV_POWER_DOWN;
+       mei_reset(dev, 0);
+
+       mutex_unlock(&dev->device_lock);
+
+       flush_scheduled_work();
+}
+
 
 
 
index cb80166161f0f82ad725e1073baf1bc607fcc6bf..97873812e33b28c06e85dc000869ef48f1312471 100644 (file)
@@ -381,6 +381,7 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
 void mei_device_init(struct mei_device *dev);
 void mei_reset(struct mei_device *dev, int interrupts);
 int mei_hw_init(struct mei_device *dev);
+void mei_stop(struct mei_device *dev);
 
 /*
  *  MEI interrupt functions prototype
index b40ec0601ab0bcc7325f0dfedb8ebefccdd896ec..b8b5c9c3ad0375bab4605b038bcbda60ff970b4f 100644 (file)
@@ -247,44 +247,14 @@ static void mei_remove(struct pci_dev *pdev)
 
        hw = to_me_hw(dev);
 
-       mutex_lock(&dev->device_lock);
-
-       cancel_delayed_work(&dev->timer_work);
 
-       mei_wd_stop(dev);
+       dev_err(&pdev->dev, "stop\n");
+       mei_stop(dev);
 
        mei_pdev = NULL;
 
-       if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
-               dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
-               mei_cl_disconnect(&dev->iamthif_cl);
-       }
-       if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
-               dev->wd_cl.state = MEI_FILE_DISCONNECTING;
-               mei_cl_disconnect(&dev->wd_cl);
-       }
-
-       /* Unregistering watchdog device */
        mei_watchdog_unregister(dev);
 
-       /* remove entry if already in list */
-       dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
-
-       if (dev->open_handle_count > 0)
-               dev->open_handle_count--;
-       mei_cl_unlink(&dev->wd_cl);
-
-       if (dev->open_handle_count > 0)
-               dev->open_handle_count--;
-       mei_cl_unlink(&dev->iamthif_cl);
-
-       dev->iamthif_current_cb = NULL;
-       dev->me_clients_num = 0;
-
-       mutex_unlock(&dev->device_lock);
-
-       flush_scheduled_work();
-
        /* disable interrupts */
        mei_disable_interrupts(dev);
 
@@ -308,28 +278,20 @@ static int mei_pci_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct mei_device *dev = pci_get_drvdata(pdev);
-       int err;
 
        if (!dev)
                return -ENODEV;
-       mutex_lock(&dev->device_lock);
 
-       cancel_delayed_work(&dev->timer_work);
+       dev_err(&pdev->dev, "suspend\n");
 
-       /* Stop watchdog if exists */
-       err = mei_wd_stop(dev);
-       /* Set new mei state */
-       if (dev->dev_state == MEI_DEV_ENABLED ||
-           dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
-               dev->dev_state = MEI_DEV_POWER_DOWN;
-               mei_reset(dev, 0);
-       }
-       mutex_unlock(&dev->device_lock);
+       mei_stop(dev);
+
+       mei_disable_interrupts(dev);
 
        free_irq(pdev->irq, dev);
        pci_disable_msi(pdev);
 
-       return err;
+       return 0;
 }
 
 static int mei_pci_resume(struct device *device)
index ed5c433cd4936f8b52c98352bbf21ead11a67ae8..f3cdd904fe4d6e1ae5c48288858d793dbc0af22c 100644 (file)
@@ -42,9 +42,11 @@ struct datagram_entry {
 
 struct delayed_datagram_info {
        struct datagram_entry *entry;
-       struct vmci_datagram msg;
        struct work_struct work;
        bool in_dg_host_queue;
+       /* msg and msg_payload must be together. */
+       struct vmci_datagram msg;
+       u8 msg_payload[];
 };
 
 /* Number of in-flight host->host datagrams */
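
Moving msg to the end of delayed_datagram_info, with a flexible array member behind it, matters because the datagram is copied into the work item as header plus payload in a single memcpy(); with msg in the middle of the struct, the payload bytes landed on top of work and in_dg_host_queue. A sketch of the presumed allocation and copy, where dg, dg_size, dst_entry and dg_delayed_dispatch stand in for the caller's real variables and handler:

	struct delayed_datagram_info *dg_info;

	dg_info = kmalloc(sizeof(*dg_info) + (size_t)dg_size -
			  sizeof(struct vmci_datagram), GFP_ATOMIC);
	if (!dg_info)
		return VMCI_ERROR_NO_MEM;

	dg_info->in_dg_host_queue = true;
	dg_info->entry = dst_entry;
	memcpy(&dg_info->msg, dg, dg_size);	/* header fills msg, payload spills
						   into msg_payload[] and nowhere else */

	INIT_WORK(&dg_info->work, dg_delayed_dispatch);
	schedule_work(&dg_info->work);
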
index 63feb75cc8e01d2048949343955023b01fc7fadc..9279a9174f84ad9cd3a2ae2c7117c29cc8134e9e 100644 (file)
 /* 10 parts were found on sflash on Netgear WNDR4500 */
 #define BCM47XXPART_MAX_PARTS          12
 
+/*
+ * Amount of bytes we read when analyzing each block of flash memory.
+ * Set it big enough to allow detecting partition and reading important data.
+ */
+#define BCM47XXPART_BYTES_TO_READ      0x404
+
 /* Magics */
 #define BOARD_DATA_MAGIC               0x5246504D      /* MPFR */
 #define POT_MAGIC1                     0x54544f50      /* POTT */
@@ -57,17 +63,15 @@ static int bcm47xxpart_parse(struct mtd_info *master,
        struct trx_header *trx;
        int trx_part = -1;
        int last_trx_part = -1;
-       int max_bytes_to_read = 0x8004;
+       int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
 
        if (blocksize <= 0x10000)
                blocksize = 0x10000;
-       if (blocksize == 0x20000)
-               max_bytes_to_read = 0x18004;
 
        /* Alloc */
        parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
                        GFP_KERNEL);
-       buf = kzalloc(max_bytes_to_read, GFP_KERNEL);
+       buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
 
        /* Parse block by block looking for magics */
        for (offset = 0; offset <= master->size - blocksize;
@@ -82,7 +86,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                }
 
                /* Read beginning of the block */
-               if (mtd_read(master, offset, max_bytes_to_read,
+               if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
                             &bytes_read, (uint8_t *)buf) < 0) {
                        pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
                               offset);
@@ -96,20 +100,6 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                        continue;
                }
 
-               /* Standard NVRAM */
-               if (buf[0x000 / 4] == NVRAM_HEADER ||
-                   buf[0x1000 / 4] == NVRAM_HEADER ||
-                   buf[0x8000 / 4] == NVRAM_HEADER ||
-                   (blocksize == 0x20000 && (
-                     buf[0x10000 / 4] == NVRAM_HEADER ||
-                     buf[0x11000 / 4] == NVRAM_HEADER ||
-                     buf[0x18000 / 4] == NVRAM_HEADER))) {
-                       bcm47xxpart_add_part(&parts[curr_part++], "nvram",
-                                            offset, 0);
-                       offset = rounddown(offset, blocksize);
-                       continue;
-               }
-
                /*
                 * board_data starts with board_id which differs across boards,
                 * but we can use 'MPFR' (hopefully) magic at 0x100
@@ -178,6 +168,30 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                        continue;
                }
        }
+
+       /* Look for NVRAM at the end of the last block. */
+       for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) {
+               if (curr_part > BCM47XXPART_MAX_PARTS) {
+                       pr_warn("Reached maximum number of partitions, scanning stopped!\n");
+                       break;
+               }
+
+               offset = master->size - possible_nvram_sizes[i];
+               if (mtd_read(master, offset, 0x4, &bytes_read,
+                            (uint8_t *)buf) < 0) {
+                       pr_err("mtd_read error while reading at offset 0x%X!\n",
+                              offset);
+                       continue;
+               }
+
+               /* Standard NVRAM */
+               if (buf[0] == NVRAM_HEADER) {
+                       bcm47xxpart_add_part(&parts[curr_part++], "nvram",
+                                            master->size - blocksize, 0);
+                       break;
+               }
+       }
+
        kfree(buf);
 
        /*
index 43214151b882bcf4d2717eb48f20b1b2e0fd9a46..42c63927609dbee8697c691c8e97515cf509b990 100644 (file)
@@ -1523,6 +1523,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
                                        oobreadlen -= toread;
                                }
                        }
+
+                       if (chip->options & NAND_NEED_READRDY) {
+                               /* Apply delay or wait for ready/busy pin */
+                               if (!chip->dev_ready)
+                                       udelay(chip->chip_delay);
+                               else
+                                       nand_wait_ready(mtd);
+                       }
                } else {
                        memcpy(buf, chip->buffers->databuf + col, bytes);
                        buf += bytes;
@@ -1787,6 +1795,14 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
                len = min(len, readlen);
                buf = nand_transfer_oob(chip, buf, ops, len);
 
+               if (chip->options & NAND_NEED_READRDY) {
+                       /* Apply delay or wait for ready/busy pin */
+                       if (!chip->dev_ready)
+                               udelay(chip->chip_delay);
+                       else
+                               nand_wait_ready(mtd);
+               }
+
                readlen -= len;
                if (!readlen)
                        break;
index e3aa2748a6e7af1cac818aaa170d5f6aff253056..9c612388e5deff15def3bc8014fd1d6408222142 100644 (file)
 *      512     512 Byte page size
 */
 struct nand_flash_dev nand_flash_ids[] = {
+#define SP_OPTIONS NAND_NEED_READRDY
+#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
 
 #ifdef CONFIG_MTD_NAND_MUSEUM_IDS
-       {"NAND 1MiB 5V 8-bit",          0x6e, 256, 1, 0x1000, 0},
-       {"NAND 2MiB 5V 8-bit",          0x64, 256, 2, 0x1000, 0},
-       {"NAND 4MiB 5V 8-bit",          0x6b, 512, 4, 0x2000, 0},
-       {"NAND 1MiB 3,3V 8-bit",        0xe8, 256, 1, 0x1000, 0},
-       {"NAND 1MiB 3,3V 8-bit",        0xec, 256, 1, 0x1000, 0},
-       {"NAND 2MiB 3,3V 8-bit",        0xea, 256, 2, 0x1000, 0},
-       {"NAND 4MiB 3,3V 8-bit",        0xd5, 512, 4, 0x2000, 0},
-       {"NAND 4MiB 3,3V 8-bit",        0xe3, 512, 4, 0x2000, 0},
-       {"NAND 4MiB 3,3V 8-bit",        0xe5, 512, 4, 0x2000, 0},
-       {"NAND 8MiB 3,3V 8-bit",        0xd6, 512, 8, 0x2000, 0},
-
-       {"NAND 8MiB 1,8V 8-bit",        0x39, 512, 8, 0x2000, 0},
-       {"NAND 8MiB 3,3V 8-bit",        0xe6, 512, 8, 0x2000, 0},
-       {"NAND 8MiB 1,8V 16-bit",       0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
-       {"NAND 8MiB 3,3V 16-bit",       0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
+       {"NAND 1MiB 5V 8-bit",          0x6e, 256, 1, 0x1000, SP_OPTIONS},
+       {"NAND 2MiB 5V 8-bit",          0x64, 256, 2, 0x1000, SP_OPTIONS},
+       {"NAND 4MiB 5V 8-bit",          0x6b, 512, 4, 0x2000, SP_OPTIONS},
+       {"NAND 1MiB 3,3V 8-bit",        0xe8, 256, 1, 0x1000, SP_OPTIONS},
+       {"NAND 1MiB 3,3V 8-bit",        0xec, 256, 1, 0x1000, SP_OPTIONS},
+       {"NAND 2MiB 3,3V 8-bit",        0xea, 256, 2, 0x1000, SP_OPTIONS},
+       {"NAND 4MiB 3,3V 8-bit",        0xd5, 512, 4, 0x2000, SP_OPTIONS},
+       {"NAND 4MiB 3,3V 8-bit",        0xe3, 512, 4, 0x2000, SP_OPTIONS},
+       {"NAND 4MiB 3,3V 8-bit",        0xe5, 512, 4, 0x2000, SP_OPTIONS},
+       {"NAND 8MiB 3,3V 8-bit",        0xd6, 512, 8, 0x2000, SP_OPTIONS},
+
+       {"NAND 8MiB 1,8V 8-bit",        0x39, 512, 8, 0x2000, SP_OPTIONS},
+       {"NAND 8MiB 3,3V 8-bit",        0xe6, 512, 8, 0x2000, SP_OPTIONS},
+       {"NAND 8MiB 1,8V 16-bit",       0x49, 512, 8, 0x2000, SP_OPTIONS16},
+       {"NAND 8MiB 3,3V 16-bit",       0x59, 512, 8, 0x2000, SP_OPTIONS16},
 #endif
 
-       {"NAND 16MiB 1,8V 8-bit",       0x33, 512, 16, 0x4000, 0},
-       {"NAND 16MiB 3,3V 8-bit",       0x73, 512, 16, 0x4000, 0},
-       {"NAND 16MiB 1,8V 16-bit",      0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
-       {"NAND 16MiB 3,3V 16-bit",      0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
-
-       {"NAND 32MiB 1,8V 8-bit",       0x35, 512, 32, 0x4000, 0},
-       {"NAND 32MiB 3,3V 8-bit",       0x75, 512, 32, 0x4000, 0},
-       {"NAND 32MiB 1,8V 16-bit",      0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16},
-       {"NAND 32MiB 3,3V 16-bit",      0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16},
-
-       {"NAND 64MiB 1,8V 8-bit",       0x36, 512, 64, 0x4000, 0},
-       {"NAND 64MiB 3,3V 8-bit",       0x76, 512, 64, 0x4000, 0},
-       {"NAND 64MiB 1,8V 16-bit",      0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16},
-       {"NAND 64MiB 3,3V 16-bit",      0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16},
-
-       {"NAND 128MiB 1,8V 8-bit",      0x78, 512, 128, 0x4000, 0},
-       {"NAND 128MiB 1,8V 8-bit",      0x39, 512, 128, 0x4000, 0},
-       {"NAND 128MiB 3,3V 8-bit",      0x79, 512, 128, 0x4000, 0},
-       {"NAND 128MiB 1,8V 16-bit",     0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16},
-       {"NAND 128MiB 1,8V 16-bit",     0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16},
-       {"NAND 128MiB 3,3V 16-bit",     0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16},
-       {"NAND 128MiB 3,3V 16-bit",     0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16},
-
-       {"NAND 256MiB 3,3V 8-bit",      0x71, 512, 256, 0x4000, 0},
+       {"NAND 16MiB 1,8V 8-bit",       0x33, 512, 16, 0x4000, SP_OPTIONS},
+       {"NAND 16MiB 3,3V 8-bit",       0x73, 512, 16, 0x4000, SP_OPTIONS},
+       {"NAND 16MiB 1,8V 16-bit",      0x43, 512, 16, 0x4000, SP_OPTIONS16},
+       {"NAND 16MiB 3,3V 16-bit",      0x53, 512, 16, 0x4000, SP_OPTIONS16},
+
+       {"NAND 32MiB 1,8V 8-bit",       0x35, 512, 32, 0x4000, SP_OPTIONS},
+       {"NAND 32MiB 3,3V 8-bit",       0x75, 512, 32, 0x4000, SP_OPTIONS},
+       {"NAND 32MiB 1,8V 16-bit",      0x45, 512, 32, 0x4000, SP_OPTIONS16},
+       {"NAND 32MiB 3,3V 16-bit",      0x55, 512, 32, 0x4000, SP_OPTIONS16},
+
+       {"NAND 64MiB 1,8V 8-bit",       0x36, 512, 64, 0x4000, SP_OPTIONS},
+       {"NAND 64MiB 3,3V 8-bit",       0x76, 512, 64, 0x4000, SP_OPTIONS},
+       {"NAND 64MiB 1,8V 16-bit",      0x46, 512, 64, 0x4000, SP_OPTIONS16},
+       {"NAND 64MiB 3,3V 16-bit",      0x56, 512, 64, 0x4000, SP_OPTIONS16},
+
+       {"NAND 128MiB 1,8V 8-bit",      0x78, 512, 128, 0x4000, SP_OPTIONS},
+       {"NAND 128MiB 1,8V 8-bit",      0x39, 512, 128, 0x4000, SP_OPTIONS},
+       {"NAND 128MiB 3,3V 8-bit",      0x79, 512, 128, 0x4000, SP_OPTIONS},
+       {"NAND 128MiB 1,8V 16-bit",     0x72, 512, 128, 0x4000, SP_OPTIONS16},
+       {"NAND 128MiB 1,8V 16-bit",     0x49, 512, 128, 0x4000, SP_OPTIONS16},
+       {"NAND 128MiB 3,3V 16-bit",     0x74, 512, 128, 0x4000, SP_OPTIONS16},
+       {"NAND 128MiB 3,3V 16-bit",     0x59, 512, 128, 0x4000, SP_OPTIONS16},
+
+       {"NAND 256MiB 3,3V 8-bit",      0x71, 512, 256, 0x4000, SP_OPTIONS},
 
        /*
         * These are the new chips with large page size. The pagesize and the
index 8b4e96e01d6ce15def39381bcbc0a6b996d7bb85..6bbd90e1123c59c58329505537568a2ec2d5b9b0 100644 (file)
@@ -1746,6 +1746,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        bond_compute_features(bond);
 
+       bond_update_speed_duplex(new_slave);
+
        read_lock(&bond->lock);
 
        new_slave->last_arp_rx = jiffies -
@@ -1798,8 +1800,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                new_slave->link == BOND_LINK_DOWN ? "DOWN" :
                        (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
 
-       bond_update_speed_duplex(new_slave);
-
        if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
                /* if there is a primary slave, remember it */
                if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
@@ -2374,8 +2374,6 @@ static void bond_miimon_commit(struct bonding *bond)
                                bond_set_backup_slave(slave);
                        }
 
-                       bond_update_speed_duplex(slave);
-
                        pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
                                bond->dev->name, slave->dev->name,
                                slave->speed, slave->duplex ? "full" : "half");
index 1c9e09fbdff8346e99413e0313d03ed401f463e2..ea7a388f484306710a33375f3a553fd1ecc5b621 100644 (file)
@@ -183,6 +183,11 @@ int bond_create_slave_symlinks(struct net_device *master,
        sprintf(linkname, "slave_%s", slave->name);
        ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
                                linkname);
+
+       /* free the master link created earlier in case of error */
+       if (ret)
+               sysfs_remove_link(&(slave->dev.kobj), "master");
+
        return ret;
 
 }
@@ -522,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
                goto out;
        }
        if (new_value < 0) {
-               pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
+               pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
                       bond->dev->name, new_value, INT_MAX);
                ret = -EINVAL;
                goto out;
@@ -537,14 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
        pr_info("%s: Setting ARP monitoring interval to %d.\n",
                bond->dev->name, new_value);
        bond->params.arp_interval = new_value;
-       if (bond->params.miimon) {
-               pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
-                       bond->dev->name, bond->dev->name);
-               bond->params.miimon = 0;
-       }
-       if (!bond->params.arp_targets[0]) {
-               pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
-                       bond->dev->name);
+       if (new_value) {
+               if (bond->params.miimon) {
+                       pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+                               bond->dev->name, bond->dev->name);
+                       bond->params.miimon = 0;
+               }
+               if (!bond->params.arp_targets[0])
+                       pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+                               bond->dev->name);
        }
        if (bond->dev->flags & IFF_UP) {
                /* If the interface is up, we may need to fire off
@@ -552,10 +558,13 @@ static ssize_t bonding_store_arp_interval(struct device *d,
                 * timer will get fired off when the open function
                 * is called.
                 */
-               cancel_delayed_work_sync(&bond->mii_work);
-               queue_delayed_work(bond->wq, &bond->arp_work, 0);
+               if (!new_value) {
+                       cancel_delayed_work_sync(&bond->arp_work);
+               } else {
+                       cancel_delayed_work_sync(&bond->mii_work);
+                       queue_delayed_work(bond->wq, &bond->arp_work, 0);
+               }
        }
-
 out:
        rtnl_unlock();
        return ret;
@@ -697,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
        }
        if (new_value < 0) {
                pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
-                      bond->dev->name, new_value, 1, INT_MAX);
+                      bond->dev->name, new_value, 0, INT_MAX);
                ret = -EINVAL;
                goto out;
        } else {
@@ -752,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d,
                goto out;
        }
        if (new_value < 0) {
-               pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
-                      bond->dev->name, new_value, 1, INT_MAX);
+               pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
+                      bond->dev->name, new_value, 0, INT_MAX);
                ret = -EINVAL;
                goto out;
        } else {
@@ -963,37 +972,37 @@ static ssize_t bonding_store_miimon(struct device *d,
        }
        if (new_value < 0) {
                pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
-                      bond->dev->name, new_value, 1, INT_MAX);
+                      bond->dev->name, new_value, 0, INT_MAX);
                ret = -EINVAL;
                goto out;
-       } else {
-               pr_info("%s: Setting MII monitoring interval to %d.\n",
-                       bond->dev->name, new_value);
-               bond->params.miimon = new_value;
-               if (bond->params.updelay)
-                       pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
-                               bond->dev->name,
-                               bond->params.updelay * bond->params.miimon);
-               if (bond->params.downdelay)
-                       pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
-                               bond->dev->name,
-                               bond->params.downdelay * bond->params.miimon);
-               if (bond->params.arp_interval) {
-                       pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
-                               bond->dev->name);
-                       bond->params.arp_interval = 0;
-                       if (bond->params.arp_validate) {
-                               bond->params.arp_validate =
-                                       BOND_ARP_VALIDATE_NONE;
-                       }
-               }
-
-               if (bond->dev->flags & IFF_UP) {
-                       /* If the interface is up, we may need to fire off
-                        * the MII timer. If the interface is down, the
-                        * timer will get fired off when the open function
-                        * is called.
-                        */
+       }
+       pr_info("%s: Setting MII monitoring interval to %d.\n",
+               bond->dev->name, new_value);
+       bond->params.miimon = new_value;
+       if (bond->params.updelay)
+               pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+                       bond->dev->name,
+                       bond->params.updelay * bond->params.miimon);
+       if (bond->params.downdelay)
+               pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+                       bond->dev->name,
+                       bond->params.downdelay * bond->params.miimon);
+       if (new_value && bond->params.arp_interval) {
+               pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+                       bond->dev->name);
+               bond->params.arp_interval = 0;
+               if (bond->params.arp_validate)
+                       bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+       }
+       if (bond->dev->flags & IFF_UP) {
+               /* If the interface is up, we may need to fire off
+                * the MII timer. If the interface is down, the
+                * timer will get fired off when the open function
+                * is called.
+                */
+               if (!new_value) {
+                       cancel_delayed_work_sync(&bond->mii_work);
+               } else {
                        cancel_delayed_work_sync(&bond->arp_work);
                        queue_delayed_work(bond->wq, &bond->mii_work, 0);
                }
index b39ca5b3ea7faee34c2264ea7fbc6ac14e3caf35..ff2ba86cd4a495cdd29020f02561a1f31926ff11 100644 (file)
@@ -46,6 +46,7 @@ config CAN_EMS_PCI
 config CAN_PEAK_PCMCIA
        tristate "PEAK PCAN-PC Card"
        depends on PCMCIA
+       depends on HAS_IOPORT
        ---help---
          This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
          from PEAK-System (http://www.peak-system.com). To compile this
index a042cdc260dc720f982c44ae1bf8da315c0faaf6..3c18d7d000edaabe3b8ec93f1d2841d3300df087 100644 (file)
@@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
         */
        if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
            REG_CR_BASICCAN_INITIAL &&
-           (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
+           (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) &&
            (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
                flag = 1;
 
@@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
         * See states on p. 23 of the Datasheet.
         */
        if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
-           priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
+           priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL &&
            priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
                return flag;
 
index daf4013a8fc720ca0c73a9df96b647ad5c314839..e4df307eaa9081b9e39a81731714e0bd3969ea3b 100644 (file)
@@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
         */
        spin_lock_irqsave(&priv->cmdreg_lock, flags);
        priv->write_reg(priv, REG_CMR, val);
-       priv->read_reg(priv, REG_SR);
+       priv->read_reg(priv, SJA1000_REG_SR);
        spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
 }
 
@@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
 
        while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
                n++;
-               status = priv->read_reg(priv, REG_SR);
+               status = priv->read_reg(priv, SJA1000_REG_SR);
                /* check for absent controller due to hw unplug */
                if (status == 0xFF && sja1000_is_absent(priv))
                        return IRQ_NONE;
@@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
                        /* receive interrupt */
                        while (status & SR_RBS) {
                                sja1000_rx(dev);
-                               status = priv->read_reg(priv, REG_SR);
+                               status = priv->read_reg(priv, SJA1000_REG_SR);
                                /* check for absent controller */
                                if (status == 0xFF && sja1000_is_absent(priv))
                                        return IRQ_NONE;
index afa99847a5101ffa6399726b57d4189f6c3057f5..aa48e053da27ce26ed394e8a4584ef9805828372 100644 (file)
@@ -56,7 +56,7 @@
 /* SJA1000 registers - manual section 6.4 (Pelican Mode) */
 #define REG_MOD                0x00
 #define REG_CMR                0x01
-#define REG_SR         0x02
+#define SJA1000_REG_SR         0x02
 #define REG_IR         0x03
 #define REG_IER                0x04
 #define REG_ALC                0x0B
index 829b5ad71d0da0a4634cd211bc3f5ffc67215db6..edfdf6b950e73ad82f8c1e3749a973c113237bb7 100644 (file)
@@ -438,7 +438,6 @@ struct atl1e_adapter {
        struct atl1e_hw        hw;
        struct atl1e_hw_stats  hw_stats;
 
-       bool have_msi;
        u32 wol;
        u16 link_speed;
        u16 link_duplex;
index 92f4734f860d3f18ece977946b4562c028df35e6..f73d5609439ac8d054b7c3303bf0baadfa576877 100644 (file)
@@ -1849,34 +1849,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
 
        free_irq(adapter->pdev->irq, netdev);
-
-       if (adapter->have_msi)
-               pci_disable_msi(adapter->pdev);
 }
 
 static int atl1e_request_irq(struct atl1e_adapter *adapter)
 {
        struct pci_dev    *pdev   = adapter->pdev;
        struct net_device *netdev = adapter->netdev;
-       int flags = 0;
        int err = 0;
 
-       adapter->have_msi = true;
-       err = pci_enable_msi(pdev);
-       if (err) {
-               netdev_dbg(netdev,
-                          "Unable to allocate MSI interrupt Error: %d\n", err);
-               adapter->have_msi = false;
-       }
-
-       if (!adapter->have_msi)
-               flags |= IRQF_SHARED;
-       err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
+       err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
+                         netdev);
        if (err) {
                netdev_dbg(adapter->netdev,
                           "Unable to allocate interrupt Error: %d\n", err);
-               if (adapter->have_msi)
-                       pci_disable_msi(pdev);
                return err;
        }
        netdev_dbg(netdev, "atl1e_request_irq OK\n");
index a923bc4d5a1f5540704423215a8f772bd3a8831a..4046f97378c29fa737d7123347ba1e940bd8e46d 100644 (file)
@@ -2760,6 +2760,7 @@ load_error2:
        bp->port.pmf = 0;
 load_error1:
        bnx2x_napi_disable(bp);
+       bnx2x_del_all_napi(bp);
 
        /* clear pf_load status, as it was already set */
        if (IS_PF(bp))
index 568205436a15f4d5b37528eb155df6e00ee3f07c..91ecd6a00d05a77453e4311eb5741064ba03ded2 100644 (file)
@@ -2139,12 +2139,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
                        break;
                default:
                        BNX2X_ERR("Non valid capability ID\n");
-                       rval = -EINVAL;
+                       rval = 1;
                        break;
                }
        } else {
                DP(BNX2X_MSG_DCB, "DCB disabled\n");
-               rval = -EINVAL;
+               rval = 1;
        }
 
        DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
@@ -2170,12 +2170,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
                        break;
                default:
                        BNX2X_ERR("Non valid TC-ID\n");
-                       rval = -EINVAL;
+                       rval = 1;
                        break;
                }
        } else {
                DP(BNX2X_MSG_DCB, "DCB disabled\n");
-               rval = -EINVAL;
+               rval = 1;
        }
 
        return rval;
@@ -2188,7 +2188,7 @@ static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
        return -EINVAL;
 }
 
-static u8  bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
+static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
 {
        struct bnx2x *bp = netdev_priv(netdev);
        DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
@@ -2390,12 +2390,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
                        break;
                default:
                        BNX2X_ERR("Non valid featrue-ID\n");
-                       rval = -EINVAL;
+                       rval = 1;
                        break;
                }
        } else {
                DP(BNX2X_MSG_DCB, "DCB disabled\n");
-               rval = -EINVAL;
+               rval = 1;
        }
 
        return rval;
@@ -2431,12 +2431,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
                        break;
                default:
                        BNX2X_ERR("Non valid featrue-ID\n");
-                       rval = -EINVAL;
+                       rval = 1;
                        break;
                }
        } else {
                DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
-               rval = -EINVAL;
+               rval = 1;
        }
 
        return rval;
index 364e37ecbc5cc013573ab63f5e1125f1e8cd750e..198f6f1c9ad5a342736f0d51528539ee5f22f775 100644 (file)
@@ -459,8 +459,9 @@ struct bnx2x_fw_port_stats_old {
 
 #define UPDATE_QSTAT(s, t) \
        do { \
-               qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \
                qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
+               qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
+                       + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
        } while (0)
 
 #define UPDATE_QSTAT_OLD(f) \
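
The reordered UPDATE_QSTAT macro above computes the low word first so a carry can be propagated into the high word whenever the 32-bit low word wraps. A minimal standalone sketch of the same idiom (names here are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    struct split64 { uint32_t lo, hi; };

    /* Add a 32-bit delta to a counter kept as separate hi/lo words,
     * carrying into hi when lo wraps around. */
    static void split64_add(struct split64 *c, uint32_t delta)
    {
            uint32_t old_lo = c->lo;

            c->lo = old_lo + delta;            /* may wrap modulo 2^32 */
            c->hi += (c->lo < old_lo) ? 1 : 0; /* wrap implies a carry */
    }

    int main(void)
    {
            struct split64 c = { .lo = 0xfffffff0u, .hi = 0 };

            split64_add(&c, 0x20);             /* wraps: lo=0x10, hi=1 */
            printf("hi=%u lo=0x%x\n", (unsigned)c.hi, (unsigned)c.lo);
            return 0;
    }
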
index 93729f9423588de05fa4d5e25dbcef99438fd17d..17a972734ba746d9218df4bf025218a91ebee947 100644 (file)
@@ -4130,6 +4130,14 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
                tp->link_config.active_speed = tp->link_config.speed;
                tp->link_config.active_duplex = tp->link_config.duplex;
 
+               if (tg3_asic_rev(tp) == ASIC_REV_5714) {
+                       /* With autoneg disabled, 5715 only links up when the
+                        * advertisement register has the configured speed
+                        * enabled.
+                        */
+                       tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
+               }
+
                bmcr = 0;
                switch (tp->link_config.speed) {
                default:
@@ -14596,8 +14604,11 @@ static void tg3_read_vpd(struct tg3 *tp)
                if (j + len > block_end)
                        goto partno;
 
-               memcpy(tp->fw_ver, &vpd_data[j], len);
-               strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
+               if (len >= sizeof(tp->fw_ver))
+                       len = sizeof(tp->fw_ver) - 1;
+               memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
+               snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
+                        &vpd_data[j]);
        }
 
 partno:
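
The tg3_read_vpd() hunk above replaces an unbounded memcpy()/strncat() pair with a length-clamped snprintf(), so an oversized VPD string can no longer overrun tp->fw_ver. A small standalone illustration of the bounded "%.*s" copy (buffer size and data are invented for the example):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char fw_ver[16];                        /* small destination buffer */
            const char vpd_data[] = "5719-v1.24-long-version-string";
            int len = (int)strlen(vpd_data);        /* length reported by VPD */

            if (len >= (int)sizeof(fw_ver))         /* clamp before formatting */
                    len = sizeof(fw_ver) - 1;

            /* "%.*s" copies at most len bytes; snprintf never overruns
             * fw_ver and always NUL-terminates. */
            snprintf(fw_ver, sizeof(fw_ver), "%.*s bc ", len, vpd_data);
            printf("'%s'\n", fw_ver);
            return 0;
    }
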
index a170065b597382ed9408239d98c36cec98a535a2..b0ebc9f6d55e9844e1ebe9367e9340681cba2a72 100644 (file)
 #define XGMAC_FLOW_CTRL_FCB_BPA        0x00000001      /* Flow Control Busy ... */
 
 /* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMTIM   0x00800000      /* PMT Interrupt Mask */
 #define XGMAC_INT_STAT_PMT     0x0080          /* PMT Interrupt Status */
 #define XGMAC_INT_STAT_LPI     0x0040          /* LPI Interrupt Status */
 
@@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev)
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
        writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
 
+       /* Mask power mgt interrupt */
+       writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
+
        /* XGMAC requires AXI bus init. This is a 'magic number' for now */
        writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
 
@@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
                struct sk_buff *skb;
                int frame_len;
 
+               if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
+                       break;
+
                entry = priv->rx_tail;
                p = priv->dma_rx + entry;
                if (desc_get_owner(p))
@@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
        unsigned int pmt = 0;
 
        if (mode & WAKE_MAGIC)
-               pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+               pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
        if (mode & WAKE_UCAST)
                pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
 
index 4ce62031f62fa3a51500d8a571feba701e2fa6ed..8049268ce0f25f50f99a5a0ef7137d0f97c1e763 100644 (file)
@@ -497,8 +497,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
 }
 
 #define EEPROM_STAT_ADDR   0x7bfc
-#define VPD_BASE           0
 #define VPD_LEN            512
+#define VPD_BASE           0x400
+#define VPD_BASE_OLD       0
 
 /**
  *     t4_seeprom_wp - enable/disable EEPROM write protection
@@ -524,7 +525,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
 int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
 {
        u32 cclk_param, cclk_val;
-       int i, ret;
+       int i, ret, addr;
        int ec, sn;
        u8 *vpd, csum;
        unsigned int vpdr_len, kw_offset, id_len;
@@ -533,7 +534,12 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
        if (!vpd)
                return -ENOMEM;
 
-       ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd);
+       ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
+       if (ret < 0)
+               goto out;
+       addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+       ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
        if (ret < 0)
                goto out;
 
index 8cdf02503d13d5bdd52348f77995c0ce47618f1c..9eada8e86078fe83981223e023bd45f3929b1a70 100644 (file)
@@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
                tmp = readl(reg);
 }
 
+/*
+ * Sleep, either by using msleep() or if we are suspending, then
+ * use mdelay() to sleep.
+ */
+static void dm9000_msleep(board_info_t *db, unsigned int ms)
+{
+       if (db->in_suspend)
+               mdelay(ms);
+       else
+               msleep(ms);
+}
+
+/* Read a word from phyxcer */
+static int
+dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
+{
+       board_info_t *db = netdev_priv(dev);
+       unsigned long flags;
+       unsigned int reg_save;
+       int ret;
+
+       mutex_lock(&db->addr_lock);
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       /* Save previous register address */
+       reg_save = readb(db->io_addr);
+
+       /* Fill the phyxcer register into REG_0C */
+       iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+       /* Issue phyxcer read command */
+       iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
+
+       writeb(reg_save, db->io_addr);
+       spin_unlock_irqrestore(&db->lock, flags);
+
+       dm9000_msleep(db, 1);           /* Wait read complete */
+
+       spin_lock_irqsave(&db->lock, flags);
+       reg_save = readb(db->io_addr);
+
+       iow(db, DM9000_EPCR, 0x0);      /* Clear phyxcer read command */
+
+       /* The read data keeps on REG_0D & REG_0E */
+       ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
+
+       /* restore the previous address */
+       writeb(reg_save, db->io_addr);
+       spin_unlock_irqrestore(&db->lock, flags);
+
+       mutex_unlock(&db->addr_lock);
+
+       dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
+       return ret;
+}
+
+/* Write a word to phyxcer */
+static void
+dm9000_phy_write(struct net_device *dev,
+                int phyaddr_unused, int reg, int value)
+{
+       board_info_t *db = netdev_priv(dev);
+       unsigned long flags;
+       unsigned long reg_save;
+
+       dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
+       mutex_lock(&db->addr_lock);
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       /* Save previous register address */
+       reg_save = readb(db->io_addr);
+
+       /* Fill the phyxcer register into REG_0C */
+       iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+       /* Fill the written data into REG_0D & REG_0E */
+       iow(db, DM9000_EPDRL, value);
+       iow(db, DM9000_EPDRH, value >> 8);
+
+       /* Issue phyxcer write command */
+       iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
+
+       writeb(reg_save, db->io_addr);
+       spin_unlock_irqrestore(&db->lock, flags);
+
+       dm9000_msleep(db, 1);           /* Wait write complete */
+
+       spin_lock_irqsave(&db->lock, flags);
+       reg_save = readb(db->io_addr);
+
+       iow(db, DM9000_EPCR, 0x0);      /* Clear phyxcer write command */
+
+       /* restore the previous address */
+       writeb(reg_save, db->io_addr);
+
+       spin_unlock_irqrestore(&db->lock, flags);
+       mutex_unlock(&db->addr_lock);
+}
+
 /* dm9000_set_io
  *
  * select the specified set of io routines to use with the
@@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev)
 
        iow(db, DM9000_GPCR, GPCR_GEP_CNTL);    /* Let GPIO0 output */
 
+       dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
+       dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
+
        ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
 
        /* if wol is needed, then always set NCR_WAKEEN otherwise we end
@@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev)
        return 0;
 }
 
-/*
- * Sleep, either by using msleep() or if we are suspending, then
- * use mdelay() to sleep.
- */
-static void dm9000_msleep(board_info_t *db, unsigned int ms)
-{
-       if (db->in_suspend)
-               mdelay(ms);
-       else
-               msleep(ms);
-}
-
-/*
- *   Read a word from phyxcer
- */
-static int
-dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
-{
-       board_info_t *db = netdev_priv(dev);
-       unsigned long flags;
-       unsigned int reg_save;
-       int ret;
-
-       mutex_lock(&db->addr_lock);
-
-       spin_lock_irqsave(&db->lock,flags);
-
-       /* Save previous register address */
-       reg_save = readb(db->io_addr);
-
-       /* Fill the phyxcer register into REG_0C */
-       iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
-       iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);   /* Issue phyxcer read command */
-
-       writeb(reg_save, db->io_addr);
-       spin_unlock_irqrestore(&db->lock,flags);
-
-       dm9000_msleep(db, 1);           /* Wait read complete */
-
-       spin_lock_irqsave(&db->lock,flags);
-       reg_save = readb(db->io_addr);
-
-       iow(db, DM9000_EPCR, 0x0);      /* Clear phyxcer read command */
-
-       /* The read data keeps on REG_0D & REG_0E */
-       ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
-
-       /* restore the previous address */
-       writeb(reg_save, db->io_addr);
-       spin_unlock_irqrestore(&db->lock,flags);
-
-       mutex_unlock(&db->addr_lock);
-
-       dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
-       return ret;
-}
-
-/*
- *   Write a word to phyxcer
- */
-static void
-dm9000_phy_write(struct net_device *dev,
-                int phyaddr_unused, int reg, int value)
-{
-       board_info_t *db = netdev_priv(dev);
-       unsigned long flags;
-       unsigned long reg_save;
-
-       dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
-       mutex_lock(&db->addr_lock);
-
-       spin_lock_irqsave(&db->lock,flags);
-
-       /* Save previous register address */
-       reg_save = readb(db->io_addr);
-
-       /* Fill the phyxcer register into REG_0C */
-       iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
-       /* Fill the written data into REG_0D & REG_0E */
-       iow(db, DM9000_EPDRL, value);
-       iow(db, DM9000_EPDRH, value >> 8);
-
-       iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);   /* Issue phyxcer write command */
-
-       writeb(reg_save, db->io_addr);
-       spin_unlock_irqrestore(&db->lock, flags);
-
-       dm9000_msleep(db, 1);           /* Wait write complete */
-
-       spin_lock_irqsave(&db->lock,flags);
-       reg_save = readb(db->io_addr);
-
-       iow(db, DM9000_EPCR, 0x0);      /* Clear phyxcer write command */
-
-       /* restore the previous address */
-       writeb(reg_save, db->io_addr);
-
-       spin_unlock_irqrestore(&db->lock, flags);
-       mutex_unlock(&db->addr_lock);
-}
-
 static void
 dm9000_shutdown(struct net_device *dev)
 {
@@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev)
        db->flags |= DM9000_PLATF_SIMPLE_PHY;
 #endif
 
-       dm9000_reset(db);
+       /* Fix a bug in dm9000_probe: instead of calling dm9000_reset(db),
+        * the 'NCR_MAC_LBK' bit is needed to keep the DM9000 FIFO stable
+        * during the probe stage.
+        */
+
+       iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
 
        /* try multiple times, DM9000 sometimes gets the read wrong */
        for (i = 0; i < 8; i++) {
index 55688bd1a3ef0bb83ed90391c33bce894a16da89..9ce058adababcf28c83f644819b0b501079ef898 100644 (file)
@@ -69,7 +69,9 @@
 #define NCR_WAKEEN          (1<<6)
 #define NCR_FCOL            (1<<4)
 #define NCR_FDX             (1<<3)
-#define NCR_LBK             (3<<1)
+
+#define NCR_RESERVED        (3<<1)
+#define NCR_MAC_LBK         (1<<1)
 #define NCR_RST                    (1<<0)
 
 #define NSR_SPEED           (1<<7)
 #define ISR_LNKCHNG            (1<<5)
 #define ISR_UNDERRUN           (1<<4)
 
+/* Davicom MII registers.
+ */
+
+#define MII_DM_DSPCR           0x1b    /* DSP Control Register */
+
+#define DSPCR_INIT_PARAM       0xE100  /* DSP init parameter */
+
 #endif /* _DM9000X_H_ */
 
index 0c37fb2cc867530c266902c9d1b79c095a63ba99..1df33c799c0012a9144811c90c39a063e92fb45e 100644 (file)
@@ -108,6 +108,7 @@ config TULIP_DM910X
 config DE4X5
        tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
        depends on (PCI || EISA)
+       depends on VIRT_TO_BUS || ALPHA || PPC || SPARC
        select CRC32
        ---help---
          This is support for the DIGITAL series of PCI/EISA Ethernet cards.
index 069a155d16ed40c57da5e70a8088eef767d0690b..f292c3aa423fbdeabb83ebdafa3b1699e988b19a 100644 (file)
@@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        return NETDEV_TX_OK;
 }
 
+/* Init RX & TX buffer descriptors
+ */
+static void fec_enet_bd_init(struct net_device *dev)
+{
+       struct fec_enet_private *fep = netdev_priv(dev);
+       struct bufdesc *bdp;
+       unsigned int i;
+
+       /* Initialize the receive buffer descriptors. */
+       bdp = fep->rx_bd_base;
+       for (i = 0; i < RX_RING_SIZE; i++) {
+
+               /* Initialize the BD for every fragment in the page. */
+               if (bdp->cbd_bufaddr)
+                       bdp->cbd_sc = BD_ENET_RX_EMPTY;
+               else
+                       bdp->cbd_sc = 0;
+               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+       }
+
+       /* Set the last buffer to wrap */
+       bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+       bdp->cbd_sc |= BD_SC_WRAP;
+
+       fep->cur_rx = fep->rx_bd_base;
+
+       /* ...and the same for transmit */
+       bdp = fep->tx_bd_base;
+       fep->cur_tx = bdp;
+       for (i = 0; i < TX_RING_SIZE; i++) {
+
+               /* Initialize the BD for every fragment in the page. */
+               bdp->cbd_sc = 0;
+               if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
+                       dev_kfree_skb_any(fep->tx_skbuff[i]);
+                       fep->tx_skbuff[i] = NULL;
+               }
+               bdp->cbd_bufaddr = 0;
+               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+       }
+
+       /* Set the last buffer to wrap */
+       bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+       bdp->cbd_sc |= BD_SC_WRAP;
+       fep->dirty_tx = bdp;
+}
+
 /* This function is called to start or restart the FEC during a link
  * change.  This only happens when switching between half and full
  * duplex.
@@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex)
        /* Set maximum receive buffer size. */
        writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
 
+       fec_enet_bd_init(ndev);
+
        /* Set receive and transmit descriptor base. */
        writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
        if (fep->bufdesc_ex)
@@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex)
                writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
                        * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
 
-       fep->cur_rx = fep->rx_bd_base;
 
        for (i = 0; i <= TX_RING_MOD_MASK; i++) {
                if (fep->tx_skbuff[i]) {
@@ -934,24 +982,28 @@ static void fec_enet_adjust_link(struct net_device *ndev)
                goto spin_unlock;
        }
 
-       /* Duplex link change */
        if (phy_dev->link) {
-               if (fep->full_duplex != phy_dev->duplex) {
-                       fec_restart(ndev, phy_dev->duplex);
-                       /* prevent unnecessary second fec_restart() below */
+               if (!fep->link) {
                        fep->link = phy_dev->link;
                        status_change = 1;
                }
-       }
 
-       /* Link on or off change */
-       if (phy_dev->link != fep->link) {
-               fep->link = phy_dev->link;
-               if (phy_dev->link)
+               if (fep->full_duplex != phy_dev->duplex)
+                       status_change = 1;
+
+               if (phy_dev->speed != fep->speed) {
+                       fep->speed = phy_dev->speed;
+                       status_change = 1;
+               }
+
+               /* if any of the above changed restart the FEC */
+               if (status_change)
                        fec_restart(ndev, phy_dev->duplex);
-               else
+       } else {
+               if (fep->link) {
                        fec_stop(ndev);
-               status_change = 1;
+                       status_change = 1;
+               }
        }
 
 spin_unlock:
@@ -1328,7 +1380,7 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 static void fec_enet_free_buffers(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       int i;
+       unsigned int i;
        struct sk_buff *skb;
        struct bufdesc  *bdp;
 
@@ -1352,7 +1404,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 static int fec_enet_alloc_buffers(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       int i;
+       unsigned int i;
        struct sk_buff *skb;
        struct bufdesc  *bdp;
 
@@ -1437,6 +1489,7 @@ fec_enet_close(struct net_device *ndev)
        struct fec_enet_private *fep = netdev_priv(ndev);
 
        /* Don't know what to do yet. */
+       napi_disable(&fep->napi);
        fep->opened = 0;
        netif_stop_queue(ndev);
        fec_stop(ndev);
@@ -1592,8 +1645,6 @@ static int fec_enet_init(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
        struct bufdesc *cbd_base;
-       struct bufdesc *bdp;
-       int i;
 
        /* Allocate memory for buffer descriptors. */
        cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@@ -1603,6 +1654,7 @@ static int fec_enet_init(struct net_device *ndev)
                return -ENOMEM;
        }
 
+       memset(cbd_base, 0, PAGE_SIZE);
        spin_lock_init(&fep->hw_lock);
 
        fep->netdev = ndev;
@@ -1626,35 +1678,6 @@ static int fec_enet_init(struct net_device *ndev)
        writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
        netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
 
-       /* Initialize the receive buffer descriptors. */
-       bdp = fep->rx_bd_base;
-       for (i = 0; i < RX_RING_SIZE; i++) {
-
-               /* Initialize the BD for every fragment in the page. */
-               bdp->cbd_sc = 0;
-               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
-       }
-
-       /* Set the last buffer to wrap */
-       bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
-       bdp->cbd_sc |= BD_SC_WRAP;
-
-       /* ...and the same for transmit */
-       bdp = fep->tx_bd_base;
-       fep->cur_tx = bdp;
-       for (i = 0; i < TX_RING_SIZE; i++) {
-
-               /* Initialize the BD for every fragment in the page. */
-               bdp->cbd_sc = 0;
-               bdp->cbd_bufaddr = 0;
-               bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
-       }
-
-       /* Set the last buffer to wrap */
-       bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
-       bdp->cbd_sc |= BD_SC_WRAP;
-       fep->dirty_tx = bdp;
-
        fec_restart(ndev, 0);
 
        return 0;
index f5390071efd0622832686d753f501c49039906c2..eb4372962839c216ed1ad68e29ba09a7aaf487ea 100644 (file)
@@ -240,6 +240,7 @@ struct fec_enet_private {
        phy_interface_t phy_interface;
        int     link;
        int     full_duplex;
+       int     speed;
        struct  completion mdio_done;
        int     irq[FEC_IRQ_NUM];
        int     bufdesc_ex;
index 1f17ca0f22019d8350af2affdcc6b00739ce966f..0d8df400a4798c06a1345773b9a0f821b4ab7cb2 100644 (file)
@@ -128,6 +128,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
 
        spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 }
+EXPORT_SYMBOL(fec_ptp_start_cyclecounter);
 
 /**
  * fec_ptp_adjfreq - adjust ptp cycle frequency
@@ -318,6 +319,7 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
            -EFAULT : 0;
 }
+EXPORT_SYMBOL(fec_ptp_ioctl);
 
 /**
  * fec_time_keep - call timecounter_read every second to avoid timer overrun
@@ -383,3 +385,4 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
                pr_info("registered PHC device on %s\n", ndev->name);
        }
 }
+EXPORT_SYMBOL(fec_ptp_init);
index 43462d596a4e5b04050793d66e26b0172a4f78de..ffd287196bf875560e40e5330c68e0a84b50e930 100644 (file)
@@ -1053,6 +1053,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
                txdr->buffer_info[i].dma =
                        dma_map_single(&pdev->dev, skb->data, skb->len,
                                       DMA_TO_DEVICE);
+               if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) {
+                       ret_val = 4;
+                       goto err_nomem;
+               }
                tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
                tx_desc->lower.data = cpu_to_le32(skb->len);
                tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1069,7 +1073,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
        rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
                                    GFP_KERNEL);
        if (!rxdr->buffer_info) {
-               ret_val = 4;
+               ret_val = 5;
                goto err_nomem;
        }
 
@@ -1077,7 +1081,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
        rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
                                        GFP_KERNEL);
        if (!rxdr->desc) {
-               ret_val = 5;
+               ret_val = 6;
                goto err_nomem;
        }
        memset(rxdr->desc, 0, rxdr->size);
@@ -1101,7 +1105,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 
                skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
                if (!skb) {
-                       ret_val = 6;
+                       ret_val = 7;
                        goto err_nomem;
                }
                skb_reserve(skb, NET_IP_ALIGN);
@@ -1110,6 +1114,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
                rxdr->buffer_info[i].dma =
                        dma_map_single(&pdev->dev, skb->data,
                                       E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
+               if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
+                       ret_val = 8;
+                       goto err_nomem;
+               }
                rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
                memset(skb->data, 0x00, skb->len);
        }
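
The e1000/e1000e hunks above all add the same defensive pattern: every dma_map_single()/dma_map_page() result is checked with dma_mapping_error() before the address is handed to hardware, and the buffer is accounted as a failed allocation otherwise. A standalone model of that pattern (map_buffer() and mapping_failed() are stand-ins for the DMA API, not real kernel calls):

    #include <stdint.h>
    #include <stdio.h>

    #define BAD_ADDR ((uint64_t)-1)

    /* Stand-in for dma_map_single(): may fail and return a poisoned address. */
    static uint64_t map_buffer(const void *buf, int fail)
    {
            return fail ? BAD_ADDR : (uint64_t)(uintptr_t)buf;
    }

    /* Stand-in for dma_mapping_error(). */
    static int mapping_failed(uint64_t addr)
    {
            return addr == BAD_ADDR;
    }

    int main(void)
    {
            char rx_buf[2048];
            unsigned int alloc_rx_buff_failed = 0;
            uint64_t dma = map_buffer(rx_buf, 1 /* simulate a mapping failure */);

            if (mapping_failed(dma)) {
                    /* Don't post the descriptor; account the failure and stop. */
                    alloc_rx_buff_failed++;
                    printf("mapping failed (%u so far), descriptor not posted\n",
                           alloc_rx_buff_failed);
                    return 1;
            }
            /* ...otherwise write 'dma' into the RX descriptor... */
            return 0;
    }
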
index 948b86ffa4f027753eec3f5b57b45b90277947ab..7e615e2bf7e6a4e96971945d33e3e332301e88bb 100644 (file)
@@ -848,11 +848,16 @@ check_page:
                        }
                }
 
-               if (!buffer_info->dma)
+               if (!buffer_info->dma) {
                        buffer_info->dma = dma_map_page(&pdev->dev,
                                                        buffer_info->page, 0,
                                                        PAGE_SIZE,
                                                        DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+                               adapter->alloc_rx_buff_failed++;
+                               break;
+                       }
+               }
 
                rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
                rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
index b64542acfa3449bda2ee5c793d83de6cda1f097c..12b1d84808084269971e5f83f55bd8b0f3fb8240 100644 (file)
@@ -1818,27 +1818,32 @@ out:
  **/
 void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
 {
-       u32 dtxswc;
+       u32 reg_val, reg_offset;
 
        switch (hw->mac.type) {
        case e1000_82576:
+               reg_offset = E1000_DTXSWC;
+               break;
        case e1000_i350:
-               dtxswc = rd32(E1000_DTXSWC);
-               if (enable) {
-                       dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
-                                  E1000_DTXSWC_VLAN_SPOOF_MASK);
-                       /* The PF can spoof - it has to in order to
-                        * support emulation mode NICs */
-                       dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
-               } else {
-                       dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
-                                   E1000_DTXSWC_VLAN_SPOOF_MASK);
-               }
-               wr32(E1000_DTXSWC, dtxswc);
+               reg_offset = E1000_TXSWC;
                break;
        default:
-               break;
+               return;
+       }
+
+       reg_val = rd32(reg_offset);
+       if (enable) {
+               reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+                            E1000_DTXSWC_VLAN_SPOOF_MASK);
+               /* The PF can spoof - it has to in order to
+                * support emulation mode NICs
+                */
+               reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+       } else {
+               reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+                            E1000_DTXSWC_VLAN_SPOOF_MASK);
        }
+       wr32(reg_offset, reg_val);
 }
 
 /**
index 4623502054d5347b2723811fe1cdca0d1e93ae0e..0478a1abe54110d1374c6d77d950f6a4d7bf0327 100644 (file)
@@ -39,7 +39,7 @@
 #include <linux/pci.h>
 
 #ifdef CONFIG_IGB_HWMON
-struct i2c_board_info i350_sensor_info = {
+static struct i2c_board_info i350_sensor_info = {
        I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
 };
 
index 4dbd62968c7a18090a61029249c04f6ee3b1335e..8496adfc6a685580f6ec1c50b86f0fed62b2b121 100644 (file)
@@ -2542,8 +2542,8 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
        if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
                return;
 
-       igb_enable_sriov(pdev, max_vfs);
        pci_sriov_set_totalvfs(pdev, 7);
+       igb_enable_sriov(pdev, max_vfs);
 
 #endif /* CONFIG_PCI_IOV */
 }
@@ -2652,7 +2652,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
                if (max_vfs > 7) {
                        dev_warn(&pdev->dev,
                                 "Maximum of 7 VFs per PF, using max\n");
-                       adapter->vfs_allocated_count = 7;
+                       max_vfs = adapter->vfs_allocated_count = 7;
                } else
                        adapter->vfs_allocated_count = max_vfs;
                if (adapter->vfs_allocated_count)
index 0987822359f00590d7f36bc5e126c8a4f20ee1ac..0a237507ee85008e9e11c703b07f58730202e072 100644 (file)
@@ -740,7 +740,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
        case e1000_82576:
                snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
                adapter->ptp_caps.owner = THIS_MODULE;
-               adapter->ptp_caps.max_adj = 1000000000;
+               adapter->ptp_caps.max_adj = 999999881;
                adapter->ptp_caps.n_ext_ts = 0;
                adapter->ptp_caps.pps = 0;
                adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
index ea48083734359e52b229f592e84abf7654a82523..b5f94abe3cffc9c406f8e2aaa27ed9cd77ad6542 100644 (file)
@@ -2159,6 +2159,10 @@ map_skb:
                                                  skb->data,
                                                  adapter->rx_buffer_len,
                                                  DMA_FROM_DEVICE);
+               if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+                       adapter->alloc_rx_buff_failed++;
+                       break;
+               }
 
                rx_desc = IXGB_RX_DESC(*rx_ring, i);
                rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2168,7 +2172,8 @@ map_skb:
                rx_desc->status = 0;
 
 
-               if (++i == rx_ring->count) i = 0;
+               if (++i == rx_ring->count)
+                       i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }
 
index c3db6cd69b68b0135f42908fd738bd7c9fd4fddc..2b6cb5ca48eefe8f0a075b1166f24dff7aaaaa83 100644 (file)
@@ -944,9 +944,17 @@ free_queue_irqs:
                free_irq(adapter->msix_entries[vector].vector,
                         adapter->q_vector[vector]);
        }
-       pci_disable_msix(adapter->pdev);
-       kfree(adapter->msix_entries);
-       adapter->msix_entries = NULL;
+       /* This failure is non-recoverable - it indicates the system is
+        * out of MSIX vector resources and the VF driver cannot run
+        * without them.  Set the number of msix vectors to zero
+        * indicating that not enough can be allocated.  The error
+        * will be returned to the user indicating device open failed.
+        * Any further attempts to force the driver to open will also
+        * fail.  The only way to recover is to unload the driver and
+        * reload it again.  If the system has recovered some MSIX
+        * vectors then it may succeed.
+        */
+       adapter->num_msix_vectors = 0;
        return err;
 }
 
@@ -2572,6 +2580,15 @@ static int ixgbevf_open(struct net_device *netdev)
        struct ixgbe_hw *hw = &adapter->hw;
        int err;
 
+       /* A previous failure to open the device because of a lack of
+        * available MSIX vector resources may have reset the number
+        * of msix vectors variable to zero.  The only way to recover
+        * is to unload/reload the driver and hope that the system has
+        * been able to recover some MSIX vector resources.
+        */
+       if (!adapter->num_msix_vectors)
+               return -ENOMEM;
+
        /* disallow open during test */
        if (test_bit(__IXGBEVF_TESTING, &adapter->state))
                return -EBUSY;
@@ -2628,7 +2645,6 @@ static int ixgbevf_open(struct net_device *netdev)
 
 err_req_irq:
        ixgbevf_down(adapter);
-       ixgbevf_free_irq(adapter);
 err_setup_rx:
        ixgbevf_free_all_rx_resources(adapter);
 err_setup_tx:
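
The ixgbevf comments above describe a fail-fast contract: when MSI-X vector allocation fails irrecoverably, the driver zeroes its vector count so every later open() attempt returns -ENOMEM immediately instead of retrying. A tiny standalone model of that contract (all names are illustrative):

    #include <errno.h>
    #include <stdio.h>

    static int num_msix_vectors;    /* 0 means "no vectors, don't even try" */

    static int request_vectors(int want, int available)
    {
            if (available < want) {
                    num_msix_vectors = 0;   /* poison: device cannot run */
                    return -ENOMEM;
            }
            num_msix_vectors = want;
            return 0;
    }

    static int dev_open(void)
    {
            /* A previous failure left the count at zero: refuse to open. */
            if (!num_msix_vectors)
                    return -ENOMEM;
            return 0;
    }

    int main(void)
    {
            request_vectors(4, 1);                  /* simulate exhaustion */
            printf("open -> %d\n", dev_open());     /* fails fast with -ENOMEM */
            return 0;
    }
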
index 6a2127489af78e718264f202737ba0667770ba1b..bfdb06860397e720a848d6e8cab7b324ded6ea1c 100644 (file)
@@ -769,7 +769,7 @@ ltq_etop_probe(struct platform_device *pdev)
        return 0;
 
 err_free:
-       kfree(dev);
+       free_netdev(dev);
 err_out:
        return err;
 }
index fc07ca35721b29ba07dba97bf6e52dc74f446534..6a0e671fcecd6e399952056646130578191ba57d 100644 (file)
@@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
                sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
                sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
 
-               tp = space - 2048/8;
+               tp = space - 8192/8;
                sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
                sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
        } else {
index 615ac63ea8603a802896d2bd810d4c2ac874204c..ec6dcd80152bdd46550b0aaf0865d00a01261a93 100644 (file)
@@ -2074,7 +2074,7 @@ enum {
        GM_IS_RX_FF_OR  = 1<<1, /* Receive FIFO Overrun */
        GM_IS_RX_COMPL  = 1<<0, /* Frame Reception Complete */
 
-#define GMAC_DEF_MSK     GM_IS_TX_FF_UR
+#define GMAC_DEF_MSK     (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
 };
 
 /*     GMAC_LINK_CTRL  16 bit  GMAC Link Control Reg (YUKON only) */
index 995d4b6d5c1e924a870f3da67c6ce1b543b99a83..f278b10ef7140a6e195056e11a12075de02afabc 100644 (file)
@@ -1637,6 +1637,17 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
        /* Flush multicast filter */
        mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
 
+       /* Remove flow steering rules for the port*/
+       if (mdev->dev->caps.steering_mode ==
+           MLX4_STEERING_MODE_DEVICE_MANAGED) {
+               ASSERT_RTNL();
+               list_for_each_entry_safe(flow, tmp_flow,
+                                        &priv->ethtool_list, list) {
+                       mlx4_flow_detach(mdev->dev, flow->id);
+                       list_del(&flow->list);
+               }
+       }
+
        mlx4_en_destroy_drop_qp(priv);
 
        /* Free TX Rings */
@@ -1657,17 +1668,6 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
        if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
                mdev->mac_removed[priv->port] = 1;
 
-       /* Remove flow steering rules for the port*/
-       if (mdev->dev->caps.steering_mode ==
-           MLX4_STEERING_MODE_DEVICE_MANAGED) {
-               ASSERT_RTNL();
-               list_for_each_entry_safe(flow, tmp_flow,
-                                        &priv->ethtool_list, list) {
-                       mlx4_flow_detach(mdev->dev, flow->id);
-                       list_del(&flow->list);
-               }
-       }
-
        /* Free RX Rings */
        for (i = 0; i < priv->rx_ring_num; i++) {
                mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
index 251ae2f9311680cb69eaada07cf3ad8a3cf58354..8e3123a1df886de6b0afb2c60397ecd28be3e4b9 100644 (file)
@@ -771,7 +771,7 @@ int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
        struct mlx4_slave_event_eq_info *event_eq =
                priv->mfunc.master.slave_state[slave].event_eq;
        u32 in_modifier = vhcr->in_modifier;
-       u32 eqn = in_modifier & 0x1FF;
+       u32 eqn = in_modifier & 0x3FF;
        u64 in_param =  vhcr->in_param;
        int err = 0;
        int i;
index 2995687f1aee3692ef5c164cff02870607f76224..1391b52f443aa5489ec31ee7e155b087f0372b0c 100644 (file)
@@ -99,6 +99,7 @@ struct res_qp {
        struct list_head        mcg_list;
        spinlock_t              mcg_spl;
        int                     local_qpn;
+       atomic_t                ref_count;
 };
 
 enum res_mtt_states {
@@ -197,6 +198,7 @@ enum res_fs_rule_states {
 
 struct res_fs_rule {
        struct res_common       com;
+       int                     qpn;
 };
 
 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
@@ -355,7 +357,7 @@ static int mpt_mask(struct mlx4_dev *dev)
        return dev->caps.num_mpts - 1;
 }
 
-static void *find_res(struct mlx4_dev *dev, int res_id,
+static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -447,6 +449,7 @@ static struct res_common *alloc_qp_tr(int id)
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
+       atomic_set(&ret->ref_count, 0);
 
        return &ret->com;
 }
@@ -554,7 +557,7 @@ static struct res_common *alloc_xrcdn_tr(int id)
        return &ret->com;
 }
 
-static struct res_common *alloc_fs_rule_tr(u64 id)
+static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
 {
        struct res_fs_rule *ret;
 
@@ -564,7 +567,7 @@ static struct res_common *alloc_fs_rule_tr(u64 id)
 
        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
-
+       ret->qpn = qpn;
        return &ret->com;
 }
 
@@ -602,7 +605,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
-               ret = alloc_fs_rule_tr(id);
+               ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
@@ -671,10 +674,14 @@ undo:
 
 static int remove_qp_ok(struct res_qp *res)
 {
-       if (res->com.state == RES_QP_BUSY)
+       if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
+           !list_empty(&res->mcg_list)) {
+               pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
+                      res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
-       else if (res->com.state != RES_QP_RESERVED)
+       } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
+       }
 
        return 0;
 }
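
The resource-tracker changes above pin a QP with an atomic reference count for as long as a flow-steering rule points at it, and remove_qp_ok() now refuses removal while that count is non-zero. A minimal standalone sketch of the guard (C11 atomics; structure and error values are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    struct qp_res {
            atomic_int ref_count;   /* one reference per attached steering rule */
    };

    static int remove_qp_ok(struct qp_res *qp)
    {
            /* Refuse removal while any rule still references the QP. */
            if (atomic_load(&qp->ref_count))
                    return -1;      /* stands in for -EBUSY */
            return 0;
    }

    int main(void)
    {
            struct qp_res qp;

            atomic_init(&qp.ref_count, 0);
            atomic_fetch_add(&qp.ref_count, 1);          /* rule attached */
            printf("remove while attached: %d\n", remove_qp_ok(&qp));
            atomic_fetch_sub(&qp.ref_count, 1);          /* rule detached */
            printf("remove after detach:   %d\n", remove_qp_ok(&qp));
            return 0;
    }
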
@@ -3124,6 +3131,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
        struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
        int err;
        int qpn;
+       struct res_qp *rqp;
        struct mlx4_net_trans_rule_hw_ctrl *ctrl;
        struct _rule_hw  *rule_header;
        int header_id;
@@ -3134,7 +3142,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 
        ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
        qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
-       err = get_res(dev, slave, qpn, RES_QP, NULL);
+       err = get_res(dev, slave, qpn, RES_QP, &rqp);
        if (err) {
                pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
                return err;
@@ -3175,14 +3183,16 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
        if (err)
                goto err_put;
 
-       err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
+       err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
        if (err) {
                mlx4_err(dev, "Fail to add flow steering resources.\n ");
                /* detach rule*/
                mlx4_cmd(dev, vhcr->out_param, 0, 0,
                         MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                         MLX4_CMD_NATIVE);
+               goto err_put;
        }
+       atomic_inc(&rqp->ref_count);
 err_put:
        put_res(dev, slave, qpn, RES_QP);
        return err;
@@ -3195,20 +3205,35 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
                                         struct mlx4_cmd_info *cmd)
 {
        int err;
+       struct res_qp *rqp;
+       struct res_fs_rule *rrule;
 
        if (dev->caps.steering_mode !=
            MLX4_STEERING_MODE_DEVICE_MANAGED)
                return -EOPNOTSUPP;
 
+       err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
+       if (err)
+               return err;
+       /* Release the rule form busy state before removal */
+       put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
+       err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
+       if (err)
+               return err;
+
        err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
        if (err) {
                mlx4_err(dev, "Fail to remove flow steering resources.\n ");
-               return err;
+               goto out;
        }
 
        err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                       MLX4_CMD_NATIVE);
+       if (!err)
+               atomic_dec(&rqp->ref_count);
+out:
+       put_res(dev, slave, rrule->qpn, RES_QP);
        return err;
 }
 
@@ -3806,6 +3831,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
        /*VLAN*/
        rem_slave_macs(dev, slave);
+       rem_slave_fs_rule(dev, slave);
        rem_slave_qps(dev, slave);
        rem_slave_srqs(dev, slave);
        rem_slave_cqs(dev, slave);
@@ -3814,6 +3840,5 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
        rem_slave_mtts(dev, slave);
        rem_slave_counters(dev, slave);
        rem_slave_xrcdns(dev, slave);
-       rem_slave_fs_rule(dev, slave);
        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
 }
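
The mlx4 hunks above tie each device-managed flow steering rule to its QP with a reference count: the attach wrapper increments rqp->ref_count once the rule resource is registered, the detach wrapper decrements it only after the firmware detach command succeeds, and remove_qp_ok() now refuses to free a QP while the count is non-zero or its mcg_list is non-empty. Moving rem_slave_fs_rule() ahead of rem_slave_qps() follows from the same ordering: rules must drop their QP references before the QPs can be torn down. A minimal user-space sketch of that lifecycle, using C11 atomics and invented names (qp_res, attach_rule, detach_rule, remove_qp), not the driver's API:

#include <stdatomic.h>
#include <stdio.h>
#include <errno.h>

struct qp_res {
    atomic_int ref_count;   /* rules currently pointing at this QP */
    int busy;               /* set while a command holds the resource */
};

/* attach: take a reference only after the rule is fully registered */
static void attach_rule(struct qp_res *qp) { atomic_fetch_add(&qp->ref_count, 1); }

/* detach: drop the reference only if the detach command succeeded */
static void detach_rule(struct qp_res *qp, int cmd_err)
{
    if (!cmd_err)
        atomic_fetch_sub(&qp->ref_count, 1);
}

/* removal is refused while the QP is busy or still referenced */
static int remove_qp(struct qp_res *qp)
{
    if (qp->busy || atomic_load(&qp->ref_count)) {
        fprintf(stderr, "cannot remove qp: busy=%d ref_count=%d\n",
                qp->busy, atomic_load(&qp->ref_count));
        return -EBUSY;
    }
    return 0;
}

int main(void)
{
    struct qp_res qp = { .ref_count = 0, .busy = 0 };
    attach_rule(&qp);
    printf("remove while attached: %d\n", remove_qp(&qp));  /* -EBUSY */
    detach_rule(&qp, 0);
    printf("remove after detach:   %d\n", remove_qp(&qp));  /* 0 */
    return 0;
}
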
index 33bcb63d56a2355c93dbd1306f24bce816eab23c..8fb481252e2cb653f45f7543f7e41c70cef9401e 100644 (file)
@@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
        for (; rxfc != 0; rxfc--) {
                rxh = ks8851_rdreg32(ks, KS_RXFHSR);
                rxstat = rxh & 0xffff;
-               rxlen = rxh >> 16;
+               rxlen = (rxh >> 16) & 0xfff;
 
                netif_dbg(ks, rx_status, ks->netdev,
                          "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
index c4122c86f829293cc60d6dd48c72602d7dded49f..efa29b712d5f1bdc9793648ebd76f9201cc3f99a 100644 (file)
@@ -1472,7 +1472,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
        }
        platform_set_drvdata(pdev, ndev);
 
-       if (lpc_mii_init(pldat) != 0)
+       ret = lpc_mii_init(pldat);
+       if (ret)
                goto err_out_unregister_netdev;
 
        netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
index 39ab4d09faaa2eb500748466ba6ab0fbe4074ac9..73ce7dd6b9544d470a6e14b163f7ae751d3d749d 100644 (file)
@@ -1726,9 +1726,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
 
                        skb->protocol = eth_type_trans(skb, netdev);
                        if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
-                               skb->ip_summed = CHECKSUM_NONE;
-                       else
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       else
+                               skb->ip_summed = CHECKSUM_NONE;
 
                        napi_gro_receive(&adapter->napi, skb);
                        (*work_done)++;
index 33e96176e4d82167a2d62a51f1f1eb5d711f7375..6ed333fe5c04cd36618c465c0a3bec8dafbe8fcc 100644 (file)
@@ -1216,10 +1216,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                if (felic_stat & ECSR_LCHNG) {
                        /* Link Changed */
                        if (mdp->cd->no_psr || mdp->no_ether_link) {
-                               if (mdp->link == PHY_DOWN)
-                                       link_stat = 0;
-                               else
-                                       link_stat = PHY_ST_LINK;
+                               goto ignore_link;
                        } else {
                                link_stat = (sh_eth_read(ndev, PSR));
                                if (mdp->ether_link_active_low)
@@ -1242,6 +1239,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
                }
        }
 
+ignore_link:
        if (intr_status & EESR_TWB) {
                /* Write back end. Unused write back interrupt */
                if (intr_status & EESR_TABT)    /* Transmit Abort int */
@@ -1326,12 +1324,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_cpu_data *cd = mdp->cd;
        irqreturn_t ret = IRQ_NONE;
-       u32 intr_status = 0;
+       unsigned long intr_status;
 
        spin_lock(&mdp->lock);
 
-       /* Get interrpt stat */
+       /* Get interrupt status */
        intr_status = sh_eth_read(ndev, EESR);
+       /* Mask it with the interrupt mask, forcing the ECI interrupt to always
+        * be enabled since it's the one that comes through regardless of the mask,
+        * and we need to fully handle it in sh_eth_error() in order to quench
+        * it as it doesn't get cleared by just writing 1 to the ECI bit...
+        */
+       intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;
        /* Clear interrupt */
        if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
                        EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
@@ -1373,7 +1377,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
        struct phy_device *phydev = mdp->phydev;
        int new_state = 0;
 
-       if (phydev->link != PHY_DOWN) {
+       if (phydev->link) {
                if (phydev->duplex != mdp->duplex) {
                        new_state = 1;
                        mdp->duplex = phydev->duplex;
@@ -1387,17 +1391,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
                        if (mdp->cd->set_rate)
                                mdp->cd->set_rate(ndev);
                }
-               if (mdp->link == PHY_DOWN) {
+               if (!mdp->link) {
                        sh_eth_write(ndev,
                                (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
                        new_state = 1;
                        mdp->link = phydev->link;
+                       if (mdp->cd->no_psr || mdp->no_ether_link)
+                               sh_eth_rcv_snd_enable(ndev);
                }
        } else if (mdp->link) {
                new_state = 1;
-               mdp->link = PHY_DOWN;
+               mdp->link = 0;
                mdp->speed = 0;
                mdp->duplex = -1;
+               if (mdp->cd->no_psr || mdp->no_ether_link)
+                       sh_eth_rcv_snd_disable(ndev);
        }
 
        if (new_state && netif_msg_link(mdp))
@@ -1414,7 +1422,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                mdp->mii_bus->id , mdp->phy_id);
 
-       mdp->link = PHY_DOWN;
+       mdp->link = 0;
        mdp->speed = 0;
        mdp->duplex = -1;
 
@@ -2220,6 +2228,7 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
 /* MDIO bus release function */
 static int sh_mdio_release(struct net_device *ndev)
 {
+       struct sh_eth_private *mdp = netdev_priv(ndev);
        struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
 
        /* unregister mdio bus */
@@ -2234,6 +2243,9 @@ static int sh_mdio_release(struct net_device *ndev)
        /* free bitbang info */
        free_mdio_bitbang(bus);
 
+       /* free bitbang memory */
+       kfree(mdp->bitbang);
+
        return 0;
 }
 
@@ -2262,6 +2274,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
        bitbang->ctrl.ops = &bb_ops;
 
        /* MII controller setting */
+       mdp->bitbang = bitbang;
        mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
        if (!mdp->mii_bus) {
                ret = -ENOMEM;
@@ -2441,6 +2454,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
                }
                mdp->tsu_addr = ioremap(rtsu->start,
                                        resource_size(rtsu));
+               if (mdp->tsu_addr == NULL) {
+                       ret = -ENOMEM;
+                       dev_err(&pdev->dev, "TSU ioremap failed.\n");
+                       goto out_release;
+               }
                mdp->port = devno % 2;
                ndev->features = NETIF_F_HW_VLAN_FILTER;
        }
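
The sh_eth hunks above make several related changes: the raw EESR status is masked with the currently enabled interrupts while the ECI bit stays visible regardless of the mask, link state moves from enum phy_state to a plain int, the TSU ioremap() result is checked, and the bitbang controller structure is finally freed in sh_mdio_release(). The interrupt-masking idea is the most reusable part; a self-contained sketch with invented bit names, not the sh_eth register set:

#include <stdint.h>
#include <stdio.h>

#define IRQ_RX_DONE  (1u << 0)
#define IRQ_TX_DONE  (1u << 1)
#define IRQ_LINK_CHG (1u << 2)   /* fires regardless of the mask, like ECI */

static uint32_t pending(uint32_t raw_status, uint32_t enabled_mask)
{
    /* only act on sources we enabled, plus the always-on link event */
    return raw_status & (enabled_mask | IRQ_LINK_CHG);
}

int main(void)
{
    uint32_t raw = IRQ_RX_DONE | IRQ_TX_DONE | IRQ_LINK_CHG;
    uint32_t enabled = IRQ_RX_DONE;                 /* TX interrupt masked off */
    printf("handle 0x%x\n", (unsigned)pending(raw, enabled)); /* 0x5: RX + link */
    return 0;
}
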
index bae84fd2e73a7419504d8b29b195c0a118711b8a..828be4515008145507f59a5bbe34ac64eb28090b 100644 (file)
@@ -705,6 +705,7 @@ struct sh_eth_private {
        const u16 *reg_offset;
        void __iomem *addr;
        void __iomem *tsu_addr;
+       struct bb_info *bitbang;
        u32 num_rx_ring;
        u32 num_tx_ring;
        dma_addr_t rx_desc_dma;
@@ -722,7 +723,7 @@ struct sh_eth_private {
        u32 phy_id;                                     /* PHY ID */
        struct mii_bus *mii_bus;        /* MDIO bus control */
        struct phy_device *phydev;      /* PHY device control */
-       enum phy_state link;
+       int link;
        phy_interface_t phy_interface;
        int msg_enable;
        int speed;
index 0ad790cc473cb1697e71bd422af0e0086c0242a9..eaa8e874a3cb0580cbe68ca40ba5004942a91e5f 100644 (file)
@@ -376,7 +376,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
                return false;
 
        tx_queue->empty_read_count = 0;
-       return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+       return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
+               && tx_queue->write_count - write_count == 1;
 }
 
 /* For each entry inserted into the software descriptor ring, create a
index 01ffbc48698298d1678b478599c1b46f2ee55ef9..80cad06e5eb21337b111bc32af0376e6da2d7065 100644 (file)
@@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status)
         * queue is stopped then start the queue as we have free desc for tx
         */
        if (unlikely(netif_queue_stopped(ndev)))
-               netif_start_queue(ndev);
+               netif_wake_queue(ndev);
        cpts_tx_timestamp(priv->cpts, skb);
        priv->stats.tx_packets++;
        priv->stats.tx_bytes += len;
@@ -905,7 +905,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
        /* If there is no more tx desc left free then we need to
         * tell the kernel to stop sending us tx frames.
         */
-       if (unlikely(cpdma_check_free_tx_desc(priv->txch)))
+       if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
                netif_stop_queue(ndev);
 
        return NETDEV_TX_OK;
@@ -1364,7 +1364,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                struct platform_device *mdio;
 
                parp = of_get_property(slave_node, "phy_id", &lenp);
-               if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
+               if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
                        pr_err("Missing slave[%d] phy_id property\n", i);
                        ret = -EINVAL;
                        goto error_ret;
index 52c05366599aca9338267d013ca814abe5b61e78..72300bc9e3783842ef3608be24f0f33d2ed0f69b 100644 (file)
@@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status)
         * queue is stopped then start the queue as we have free desc for tx
         */
        if (unlikely(netif_queue_stopped(ndev)))
-               netif_start_queue(ndev);
+               netif_wake_queue(ndev);
        ndev->stats.tx_packets++;
        ndev->stats.tx_bytes += len;
        dev_kfree_skb_any(skb);
@@ -1102,7 +1102,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
        /* If there is no more tx desc left free then we need to
         * tell the kernel to stop sending us tx frames.
         */
-       if (unlikely(cpdma_check_free_tx_desc(priv->txchan)))
+       if (unlikely(!cpdma_check_free_tx_desc(priv->txchan)))
                netif_stop_queue(ndev);
 
        return NETDEV_TX_OK;
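
The cpsw and davinci_emac hunks apply the same two corrections: the TX queue is stopped only when the DMA channel really has no free descriptor (note the added negation on cpdma_check_free_tx_desc()), and the completion handler calls netif_wake_queue() instead of netif_start_queue() so a stopped queue is actually rescheduled. A toy ring-buffer analogy in plain C, with invented helpers (has_free_slot, stop_tx, wake_tx) standing in for the kernel primitives:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 4
static int used;                      /* descriptors currently in flight */

static bool has_free_slot(void) { return used < RING_SIZE; }
static void stop_tx(void)  { puts("tx queue stopped"); }
static void wake_tx(void)  { puts("tx queue woken");   }

static void xmit(void)
{
    used++;                           /* submit one descriptor */
    if (!has_free_slot())             /* stop only when truly full */
        stop_tx();
}

static void tx_complete(bool queue_stopped)
{
    used--;                           /* one descriptor freed */
    if (queue_stopped)                /* wake, don't merely start */
        wake_tx();
}

int main(void)
{
    for (int i = 0; i < RING_SIZE; i++)
        xmit();                       /* last submit stops the queue */
    tx_complete(true);
    return 0;
}
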
index 37add21a3d7d630d2a989312deab132017eb5fcf..59ac143dec2576a697d9f383ef4d5d7372eef93c 100644 (file)
@@ -666,6 +666,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
                goto done;
 
        spin_lock_irqsave(&target_list_lock, flags);
+restart:
        list_for_each_entry(nt, &target_list, list) {
                netconsole_target_get(nt);
                if (nt->np.dev == dev) {
@@ -678,15 +679,17 @@ static int netconsole_netdev_event(struct notifier_block *this,
                        case NETDEV_UNREGISTER:
                                /*
                                 * rtnl_lock already held
+                                * we might sleep in __netpoll_cleanup()
                                 */
-                               if (nt->np.dev) {
-                                       __netpoll_cleanup(&nt->np);
-                                       dev_put(nt->np.dev);
-                                       nt->np.dev = NULL;
-                               }
+                               spin_unlock_irqrestore(&target_list_lock, flags);
+                               __netpoll_cleanup(&nt->np);
+                               spin_lock_irqsave(&target_list_lock, flags);
+                               dev_put(nt->np.dev);
+                               nt->np.dev = NULL;
                                nt->enabled = 0;
                                stopped = true;
-                               break;
+                               netconsole_target_put(nt);
+                               goto restart;
                        }
                }
                netconsole_target_put(nt);
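
The netconsole fix drops target_list_lock around __netpoll_cleanup(), which may sleep, and then restarts the list walk from the beginning because the list may have changed while the lock was not held. The same unlock, do blocking work, relock, restart shape is sketched below with a pthread mutex and a toy singly linked list; the names are illustrative and this single-threaded demo skips the reference counting the driver needs:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct target { int dev_id; struct target *next; };

static struct target *targets;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void blocking_cleanup(struct target *t)
{
    /* stands in for __netpoll_cleanup(): may sleep, so no spinlock held */
    printf("cleaning up target for dev %d\n", t->dev_id);
}

static void on_unregister(int dev_id)
{
    pthread_mutex_lock(&list_lock);
restart:
    for (struct target *t = targets, *prev = NULL; t; prev = t, t = t->next) {
        if (t->dev_id != dev_id)
            continue;
        pthread_mutex_unlock(&list_lock);   /* drop lock for sleeping work */
        blocking_cleanup(t);
        pthread_mutex_lock(&list_lock);
        if (prev) prev->next = t->next; else targets = t->next;
        free(t);
        goto restart;                       /* list may have changed meanwhile */
    }
    pthread_mutex_unlock(&list_lock);
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct target *t = malloc(sizeof(*t));
        t->dev_id = i % 2; t->next = targets; targets = t;
    }
    on_unregister(0);
    return 0;
}
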
index 3b6e9b83342db08c28f318c38253d315ae60317c..7c769d8e25ad835440321d9840822b3484f0d08b 100644 (file)
@@ -268,7 +268,7 @@ config USB_NET_SMSC75XX
        select CRC16
        select CRC32
        help
-         This option adds support for SMSC LAN95XX based USB 2.0
+         This option adds support for SMSC LAN75XX based USB 2.0
          Gigabit Ethernet adapters.
 
 config USB_NET_SMSC95XX
index 248d2dc765a5c06c64ab3c27c47f38d36bf14c20..16c842997291483eb12306d9ccf0a638772f18f0 100644 (file)
@@ -68,18 +68,9 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
        struct cdc_ncm_ctx *ctx;
        struct usb_driver *subdriver = ERR_PTR(-ENODEV);
        int ret = -ENODEV;
-       u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM;
+       u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf);
        struct cdc_mbim_state *info = (void *)&dev->data;
 
-       /* see if interface supports MBIM alternate setting */
-       if (intf->num_altsetting == 2) {
-               if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
-                       usb_set_interface(dev->udev,
-                                         intf->cur_altsetting->desc.bInterfaceNumber,
-                                         CDC_NCM_COMM_ALTSETTING_MBIM);
-               data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
-       }
-
        /* Probably NCM, defer for cdc_ncm_bind */
        if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
                goto err;
index 61b74a2b89ac4fbcf2ac494fc0045645efc379a0..4709fa3497cf2efcada053ccdefc06f7061971bc 100644 (file)
 
 #define        DRIVER_VERSION                          "14-Mar-2012"
 
+#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
+static bool prefer_mbim = true;
+#else
+static bool prefer_mbim;
+#endif
+module_param(prefer_mbim, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions");
+
 static void cdc_ncm_txpath_bh(unsigned long param);
 static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
 static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
@@ -550,9 +558,12 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
 }
 EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
 
-static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
+/* Select the MBIM altsetting iff it is preferred and available,
+ * returning the number of the corresponding data interface altsetting
+ */
+u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
 {
-       int ret;
+       struct usb_host_interface *alt;
 
        /* The MBIM spec defines a NCM compatible default altsetting,
         * which we may have matched:
@@ -568,23 +579,27 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
         *   endpoint descriptors, shall be constructed according to
         *   the rules given in section 6 (USB Device Model) of this
         *   specification."
-        *
-        * Do not bind to such interfaces, allowing cdc_mbim to handle
-        * them
         */
-#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
-       if ((intf->num_altsetting == 2) &&
-           !usb_set_interface(dev->udev,
-                              intf->cur_altsetting->desc.bInterfaceNumber,
-                              CDC_NCM_COMM_ALTSETTING_MBIM)) {
-               if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
-                       return -ENODEV;
-               else
-                       usb_set_interface(dev->udev,
-                                         intf->cur_altsetting->desc.bInterfaceNumber,
-                                         CDC_NCM_COMM_ALTSETTING_NCM);
+       if (prefer_mbim && intf->num_altsetting == 2) {
+               alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM);
+               if (alt && cdc_ncm_comm_intf_is_mbim(alt) &&
+                   !usb_set_interface(dev->udev,
+                                      intf->cur_altsetting->desc.bInterfaceNumber,
+                                      CDC_NCM_COMM_ALTSETTING_MBIM))
+                       return CDC_NCM_DATA_ALTSETTING_MBIM;
        }
-#endif
+       return CDC_NCM_DATA_ALTSETTING_NCM;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
+
+static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+       int ret;
+
+       /* MBIM backwards compatible function? */
+       cdc_ncm_select_altsetting(dev, intf);
+       if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
+               return -ENODEV;
 
        /* NCM data altsetting is always 1 */
        ret = cdc_ncm_bind_common(dev, intf, 1);
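
cdc_ncm now exports cdc_ncm_select_altsetting() and gates the MBIM choice on a prefer_mbim module parameter whose default is decided at build time: true when the MBIM driver is enabled, false otherwise, with a runtime override allowed. A compile-time-default-plus-runtime-override sketch in plain C; the BUILD_WITH_MBIM macro and the altsetting numbers are invented for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* default chosen at build time, like #if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) */
#ifdef BUILD_WITH_MBIM
static bool prefer_mbim = true;
#else
static bool prefer_mbim;
#endif

enum altsetting { ALT_NCM = 1, ALT_MBIM = 2 };

/* pick MBIM only if preferred and the device actually offers it */
static enum altsetting select_altsetting(bool device_offers_mbim)
{
    if (prefer_mbim && device_offers_mbim)
        return ALT_MBIM;
    return ALT_NCM;
}

int main(int argc, char **argv)
{
    if (argc > 1)                       /* runtime override, like the module param */
        prefer_mbim = atoi(argv[1]) != 0;
    printf("selected altsetting: %d\n", select_altsetting(true));
    return 0;
}
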
index efb5c7c33a28c946c7ca44a0b8a05aeeb8048345..968d5d50751dc120b406f54fffca49b46eafa230 100644 (file)
@@ -139,16 +139,9 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
 
        BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
 
-       /* control and data is shared? */
-       if (intf->cur_altsetting->desc.bNumEndpoints == 3) {
-               info->control = intf;
-               info->data = intf;
-               goto shared;
-       }
-
-       /* else require a single interrupt status endpoint on control intf */
-       if (intf->cur_altsetting->desc.bNumEndpoints != 1)
-               goto err;
+       /* set up initial state */
+       info->control = intf;
+       info->data = intf;
 
        /* and a number of CDC descriptors */
        while (len > 3) {
@@ -207,25 +200,14 @@ next_desc:
                buf += h->bLength;
        }
 
-       /* did we find all the required ones? */
-       if (!(found & (1 << USB_CDC_HEADER_TYPE)) ||
-           !(found & (1 << USB_CDC_UNION_TYPE))) {
-               dev_err(&intf->dev, "CDC functional descriptors missing\n");
-               goto err;
-       }
-
-       /* verify CDC Union */
-       if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) {
-               dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0);
-               goto err;
-       }
-
-       /* need to save these for unbind */
-       info->control = intf;
-       info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
-       if (!info->data) {
-               dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0);
-               goto err;
+       /* Use separate control and data interfaces if we found a CDC Union */
+       if (cdc_union) {
+               info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
+               if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) {
+                       dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n",
+                               cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0);
+                       goto err;
+               }
        }
 
        /* errors aren't fatal - we can live with the dynamic address */
@@ -235,11 +217,12 @@ next_desc:
        }
 
        /* claim data interface and set it up */
-       status = usb_driver_claim_interface(driver, info->data, dev);
-       if (status < 0)
-               goto err;
+       if (info->control != info->data) {
+               status = usb_driver_claim_interface(driver, info->data, dev);
+               if (status < 0)
+                       goto err;
+       }
 
-shared:
        status = qmi_wwan_register_subdriver(dev);
        if (status < 0 && info->control != info->data) {
                usb_set_intfdata(info->data, NULL);
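
qmi_wwan now starts from the assumption that control and data share one interface and only switches to a separate data interface when a CDC Union descriptor names a valid, distinct slave; the data interface is claimed only in that separate case. A plain C sketch of that decision, with invented types (struct intf, bind_interfaces) rather than the USB core API:

#include <stdio.h>

struct intf { int num; };

/* shared by default; separate when a CDC Union names a distinct slave */
static void bind_interfaces(struct intf *self, struct intf *union_slave,
                            struct intf **control, struct intf **data)
{
    *control = self;
    *data = self;                       /* initial state: everything shared */
    if (union_slave)
        *data = union_slave;            /* union present: separate data intf */
}

int main(void)
{
    struct intf ctrl_if = { .num = 3 }, data_if = { .num = 4 };
    struct intf *c, *d;

    bind_interfaces(&ctrl_if, NULL, &c, &d);
    printf("shared: control %d data %d\n", c->num, d->num);

    bind_interfaces(&ctrl_if, &data_if, &c, &d);
    if (c != d)
        printf("separate: claim data interface %d\n", d->num);
    return 0;
}
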
index 9abe51710f229377cb9f405ffedb198eab580738..1a15ec14c386f3c4e05808722bd84a196d82f3a8 100644 (file)
@@ -914,8 +914,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
 static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct usbnet *dev = netdev_priv(netdev);
+       int ret;
+
+       if (new_mtu > MAX_SINGLE_PACKET_SIZE)
+               return -EINVAL;
 
-       int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
+       ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
        if (ret < 0) {
                netdev_warn(dev->net, "Failed to set mac rx frame length\n");
                return ret;
@@ -1324,7 +1328,7 @@ static int smsc75xx_reset(struct usbnet *dev)
 
        netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf);
 
-       ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
+       ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
        if (ret < 0) {
                netdev_warn(dev->net, "Failed to set max rx frame length\n");
                return ret;
@@ -2134,8 +2138,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                        else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
                                dev->net->stats.rx_frame_errors++;
                } else {
-                       /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
-                       if (unlikely(size > (ETH_FRAME_LEN + 12))) {
+                       /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
+                       if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
                                netif_dbg(dev, rx_err, dev->net,
                                          "size err rx_cmd_a=0x%08x\n",
                                          rx_cmd_a);
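
The smsc75xx change rejects MTUs above MAX_SINGLE_PACKET_SIZE and programs the MAC with new_mtu + ETH_HLEN, since the hardware limit applies to the whole frame including the Ethernet header rather than to the payload MTU alone. A short sketch of that arithmetic; MAX_SINGLE_PACKET_SIZE below is a stand-in value, not the driver's constant:

#include <errno.h>
#include <stdio.h>

#define ETH_HLEN                14      /* destination + source + ethertype */
#define MAX_SINGLE_PACKET_SIZE  9000    /* stand-in for the driver's limit */

static int set_mtu(int new_mtu, int *hw_frame_len)
{
    if (new_mtu > MAX_SINGLE_PACKET_SIZE)
        return -EINVAL;
    *hw_frame_len = new_mtu + ETH_HLEN; /* hardware filters on full frames */
    return 0;
}

int main(void)
{
    int frame_len;
    if (set_mtu(1500, &frame_len) == 0)
        printf("MTU 1500 -> frame length %d\n", frame_len);    /* 1514 */
    printf("oversized MTU: %d\n", set_mtu(65000, &frame_len)); /* -EINVAL */
    return 0;
}
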
index 4cc13940c8950d2a7ad2a3fdf1a092ac3736a7d5..f76c3ca07a4501603883bbcbe9feb901d4eca390 100644 (file)
@@ -1023,6 +1023,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
                                          AR_PHY_AGC_CONTROL_FLTR_CAL   |
                                          AR_PHY_AGC_CONTROL_PKDET_CAL;
 
+       /* Use chip chainmask only for calibration */
        ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
 
        if (rtt) {
@@ -1150,6 +1151,9 @@ skip_tx_iqcal:
                ar9003_hw_rtt_disable(ah);
        }
 
+       /* Revert chainmask to runtime parameters */
+       ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
+
        /* Initialize list pointers */
        ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
 
index ade3afb21f911e86589e325c2e99b49b7fe91289..7fdac6c7b3ea5dc030136d66c4574c4d8373a82e 100644 (file)
@@ -28,21 +28,21 @@ void ath_tx_complete_poll_work(struct work_struct *work)
        int i;
        bool needreset = false;
 
-       for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
-               if (ATH_TXQ_SETUP(sc, i)) {
-                       txq = &sc->tx.txq[i];
-                       ath_txq_lock(sc, txq);
-                       if (txq->axq_depth) {
-                               if (txq->axq_tx_inprogress) {
-                                       needreset = true;
-                                       ath_txq_unlock(sc, txq);
-                                       break;
-                               } else {
-                                       txq->axq_tx_inprogress = true;
-                               }
+       for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+               txq = sc->tx.txq_map[i];
+
+               ath_txq_lock(sc, txq);
+               if (txq->axq_depth) {
+                       if (txq->axq_tx_inprogress) {
+                               needreset = true;
+                               ath_txq_unlock(sc, txq);
+                               break;
+                       } else {
+                               txq->axq_tx_inprogress = true;
                        }
-                       ath_txq_unlock_complete(sc, txq);
                }
+               ath_txq_unlock_complete(sc, txq);
+       }
 
        if (needreset) {
                ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
@@ -170,7 +170,8 @@ void ath_rx_poll(unsigned long data)
 {
        struct ath_softc *sc = (struct ath_softc *)data;
 
-       ieee80211_queue_work(sc->hw, &sc->hw_check_work);
+       if (!test_bit(SC_OP_INVALID, &sc->sc_flags))
+               ieee80211_queue_work(sc->hw, &sc->hw_check_work);
 }
 
 /*
index 38bc5a7997ffd43b9a8c8f45a6329fe4ffdac5c1..122146943bf204693b540b124f6c947bec34833b 100644 (file)
@@ -1487,8 +1487,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
        const struct b43_dma_ops *ops;
        struct b43_dmaring *ring;
        struct b43_dmadesc_meta *meta;
+       static const struct b43_txstatus fake; /* filled with 0 */
+       const struct b43_txstatus *txstat;
        int slot, firstused;
        bool frame_succeed;
+       int skip;
+       static u8 err_out1, err_out2;
 
        ring = parse_cookie(dev, status->cookie, &slot);
        if (unlikely(!ring))
@@ -1501,13 +1505,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
        firstused = ring->current_slot - ring->used_slots + 1;
        if (firstused < 0)
                firstused = ring->nr_slots + firstused;
+
+       skip = 0;
        if (unlikely(slot != firstused)) {
                /* This possibly is a firmware bug and will result in
-                * malfunction, memory leaks and/or stall of DMA functionality. */
-               b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
-                      "Expected %d, but got %d\n",
-                      ring->index, firstused, slot);
-               return;
+                * malfunction, memory leaks and/or stall of DMA functionality.
+                */
+               if (slot == next_slot(ring, next_slot(ring, firstused))) {
+                       /* If a single header/data pair was missed, skip over
+                        * the first two slots in an attempt to recover.
+                        */
+                       slot = firstused;
+                       skip = 2;
+                       if (!err_out1) {
+                               /* Report the error once. */
+                               b43dbg(dev->wl,
+                                      "Skip on DMA ring %d slot %d.\n",
+                                      ring->index, slot);
+                               err_out1 = 1;
+                       }
+               } else {
+                       /* More than a single header/data pair were missed.
+                        * Report this error once.
+                        */
+                       if (!err_out2)
+                               b43dbg(dev->wl,
+                                      "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
+                                      ring->index, firstused, slot);
+                       err_out2 = 1;
+                       return;
+               }
        }
 
        ops = ring->ops;
@@ -1522,11 +1549,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                               slot, firstused, ring->index);
                        break;
                }
+
                if (meta->skb) {
                        struct b43_private_tx_info *priv_info =
-                               b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+                            b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
 
-                       unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
+                       unmap_descbuffer(ring, meta->dmaaddr,
+                                        meta->skb->len, 1);
                        kfree(priv_info->bouncebuffer);
                        priv_info->bouncebuffer = NULL;
                } else {
@@ -1538,8 +1567,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                        struct ieee80211_tx_info *info;
 
                        if (unlikely(!meta->skb)) {
-                               /* This is a scatter-gather fragment of a frame, so
-                                * the skb pointer must not be NULL. */
+                               /* This is a scatter-gather fragment of a frame,
+                                * so the skb pointer must not be NULL.
+                                */
                                b43dbg(dev->wl, "TX status unexpected NULL skb "
                                       "at slot %d (first=%d) on ring %d\n",
                                       slot, firstused, ring->index);
@@ -1550,9 +1580,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 
                        /*
                         * Call back to inform the ieee80211 subsystem about
-                        * the status of the transmission.
+                        * the status of the transmission. When skipping over
+                        * a missed TX status report, use a status structure
+                        * filled with zeros to indicate that the frame was not
+                        * sent (frame_count 0) and not acknowledged
                         */
-                       frame_succeed = b43_fill_txstatus_report(dev, info, status);
+                       if (unlikely(skip))
+                               txstat = &fake;
+                       else
+                               txstat = status;
+
+                       frame_succeed = b43_fill_txstatus_report(dev, info,
+                                                                txstat);
 #ifdef CONFIG_B43_DEBUG
                        if (frame_succeed)
                                ring->nr_succeed_tx_packets++;
@@ -1580,12 +1619,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                /* Everything unmapped and free'd. So it's not used anymore. */
                ring->used_slots--;
 
-               if (meta->is_last_fragment) {
+               if (meta->is_last_fragment && !skip) {
                        /* This is the last scatter-gather
                         * fragment of the frame. We are done. */
                        break;
                }
                slot = next_slot(ring, slot);
+               if (skip > 0)
+                       --skip;
        }
        if (ring->stopped) {
                B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
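
The b43 hunk recovers from a TX status report that arrives exactly one header/data slot pair ahead of the expected position: it rewinds to the expected slot, completes the two skipped slots against a zero-filled status (frame not sent, not acknowledged), and rate-limits the diagnostics with the err_out1/err_out2 flags. The core of the detection is circular slot arithmetic; a minimal sketch with an invented ring size:

#include <stdio.h>

#define NR_SLOTS 8

static int next_slot(int slot) { return (slot + 1) % NR_SLOTS; }

/* returns how many slots to skip: 0 = in order, 2 = one header/data
 * pair missed, -1 = unrecoverable out-of-order report */
static int classify_report(int expected, int reported)
{
    if (reported == expected)
        return 0;
    if (reported == next_slot(next_slot(expected)))
        return 2;
    return -1;
}

int main(void)
{
    printf("%d\n", classify_report(6, 6));  /* 0  */
    printf("%d\n", classify_report(6, 0));  /* 2 (wraps 6 -> 7 -> 0) */
    printf("%d\n", classify_report(6, 3));  /* -1 */
    return 0;
}
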
index 3c35382ee6c23ebfbaed8d1dcfdf33e804faec96..e8486c1e091af2f2db2a23827dc612315398751b 100644 (file)
@@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
        u16 clip_off[2] = { 0xFFFF, 0xFFFF };
 
        u8 vcm_final = 0;
-       s8 offset[4];
+       s32 offset[4];
        s32 results[8][4] = { };
        s32 results_min[4] = { };
        s32 poll_results[4] = { };
@@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
                }
                for (i = 0; i < 4; i += 2) {
                        s32 curr;
-                       s32 mind = 40;
+                       s32 mind = 0x100000;
                        s32 minpoll = 249;
                        u8 minvcm = 0;
                        if (2 * core != i)
@@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
        u8 regs_save_radio[2];
        u16 regs_save_phy[2];
 
-       s8 offset[4];
+       s32 offset[4];
        u8 core;
        u8 rail;
 
@@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
        }
 
        for (i = 0; i < 4; i++) {
-               s32 mind = 40;
+               s32 mind = 0x100000;
                u8 minvcm = 0;
                s32 minpoll = 249;
                s32 curr;
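
The phy_n change widens the offset[] arrays from s8 to s32 and raises the initial minimum distance from 40 to 0x100000, presumably because the computed RSSI offsets can fall outside the s8 range, where a narrow store would silently truncate them. A two-line demonstration of that truncation (the value 200 is only an example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t computed = 200;               /* a plausible intermediate offset */
    int8_t  narrow   = (int8_t)computed;  /* implementation-defined wrap, commonly -56 */
    int32_t wide     = computed;          /* preserved as intended */
    printf("s8 store: %d, s32 store: %d\n", narrow, wide);
    return 0;
}
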
index 21a824232478f2ac02d593495ac5fe82ff94df77..18d37645e2cde25f2d291a94bd51a3e3675db5be 100644 (file)
@@ -1137,9 +1137,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
        gain0_15 = ((biq1 & 0xf) << 12) |
                   ((tia & 0xf) << 8) |
                   ((lna2 & 0x3) << 6) |
-                  ((lna2 & 0x3) << 4) |
-                  ((lna1 & 0x3) << 2) |
-                  ((lna1 & 0x3) << 0);
+                  ((lna2 &
+                    0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
 
        mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
        mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
@@ -1157,8 +1156,6 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
        }
 
        mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);
-       mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
-       mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3);
 
 }
 
@@ -1331,43 +1328,6 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
        return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
 }
 
-static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
-                                     u16 tia_gain, u16 lna2_gain)
-{
-       u32 i_thresh_l, q_thresh_l;
-       u32 i_thresh_h, q_thresh_h;
-       struct lcnphy_iq_est iq_est_h, iq_est_l;
-
-       wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
-                                              lna2_gain, 0);
-
-       wlc_lcnphy_rx_gain_override_enable(pi, true);
-       wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
-       udelay(500);
-       write_radio_reg(pi, RADIO_2064_REG112, 0);
-       if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
-               return false;
-
-       wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
-       udelay(500);
-       write_radio_reg(pi, RADIO_2064_REG112, 0);
-       if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
-               return false;
-
-       i_thresh_l = (iq_est_l.i_pwr << 1);
-       i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
-
-       q_thresh_l = (iq_est_l.q_pwr << 1);
-       q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
-       if ((iq_est_h.i_pwr > i_thresh_l) &&
-           (iq_est_h.i_pwr < i_thresh_h) &&
-           (iq_est_h.q_pwr > q_thresh_l) &&
-           (iq_est_h.q_pwr < q_thresh_h))
-               return true;
-
-       return false;
-}
-
 static bool
 wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
                     const struct lcnphy_rx_iqcomp *iqcomp,
@@ -1382,8 +1342,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
            RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
            rfoverride3_old, rfoverride3val_old, rfoverride4_old,
            rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
-       int tia_gain, lna2_gain, biq1_gain;
-       bool set_gain;
+       int tia_gain;
+       u32 received_power, rx_pwr_threshold;
        u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
        u16 values_to_save[11];
        s16 *ptr;
@@ -1408,134 +1368,126 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
                goto cal_done;
        }
 
-       WARN_ON(module != 1);
-       tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
-       wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
-
-       for (i = 0; i < 11; i++)
-               values_to_save[i] =
-                       read_radio_reg(pi, rxiq_cal_rf_reg[i]);
-       Core1TxControl_old = read_phy_reg(pi, 0x631);
-
-       or_phy_reg(pi, 0x631, 0x0015);
-
-       RFOverride0_old = read_phy_reg(pi, 0x44c);
-       RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
-       rfoverride2_old = read_phy_reg(pi, 0x4b0);
-       rfoverride2val_old = read_phy_reg(pi, 0x4b1);
-       rfoverride3_old = read_phy_reg(pi, 0x4f9);
-       rfoverride3val_old = read_phy_reg(pi, 0x4fa);
-       rfoverride4_old = read_phy_reg(pi, 0x938);
-       rfoverride4val_old = read_phy_reg(pi, 0x939);
-       afectrlovr_old = read_phy_reg(pi, 0x43b);
-       afectrlovrval_old = read_phy_reg(pi, 0x43c);
-       old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
-       old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
-
-       tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
-       if (tx_gain_override_old) {
-               wlc_lcnphy_get_tx_gain(pi, &old_gains);
-               tx_gain_index_old = pi_lcn->lcnphy_current_index;
-       }
-
-       wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
+       if (module == 1) {
 
-       mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
-       mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
+               tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
+               wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
 
-       mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
-       mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
+               for (i = 0; i < 11; i++)
+                       values_to_save[i] =
+                               read_radio_reg(pi, rxiq_cal_rf_reg[i]);
+               Core1TxControl_old = read_phy_reg(pi, 0x631);
+
+               or_phy_reg(pi, 0x631, 0x0015);
+
+               RFOverride0_old = read_phy_reg(pi, 0x44c);
+               RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
+               rfoverride2_old = read_phy_reg(pi, 0x4b0);
+               rfoverride2val_old = read_phy_reg(pi, 0x4b1);
+               rfoverride3_old = read_phy_reg(pi, 0x4f9);
+               rfoverride3val_old = read_phy_reg(pi, 0x4fa);
+               rfoverride4_old = read_phy_reg(pi, 0x938);
+               rfoverride4val_old = read_phy_reg(pi, 0x939);
+               afectrlovr_old = read_phy_reg(pi, 0x43b);
+               afectrlovrval_old = read_phy_reg(pi, 0x43c);
+               old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
+               old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
+
+               tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
+               if (tx_gain_override_old) {
+                       wlc_lcnphy_get_tx_gain(pi, &old_gains);
+                       tx_gain_index_old = pi_lcn->lcnphy_current_index;
+               }
 
-       write_radio_reg(pi, RADIO_2064_REG116, 0x06);
-       write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
-       write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
-       write_radio_reg(pi, RADIO_2064_REG098, 0x03);
-       write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
-       mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
-       write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
-       write_radio_reg(pi, RADIO_2064_REG114, 0x01);
-       write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
-       write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
-
-       mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
-       mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
-       mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
-       mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
-       mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
-       mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
-       mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
-       mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
-       mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
-       mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
+               wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
 
-       mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
-       mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
+               mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
+               mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
 
-       write_phy_reg(pi, 0x6da, 0xffff);
-       or_phy_reg(pi, 0x6db, 0x3);
+               mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
+               mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
 
-       wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
-       set_gain = false;
-
-       lna2_gain = 3;
-       while ((lna2_gain >= 0) && !set_gain) {
-               tia_gain = 4;
-
-               while ((tia_gain >= 0) && !set_gain) {
-                       biq1_gain = 6;
-
-                       while ((biq1_gain >= 0) && !set_gain) {
-                               set_gain = wlc_lcnphy_rx_iq_cal_gain(pi,
-                                                                    (u16)
-                                                                    biq1_gain,
-                                                                    (u16)
-                                                                    tia_gain,
-                                                                    (u16)
-                                                                    lna2_gain);
-                               biq1_gain -= 1;
-                       }
+               write_radio_reg(pi, RADIO_2064_REG116, 0x06);
+               write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
+               write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
+               write_radio_reg(pi, RADIO_2064_REG098, 0x03);
+               write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
+               mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
+               write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
+               write_radio_reg(pi, RADIO_2064_REG114, 0x01);
+               write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
+               write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
+
+               mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
+               mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
+               mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
+               mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
+               mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
+               mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
+               mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
+               mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
+               mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
+               mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
+
+               mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
+               mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
+
+               wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
+               write_phy_reg(pi, 0x6da, 0xffff);
+               or_phy_reg(pi, 0x6db, 0x3);
+               wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
+               wlc_lcnphy_rx_gain_override_enable(pi, true);
+
+               tia_gain = 8;
+               rx_pwr_threshold = 950;
+               while (tia_gain > 0) {
                        tia_gain -= 1;
+                       wlc_lcnphy_set_rx_gain_by_distribution(pi,
+                                                              0, 0, 2, 2,
+                                                              (u16)
+                                                              tia_gain, 1, 0);
+                       udelay(500);
+
+                       received_power =
+                               wlc_lcnphy_measure_digital_power(pi, 2000);
+                       if (received_power < rx_pwr_threshold)
+                               break;
                }
-               lna2_gain -= 1;
-       }
+               result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
 
-       if (set_gain)
-               result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
-       else
-               result = false;
+               wlc_lcnphy_stop_tx_tone(pi);
 
-       wlc_lcnphy_stop_tx_tone(pi);
+               write_phy_reg(pi, 0x631, Core1TxControl_old);
 
-       write_phy_reg(pi, 0x631, Core1TxControl_old);
-
-       write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
-       write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
-       write_phy_reg(pi, 0x4b0, rfoverride2_old);
-       write_phy_reg(pi, 0x4b1, rfoverride2val_old);
-       write_phy_reg(pi, 0x4f9, rfoverride3_old);
-       write_phy_reg(pi, 0x4fa, rfoverride3val_old);
-       write_phy_reg(pi, 0x938, rfoverride4_old);
-       write_phy_reg(pi, 0x939, rfoverride4val_old);
-       write_phy_reg(pi, 0x43b, afectrlovr_old);
-       write_phy_reg(pi, 0x43c, afectrlovrval_old);
-       write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
-       write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
+               write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
+               write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
+               write_phy_reg(pi, 0x4b0, rfoverride2_old);
+               write_phy_reg(pi, 0x4b1, rfoverride2val_old);
+               write_phy_reg(pi, 0x4f9, rfoverride3_old);
+               write_phy_reg(pi, 0x4fa, rfoverride3val_old);
+               write_phy_reg(pi, 0x938, rfoverride4_old);
+               write_phy_reg(pi, 0x939, rfoverride4val_old);
+               write_phy_reg(pi, 0x43b, afectrlovr_old);
+               write_phy_reg(pi, 0x43c, afectrlovrval_old);
+               write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
+               write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
 
-       wlc_lcnphy_clear_trsw_override(pi);
+               wlc_lcnphy_clear_trsw_override(pi);
 
-       mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
+               mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
 
-       for (i = 0; i < 11; i++)
-               write_radio_reg(pi, rxiq_cal_rf_reg[i],
-                               values_to_save[i]);
+               for (i = 0; i < 11; i++)
+                       write_radio_reg(pi, rxiq_cal_rf_reg[i],
+                                       values_to_save[i]);
 
-       if (tx_gain_override_old)
-               wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
-       else
-               wlc_lcnphy_disable_tx_gain_override(pi);
+               if (tx_gain_override_old)
+                       wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
+               else
+                       wlc_lcnphy_disable_tx_gain_override(pi);
 
-       wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
-       wlc_lcnphy_rx_gain_override_enable(pi, false);
+               wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
+               wlc_lcnphy_rx_gain_override_enable(pi, false);
+       }
 
 cal_done:
        kfree(ptr);
@@ -1829,17 +1781,6 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
                write_radio_reg(pi, RADIO_2064_REG038, 3);
                write_radio_reg(pi, RADIO_2064_REG091, 7);
        }
-
-       if (!(pi->sh->boardflags & BFL_FEM)) {
-               u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc,
-                       0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0};
-
-               write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
-               write_radio_reg(pi, RADIO_2064_REG091, 0x3);
-               write_radio_reg(pi, RADIO_2064_REG038, 0x3);
-
-               write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
-       }
 }
 
 static int
@@ -2034,16 +1975,6 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
                } else {
                        mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
                        mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
-                       mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
-                       mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
-                       mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
-                       mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
-                       mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
-                       mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
-                       mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
-                       mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
-                       mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
-                       mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
                }
        } else {
                mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
@@ -2130,14 +2061,12 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
                    (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
 
        mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
-       mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
 }
 
 static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
 {
        struct phytbl_info tab;
        u32 rfseq, ind;
-       u8 tssi_sel;
 
        tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
        tab.tbl_width = 32;
@@ -2159,13 +2088,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
 
        mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
 
-       if (pi->sh->boardflags & BFL_FEM) {
-               tssi_sel = 0x1;
-               wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
-       } else {
-               tssi_sel = 0xe;
-               wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA);
-       }
+       wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
        mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
 
        mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
@@ -2201,10 +2124,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
        mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
 
        if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
-               mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
+               mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
                mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
        } else {
-               mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
                mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
                mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
        }
@@ -2251,10 +2173,6 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
 
        mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
 
-       mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
-       mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
-       mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
-
        wlc_lcnphy_pwrctrl_rssiparams(pi);
 }
 
@@ -2873,8 +2791,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
                read_radio_reg(pi, RADIO_2064_REG007) & 1;
        u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
        u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
-       u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
-
        idleTssi = read_phy_reg(pi, 0x4ab);
        suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
                         MCTL_EN_MAC));
@@ -2892,12 +2808,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
        mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
        mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
        wlc_lcnphy_tssi_setup(pi);
-
-       mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
-       mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
-
-       wlc_lcnphy_set_bbmult(pi, 0x0);
-
        wlc_phy_do_dummy_tx(pi, true, OFF);
        idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
                    >> 0);
@@ -2919,7 +2829,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
 
        mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
 
-       wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
        wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
        wlc_lcnphy_set_tx_gain(pi, &old_gains);
        wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
@@ -3133,11 +3042,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
                        wlc_lcnphy_write_table(pi, &tab);
                        tab.tbl_offset++;
                }
-               mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
-               mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
-               mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
-               mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
-               mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
 
                mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
 
@@ -3939,6 +3843,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
        target_gains.pad_gain = 21;
        target_gains.dac_gain = 0;
        wlc_lcnphy_set_tx_gain(pi, &target_gains);
+       wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
 
        if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
 
@@ -3949,7 +3854,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
                                        lcnphy_recal ? LCNPHY_CAL_RECAL :
                                        LCNPHY_CAL_FULL), false);
        } else {
-               wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
                wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
        }
 
@@ -4374,22 +4278,17 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
        if (CHSPEC_IS5G(pi->radio_chanspec))
                pa_gain = 0x70;
        else
-               pa_gain = 0x60;
+               pa_gain = 0x70;
 
        if (pi->sh->boardflags & BFL_FEM)
                pa_gain = 0x10;
-
        tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
        tab.tbl_width = 32;
        tab.tbl_len = 1;
        tab.tbl_ptr = &val;
 
        for (j = 0; j < 128; j++) {
-               if (pi->sh->boardflags & BFL_FEM)
-                       gm_gain = gain_table[j].gm;
-               else
-                       gm_gain = 15;
-
+               gm_gain = gain_table[j].gm;
                val = (((u32) pa_gain << 24) |
                       (gain_table[j].pad << 16) |
                       (gain_table[j].pga << 8) | gm_gain);
@@ -4600,10 +4499,7 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
 
        write_phy_reg(pi, 0x4ea, 0x4688);
 
-       if (pi->sh->boardflags & BFL_FEM)
-               mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
-       else
-               mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
+       mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
 
        mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
 
@@ -4614,13 +4510,6 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
        wlc_lcnphy_rcal(pi);
 
        wlc_lcnphy_rc_cal(pi);
-
-       if (!(pi->sh->boardflags & BFL_FEM)) {
-               write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
-               write_radio_reg(pi, RADIO_2064_REG033, 0x19);
-               write_radio_reg(pi, RADIO_2064_REG039, 0xe);
-       }
-
 }
 
 static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
@@ -4650,20 +4539,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
                wlc_lcnphy_write_table(pi, &tab);
        }
 
-       if (!(pi->sh->boardflags & BFL_FEM)) {
-               tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
-               tab.tbl_width = 16;
-               tab.tbl_ptr = &val;
-               tab.tbl_len = 1;
+       tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
+       tab.tbl_width = 16;
+       tab.tbl_ptr = &val;
+       tab.tbl_len = 1;
 
-               val = 150;
-               tab.tbl_offset = 0;
-               wlc_lcnphy_write_table(pi, &tab);
+       val = 114;
+       tab.tbl_offset = 0;
+       wlc_lcnphy_write_table(pi, &tab);
 
-               val = 220;
-               tab.tbl_offset = 1;
-               wlc_lcnphy_write_table(pi, &tab);
-       }
+       val = 130;
+       tab.tbl_offset = 1;
+       wlc_lcnphy_write_table(pi, &tab);
+
+       val = 6;
+       tab.tbl_offset = 8;
+       wlc_lcnphy_write_table(pi, &tab);
 
        if (CHSPEC_IS2G(pi->radio_chanspec)) {
                if (pi->sh->boardflags & BFL_FEM)
@@ -5055,7 +4946,6 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
                wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
 
        mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
-       wlc_lcnphy_tssi_setup(pi);
 }
 
 void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
@@ -5094,7 +4984,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
        if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
                return false;
 
-       if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
+       if ((pi->sh->boardflags & BFL_FEM) &&
+           (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
                if (pi_lcn->lcnphy_tempsense_option == 3) {
                        pi->hwpwrctrl = true;
                        pi->hwpwrctrl_capable = true;
index b7e95acc2084033dd5c27de24b80bebd78f8dfbe..622c01ca72c5d24e8bfc55c3b84ad61633c4bb92 100644 (file)
@@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = {
 };
 
 static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
-       0x0009,
        0x000a,
-       0x0005,
-       0x0006,
        0x0009,
-       0x000a,
-       0x0005,
        0x0006,
-       0x0009,
-       0x000a,
        0x0005,
-       0x0006,
-       0x0009,
        0x000a,
-       0x0005,
-       0x0006,
        0x0009,
-       0x000a,
-       0x0005,
        0x0006,
-       0x0009,
-       0x000a,
        0x0005,
-       0x0006,
-       0x0009,
        0x000a,
-       0x0005,
-       0x0006,
        0x0009,
-       0x000a,
-       0x0005,
        0x0006,
-       0x0009,
-       0x000a,
        0x0005,
-       0x0006,
-       0x0009,
        0x000a,
-       0x0005,
-       0x0006,
        0x0009,
-       0x000a,
-       0x0005,
        0x0006,
-       0x0009,
-       0x000a,
        0x0005,
-       0x0006,
+       0x000a,
        0x0009,
+       0x0006,
+       0x0005,
        0x000a,
+       0x0009,
+       0x0006,
        0x0005,
+       0x000a,
+       0x0009,
        0x0006,
+       0x0005,
+       0x000a,
        0x0009,
+       0x0006,
+       0x0005,
        0x000a,
+       0x0009,
+       0x0006,
        0x0005,
+       0x000a,
+       0x0009,
        0x0006,
+       0x0005,
+       0x000a,
        0x0009,
+       0x0006,
+       0x0005,
        0x000a,
+       0x0009,
+       0x0006,
        0x0005,
+       0x000a,
+       0x0009,
        0x0006,
+       0x0005,
+       0x000a,
        0x0009,
+       0x0006,
+       0x0005,
        0x000a,
+       0x0009,
+       0x0006,
        0x0005,
+       0x000a,
+       0x0009,
        0x0006,
+       0x0005,
 };
 
 static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
index 3630a41df50d7c64815abae048696ea04329a703..c353b5f19c8c639480be47121f06c166dfc9f71c 100644 (file)
@@ -475,6 +475,7 @@ il3945_tx_skb(struct il_priv *il,
        dma_addr_t txcmd_phys;
        int txq_id = skb_get_queue_mapping(skb);
        u16 len, idx, hdr_len;
+       u16 firstlen, secondlen;
        u8 id;
        u8 unicast;
        u8 sta_id;
@@ -589,21 +590,22 @@ il3945_tx_skb(struct il_priv *il,
        len =
            sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
            hdr_len;
-       len = (len + 3) & ~3;
+       firstlen = (len + 3) & ~3;
 
        /* Physical address of this Tx command's header (not MAC header!),
         * within command buffer array. */
        txcmd_phys =
-           pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
+           pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
+                          PCI_DMA_TODEVICE);
        if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
                goto drop_unlock;
 
        /* Set up TFD's 2nd entry to point directly to remainder of skb,
         * if any (802.11 null frames have no payload). */
-       len = skb->len - hdr_len;
-       if (len) {
+       secondlen = skb->len - hdr_len;
+       if (secondlen > 0) {
                phys_addr =
-                   pci_map_single(il->pci_dev, skb->data + hdr_len, len,
+                   pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
                                   PCI_DMA_TODEVICE);
                if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
                        goto drop_unlock;
@@ -611,12 +613,12 @@ il3945_tx_skb(struct il_priv *il,
 
        /* Add buffer containing Tx command and MAC(!) header to TFD's
         * first entry */
-       il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0);
+       il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
        dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
-       dma_unmap_len_set(out_meta, len, len);
-       if (len)
-               il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0,
-                                              U32_PAD(len));
+       dma_unmap_len_set(out_meta, len, firstlen);
+       if (secondlen > 0)
+               il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0,
+                                              U32_PAD(secondlen));
 
        if (!ieee80211_has_morefrags(hdr->frame_control)) {
                txq->need_update = 1;
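
Note (not part of the patch): the hunk above stops reusing one `len` variable for both DMA segments — `firstlen` is the 4-byte-padded command-plus-header size and `secondlen` the remaining payload — so the unmap length recorded for the first mapping is no longer clobbered. A minimal standalone sketch of the two computations; the sizes below are made-up, not the driver's:

    #include <stdio.h>

    /* Round a length up to the next multiple of 4; the TX command plus
     * MAC header is padded like this before it is DMA-mapped. */
    static unsigned int align4(unsigned int len)
    {
            return (len + 3) & ~3u;
    }

    int main(void)
    {
            unsigned int hdr_len = 26;            /* made-up 802.11 header size */
            unsigned int cmd_len = 40 + hdr_len;  /* made-up command + header size */
            unsigned int skb_len = 1500;          /* made-up frame length */

            unsigned int firstlen  = align4(cmd_len);    /* first TFD entry */
            unsigned int secondlen = skb_len - hdr_len;  /* second TFD entry */

            /* Because the two lengths live in separate variables, the value
             * stored for unmapping the first segment stays firstlen instead
             * of being overwritten by the payload length. */
            printf("firstlen=%u secondlen=%u\n", firstlen, secondlen);
            return 0;
    }
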
index e8324b5e5bfe836610f5680f86cf9c742fc4061b..6c7493c2d698e6a2c83de679131890726aff6456 100644 (file)
@@ -2152,7 +2152,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
        int rate_idx;
        int i;
        u32 rate;
-       u8 use_green = il4965_rs_use_green(il, sta);
+       u8 use_green;
        u8 active_tbl = 0;
        u8 valid_tx_ant;
        struct il_station_priv *sta_priv;
@@ -2160,6 +2160,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
        if (!sta || !lq_sta)
                return;
 
+       use_green = il4965_rs_use_green(il, sta);
        sta_priv = (void *)sta->drv_priv;
 
        i = lq_sta->last_txrate_idx;
index 86ea5f4c39398077efa5e13c4a16a7dcf769bf05..44ca0e57f9f76fd8b3b58c183a630fdb057f78ad 100644 (file)
@@ -1261,6 +1261,15 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
                return -EIO;
        }
 
+       /*
+        * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag
+        * in iwl_down but cancel the workers only later.
+        */
+       if (!priv->ucode_loaded) {
+               IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id);
+               return -EIO;
+       }
+
        /*
         * Synchronous commands from this op-mode must hold
         * the mutex, this ensures we don't try to send two
index 736fe9bb140ebab643e065742469ef0a9f667e31..1a4ac9236a4446e2d8b30722cad23e826756a660 100644 (file)
@@ -367,6 +367,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
                return -EIO;
        }
 
+       priv->ucode_loaded = true;
+
        if (ucode_type != IWL_UCODE_WOWLAN) {
                /* delay a bit to give rfkill time to run */
                msleep(5);
@@ -380,8 +382,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
                return ret;
        }
 
-       priv->ucode_loaded = true;
-
        return 0;
 }
 
index 17bedc50e753d62613055eb3662c4095fe9f41bf..12c4f31ca8fbddcc95925cebf4b269b150a64ef2 100644 (file)
@@ -475,6 +475,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 
        /* If platform's RF_KILL switch is NOT set to KILL */
        hw_rfkill = iwl_is_rfkill_set(trans);
+       if (hw_rfkill)
+               set_bit(STATUS_RFKILL, &trans_pcie->status);
+       else
+               clear_bit(STATUS_RFKILL, &trans_pcie->status);
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
        if (hw_rfkill && !run_in_rfkill)
                return -ERFKILL;
@@ -641,6 +645,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 
 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
        int err;
 
@@ -656,6 +661,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
        iwl_enable_rfkill_int(trans);
 
        hw_rfkill = iwl_is_rfkill_set(trans);
+       if (hw_rfkill)
+               set_bit(STATUS_RFKILL, &trans_pcie->status);
+       else
+               clear_bit(STATUS_RFKILL, &trans_pcie->status);
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 
        return 0;
@@ -694,6 +703,10 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
                 * op_mode.
                 */
                hw_rfkill = iwl_is_rfkill_set(trans);
+               if (hw_rfkill)
+                       set_bit(STATUS_RFKILL, &trans_pcie->status);
+               else
+                       clear_bit(STATUS_RFKILL, &trans_pcie->status);
                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
        }
 }
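
Note (not part of the patch): each of the three hunks above caches the hardware RF-kill reading in the transport's status bits before calling iwl_op_mode_hw_rf_kill(), so later checks of the software state agree with what was just reported. A small standalone sketch of that mirror-then-notify pattern; the plain bitmask and bit name stand in for the driver's STATUS_RFKILL flag and atomic set_bit()/clear_bit():

    #include <stdbool.h>
    #include <stdio.h>

    #define STATUS_RFKILL_BIT (1UL << 0)   /* illustrative stand-in for STATUS_RFKILL */

    static unsigned long status;           /* stand-in for trans_pcie->status */

    /* Mirror the current hardware switch reading into the software status
     * word, then report it, so later status checks match what was reported. */
    static void report_rfkill(bool hw_rfkill)
    {
            if (hw_rfkill)
                    status |= STATUS_RFKILL_BIT;
            else
                    status &= ~STATUS_RFKILL_BIT;

            printf("rfkill reported: %s, cached: %s\n",
                   hw_rfkill ? "on" : "off",
                   (status & STATUS_RFKILL_BIT) ? "on" : "off");
    }

    int main(void)
    {
            report_rfkill(true);
            report_rfkill(false);
            return 0;
    }
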
index 8595c16f74deb8bdcf531725e3c65c0986d58745..cb5c6792e3a8de2679022370c140263b8e1e9e57 100644 (file)
@@ -1264,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
                int copy = 0;
 
-               if (!cmd->len)
+               if (!cmd->len[i])
                        continue;
 
                /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
index 20a6c55558737b3bc3e3968856c5fe827780c688..b5c8b962ce12f74077fb6159b3e8558f521f10a5 100644 (file)
@@ -157,6 +157,20 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
                return -1;
        }
 
+       cmd_code = le16_to_cpu(host_cmd->command);
+       cmd_size = le16_to_cpu(host_cmd->size);
+
+       if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET &&
+           cmd_code != HostCmd_CMD_FUNC_SHUTDOWN &&
+           cmd_code != HostCmd_CMD_FUNC_INIT) {
+               dev_err(adapter->dev,
+                       "DNLD_CMD: FW in reset state, ignore cmd %#x\n",
+                       cmd_code);
+               mwifiex_complete_cmd(adapter, cmd_node);
+               mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+               return -1;
+       }
+
        /* Set command sequence number */
        adapter->seq_num++;
        host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
@@ -168,9 +182,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
        adapter->curr_cmd = cmd_node;
        spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
 
-       cmd_code = le16_to_cpu(host_cmd->command);
-       cmd_size = le16_to_cpu(host_cmd->size);
-
        /* Adjust skb length */
        if (cmd_node->cmd_skb->len > cmd_size)
                /*
@@ -484,8 +495,6 @@ int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
 
        ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid,
                                     data_buf);
-       if (!ret)
-               ret = mwifiex_wait_queue_complete(adapter);
 
        return ret;
 }
@@ -588,9 +597,10 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
        if (cmd_no == HostCmd_CMD_802_11_SCAN) {
                mwifiex_queue_scan_cmd(priv, cmd_node);
        } else {
-               adapter->cmd_queued = cmd_node;
                mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
                queue_work(adapter->workqueue, &adapter->main_work);
+               if (cmd_node->wait_q_enabled)
+                       ret = mwifiex_wait_queue_complete(adapter, cmd_node);
        }
 
        return ret;
index e38aa9b3663d0a76c0cfe1c7f0698f923fc58587..0ff4c37ab42ae6853dbe6c85f8bb33f476b531e1 100644 (file)
@@ -709,6 +709,14 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
                return ret;
        }
 
+       /* cancel current command */
+       if (adapter->curr_cmd) {
+               dev_warn(adapter->dev, "curr_cmd is still in processing\n");
+               del_timer(&adapter->cmd_timer);
+               mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
+               adapter->curr_cmd = NULL;
+       }
+
        /* shut down mwifiex */
        dev_dbg(adapter->dev, "info: shutdown mwifiex...\n");
 
index 246aa62a48172d849cff8bd3d42947b28d7803a0..2fe0ceba4400ad017d99ca5324707aa96093d6be 100644 (file)
@@ -1117,10 +1117,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
                adhoc_join->bss_descriptor.bssid,
                adhoc_join->bss_descriptor.ssid);
 
-       for (i = 0; bss_desc->supported_rates[i] &&
-                       i < MWIFIEX_SUPPORTED_RATES;
-                       i++)
-                       ;
+       for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
+                   bss_desc->supported_rates[i]; i++)
+               ;
        rates_size = i;
 
        /* Copy Data Rates from the Rates recorded in scan response */
index 553adfb0aa81a35c468e44a07943b3f8fdf6e79e..7035ade9af74aeb08ceeea6b1545b28bd8e105d9 100644 (file)
@@ -723,7 +723,6 @@ struct mwifiex_adapter {
        u16 cmd_wait_q_required;
        struct mwifiex_wait_queue cmd_wait_q;
        u8 scan_wait_q_woken;
-       struct cmd_ctrl_node *cmd_queued;
        spinlock_t queue_lock;          /* lock for tx queues */
        struct completion fw_load;
        u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
@@ -1018,7 +1017,8 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
                        struct mwifiex_multicast_list *mcast_list);
 int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
                            struct net_device *dev);
-int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter);
+int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
+                               struct cmd_ctrl_node *cmd_queued);
 int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
                      struct cfg80211_ssid *req_ssid);
 int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
index 5c395e2e6a2b874fb0257424663a73cb7abf0c17..feb20461339767df1c8912b216b9d62a1c11f5da 100644 (file)
@@ -1508,6 +1508,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
                }
                memcpy(adapter->upld_buf, skb->data,
                       min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
+               skb_push(skb, INTF_HEADER_LEN);
                if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
                                           PCI_DMA_FROMDEVICE))
                        return -1;
index bb60c2754a97ea54cc8c709206ba31a861e05d2e..d215b4d3c51b57db90eee2293f6c153059fe037b 100644 (file)
@@ -1388,10 +1388,13 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
                        list_del(&cmd_node->list);
                        spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
                                               flags);
-                       adapter->cmd_queued = cmd_node;
                        mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
                                                        true);
                        queue_work(adapter->workqueue, &adapter->main_work);
+
+                       /* Perform internal scan synchronously */
+                       if (!priv->scan_request)
+                               mwifiex_wait_queue_complete(adapter, cmd_node);
                } else {
                        spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
                                               flags);
@@ -1946,9 +1949,6 @@ int mwifiex_request_scan(struct mwifiex_private *priv,
                /* Normal scan */
                ret = mwifiex_scan_networks(priv, NULL);
 
-       if (!ret)
-               ret = mwifiex_wait_queue_complete(priv->adapter);
-
        up(&priv->async_sem);
 
        return ret;
index 9f33c92c90f5b8dc32bf9685bd3b708171b66acc..13100f8de3db1729b613333f16d21438104cd2e0 100644 (file)
@@ -54,16 +54,10 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
  * This function waits on a cmd wait queue. It also cancels the pending
  * request after waking up, in case of errors.
  */
-int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
+int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
+                               struct cmd_ctrl_node *cmd_queued)
 {
        int status;
-       struct cmd_ctrl_node *cmd_queued;
-
-       if (!adapter->cmd_queued)
-               return 0;
-
-       cmd_queued = adapter->cmd_queued;
-       adapter->cmd_queued = NULL;
 
        dev_dbg(adapter->dev, "cmd pending\n");
        atomic_inc(&adapter->cmd_pending);
index 44d6ead433411eeddc801dc8e1eea0cce40d67e4..2bf4efa331867acaea139bb81dcca75a66532ef4 100644 (file)
@@ -55,10 +55,10 @@ config RT61PCI
 
 config RT2800PCI
        tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
-       depends on PCI || RALINK_RT288X || RALINK_RT305X
+       depends on PCI || SOC_RT288X || SOC_RT305X
        select RT2800_LIB
        select RT2X00_LIB_PCI if PCI
-       select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X
+       select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
        select RT2X00_LIB_FIRMWARE
        select RT2X00_LIB_CRYPTO
        select CRC_CCITT
index 48a01aa21f1c94039d9485d5e635926d58a17750..ded73da4de0b0d52cd6331d837c8c68e86b87749 100644 (file)
@@ -89,7 +89,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
        rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
 }
 
-#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
+#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
 static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
 {
        void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
@@ -107,7 +107,7 @@ static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
 {
        return -ENOMEM;
 }
-#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
+#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
 
 #ifdef CONFIG_PCI
 static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@@ -1177,7 +1177,7 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
 #endif /* CONFIG_PCI */
 MODULE_LICENSE("GPL");
 
-#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
+#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
 static int rt2800soc_probe(struct platform_device *pdev)
 {
        return rt2x00soc_probe(pdev, &rt2800pci_ops);
@@ -1194,7 +1194,7 @@ static struct platform_driver rt2800soc_driver = {
        .suspend        = rt2x00soc_suspend,
        .resume         = rt2x00soc_resume,
 };
-#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
+#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
 
 #ifdef CONFIG_PCI
 static int rt2800pci_probe(struct pci_dev *pci_dev,
@@ -1217,7 +1217,7 @@ static int __init rt2800pci_init(void)
 {
        int ret = 0;
 
-#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
+#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
        ret = platform_driver_register(&rt2800soc_driver);
        if (ret)
                return ret;
@@ -1225,7 +1225,7 @@ static int __init rt2800pci_init(void)
 #ifdef CONFIG_PCI
        ret = pci_register_driver(&rt2800pci_driver);
        if (ret) {
-#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
+#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
                platform_driver_unregister(&rt2800soc_driver);
 #endif
                return ret;
@@ -1240,7 +1240,7 @@ static void __exit rt2800pci_exit(void)
 #ifdef CONFIG_PCI
        pci_unregister_driver(&rt2800pci_driver);
 #endif
-#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
+#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
        platform_driver_unregister(&rt2800soc_driver);
 #endif
 }
index b1ccff474c7953f2431cb2cd0390f2e0ebf89591..c08d0f4c5f3d3fe8416195951ea6547a80429030 100644 (file)
@@ -1376,75 +1376,58 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw)
 }
 
 void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
-{
-       /* dummy routine needed for callback from rtl_op_configure_filter() */
-}
-
-/*========================================================================== */
-
-static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
-                             enum nl80211_iftype type)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
        struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       u8 filterout_non_associated_bssid = false;
+       u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
 
-       switch (type) {
-       case NL80211_IFTYPE_ADHOC:
-       case NL80211_IFTYPE_STATION:
-               filterout_non_associated_bssid = true;
-               break;
-       case NL80211_IFTYPE_UNSPECIFIED:
-       case NL80211_IFTYPE_AP:
-       default:
-               break;
-       }
-       if (filterout_non_associated_bssid) {
+       if (rtlpriv->psc.rfpwr_state != ERFON)
+               return;
+
+       if (check_bssid) {
+               u8 tmp;
                if (IS_NORMAL_CHIP(rtlhal->version)) {
-                       switch (rtlphy->current_io_type) {
-                       case IO_CMD_RESUME_DM_BY_SCAN:
-                               reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
-                               rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                HW_VAR_RCR, (u8 *)(&reg_rcr));
-                               /* enable update TSF */
-                               _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
-                               break;
-                       case IO_CMD_PAUSE_DM_BY_SCAN:
-                               reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
-                               rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                HW_VAR_RCR, (u8 *)(&reg_rcr));
-                               /* disable update TSF */
-                               _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
-                               break;
-                       }
+                       reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+                       tmp = BIT(4);
                } else {
-                       reg_rcr |= (RCR_CBSSID);
-                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
-                                                     (u8 *)(&reg_rcr));
-                       _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
+                       reg_rcr |= RCR_CBSSID;
+                       tmp = BIT(4) | BIT(5);
                }
-       } else if (filterout_non_associated_bssid == false) {
+               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
+                                             (u8 *) (&reg_rcr));
+               _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp);
+       } else {
+               u8 tmp;
                if (IS_NORMAL_CHIP(rtlhal->version)) {
-                       reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
-                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
-                                                     (u8 *)(&reg_rcr));
-                       _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
+                       reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+                       tmp = BIT(4);
                } else {
-                       reg_rcr &= (~RCR_CBSSID);
-                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
-                                                     (u8 *)(&reg_rcr));
-                       _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
+                       reg_rcr &= ~RCR_CBSSID;
+                       tmp = BIT(4) | BIT(5);
                }
+               reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
+               rtlpriv->cfg->ops->set_hw_reg(hw,
+                                             HW_VAR_RCR, (u8 *) (&reg_rcr));
+               _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0);
        }
 }
 
+/*========================================================================== */
+
 int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
 {
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
        if (_rtl92cu_set_media_status(hw, type))
                return -EOPNOTSUPP;
-       _rtl92cu_set_check_bssid(hw, type);
+
+       if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
+               if (type != NL80211_IFTYPE_AP)
+                       rtl92cu_set_check_bssid(hw, true);
+       } else {
+               rtl92cu_set_check_bssid(hw, false);
+       }
+
        return 0;
 }
 
@@ -2058,8 +2041,6 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
                               (shortgi_rate << 4) | (shortgi_rate);
        }
        rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
-       RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
-                rtl_read_dword(rtlpriv, REG_ARFR0));
 }
 
 void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
index 156b52732f3d576cb458bc3f6b3e2eb2818f2d90..5847d6d0881e7ce30180d26cc275425c55eda76b 100644 (file)
@@ -851,6 +851,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
        if (unlikely(!_urb)) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "Can't allocate urb. Drop skb!\n");
+               kfree_skb(skb);
                return;
        }
        _rtl_submit_tx_urb(hw, _urb);
index ab886b7ee327af413f98976382869242fcba679b..b41ac7756a4ba6ff7795804db13db00c6437c662 100644 (file)
@@ -100,6 +100,27 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
        return min((size_t)(image - rom), size);
 }
 
+static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size)
+{
+       struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
+       loff_t start;
+
+       /* assign the ROM an address if it doesn't have one */
+       if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE))
+               return 0;
+       start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
+       *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
+
+       if (*size == 0)
+               return 0;
+
+       /* Enable ROM space decodes */
+       if (pci_enable_rom(pdev))
+               return 0;
+
+       return start;
+}
+
 /**
  * pci_map_rom - map a PCI ROM to kernel space
  * @pdev: pointer to pci device struct
@@ -114,21 +135,15 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
 void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
 {
        struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
-       loff_t start;
+       loff_t start = 0;
        void __iomem *rom;
 
-       /*
-        * Some devices may provide ROMs via a source other than the BAR
-        */
-       if (pdev->rom && pdev->romlen) {
-               *size = pdev->romlen;
-               return phys_to_virt(pdev->rom);
        /*
         * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
         * memory map if the VGA enable bit of the Bridge Control register is
         * set for embedded VGA.
         */
-       } else if (res->flags & IORESOURCE_ROM_SHADOW) {
+       if (res->flags & IORESOURCE_ROM_SHADOW) {
                /* primary video rom always starts here */
                start = (loff_t)0xC0000;
                *size = 0x20000; /* cover C000:0 through E000:0 */
@@ -139,21 +154,21 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
                        return (void __iomem *)(unsigned long)
                                pci_resource_start(pdev, PCI_ROM_RESOURCE);
                } else {
-                       /* assign the ROM an address if it doesn't have one */
-                       if (res->parent == NULL &&
-                           pci_assign_resource(pdev,PCI_ROM_RESOURCE))
-                               return NULL;
-                       start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
-                       *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
-                       if (*size == 0)
-                               return NULL;
-
-                       /* Enable ROM space decodes */
-                       if (pci_enable_rom(pdev))
-                               return NULL;
+                       start = pci_find_rom(pdev, size);
                }
        }
 
+       /*
+        * Some devices may provide ROMs via a source other than the BAR
+        */
+       if (!start && pdev->rom && pdev->romlen) {
+               *size = pdev->romlen;
+               return phys_to_virt(pdev->rom);
+       }
+
+       if (!start)
+               return NULL;
+
        rom = ioremap(start, *size);
        if (!rom) {
                /* restore enable if ioremap fails */
index c689c04a4f523248e9b2712e77d7e041f5b074d7..2d2f0a43d36b409cf287f6d6629b506e517948a9 100644 (file)
@@ -620,7 +620,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
 
                /* special soc specific control */
                if (ctrl->mpp_get || ctrl->mpp_set) {
-                       if (!ctrl->name || !ctrl->mpp_set || !ctrl->mpp_set) {
+                       if (!ctrl->name || !ctrl->mpp_get || !ctrl->mpp_set) {
                                dev_err(&pdev->dev, "wrong soc control info\n");
                                return -EINVAL;
                        }
index ac8d382a79bbfe43de8769ea43d1c13ab2fac036..d611ecfcbf70d8401946767f01833069ef60c431 100644 (file)
@@ -622,7 +622,7 @@ static const struct file_operations pinconf_dbg_pinname_fops = {
 static int pinconf_dbg_state_print(struct seq_file *s, void *d)
 {
        if (strlen(dbg_state_name))
-               seq_printf(s, "%s\n", dbg_pinname);
+               seq_printf(s, "%s\n", dbg_state_name);
        else
                seq_printf(s, "No pin state set\n");
        return 0;
index e3ed8cb072a5a4dd410015c66e3202ded90dd49c..bfda73d64eed527b2fce7e0a084b7459943bfb1e 100644 (file)
@@ -90,7 +90,7 @@ static inline void pinconf_init_device_debugfs(struct dentry *devroot,
  * pin config.
  */
 
-#ifdef CONFIG_GENERIC_PINCONF
+#if defined(CONFIG_GENERIC_PINCONF) && defined(CONFIG_DEBUG_FS)
 
 void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev,
                              struct seq_file *s, unsigned pin);
index caecdd37306126d7352b57365932b00b5e7ac1f2..c542a97c82f37cc743fa9d97e914c13dfe55aaa7 100644 (file)
@@ -422,7 +422,7 @@ static u8 abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
        }
 
        /* check if pin use AlternateFunction register */
-       if ((af.alt_bit1 == UNUSED) && (af.alt_bit1 == UNUSED))
+       if ((af.alt_bit1 == UNUSED) && (af.alt_bit2 == UNUSED))
                return mode;
        /*
         * if pin GPIOSEL bit is set and pin supports alternate function,
index 75933a6aa8284c0cbf466a968ed2c846c7c14d69..efb7f10e902a0e1aa9701ed5506a23d204b1a9df 100644 (file)
@@ -1277,21 +1277,80 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
 }
 
 #ifdef CONFIG_PM
+
+static u32 wakeups[MAX_GPIO_BANKS];
+static u32 backups[MAX_GPIO_BANKS];
+
 static int gpio_irq_set_wake(struct irq_data *d, unsigned state)
 {
        struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d);
        unsigned        bank = at91_gpio->pioc_idx;
+       unsigned mask = 1 << d->hwirq;
 
        if (unlikely(bank >= MAX_GPIO_BANKS))
                return -EINVAL;
 
+       if (state)
+               wakeups[bank] |= mask;
+       else
+               wakeups[bank] &= ~mask;
+
        irq_set_irq_wake(at91_gpio->pioc_virq, state);
 
        return 0;
 }
+
+void at91_pinctrl_gpio_suspend(void)
+{
+       int i;
+
+       for (i = 0; i < gpio_banks; i++) {
+               void __iomem  *pio;
+
+               if (!gpio_chips[i])
+                       continue;
+
+               pio = gpio_chips[i]->regbase;
+
+               backups[i] = __raw_readl(pio + PIO_IMR);
+               __raw_writel(backups[i], pio + PIO_IDR);
+               __raw_writel(wakeups[i], pio + PIO_IER);
+
+               if (!wakeups[i]) {
+                       clk_unprepare(gpio_chips[i]->clock);
+                       clk_disable(gpio_chips[i]->clock);
+               } else {
+                       printk(KERN_DEBUG "GPIO-%c may wake for %08x\n",
+                              'A'+i, wakeups[i]);
+               }
+       }
+}
+
+void at91_pinctrl_gpio_resume(void)
+{
+       int i;
+
+       for (i = 0; i < gpio_banks; i++) {
+               void __iomem  *pio;
+
+               if (!gpio_chips[i])
+                       continue;
+
+               pio = gpio_chips[i]->regbase;
+
+               if (!wakeups[i]) {
+                       if (clk_prepare(gpio_chips[i]->clock) == 0)
+                               clk_enable(gpio_chips[i]->clock);
+               }
+
+               __raw_writel(wakeups[i], pio + PIO_IDR);
+               __raw_writel(backups[i], pio + PIO_IER);
+       }
+}
+
 #else
 #define gpio_irq_set_wake      NULL
-#endif
+#endif /* CONFIG_PM */
 
 static struct irq_chip gpio_irqchip = {
        .name           = "GPIO",
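
Note (not part of the patch): the new suspend/resume helpers keep a per-bank wake mask (`wakeups[]`) and a saved copy of the interrupt mask (`backups[]`); on suspend only wake-capable lines stay enabled, and on resume the saved mask is written back. A standalone sketch of that save/mask/restore idea, with a hypothetical bank count and plain arrays in place of the PIO registers:

    #include <stdio.h>

    #define NR_BANKS 3                      /* hypothetical number of GPIO banks */

    static unsigned int imr[NR_BANKS];      /* stand-in for the hardware interrupt mask */
    static unsigned int wakeups[NR_BANKS];  /* lines allowed to wake the system */
    static unsigned int backups[NR_BANKS];  /* saved mask to restore on resume */

    static void banks_suspend(void)
    {
            for (int i = 0; i < NR_BANKS; i++) {
                    backups[i] = imr[i];    /* remember what was enabled */
                    imr[i] = wakeups[i];    /* keep only the wake sources armed */
            }
    }

    static void banks_resume(void)
    {
            for (int i = 0; i < NR_BANKS; i++)
                    imr[i] = backups[i];    /* re-enable the normal mask */
    }

    int main(void)
    {
            imr[0] = 0xff;
            wakeups[0] = 0x04;
            banks_suspend();
            printf("suspended mask: %#x\n", imr[0]);   /* 0x4: only the wake line */
            banks_resume();
            printf("resumed mask:   %#x\n", imr[0]);   /* 0xff again */
            return 0;
    }
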
index 1a00658b3ea070f336333160411f1b01bbc8f96e..bd83c8b01cd102e7c0f194c38389bb4a083c5dec 100644 (file)
@@ -194,6 +194,11 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
        }
 
        if (!gpio_range) {
+               /*
+                * A pin should not be freed more times than allocated.
+                */
+               if (WARN_ON(!desc->mux_usecount))
+                       return NULL;
                desc->mux_usecount--;
                if (desc->mux_usecount)
                        return NULL;
index 434ebc3a99dc089d10a6d27e5be99faebb28aace..0a9f27e094ea9ea122ec0658c673a3249eb6e04e 100644 (file)
@@ -44,6 +44,7 @@ static DECLARE_COMPLETION(at91_rtc_updated);
 static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
 static void __iomem *at91_rtc_regs;
 static int irq;
+static u32 at91_rtc_imr;
 
 /*
  * Decode time/date into rtc_time structure
@@ -108,9 +109,11 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
        cr = at91_rtc_read(AT91_RTC_CR);
        at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
 
+       at91_rtc_imr |= AT91_RTC_ACKUPD;
        at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
        wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
        at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
+       at91_rtc_imr &= ~AT91_RTC_ACKUPD;
 
        at91_rtc_write(AT91_RTC_TIMR,
                          bin2bcd(tm->tm_sec) << 0
@@ -142,7 +145,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
        tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
        tm->tm_year = at91_alarm_year - 1900;
 
-       alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
+       alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM)
                        ? 1 : 0;
 
        dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -168,6 +171,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
        tm.tm_sec = alrm->time.tm_sec;
 
        at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+       at91_rtc_imr &= ~AT91_RTC_ALARM;
        at91_rtc_write(AT91_RTC_TIMALR,
                  bin2bcd(tm.tm_sec) << 0
                | bin2bcd(tm.tm_min) << 8
@@ -180,6 +184,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
 
        if (alrm->enabled) {
                at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+               at91_rtc_imr |= AT91_RTC_ALARM;
                at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
        }
 
@@ -196,9 +201,12 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 
        if (enabled) {
                at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+               at91_rtc_imr |= AT91_RTC_ALARM;
                at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
-       } else
+       } else {
                at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+               at91_rtc_imr &= ~AT91_RTC_ALARM;
+       }
 
        return 0;
 }
@@ -207,12 +215,10 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
  */
 static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
 {
-       unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
-
        seq_printf(seq, "update_IRQ\t: %s\n",
-                       (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
+                       (at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no");
        seq_printf(seq, "periodic_IRQ\t: %s\n",
-                       (imr & AT91_RTC_SECEV) ? "yes" : "no");
+                       (at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no");
 
        return 0;
 }
@@ -227,7 +233,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
        unsigned int rtsr;
        unsigned long events = 0;
 
-       rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
+       rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr;
        if (rtsr) {             /* this interrupt is shared!  Is it ours? */
                if (rtsr & AT91_RTC_ALARM)
                        events |= (RTC_AF | RTC_IRQF);
@@ -291,6 +297,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
        at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
                                        AT91_RTC_SECEV | AT91_RTC_TIMEV |
                                        AT91_RTC_CALEV);
+       at91_rtc_imr = 0;
 
        ret = request_irq(irq, at91_rtc_interrupt,
                                IRQF_SHARED,
@@ -329,6 +336,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
        at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
                                        AT91_RTC_SECEV | AT91_RTC_TIMEV |
                                        AT91_RTC_CALEV);
+       at91_rtc_imr = 0;
        free_irq(irq, pdev);
 
        rtc_device_unregister(rtc);
@@ -341,31 +349,35 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
 
 /* AT91RM9200 RTC Power management control */
 
-static u32 at91_rtc_imr;
+static u32 at91_rtc_bkpimr;
+
 
 static int at91_rtc_suspend(struct device *dev)
 {
        /* this IRQ is shared with DBGU and other hardware which isn't
         * necessarily doing PM like we are...
         */
-       at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR)
-                       & (AT91_RTC_ALARM|AT91_RTC_SECEV);
-       if (at91_rtc_imr) {
-               if (device_may_wakeup(dev))
+       at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV);
+       if (at91_rtc_bkpimr) {
+               if (device_may_wakeup(dev)) {
                        enable_irq_wake(irq);
-               else
-                       at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
-       }
+               } else {
+                       at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr);
+                       at91_rtc_imr &= ~at91_rtc_bkpimr;
+               }
+}
        return 0;
 }
 
 static int at91_rtc_resume(struct device *dev)
 {
-       if (at91_rtc_imr) {
-               if (device_may_wakeup(dev))
+       if (at91_rtc_bkpimr) {
+               if (device_may_wakeup(dev)) {
                        disable_irq_wake(irq);
-               else
-                       at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
+               } else {
+                       at91_rtc_imr |= at91_rtc_bkpimr;
+                       at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr);
+               }
        }
        return 0;
 }
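
Note (not part of the patch): the RTC changes above stop reading the hardware interrupt-mask register and instead keep a software shadow, `at91_rtc_imr`, updated next to every interrupt enable/disable write. A minimal sketch of such a shadow mask; the bit names are illustrative and the hardware IER/IDR writes are omitted:

    #include <stdio.h>

    #define RTC_ALARM  (1u << 1)     /* illustrative interrupt bits */
    #define RTC_ACKUPD (1u << 0)

    static unsigned int shadow_imr;  /* software copy of the enabled-interrupt mask */

    /* In the driver these would also write the enable/disable registers;
     * here they only maintain the shadow that replaces IMR reads. */
    static void rtc_irq_enable(unsigned int bits)
    {
            shadow_imr |= bits;
    }

    static void rtc_irq_disable(unsigned int bits)
    {
            shadow_imr &= ~bits;
    }

    int main(void)
    {
            rtc_irq_enable(RTC_ALARM);
            printf("alarm enabled: %s\n", (shadow_imr & RTC_ALARM) ? "yes" : "no");
            rtc_irq_disable(RTC_ALARM);
            printf("alarm enabled: %s\n", (shadow_imr & RTC_ALARM) ? "yes" : "no");
            return 0;
    }
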
index da1945e5f71449a99dd6ed5baa6a4aec23af7a64..5f940b6844cbfb524de17c1e58cf755f18775bee 100644 (file)
@@ -64,7 +64,6 @@
 #define        AT91_RTC_SCCR           0x1c                    /* Status Clear Command Register */
 #define        AT91_RTC_IER            0x20                    /* Interrupt Enable Register */
 #define        AT91_RTC_IDR            0x24                    /* Interrupt Disable Register */
-#define        AT91_RTC_IMR            0x28                    /* Interrupt Mask Register */
 
 #define        AT91_RTC_VER            0x2c                    /* Valid Entry Register */
 #define                AT91_RTC_NVTIM          (1 <<  0)               /* Non valid Time */
index 0dde688ca09b4785bf6968393f1b1a7579233b8f..969abbad7fe346d0f815c4da6f566b01aeedea45 100644 (file)
@@ -239,11 +239,9 @@ static int da9052_rtc_probe(struct platform_device *pdev)
 
        rtc->da9052 = dev_get_drvdata(pdev->dev.parent);
        platform_set_drvdata(pdev, rtc);
-       rtc->irq = platform_get_irq_byname(pdev, "ALM");
-       ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
-                               da9052_rtc_irq,
-                               IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-                               "ALM", rtc);
+       rtc->irq =  DA9052_IRQ_ALARM;
+       ret = da9052_request_irq(rtc->da9052, rtc->irq, "ALM",
+                               da9052_rtc_irq, rtc);
        if (ret != 0) {
                rtc_err(rtc->da9052, "irq registration failed: %d\n", ret);
                return ret;
index 9978ad4433cb460ea96dcc5f7a4bd0d7e8c5bdd6..5ac9c935c151511a1a9b42cf76dc5efd87c4f71f 100644 (file)
@@ -135,6 +135,11 @@ static const struct block_device_operations scm_blk_devops = {
        .release = scm_release,
 };
 
+static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
+{
+       return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
+}
+
 static void scm_request_prepare(struct scm_request *scmrq)
 {
        struct scm_blk_dev *bdev = scmrq->bdev;
@@ -195,14 +200,18 @@ void scm_request_requeue(struct scm_request *scmrq)
 
        scm_release_cluster(scmrq);
        blk_requeue_request(bdev->rq, scmrq->request);
+       atomic_dec(&bdev->queued_reqs);
        scm_request_done(scmrq);
        scm_ensure_queue_restart(bdev);
 }
 
 void scm_request_finish(struct scm_request *scmrq)
 {
+       struct scm_blk_dev *bdev = scmrq->bdev;
+
        scm_release_cluster(scmrq);
        blk_end_request_all(scmrq->request, scmrq->error);
+       atomic_dec(&bdev->queued_reqs);
        scm_request_done(scmrq);
 }
 
@@ -218,6 +227,10 @@ static void scm_blk_request(struct request_queue *rq)
                if (req->cmd_type != REQ_TYPE_FS)
                        continue;
 
+               if (!scm_permit_request(bdev, req)) {
+                       scm_ensure_queue_restart(bdev);
+                       return;
+               }
                scmrq = scm_request_fetch();
                if (!scmrq) {
                        SCM_LOG(5, "no request");
@@ -231,11 +244,13 @@ static void scm_blk_request(struct request_queue *rq)
                        return;
                }
                if (scm_need_cluster_request(scmrq)) {
+                       atomic_inc(&bdev->queued_reqs);
                        blk_start_request(req);
                        scm_initiate_cluster_request(scmrq);
                        return;
                }
                scm_request_prepare(scmrq);
+               atomic_inc(&bdev->queued_reqs);
                blk_start_request(req);
 
                ret = scm_start_aob(scmrq->aob);
@@ -244,7 +259,6 @@ static void scm_blk_request(struct request_queue *rq)
                        scm_request_requeue(scmrq);
                        return;
                }
-               atomic_inc(&bdev->queued_reqs);
        }
 }
 
@@ -280,6 +294,38 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
        tasklet_hi_schedule(&bdev->tasklet);
 }
 
+static void scm_blk_handle_error(struct scm_request *scmrq)
+{
+       struct scm_blk_dev *bdev = scmrq->bdev;
+       unsigned long flags;
+
+       if (scmrq->error != -EIO)
+               goto restart;
+
+       /* For -EIO the response block is valid. */
+       switch (scmrq->aob->response.eqc) {
+       case EQC_WR_PROHIBIT:
+               spin_lock_irqsave(&bdev->lock, flags);
+               if (bdev->state != SCM_WR_PROHIBIT)
+                       pr_info("%lu: Write access to the SCM increment is suspended\n",
+                               (unsigned long) bdev->scmdev->address);
+               bdev->state = SCM_WR_PROHIBIT;
+               spin_unlock_irqrestore(&bdev->lock, flags);
+               goto requeue;
+       default:
+               break;
+       }
+
+restart:
+       if (!scm_start_aob(scmrq->aob))
+               return;
+
+requeue:
+       spin_lock_irqsave(&bdev->rq_lock, flags);
+       scm_request_requeue(scmrq);
+       spin_unlock_irqrestore(&bdev->rq_lock, flags);
+}
+
 static void scm_blk_tasklet(struct scm_blk_dev *bdev)
 {
        struct scm_request *scmrq;
@@ -293,11 +339,8 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
                spin_unlock_irqrestore(&bdev->lock, flags);
 
                if (scmrq->error && scmrq->retries-- > 0) {
-                       if (scm_start_aob(scmrq->aob)) {
-                               spin_lock_irqsave(&bdev->rq_lock, flags);
-                               scm_request_requeue(scmrq);
-                               spin_unlock_irqrestore(&bdev->rq_lock, flags);
-                       }
+                       scm_blk_handle_error(scmrq);
+
                        /* Request restarted or requeued, handle next. */
                        spin_lock_irqsave(&bdev->lock, flags);
                        continue;
@@ -310,7 +353,6 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev)
                }
 
                scm_request_finish(scmrq);
-               atomic_dec(&bdev->queued_reqs);
                spin_lock_irqsave(&bdev->lock, flags);
        }
        spin_unlock_irqrestore(&bdev->lock, flags);
@@ -332,6 +374,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
        }
 
        bdev->scmdev = scmdev;
+       bdev->state = SCM_OPER;
        spin_lock_init(&bdev->rq_lock);
        spin_lock_init(&bdev->lock);
        INIT_LIST_HEAD(&bdev->finished_requests);
@@ -396,6 +439,18 @@ void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
        put_disk(bdev->gendisk);
 }
 
+void scm_blk_set_available(struct scm_blk_dev *bdev)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&bdev->lock, flags);
+       if (bdev->state == SCM_WR_PROHIBIT)
+               pr_info("%lu: Write access to the SCM increment is restored\n",
+                       (unsigned long) bdev->scmdev->address);
+       bdev->state = SCM_OPER;
+       spin_unlock_irqrestore(&bdev->lock, flags);
+}
+
 static int __init scm_blk_init(void)
 {
        int ret = -EINVAL;
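
Note (not part of the patch): these scm_blk hunks add a small per-device state machine — an -EIO response with EQC_WR_PROHIBIT moves the device to SCM_WR_PROHIBIT so writes are held back, and a later availability notification returns it to SCM_OPER. A compact standalone sketch of that gate, using plain variables instead of the driver's locking:

    #include <stdbool.h>
    #include <stdio.h>

    enum scm_state { SCM_OPER, SCM_WR_PROHIBIT };  /* mirrors the new bdev->state field */

    static enum scm_state state = SCM_OPER;

    /* A request is allowed unless it is a write while writes are prohibited. */
    static bool permit_request(bool is_write)
    {
            return !is_write || state != SCM_WR_PROHIBIT;
    }

    int main(void)
    {
            printf("write allowed: %d\n", permit_request(true));   /* 1 */
            state = SCM_WR_PROHIBIT;            /* -EIO with EQC_WR_PROHIBIT seen */
            printf("write allowed: %d\n", permit_request(true));   /* 0 */
            state = SCM_OPER;                   /* availability notification */
            printf("write allowed: %d\n", permit_request(true));   /* 1 */
            return 0;
    }
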
index 3c1ccf4946478dd5817b9a3c0435d89d5f164c06..8b387b32fd622b981a19e951eca1b3fd30c783bf 100644 (file)
@@ -21,6 +21,7 @@ struct scm_blk_dev {
        spinlock_t rq_lock;     /* guard the request queue */
        spinlock_t lock;        /* guard the rest of the blockdev */
        atomic_t queued_reqs;
+       enum {SCM_OPER, SCM_WR_PROHIBIT} state;
        struct list_head finished_requests;
 #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
        struct list_head cluster_list;
@@ -48,6 +49,7 @@ struct scm_request {
 
 int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
 void scm_blk_dev_cleanup(struct scm_blk_dev *);
+void scm_blk_set_available(struct scm_blk_dev *);
 void scm_blk_irq(struct scm_device *, void *, int);
 
 void scm_request_finish(struct scm_request *);
index 9fa0a908607b013ce3ef0981a323d3b4e9043bde..5f6180d6ff08f50516215c20553f0c1ce5f4da42 100644 (file)
 #include <asm/eadm.h>
 #include "scm_blk.h"
 
-static void notify(struct scm_device *scmdev)
+static void scm_notify(struct scm_device *scmdev, enum scm_event event)
 {
-       pr_info("%lu: The capabilities of the SCM increment changed\n",
-               (unsigned long) scmdev->address);
-       SCM_LOG(2, "State changed");
-       SCM_LOG_STATE(2, scmdev);
+       struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
+
+       switch (event) {
+       case SCM_CHANGE:
+               pr_info("%lu: The capabilities of the SCM increment changed\n",
+                       (unsigned long) scmdev->address);
+               SCM_LOG(2, "State changed");
+               SCM_LOG_STATE(2, scmdev);
+               break;
+       case SCM_AVAIL:
+               SCM_LOG(2, "Increment available");
+               SCM_LOG_STATE(2, scmdev);
+               scm_blk_set_available(bdev);
+               break;
+       }
 }
 
 static int scm_probe(struct scm_device *scmdev)
@@ -64,7 +75,7 @@ static struct scm_driver scm_drv = {
                .name = "scm_block",
                .owner = THIS_MODULE,
        },
-       .notify = notify,
+       .notify = scm_notify,
        .probe = scm_probe,
        .remove = scm_remove,
        .handler = scm_blk_irq,
index 30a2255389e55303934d2e454fbd01fcfe947b2e..cd798386b622173172906cc021306d9cc64941bf 100644 (file)
@@ -627,6 +627,8 @@ static int __init sclp_detect_standby_memory(void)
        struct read_storage_sccb *sccb;
        int i, id, assigned, rc;
 
+       if (OLDMEM_BASE) /* No standby memory in kdump mode */
+               return 0;
        if (!early_read_info_sccb_valid)
                return 0;
        if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
index 31ceef1beb8b06715090b9459f879997067896dd..e16c553f65561f453c4eed2a97eb4cda8a539231 100644 (file)
@@ -433,6 +433,20 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
                              " failed (rc=%d).\n", ret);
 }
 
+static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
+{
+       int ret;
+
+       CIO_CRW_EVENT(4, "chsc: scm available information\n");
+       if (sei_area->rs != 7)
+               return;
+
+       ret = scm_process_availability_information();
+       if (ret)
+               CIO_CRW_EVENT(0, "chsc: process availability information"
+                             " failed (rc=%d).\n", ret);
+}
+
 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
 {
        switch (sei_area->cc) {
@@ -468,6 +482,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
        case 12: /* scm change notification */
                chsc_process_sei_scm_change(sei_area);
                break;
+       case 14: /* scm available notification */
+               chsc_process_sei_scm_avail(sei_area);
+               break;
        default: /* other stuff */
                CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
                              sei_area->cc);
index 227e05f674b38a6151a3c3b99534c6700835640f..349d5fc471963a20d7f1dd4fec393ef44877d6a6 100644 (file)
@@ -156,8 +156,10 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
 
 #ifdef CONFIG_SCM_BUS
 int scm_update_information(void);
+int scm_process_availability_information(void);
 #else /* CONFIG_SCM_BUS */
 static inline int scm_update_information(void) { return 0; }
+static inline int scm_process_availability_information(void) { return 0; }
 #endif /* CONFIG_SCM_BUS */
 
 
index bcf20f3aa51b67fd148ae807e0cbc070e09e8b22..46ec25632e8bd4ef8e0be26a3f515697abd7f760 100644 (file)
@@ -211,7 +211,7 @@ static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
                goto out;
        scmdrv = to_scm_drv(scmdev->dev.driver);
        if (changed && scmdrv->notify)
-               scmdrv->notify(scmdev);
+               scmdrv->notify(scmdev, SCM_CHANGE);
 out:
        device_unlock(&scmdev->dev);
        if (changed)
@@ -297,6 +297,22 @@ int scm_update_information(void)
        return ret;
 }
 
+static int scm_dev_avail(struct device *dev, void *unused)
+{
+       struct scm_driver *scmdrv = to_scm_drv(dev->driver);
+       struct scm_device *scmdev = to_scm_dev(dev);
+
+       if (dev->driver && scmdrv->notify)
+               scmdrv->notify(scmdev, SCM_AVAIL);
+
+       return 0;
+}
+
+int scm_process_availability_information(void)
+{
+       return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail);
+}
+
 static int __init scm_init(void)
 {
        int ret;
index d87961d4c0defe7aced66b6430b34cc0c032d886..8c0622399fcd67bd7353b6274a3d370b3d9caaa4 100644 (file)
@@ -916,6 +916,7 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
        void *reply_param);
 int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
 int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
+int qeth_get_elements_for_frags(struct sk_buff *);
 int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
                        struct sk_buff *, struct qeth_hdr *, int, int, int);
 int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
index 0d8cdff818139e8ee267f57df08c78494c1592d0..0d73a999983d90ff632c0dcdcf6d54bedbd5a289 100644 (file)
@@ -3679,6 +3679,25 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
 }
 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
 
+int qeth_get_elements_for_frags(struct sk_buff *skb)
+{
+       int cnt, length, e, elements = 0;
+       struct skb_frag_struct *frag;
+       char *data;
+
+       for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
+               frag = &skb_shinfo(skb)->frags[cnt];
+               data = (char *)page_to_phys(skb_frag_page(frag)) +
+                       frag->page_offset;
+               length = frag->size;
+               e = PFN_UP((unsigned long)data + length - 1) -
+                       PFN_DOWN((unsigned long)data);
+               elements += e;
+       }
+       return elements;
+}
+EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
+
 int qeth_get_elements_no(struct qeth_card *card, void *hdr,
                     struct sk_buff *skb, int elems)
 {
@@ -3686,7 +3705,8 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
        int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
                PFN_DOWN((unsigned long)skb->data);
 
-       elements_needed += skb_shinfo(skb)->nr_frags;
+       elements_needed += qeth_get_elements_for_frags(skb);
+
        if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
                QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
                        "(Number=%d / Length=%d). Discarded.\n",
@@ -3771,12 +3791,23 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
 
        for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
                frag = &skb_shinfo(skb)->frags[cnt];
-               buffer->element[element].addr = (char *)
-                       page_to_phys(skb_frag_page(frag))
-                       + frag->page_offset;
-               buffer->element[element].length = frag->size;
-               buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
-               element++;
+               data = (char *)page_to_phys(skb_frag_page(frag)) +
+                       frag->page_offset;
+               length = frag->size;
+               while (length > 0) {
+                       length_here = PAGE_SIZE -
+                               ((unsigned long) data % PAGE_SIZE);
+                       if (length < length_here)
+                               length_here = length;
+
+                       buffer->element[element].addr = data;
+                       buffer->element[element].length = length_here;
+                       buffer->element[element].eflags =
+                               SBAL_EFLAGS_MIDDLE_FRAG;
+                       length -= length_here;
+                       data += length_here;
+                       element++;
+               }
        }
 
        if (buffer->element[element - 1].eflags)
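
Note (not part of the patch): the qeth hunks switch from counting one buffer element per fragment to one element per page the fragment actually touches, and split fragments at page boundaries when filling the buffer. A standalone sketch of the page-count arithmetic behind that; the formula below is the page-touch count written out directly rather than the driver's PFN_UP/PFN_DOWN expression:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* How many pages does the byte range [addr, addr + len) touch, for len >= 1?
     * Each page needs its own buffer element, so a fragment that crosses a
     * page boundary contributes more than one element. */
    static unsigned long pages_touched(unsigned long addr, unsigned long len)
    {
            return (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
    }

    int main(void)
    {
            printf("%lu\n", pages_touched(0, 4096));     /* 1: fits in one page */
            printf("%lu\n", pages_touched(4000, 200));   /* 2: crosses a boundary */
            return 0;
    }
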
index 091ca0efa1c539e0ca27f57f55a00cb543bfc304..8710337dab3eceb4e5fa692d6a39accd697ba508 100644 (file)
@@ -623,7 +623,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
        return rc;
 }
 
-static void qeth_l3_correct_routing_type(struct qeth_card *card,
+static int qeth_l3_correct_routing_type(struct qeth_card *card,
                enum qeth_routing_types *type, enum qeth_prot_versions prot)
 {
        if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -632,7 +632,7 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card,
                case PRIMARY_CONNECTOR:
                case SECONDARY_CONNECTOR:
                case MULTICAST_ROUTER:
-                       return;
+                       return 0;
                default:
                        goto out_inval;
                }
@@ -641,17 +641,18 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card,
                case NO_ROUTER:
                case PRIMARY_ROUTER:
                case SECONDARY_ROUTER:
-                       return;
+                       return 0;
                case MULTICAST_ROUTER:
                        if (qeth_is_ipafunc_supported(card, prot,
                                                      IPA_OSA_MC_ROUTER))
-                               return;
+                               return 0;
                default:
                        goto out_inval;
                }
        }
 out_inval:
        *type = NO_ROUTER;
+       return -EINVAL;
 }
 
 int qeth_l3_setrouting_v4(struct qeth_card *card)
@@ -660,8 +661,10 @@ int qeth_l3_setrouting_v4(struct qeth_card *card)
 
        QETH_CARD_TEXT(card, 3, "setrtg4");
 
-       qeth_l3_correct_routing_type(card, &card->options.route4.type,
+       rc = qeth_l3_correct_routing_type(card, &card->options.route4.type,
                                  QETH_PROT_IPV4);
+       if (rc)
+               return rc;
 
        rc = qeth_l3_send_setrouting(card, card->options.route4.type,
                                  QETH_PROT_IPV4);
@@ -683,8 +686,10 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
 
        if (!qeth_is_supported(card, IPA_IPV6))
                return 0;
-       qeth_l3_correct_routing_type(card, &card->options.route6.type,
+       rc = qeth_l3_correct_routing_type(card, &card->options.route6.type,
                                  QETH_PROT_IPV6);
+       if (rc)
+               return rc;
 
        rc = qeth_l3_send_setrouting(card, card->options.route6.type,
                                  QETH_PROT_IPV6);
@@ -2898,7 +2903,9 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb)
                tcp_hdr(skb)->doff * 4;
        int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
        int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd);
-       elements += skb_shinfo(skb)->nr_frags;
+
+       elements += qeth_get_elements_for_frags(skb);
+
        return elements;
 }
 
@@ -3348,7 +3355,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                rc = -ENODEV;
                goto out_remove;
        }
-       qeth_trace_features(card);
 
        if (!card->dev && qeth_l3_setup_netdev(card)) {
                rc = -ENODEV;
@@ -3425,6 +3431,7 @@ contin:
                qeth_l3_set_multicast_list(card->dev);
                rtnl_unlock();
        }
+       qeth_trace_features(card);
        /* let user_space know that device is online */
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
        mutex_unlock(&card->conf_mutex);
index ebc3794862675e591b45395f29cc0f6cf4c5e801..e70af2406ff9da388828660bb0bed2b73bf5cb71 100644 (file)
@@ -87,6 +87,8 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
                        rc = qeth_l3_setrouting_v6(card);
        }
 out:
+       if (rc)
+               route->type = old_route_type;
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
 }
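
Taken together, the qeth_l3 hunks above change the routing-type checker from silently coercing an invalid type to returning -EINVAL, have the setrouting callers bail out early, and make the sysfs store restore the previous value when the operation fails. The stand-alone sketch below shows that validate-and-roll-back shape only; names are made up and the validation is simplified compared with the driver.

    #include <errno.h>
    #include <stdio.h>

    enum route_type { NO_ROUTER, PRIMARY_ROUTER, MULTICAST_ROUTER };

    /* Reject unsupported types instead of silently coercing them. */
    static int correct_routing_type(enum route_type *type)
    {
            switch (*type) {
            case NO_ROUTER:
            case PRIMARY_ROUTER:
                    return 0;
            default:
                    *type = NO_ROUTER;
                    return -EINVAL;
            }
    }

    /* Store a new type, but roll back to the old one if validation fails. */
    static int store_route(enum route_type *cur, enum route_type want)
    {
            enum route_type old = *cur;
            int rc;

            *cur = want;
            rc = correct_routing_type(cur);
            if (rc)
                    *cur = old;
            return rc;
    }

    int main(void)
    {
            enum route_type t = PRIMARY_ROUTER;
            int rc = store_route(&t, MULTICAST_ROUTER);

            printf("rc=%d type=%d\n", rc, t);
            return 0;
    }
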
index 81a1fe661579d365ff82dfac8c0de06b53045110..71a73ec5af8d90811faebf7d087c705231a0a316 100644 (file)
@@ -1483,7 +1483,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
        case TRIG_NONE:
                /*  continuous acquisition */
                devpriv->ai_continous = 1;
-               devpriv->ai_sample_count = 0;
+               devpriv->ai_sample_count = 1;
                break;
        }
 
index 73582705e8c5bc71081494a7e4481b7eb36d42e8..5c3714530961c5ede430f7745706d24c833781ce 100644 (file)
@@ -15,7 +15,7 @@ config RAMSTER
        depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE=y
        depends on NET
        # must ensure struct page is 8-byte aligned
-       select HAVE_ALIGNED_STRUCT_PAGE if !64_BIT
+       select HAVE_ALIGNED_STRUCT_PAGE if !64BIT
        default n
        help
          RAMster allows RAM on other machines in a cluster to be utilized
index db0cf7c8adde04db2b272660a6047297fa848a78..a0fc7b9eea652c570419052deff33fec88210209 100644 (file)
@@ -166,6 +166,7 @@ static int chap_server_compute_md5(
 {
        char *endptr;
        unsigned long id;
+       unsigned char id_as_uchar;
        unsigned char digest[MD5_SIGNATURE_SIZE];
        unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
        unsigned char identifier[10], *challenge = NULL;
@@ -355,7 +356,9 @@ static int chap_server_compute_md5(
                goto out;
        }
 
-       sg_init_one(&sg, &id, 1);
+       /* To handle both endiannesses */
+       id_as_uchar = id;
+       sg_init_one(&sg, &id_as_uchar, 1);
        ret = crypto_hash_update(&desc, &sg, 1);
        if (ret < 0) {
                pr_err("crypto_hash_update() failed for id\n");
index bc02b018ae46b0d0340db390adbf518990e77174..37ffc5bd23992a5f1e1124b2c6eba9889e7119ff 100644 (file)
@@ -7,7 +7,7 @@
 #define FD_DEVICE_QUEUE_DEPTH  32
 #define FD_MAX_DEVICE_QUEUE_DEPTH 128
 #define FD_BLOCKSIZE           512
-#define FD_MAX_SECTORS         1024
+#define FD_MAX_SECTORS         2048
 
 #define RRF_EMULATE_CDB                0x01
 #define RRF_GOT_LBA            0x02
index 82e78d72fdb61f1849f1204e6d02d0970fdb7b41..e992b27aa0904b75320991b0adbb65b3d84037b6 100644 (file)
@@ -883,7 +883,14 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
                        page, len, off);
 
-               while (len > 0 && data_len > 0) {
+               /*
+                * We only have one page of data in each sg element,
+                * so we cannot cross a page boundary.
+                */
+               if (off + len > PAGE_SIZE)
+                       goto fail;
+
+               if (len > 0 && data_len > 0) {
                        bytes = min_t(unsigned int, len, PAGE_SIZE - off);
                        bytes = min(bytes, data_len);
 
@@ -940,9 +947,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                                bio = NULL;
                        }
 
-                       len -= bytes;
                        data_len -= bytes;
-                       off = 0;
                }
        }
 
index 290230de2c53c72c0c192930c7b2dff63a3b24a9..60d4b5185f32979739c01e04c5aa94de6fcd838f 100644 (file)
@@ -464,8 +464,11 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                break;
        case SYNCHRONIZE_CACHE:
        case SYNCHRONIZE_CACHE_16:
-               if (!ops->execute_sync_cache)
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (!ops->execute_sync_cache) {
+                       size = 0;
+                       cmd->execute_cmd = sbc_emulate_noop;
+                       break;
+               }
 
                /*
                 * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
index 9169d6a5d7e4bff83f2aec0ecfc84c8667f9bab6..aac9d2727e3c8584a20db563ef5c914bc758de19 100644 (file)
@@ -711,7 +711,8 @@ int core_tpg_register(
 
        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
                if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
-                       kfree(se_tpg);
+                       array_free(se_tpg->tpg_lun_list,
+                                  TRANSPORT_MAX_LUNS_PER_TPG);
                        return -ENOMEM;
                }
        }
index 2030b608136d3cfb7dc4509105ff061a3a621e14..3243ea790eaba9ee0db1b4152ac4e3450148ecd3 100644 (file)
@@ -1139,8 +1139,10 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
                return ret;
 
        ret = target_check_reservation(cmd);
-       if (ret)
+       if (ret) {
+               cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
                return ret;
+       }
 
        ret = dev->transport->parse_cdb(cmd);
        if (ret)
index 7b0bfa0e7a9c231a450012d104e6de093ca2c001..3078c403b42d84029968b23ff20b39ebd4af295d 100644 (file)
@@ -143,22 +143,18 @@ static int dove_thermal_probe(struct platform_device *pdev)
        if (!priv)
                return -ENOMEM;
 
-       priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
-       if (!priv->sensor) {
-               dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
-               return -EADDRNOTAVAIL;
-       }
+       priv->sensor = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->sensor))
+               return PTR_ERR(priv->sensor);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res) {
                dev_err(&pdev->dev, "Failed to get platform resource\n");
                return -ENODEV;
        }
-       priv->control = devm_request_and_ioremap(&pdev->dev, res);
-       if (!priv->control) {
-               dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
-               return -EADDRNOTAVAIL;
-       }
+       priv->control = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->control))
+               return PTR_ERR(priv->control);
 
        ret = dove_init_sensor(priv);
        if (ret) {
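
The dove thermal hunk above (and the kirkwood and rcar hunks further down) swaps devm_request_and_ioremap(), which signals failure with NULL, for devm_ioremap_resource(), which returns an encoded error pointer, so the checks become IS_ERR()/PTR_ERR(). The sketch below re-creates that error-pointer convention in plain userspace C purely to show why the test changes; it is not the kernel implementation.

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            /* the top 4095 addresses are reserved for encoded errno values */
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Stand-in for devm_ioremap_resource(): never NULL, errors are encoded. */
    static void *fake_ioremap_resource(int fail)
    {
            static int reg;

            return fail ? ERR_PTR(-ENOMEM) : (void *)&reg;
    }

    int main(void)
    {
            void *base = fake_ioremap_resource(1);

            if (IS_ERR(base))
                    printf("probe returns %ld\n", PTR_ERR(base));
            return 0;
    }
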
index e04ebd8671aca27dc390c32d6ec00d6033c7d7ae..46568c078dee8d2fe4c195cc4659e68e6653f36d 100644 (file)
@@ -476,7 +476,7 @@ static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
 
        if (IS_ERR(th_zone->therm_dev)) {
                pr_err("Failed to register thermal zone device\n");
-               ret = -EINVAL;
+               ret = PTR_ERR(th_zone->therm_dev);
                goto err_unregister;
        }
        th_zone->mode = THERMAL_DEVICE_ENABLED;
index 65cb4f09e8f6ffb19123c6aaa2a7a55f3442323b..e5500edb528568a009597b9336fa3d951df852ac 100644 (file)
@@ -85,11 +85,9 @@ static int kirkwood_thermal_probe(struct platform_device *pdev)
        if (!priv)
                return -ENOMEM;
 
-       priv->sensor = devm_request_and_ioremap(&pdev->dev, res);
-       if (!priv->sensor) {
-               dev_err(&pdev->dev, "Failed to request_ioremap memory\n");
-               return -EADDRNOTAVAIL;
-       }
+       priv->sensor = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->sensor))
+               return PTR_ERR(priv->sensor);
 
        thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0,
                                               priv, &ops, NULL, 0, 0);
index 28f0919940137dcaf57fce04df69ddcdecfc173f..2cc5b6115e3e200d13955f8103b97a2ddf3c0514 100644 (file)
@@ -145,6 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
        struct device *dev = rcar_priv_to_dev(priv);
        int i;
        int ctemp, old, new;
+       int ret = -EINVAL;
 
        mutex_lock(&priv->lock);
 
@@ -174,7 +175,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
 
        if (!ctemp) {
                dev_err(dev, "thermal sensor was broken\n");
-               return -EINVAL;
+               goto err_out_unlock;
        }
 
        /*
@@ -192,10 +193,10 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
        dev_dbg(dev, "thermal%d  %d -> %d\n", priv->id, priv->ctemp, ctemp);
 
        priv->ctemp = ctemp;
-
+       ret = 0;
+err_out_unlock:
        mutex_unlock(&priv->lock);
-
-       return 0;
+       return ret;
 }
 
 static int rcar_thermal_get_temp(struct thermal_zone_device *zone,
@@ -363,6 +364,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
        struct resource *res, *irq;
        int mres = 0;
        int i;
+       int ret = -ENODEV;
        int idle = IDLE_INTERVAL;
 
        common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
@@ -399,11 +401,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                /*
                 * rcar_has_irq_support() will be enabled
                 */
-               common->base = devm_request_and_ioremap(dev, res);
-               if (!common->base) {
-                       dev_err(dev, "Unable to ioremap thermal register\n");
-                       return -ENOMEM;
-               }
+               common->base = devm_ioremap_resource(dev, res);
+               if (IS_ERR(common->base))
+                       return PTR_ERR(common->base);
 
                /* enable temperature comparison */
                rcar_thermal_common_write(common, ENR, 0x00030303);
@@ -422,11 +422,9 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                        return -ENOMEM;
                }
 
-               priv->base = devm_request_and_ioremap(dev, res);
-               if (!priv->base) {
-                       dev_err(dev, "Unable to ioremap priv register\n");
-                       return -ENOMEM;
-               }
+               priv->base = devm_ioremap_resource(dev, res);
+               if (IS_ERR(priv->base))
+                       return PTR_ERR(priv->base);
 
                priv->common = common;
                priv->id = i;
@@ -441,6 +439,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                                                idle);
                if (IS_ERR(priv->zone)) {
                        dev_err(dev, "can't register thermal zone\n");
+                       ret = PTR_ERR(priv->zone);
                        goto error_unregister;
                }
 
@@ -460,7 +459,7 @@ error_unregister:
        rcar_thermal_for_each_priv(priv, common)
                thermal_zone_device_unregister(priv->zone);
 
-       return -ENODEV;
+       return ret;
 }
 
 static int rcar_thermal_remove(struct platform_device *pdev)
similarity index 99%
rename from drivers/tty/serial/8250/8250.c
rename to drivers/tty/serial/8250/8250_core.c
index cf6a5383748aa6cf468a76e72e1c575497031eed..35f9c96aada9c9f6d31238accab3ddd7887ed2fb 100644 (file)
@@ -3418,6 +3418,7 @@ MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
 #endif
 MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
 
+#ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS
 #ifndef MODULE
 /* This module was renamed to 8250_core in 3.7.  Keep the old "8250" name
  * working as well for the module options so we don't break people.  We
@@ -3432,7 +3433,7 @@ MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR);
 static void __used s8250_options(void)
 {
 #undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "8250."
+#define MODULE_PARAM_PREFIX "8250_core."
 
        module_param_cb(share_irqs, &param_ops_uint, &share_irqs, 0644);
        module_param_cb(nr_uarts, &param_ops_uint, &nr_uarts, 0644);
@@ -3444,5 +3445,6 @@ static void __used s8250_options(void)
 #endif
 }
 #else
-MODULE_ALIAS("8250");
+MODULE_ALIAS("8250_core");
+#endif
 #endif
index aa76825229dca32d612781f877b1d1167add71af..26e3a97ab157ed16d23825c1a62bec4d53cd81b9 100644 (file)
@@ -1554,6 +1554,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
 #define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
 #define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
 #define PCI_VENDOR_ID_WCH              0x4348
+#define PCI_DEVICE_ID_WCH_CH352_2S     0x3253
 #define PCI_DEVICE_ID_WCH_CH353_4S     0x3453
 #define PCI_DEVICE_ID_WCH_CH353_2S1PF  0x5046
 #define PCI_DEVICE_ID_WCH_CH353_2S1P   0x7053
@@ -2172,6 +2173,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .subdevice      = PCI_ANY_ID,
                .setup          = pci_wch_ch353_setup,
        },
+       /* WCH CH352 2S card (16550 clone) */
+       {
+               .vendor         = PCI_VENDOR_ID_WCH,
+               .device         = PCI_DEVICE_ID_WCH_CH352_2S,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = pci_wch_ch353_setup,
+       },
        /*
         * ASIX devices with FIFO bug
         */
@@ -4870,6 +4879,10 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID,
                0, 0, pbn_b0_bt_2_115200 },
 
+       {       PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S,
+               PCI_ANY_ID, PCI_ANY_ID,
+               0, 0, pbn_b0_bt_2_115200 },
+
        /*
         * Commtech, Inc. Fastcom adapters
         */
index 2ef9537bcb2cda6a985d27f9dda8c3385187fe3a..80fe91e64a527da06e82d9cf237be64a26faf662 100644 (file)
@@ -33,6 +33,23 @@ config SERIAL_8250
          Most people will say Y or M here, so that they can use serial mice,
          modems and similar devices connecting to the standard serial ports.
 
+config SERIAL_8250_DEPRECATED_OPTIONS
+       bool "Support 8250_core.* kernel options (DEPRECATED)"
+       depends on SERIAL_8250
+       default y
+       ---help---
+         In 3.7 we renamed 8250 to 8250_core by mistake, so now we have to
+         accept kernel parameters in both forms like 8250_core.nr_uarts=4 and
+         8250.nr_uarts=4. We have now renamed the module back to 8250, but
+         if anybody noticed in 3.7 and changed their userspace, we still
+         have to keep the 8250_core.* options around until they revert the
+         changes they already did.
+
+         If 8250 is built as a module, this adds an 8250_core alias instead.
+
+         If you did not notice yet and/or you have userspace from pre-3.7, it
+         is safe (and recommended) to say N here.
+
 config SERIAL_8250_PNP
        bool "8250/16550 PNP device support" if EXPERT
        depends on SERIAL_8250 && PNP
index a23838a4d5353773c1ec8d15adbecec3ceee123c..36d68d0543072c21f48807d3b932ed426e843685 100644 (file)
@@ -2,10 +2,10 @@
 # Makefile for the 8250 serial device drivers.
 #
 
-obj-$(CONFIG_SERIAL_8250)              += 8250_core.o
-8250_core-y                            := 8250.o
-8250_core-$(CONFIG_SERIAL_8250_PNP)    += 8250_pnp.o
-8250_core-$(CONFIG_SERIAL_8250_DMA)    += 8250_dma.o
+obj-$(CONFIG_SERIAL_8250)              += 8250.o
+8250-y                                 := 8250_core.o
+8250-$(CONFIG_SERIAL_8250_PNP)         += 8250_pnp.o
+8250-$(CONFIG_SERIAL_8250_DMA)         += 8250_dma.o
 obj-$(CONFIG_SERIAL_8250_GSC)          += 8250_gsc.o
 obj-$(CONFIG_SERIAL_8250_PCI)          += 8250_pci.o
 obj-$(CONFIG_SERIAL_8250_HP300)                += 8250_hp300.o
index d4a7c241b751279d3422f6d024b45cbb1a04a2de..3467462869ce2dce88ca8832002d7f294d5eaae6 100644 (file)
@@ -158,7 +158,7 @@ struct atmel_uart_port {
 };
 
 static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
-static unsigned long atmel_ports_in_use;
+static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
 
 #ifdef SUPPORT_SYSRQ
 static struct console atmel_console;
@@ -1769,15 +1769,14 @@ static int atmel_serial_probe(struct platform_device *pdev)
        if (ret < 0)
                /* port id not found in platform data nor device-tree aliases:
                 * auto-enumerate it */
-               ret = find_first_zero_bit(&atmel_ports_in_use,
-                               sizeof(atmel_ports_in_use));
+               ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
 
-       if (ret > ATMEL_MAX_UART) {
+       if (ret >= ATMEL_MAX_UART) {
                ret = -ENODEV;
                goto err;
        }
 
-       if (test_and_set_bit(ret, &atmel_ports_in_use)) {
+       if (test_and_set_bit(ret, atmel_ports_in_use)) {
                /* port already in use */
                ret = -EBUSY;
                goto err;
@@ -1857,7 +1856,7 @@ static int atmel_serial_remove(struct platform_device *pdev)
 
        /* "port" is allocated statically, so we shouldn't free it */
 
-       clear_bit(port->line, &atmel_ports_in_use);
+       clear_bit(port->line, atmel_ports_in_use);
 
        clk_put(atmel_port->clk);
 
index e343d6670854e3b0fa097a0e66135daa065ce2b9..451687cb96850cd55c1fdd15f4832e591eae8f5d 100644 (file)
@@ -968,6 +968,7 @@ static struct uart_ops sunsu_pops = {
 #define UART_NR        4
 
 static struct uart_sunsu_port sunsu_ports[UART_NR];
+static int nr_inst; /* Number of already registered ports */
 
 #ifdef CONFIG_SERIO
 
@@ -1337,13 +1338,8 @@ static int __init sunsu_console_setup(struct console *co, char *options)
        printk("Console: ttyS%d (SU)\n",
               (sunsu_reg.minor - 64) + co->index);
 
-       /*
-        * Check whether an invalid uart number has been specified, and
-        * if so, search for the first available port that does have
-        * console support.
-        */
-       if (co->index >= UART_NR)
-               co->index = 0;
+       if (co->index > nr_inst)
+               return -ENODEV;
        port = &sunsu_ports[co->index].port;
 
        /*
@@ -1408,7 +1404,6 @@ static enum su_type su_get_type(struct device_node *dp)
 
 static int su_probe(struct platform_device *op)
 {
-       static int inst;
        struct device_node *dp = op->dev.of_node;
        struct uart_sunsu_port *up;
        struct resource *rp;
@@ -1418,16 +1413,16 @@ static int su_probe(struct platform_device *op)
 
        type = su_get_type(dp);
        if (type == SU_PORT_PORT) {
-               if (inst >= UART_NR)
+               if (nr_inst >= UART_NR)
                        return -EINVAL;
-               up = &sunsu_ports[inst];
+               up = &sunsu_ports[nr_inst];
        } else {
                up = kzalloc(sizeof(*up), GFP_KERNEL);
                if (!up)
                        return -ENOMEM;
        }
 
-       up->port.line = inst;
+       up->port.line = nr_inst;
 
        spin_lock_init(&up->port.lock);
 
@@ -1461,6 +1456,8 @@ static int su_probe(struct platform_device *op)
                }
                dev_set_drvdata(&op->dev, up);
 
+               nr_inst++;
+
                return 0;
        }
 
@@ -1488,7 +1485,7 @@ static int su_probe(struct platform_device *op)
 
        dev_set_drvdata(&op->dev, up);
 
-       inst++;
+       nr_inst++;
 
        return 0;
 
index ba451c7209fc6b9bbaaf47b9e2d2458f202feaf8..f36bbba1ac8b9070cc7cefafb8a33265a7872253 100644 (file)
@@ -578,6 +578,8 @@ static int xuartps_startup(struct uart_port *port)
        /* Receive Timeout register is enabled with value of 10 */
        xuartps_writel(10, XUARTPS_RXTOUT_OFFSET);
 
+       /* Clear out any pending interrupts before enabling them */
+       xuartps_writel(xuartps_readl(XUARTPS_ISR_OFFSET), XUARTPS_ISR_OFFSET);
 
        /* Set the Interrupt Registers with desired interrupts */
        xuartps_writel(XUARTPS_IXR_TXEMPTY | XUARTPS_IXR_PARITY |
index e4ca345873c3279761f8dcf6cee3f06053205088..d7799deacb21a66805d8e4308bcc199b64894fbf 100644 (file)
@@ -93,7 +93,7 @@ vcs_poll_data_free(struct vcs_poll_data *poll)
 static struct vcs_poll_data *
 vcs_poll_data_get(struct file *file)
 {
-       struct vcs_poll_data *poll = file->private_data;
+       struct vcs_poll_data *poll = file->private_data, *kill = NULL;
 
        if (poll)
                return poll;
@@ -122,10 +122,12 @@ vcs_poll_data_get(struct file *file)
                file->private_data = poll;
        } else {
                /* someone else raced ahead of us */
-               vcs_poll_data_free(poll);
+               kill = poll;
                poll = file->private_data;
        }
        spin_unlock(&file->f_lock);
+       if (kill)
+               vcs_poll_data_free(kill);
 
        return poll;
 }
index 8ac25adf31b41d857a6919299f1848cf2ee43710..387dc6c8ad25b0d43a7d954ce71d877618fcb63d 100644 (file)
@@ -593,7 +593,6 @@ static void acm_port_destruct(struct tty_port *port)
 
        dev_dbg(&acm->control->dev, "%s\n", __func__);
 
-       tty_unregister_device(acm_tty_driver, acm->minor);
        acm_release_minor(acm);
        usb_put_intf(acm->control);
        kfree(acm->country_codes);
@@ -977,6 +976,8 @@ static int acm_probe(struct usb_interface *intf,
        int num_rx_buf;
        int i;
        int combined_interfaces = 0;
+       struct device *tty_dev;
+       int rv = -ENOMEM;
 
        /* normal quirks */
        quirks = (unsigned long)id->driver_info;
@@ -1339,11 +1340,24 @@ skip_countries:
        usb_set_intfdata(data_interface, acm);
 
        usb_get_intf(control_interface);
-       tty_port_register_device(&acm->port, acm_tty_driver, minor,
+       tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
                        &control_interface->dev);
+       if (IS_ERR(tty_dev)) {
+               rv = PTR_ERR(tty_dev);
+               goto alloc_fail8;
+       }
 
        return 0;
+alloc_fail8:
+       if (acm->country_codes) {
+               device_remove_file(&acm->control->dev,
+                               &dev_attr_wCountryCodes);
+               device_remove_file(&acm->control->dev,
+                               &dev_attr_iCountryCodeRelDate);
+       }
+       device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
 alloc_fail7:
+       usb_set_intfdata(intf, NULL);
        for (i = 0; i < ACM_NW; i++)
                usb_free_urb(acm->wb[i].urb);
 alloc_fail6:
@@ -1359,7 +1373,7 @@ alloc_fail2:
        acm_release_minor(acm);
        kfree(acm);
 alloc_fail:
-       return -ENOMEM;
+       return rv;
 }
 
 static void stop_data_traffic(struct acm *acm)
@@ -1411,6 +1425,8 @@ static void acm_disconnect(struct usb_interface *intf)
 
        stop_data_traffic(acm);
 
+       tty_unregister_device(acm_tty_driver, acm->minor);
+
        usb_free_urb(acm->ctrlurb);
        for (i = 0; i < ACM_NW; i++)
                usb_free_urb(acm->wb[i].urb);
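
The cdc-acm probe hunks above add an alloc_fail8 hop so that a failing tty_port_register_device() unwinds the sysfs files and URBs already set up, and the real error code is returned instead of a blanket -ENOMEM. A stand-alone sketch of that goto-unwind style, with made-up resources standing in for the driver's allocations:

    #include <stdio.h>
    #include <stdlib.h>

    static int fake_register(void) { return -5; /* pretend -EIO */ }

    static int probe(void)
    {
            int rv = -12;                           /* default: -ENOMEM */
            char *urbs = NULL, *minor = NULL;

            minor = malloc(4);
            if (!minor)
                    goto fail;
            urbs = malloc(64);
            if (!urbs)
                    goto fail_release_minor;

            rv = fake_register();
            if (rv < 0)
                    goto fail_free_urbs;            /* the new, late failure hop */

            /* success: in a real probe the resources stay owned by the device */
            return 0;

    fail_free_urbs:
            free(urbs);
    fail_release_minor:
            free(minor);
    fail:
            return rv;
    }

    int main(void)
    {
            printf("probe() = %d\n", probe());
            return 0;
    }
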
index 622b4a48e732e7a83b6c760d6563994394b0caa9..2b487d4797bd74057a9ad88148af199360d9de1d 100644 (file)
@@ -173,6 +173,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
        struct hc_driver        *driver;
        struct usb_hcd          *hcd;
        int                     retval;
+       int                     hcd_irq = 0;
 
        if (usb_disabled())
                return -ENODEV;
@@ -187,15 +188,19 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                return -ENODEV;
        dev->current_state = PCI_D0;
 
-       /* The xHCI driver supports MSI and MSI-X,
-        * so don't fail if the BIOS doesn't provide a legacy IRQ.
+       /*
+        * The xHCI driver has its own irq management;
+        * make sure irq setup is not touched for xhci in generic hcd code.
         */
-       if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) {
-               dev_err(&dev->dev,
-                       "Found HC with no IRQ.  Check BIOS/PCI %s setup!\n",
-                       pci_name(dev));
-               retval = -ENODEV;
-               goto disable_pci;
+       if ((driver->flags & HCD_MASK) != HCD_USB3) {
+               if (!dev->irq) {
+                       dev_err(&dev->dev,
+                       "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
+                               pci_name(dev));
+                       retval = -ENODEV;
+                       goto disable_pci;
+               }
+               hcd_irq = dev->irq;
        }
 
        hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev));
@@ -245,7 +250,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
        pci_set_master(dev);
 
-       retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED);
+       retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED);
        if (retval != 0)
                goto unmap_registers;
        set_hs_companion(dev, hcd);
index 99b34a30354f4a2a3e06deef32addaefb3e46e61..f9ec44cbb82fb1ff6fa8e620367de8a9daec2c70 100644 (file)
@@ -2412,6 +2412,14 @@ int usb_hcd_is_primary_hcd(struct usb_hcd *hcd)
 }
 EXPORT_SYMBOL_GPL(usb_hcd_is_primary_hcd);
 
+int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1)
+{
+       if (!hcd->driver->find_raw_port_number)
+               return port1;
+
+       return hcd->driver->find_raw_port_number(hcd, port1);
+}
+
 static int usb_hcd_request_irqs(struct usb_hcd *hcd,
                unsigned int irqnum, unsigned long irqflags)
 {
index b6f4bad3f756eefb173097767ffde6970e06220e..255c14464bf2ea7a30dffab6fa1770c7b030c0aa 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/acpi.h>
 #include <linux/pci.h>
+#include <linux/usb/hcd.h>
 #include <acpi/acpi_bus.h>
 
 #include "usb.h"
@@ -188,8 +189,13 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle)
                 * connected to.
                 */
                if (!udev->parent) {
-                       *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev),
+                       struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+                       int raw_port_num;
+
+                       raw_port_num = usb_hcd_find_raw_port_number(hcd,
                                port_num);
+                       *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev),
+                               raw_port_num);
                        if (!*handle)
                                return -ENODEV;
                } else {
index 5a0c541daf893a33a428bedb8d68ec1fa72d4234..c7525b1cad74a0f92dc1057621dca509f80d671c 100644 (file)
@@ -145,6 +145,7 @@ config USB_LPC32XX
        tristate "LPC32XX USB Peripheral Controller"
        depends on ARCH_LPC32XX
        select USB_ISP1301
+       select USB_OTG_UTILS
        help
           This option selects the USB device controller in the LPC32xx SoC.
 
index 71beeb83355874b601998a5764abddc076515138..cc9c49c57c803a9e787887fd8977e05c585d630e 100644 (file)
@@ -447,14 +447,13 @@ static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req)
 static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
 {
        struct f_rndis                  *rndis = req->context;
-       struct usb_composite_dev        *cdev = rndis->port.func.config->cdev;
        int                             status;
 
        /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
 //     spin_lock(&dev->lock);
        status = rndis_msg_parser(rndis->config, (u8 *) req->buf);
        if (status < 0)
-               ERROR(cdev, "RNDIS command error %d, %d/%d\n",
+               pr_err("RNDIS command error %d, %d/%d\n",
                        status, req->actual, req->length);
 //     spin_unlock(&dev->lock);
 }
index 3953dd4d7186827d0ee65b3e631701a093e7f618..3b343b23e4b0cfee227fc56965565bb0fb768d33 100644 (file)
@@ -357,7 +357,7 @@ static int gfs_bind(struct usb_composite_dev *cdev)
                goto error;
        gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id;
 
-       for (i = func_num; --i; ) {
+       for (i = func_num; i--; ) {
                ret = functionfs_bind(ffs_tab[i].ffs_data, cdev);
                if (unlikely(ret < 0)) {
                        while (++i < func_num)
@@ -413,7 +413,7 @@ static int gfs_unbind(struct usb_composite_dev *cdev)
                gether_cleanup();
        gfs_ether_setup = false;
 
-       for (i = func_num; --i; )
+       for (i = func_num; i--; )
                if (ffs_tab[i].ffs_data)
                        functionfs_unbind(ffs_tab[i].ffs_data);
 
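
The g_ffs hunk above is a classic loop-bound fix: "for (i = func_num; --i; )" stops as soon as i reaches 0 and therefore never binds (or unbinds) ffs_tab[0], while "i--" visits every index down to 0. A quick stand-alone check, with a hypothetical count of 3:

    #include <stdio.h>

    int main(void)
    {
            int i, n = 3;

            printf("--i visits:");
            for (i = n; --i; )
                    printf(" %d", i);       /* 2 1   -> index 0 is skipped */

            printf("\ni-- visits:");
            for (i = n; i--; )
                    printf(" %d", i);       /* 2 1 0 -> every entry handled */
            printf("\n");
            return 0;
    }
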
index d226058e3b88ab7a3f6d07783886d8f23e8b7365..32524b631959eda7a62e2e2b005c3ba7a2e88f4b 100644 (file)
@@ -59,7 +59,7 @@ static const char * const ep_name[] = {
 };
 
 #define DMA_ADDR_INVALID       (~(dma_addr_t)0)
-#ifdef CONFIG_USB_GADGET_NET2272_DMA
+#ifdef CONFIG_USB_NET2272_DMA
 /*
  * use_dma: the NET2272 can use an external DMA controller.
  * Note that since there is no generic DMA api, some functions,
@@ -1495,6 +1495,13 @@ stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
        for (i = 0; i < 4; ++i)
                net2272_dequeue_all(&dev->ep[i]);
 
+       /* report disconnect; the driver is already quiesced */
+       if (driver) {
+               spin_unlock(&dev->lock);
+               driver->disconnect(&dev->gadget);
+               spin_lock(&dev->lock);
+       }
+
        net2272_usb_reinit(dev);
 }
 
index a1b650e11339797ca9a17ca41e6de5cd8ab07ca2..3bd0f992fb49aebe5b5c4dc9e45356bb77c01581 100644 (file)
@@ -1924,7 +1924,6 @@ static int net2280_start(struct usb_gadget *_gadget,
 err_func:
        device_remove_file (&dev->pdev->dev, &dev_attr_function);
 err_unbind:
-       driver->unbind (&dev->gadget);
        dev->gadget.dev.driver = NULL;
        dev->driver = NULL;
        return retval;
@@ -1946,6 +1945,13 @@ stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
        for (i = 0; i < 7; i++)
                nuke (&dev->ep [i]);
 
+       /* report disconnect; the driver is already quiesced */
+       if (driver) {
+               spin_unlock(&dev->lock);
+               driver->disconnect(&dev->gadget);
+               spin_lock(&dev->lock);
+       }
+
        usb_reinit (dev);
 }
 
index c5034d9c946b479b52e7e06cba68729392727d07..b369292d4b90cd0c5c66dd147434f5d8aadbd752 100644 (file)
@@ -136,7 +136,7 @@ static struct portmaster {
        pr_debug(fmt, ##arg)
 #endif /* pr_vdebug */
 #else
-#ifndef pr_vdebig
+#ifndef pr_vdebug
 #define pr_vdebug(fmt, arg...) \
        ({ if (0) pr_debug(fmt, ##arg); })
 #endif /* pr_vdebug */
index 2a9cd369f71cd857cce96f48acb4b600a828c981..f8f62c3ed65e7a468b97f7348aac97ea133bbcf1 100644 (file)
@@ -216,7 +216,7 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
        usb_gadget_disconnect(udc->gadget);
        udc->driver->disconnect(udc->gadget);
        udc->driver->unbind(udc->gadget);
-       usb_gadget_udc_stop(udc->gadget, udc->driver);
+       usb_gadget_udc_stop(udc->gadget, NULL);
 
        udc->driver = NULL;
        udc->dev.driver = NULL;
index 5726cb144abfdf425870168589e0d609e12d059a..416a6dce5e11c8adeeed2eef487b93c93dd5d728 100644 (file)
@@ -302,6 +302,7 @@ static void ehci_quiesce (struct ehci_hcd *ehci)
 
 static void end_unlink_async(struct ehci_hcd *ehci);
 static void unlink_empty_async(struct ehci_hcd *ehci);
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
 static void ehci_work(struct ehci_hcd *ehci);
 static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
 static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
index 4d3b294f203e3acefe469db340d695786b2ffc3f..7d06e77f6c4fa0116ea69ad239753ea4dffe20fd 100644 (file)
@@ -328,7 +328,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
        ehci->rh_state = EHCI_RH_SUSPENDED;
 
        end_unlink_async(ehci);
-       unlink_empty_async(ehci);
+       unlink_empty_async_suspended(ehci);
        ehci_handle_intr_unlinks(ehci);
        end_free_itds(ehci);
 
index 5464665f0b6aa7b86b472b080213715dcacfe5f4..23d13690428591b3c71d6c10bc9b29a4ffd0c973 100644 (file)
@@ -1316,6 +1316,19 @@ static void unlink_empty_async(struct ehci_hcd *ehci)
        }
 }
 
+/* The root hub is suspended; unlink all the async QHs */
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
+{
+       struct ehci_qh          *qh;
+
+       while (ehci->async->qh_next.qh) {
+               qh = ehci->async->qh_next.qh;
+               WARN_ON(!list_empty(&qh->qtd_list));
+               single_unlink_async(ehci, qh);
+       }
+       start_iaa_cycle(ehci, false);
+}
+
 /* makes sure the async qh will become idle */
 /* caller must own ehci->lock */
 
index b476daf49f6f3c6cf226b920f5c8ff01ca5d23aa..010f686d8881fc9796fd3bbee1c1f5c3aa71e878 100644 (file)
@@ -1214,6 +1214,7 @@ itd_urb_transaction (
 
                memset (itd, 0, sizeof *itd);
                itd->itd_dma = itd_dma;
+               itd->frame = 9999;              /* an invalid value */
                list_add (&itd->itd_list, &sched->td_list);
        }
        spin_unlock_irqrestore (&ehci->lock, flags);
@@ -1915,6 +1916,7 @@ sitd_urb_transaction (
 
                memset (sitd, 0, sizeof *sitd);
                sitd->sitd_dma = sitd_dma;
+               sitd->frame = 9999;             /* an invalid value */
                list_add (&sitd->sitd_list, &iso_sched->td_list);
        }
 
index 20dbdcbe9b0fc1fb1330da115bebb3546e45b8f2..c3fa1305f830bf53070a6553446428368553691d 100644 (file)
@@ -304,7 +304,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
         * (a) SMP races against real IAA firing and retriggering, and
         * (b) clean HC shutdown, when IAA watchdog was pending.
         */
-       if (ehci->async_iaa) {
+       if (1) {
                u32 cmd, status;
 
                /* If we get here, IAA is *REALLY* late.  It's barely
index 35616ffbe3ae7435155bf29196d295f35a719b71..6dc238c592bc9aef5e3d8a564240b085e401cf6b 100644 (file)
@@ -1022,44 +1022,24 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
  * is attached to (or the roothub port its ancestor hub is attached to).  All we
  * know is the index of that port under either the USB 2.0 or the USB 3.0
  * roothub, but that doesn't give us the real index into the HW port status
- * registers.  Scan through the xHCI roothub port array, looking for the Nth
- * entry of the correct port speed.  Return the port number of that entry.
+ * registers. Call xhci_find_raw_port_number() to get real index.
  */
 static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
                struct usb_device *udev)
 {
        struct usb_device *top_dev;
-       unsigned int num_similar_speed_ports;
-       unsigned int faked_port_num;
-       int i;
+       struct usb_hcd *hcd;
+
+       if (udev->speed == USB_SPEED_SUPER)
+               hcd = xhci->shared_hcd;
+       else
+               hcd = xhci->main_hcd;
 
        for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
                        top_dev = top_dev->parent)
                /* Found device below root hub */;
-       faked_port_num = top_dev->portnum;
-       for (i = 0, num_similar_speed_ports = 0;
-                       i < HCS_MAX_PORTS(xhci->hcs_params1); i++) {
-               u8 port_speed = xhci->port_array[i];
-
-               /*
-                * Skip ports that don't have known speeds, or have duplicate
-                * Extended Capabilities port speed entries.
-                */
-               if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
-                       continue;
 
-               /*
-                * USB 3.0 ports are always under a USB 3.0 hub.  USB 2.0 and
-                * 1.1 ports are under the USB 2.0 hub.  If the port speed
-                * matches the device speed, it's a similar speed port.
-                */
-               if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER))
-                       num_similar_speed_ports++;
-               if (num_similar_speed_ports == faked_port_num)
-                       /* Roothub ports are numbered from 1 to N */
-                       return i+1;
-       }
-       return 0;
+       return  xhci_find_raw_port_number(hcd, top_dev->portnum);
 }
 
 /* Setup an xHCI virtual device for a Set Address command */
index af259e0ec172aa4a5664ca15acb2adf8d90bf02c..1a30c380043ce258aa660e67edb38210707fffd6 100644 (file)
@@ -313,6 +313,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
        .set_usb2_hw_lpm =      xhci_set_usb2_hardware_lpm,
        .enable_usb3_lpm_timeout =      xhci_enable_usb3_lpm_timeout,
        .disable_usb3_lpm_timeout =     xhci_disable_usb3_lpm_timeout,
+       .find_raw_port_number = xhci_find_raw_port_number,
 };
 
 /*-------------------------------------------------------------------------*/
index 88287546530179a867da6f3a9d88f615d87f9a81..1969c001b3f9a8bacbe926dfc4777c95206101a1 100644 (file)
@@ -1599,14 +1599,20 @@ static void handle_port_status(struct xhci_hcd *xhci,
        max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
        if ((port_id <= 0) || (port_id > max_ports)) {
                xhci_warn(xhci, "Invalid port id %d\n", port_id);
-               bogus_port_status = true;
-               goto cleanup;
+               inc_deq(xhci, xhci->event_ring);
+               return;
        }
 
        /* Figure out which usb_hcd this port is attached to:
         * is it a USB 3.0 port or a USB 2.0/1.1 port?
         */
        major_revision = xhci->port_array[port_id - 1];
+
+       /* Find the right roothub. */
+       hcd = xhci_to_hcd(xhci);
+       if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
+               hcd = xhci->shared_hcd;
+
        if (major_revision == 0) {
                xhci_warn(xhci, "Event for port %u not in "
                                "Extended Capabilities, ignoring.\n",
@@ -1629,10 +1635,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
         * into the index into the ports on the correct split roothub, and the
         * correct bus_state structure.
         */
-       /* Find the right roothub. */
-       hcd = xhci_to_hcd(xhci);
-       if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
-               hcd = xhci->shared_hcd;
        bus_state = &xhci->bus_state[hcd_index(hcd)];
        if (hcd->speed == HCD_USB3)
                port_array = xhci->usb3_ports;
@@ -2027,8 +2029,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                if (event_trb != ep_ring->dequeue &&
                                event_trb != td->last_trb)
                        td->urb->actual_length =
-                               td->urb->transfer_buffer_length
-                               TRB_LEN(le32_to_cpu(event->transfer_len));
+                               td->urb->transfer_buffer_length -
+                               EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
                else
                        td->urb->actual_length = 0;
 
@@ -2060,7 +2062,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                /* Maybe the event was for the data stage? */
                        td->urb->actual_length =
                                td->urb->transfer_buffer_length -
-                               TRB_LEN(le32_to_cpu(event->transfer_len));
+                               EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
                        xhci_dbg(xhci, "Waiting for status "
                                        "stage event\n");
                        return 0;
@@ -2096,7 +2098,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
        /* handle completion code */
        switch (trb_comp_code) {
        case COMP_SUCCESS:
-               if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
+               if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) {
                        frame->status = 0;
                        break;
                }
@@ -2141,7 +2143,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
                                len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
                }
                len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
-                       TRB_LEN(le32_to_cpu(event->transfer_len));
+                       EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
 
                if (trb_comp_code != COMP_STOP_INVAL) {
                        frame->actual_length = len;
@@ -2199,7 +2201,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
        case COMP_SUCCESS:
                /* Double check that the HW transferred everything. */
                if (event_trb != td->last_trb ||
-                               TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+                   EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
                        xhci_warn(xhci, "WARN Successful completion "
                                        "on short TX\n");
                        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
@@ -2227,18 +2229,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                                "%d bytes untransferred\n",
                                td->urb->ep->desc.bEndpointAddress,
                                td->urb->transfer_buffer_length,
-                               TRB_LEN(le32_to_cpu(event->transfer_len)));
+                               EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
        /* Fast path - was this the last TRB in the TD for this URB? */
        if (event_trb == td->last_trb) {
-               if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
+               if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
                        td->urb->actual_length =
                                td->urb->transfer_buffer_length -
-                               TRB_LEN(le32_to_cpu(event->transfer_len));
+                               EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
                        if (td->urb->transfer_buffer_length <
                                        td->urb->actual_length) {
                                xhci_warn(xhci, "HC gave bad length "
                                                "of %d bytes left\n",
-                                         TRB_LEN(le32_to_cpu(event->transfer_len)));
+                                         EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
                                td->urb->actual_length = 0;
                                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
                                        *status = -EREMOTEIO;
@@ -2280,7 +2282,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                if (trb_comp_code != COMP_STOP_INVAL)
                        td->urb->actual_length +=
                                TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
-                               TRB_LEN(le32_to_cpu(event->transfer_len));
+                               EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
        }
 
        return finish_td(xhci, td, event_trb, event, ep, status, false);
@@ -2368,7 +2370,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
         * transfer type
         */
        case COMP_SUCCESS:
-               if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
+               if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
                        break;
                if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
                        trb_comp_code = COMP_SHORT_TX;
@@ -2461,14 +2463,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                 * TD list.
                 */
                if (list_empty(&ep_ring->td_list)) {
-                       xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
-                                       "with no TDs queued?\n",
-                                 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
-                                 ep_index);
-                       xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
-                                (le32_to_cpu(event->flags) &
-                                 TRB_TYPE_BITMASK)>>10);
-                       xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+                       /*
+                        * A stopped endpoint may generate an extra completion
+                        * event if the device was suspended.  Don't print
+                        * warnings.
+                        */
+                       if (!(trb_comp_code == COMP_STOP ||
+                                               trb_comp_code == COMP_STOP_INVAL)) {
+                               xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+                                               TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+                                               ep_index);
+                               xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+                                               (le32_to_cpu(event->flags) &
+                                                TRB_TYPE_BITMASK)>>10);
+                               xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+                       }
                        if (ep->skip) {
                                ep->skip = false;
                                xhci_dbg(xhci, "td_list is empty while skip "
index f1f01a834ba792cee0935c5ffb9373d53fb368bd..53b8f89a0b1c7e6199c95672c85d3d27fb3e0b90 100644 (file)
@@ -350,7 +350,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
         * generate interrupts.  Don't even try to enable MSI.
         */
        if (xhci->quirks & XHCI_BROKEN_MSI)
-               return 0;
+               goto legacy_irq;
 
        /* unregister the legacy interrupt */
        if (hcd->irq)
@@ -371,6 +371,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
                return -EINVAL;
        }
 
+ legacy_irq:
        /* fall back to legacy interrupt*/
        ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
                        hcd->irq_descr, hcd);
@@ -3778,6 +3779,28 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
        return 0;
 }
 
+/*
+ * Translate the port index into the real index in the HW port status
+ * registers. Calculate the offset between the port's PORTSC register
+ * and the port status base, then divide by the number of per-port
+ * registers to get the real index. Raw port numbers start at 1.
+ */
+int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       __le32 __iomem *base_addr = &xhci->op_regs->port_status_base;
+       __le32 __iomem *addr;
+       int raw_port;
+
+       if (hcd->speed != HCD_USB3)
+               addr = xhci->usb2_ports[port1 - 1];
+       else
+               addr = xhci->usb3_ports[port1 - 1];
+
+       raw_port = (addr - base_addr)/NUM_PORT_REGS + 1;
+       return raw_port;
+}
+
 #ifdef CONFIG_USB_SUSPEND
 
 /* BESL to HIRD Encoding array for USB2 LPM */
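
xhci_find_raw_port_number() above maps a per-roothub port number back to the hardware port index by pointer arithmetic on the PORTSC array: the offset from the port-status base divided by the per-port register stride, plus one. A stand-alone check of that arithmetic; the fake register file and the chosen port are made up, and the stride of 4 mirrors the driver's NUM_PORT_REGS.

    #include <stdio.h>
    #include <stdint.h>

    #define NUM_PORT_REGS 4         /* 4 32-bit registers per port */

    int main(void)
    {
            uint32_t op_regs[64];                           /* fake register file */
            uint32_t *port_status_base = &op_regs[16];
            /* PORTSC of the third port sits 2 * NUM_PORT_REGS entries further on */
            uint32_t *portsc = port_status_base + 2 * NUM_PORT_REGS;
            long raw_port = (portsc - port_status_base) / NUM_PORT_REGS + 1;

            printf("raw port: %ld\n", raw_port);            /* prints 3 */
            return 0;
    }
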
index f791bd0aee6cda4ad2bf5407ba5ad71b912cc5f5..63582719e0fb26cafb4d19f5248c2b5f9aa8a43a 100644 (file)
@@ -206,8 +206,8 @@ struct xhci_op_regs {
 /* bits 12:31 are reserved (and should be preserved on writes). */
 
 /* IMAN - Interrupt Management Register */
-#define IMAN_IP                (1 << 1)
-#define IMAN_IE                (1 << 0)
+#define IMAN_IE                (1 << 1)
+#define IMAN_IP                (1 << 0)
 
 /* USBSTS - USB status - status bitmasks */
 /* HC not running - set to 1 when run/stop bit is cleared. */
@@ -972,6 +972,10 @@ struct xhci_transfer_event {
        __le32  flags;
 };
 
+/* Transfer event TRB length bit mask */
+/* bits 0:23 */
+#define        EVENT_TRB_LEN(p)                ((p) & 0xffffff)
+
 /** Transfer Event bit fields **/
 #define        TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f)
 
@@ -1829,6 +1833,7 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
                char *buf, u16 wLength);
 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
+int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
 
 #ifdef CONFIG_PM
 int xhci_bus_suspend(struct usb_hcd *hcd);
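
Several of the xhci_ring.c hunks above replace TRB_LEN() with the new EVENT_TRB_LEN() when decoding transfer events, because in an event TRB the remaining length occupies bits 0:23 and the upper byte carries the completion code. A one-line mask, exercised on a made-up status value:

    #include <stdio.h>
    #include <stdint.h>

    #define EVENT_TRB_LEN(p)        ((p) & 0xffffff)

    int main(void)
    {
            uint32_t transfer_len = 0x0D000200;     /* high byte: completion code */

            /* untransferred length is the low 24 bits: prints 000200 */
            printf("0x%06x\n", EVENT_TRB_LEN(transfer_len));
            return 0;
    }
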
index 7c71769d71ff37370e6e8bf23be5c837bd72a677..41613a2b35e8530d8278bd8e4191bafae5617828 100644 (file)
@@ -327,7 +327,7 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
                u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
                int err;
 
-               err = musb->int_usb & USB_INTR_VBUSERROR;
+               err = musb->int_usb & MUSB_INTR_VBUSERROR;
                if (err) {
                        /*
                         * The Mentor core doesn't debounce VBUS as needed
index be18537c5f1407cca17b31f65965863c0fa47385..83eddedcd9be5895908672b329ebf881d26d8f2f 100644 (file)
@@ -141,7 +141,9 @@ static inline void map_dma_buffer(struct musb_request *request,
 static inline void unmap_dma_buffer(struct musb_request *request,
                                struct musb *musb)
 {
-       if (!is_buffer_mapped(request))
+       struct musb_ep *musb_ep = request->ep;
+
+       if (!is_buffer_mapped(request) || !musb_ep->dma)
                return;
 
        if (request->request.dma == DMA_ADDR_INVALID) {
@@ -195,7 +197,10 @@ __acquires(ep->musb->lock)
 
        ep->busy = 1;
        spin_unlock(&musb->lock);
-       unmap_dma_buffer(req, musb);
+
+       if (!dma_mapping_error(&musb->g.dev, request->dma))
+               unmap_dma_buffer(req, musb);
+
        if (request->status == 0)
                dev_dbg(musb->controller, "%s done request %p,  %d/%d\n",
                                ep->end_point.name, request,
index 65217a5900689028e68a61e28c8011fbc9cbbaea..90549382eba5f139c6d54b7617319132189e6596 100644 (file)
@@ -38,6 +38,7 @@ config USB_ISP1301
        tristate "NXP ISP1301 USB transceiver support"
        depends on USB || USB_GADGET
        depends on I2C
+       select USB_OTG_UTILS
        help
          Say Y here to add support for the NXP ISP1301 USB transceiver driver.
          This chip is typically used as USB transceiver for USB host, gadget
index cbd904b8fba55f42ec07ed0d3a741878b516157c..4775f8209e5502b7f893d0a00a8f51622746a138 100644 (file)
@@ -62,7 +62,6 @@ static int is_irda(struct usb_serial *serial)
 }
 
 struct ark3116_private {
-       wait_queue_head_t       delta_msr_wait;
        struct async_icount     icount;
        int                     irda;   /* 1 for irda device */
 
@@ -146,7 +145,6 @@ static int ark3116_port_probe(struct usb_serial_port *port)
        if (!priv)
                return -ENOMEM;
 
-       init_waitqueue_head(&priv->delta_msr_wait);
        mutex_init(&priv->hw_lock);
        spin_lock_init(&priv->status_lock);
 
@@ -456,10 +454,14 @@ static int ark3116_ioctl(struct tty_struct *tty,
        case TIOCMIWAIT:
                for (;;) {
                        struct async_icount prev = priv->icount;
-                       interruptible_sleep_on(&priv->delta_msr_wait);
+                       interruptible_sleep_on(&port->delta_msr_wait);
                        /* see if a signal did it */
                        if (signal_pending(current))
                                return -ERESTARTSYS;
+
+                       if (port->serial->disconnected)
+                               return -EIO;
+
                        if ((prev.rng == priv->icount.rng) &&
                            (prev.dsr == priv->icount.dsr) &&
                            (prev.dcd == priv->icount.dcd) &&
@@ -580,7 +582,7 @@ static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr)
                        priv->icount.dcd++;
                if (msr & UART_MSR_TERI)
                        priv->icount.rng++;
-               wake_up_interruptible(&priv->delta_msr_wait);
+               wake_up_interruptible(&port->delta_msr_wait);
        }
 }
 
index d255f66e708e80ee78d72bc052bef4e71ba578e2..07d4650a32abe2d89149ef8fc2a22bc17bebe8b2 100644 (file)
@@ -80,7 +80,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
 
 struct ch341_private {
        spinlock_t lock; /* access lock */
-       wait_queue_head_t delta_msr_wait; /* wait queue for modem status */
        unsigned baud_rate; /* set baud rate */
        u8 line_control; /* set line control value RTS/DTR */
        u8 line_status; /* active status of modem control inputs */
@@ -252,7 +251,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
                return -ENOMEM;
 
        spin_lock_init(&priv->lock);
-       init_waitqueue_head(&priv->delta_msr_wait);
        priv->baud_rate = DEFAULT_BAUD_RATE;
        priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
 
@@ -298,7 +296,7 @@ static void ch341_dtr_rts(struct usb_serial_port *port, int on)
                priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR);
        spin_unlock_irqrestore(&priv->lock, flags);
        ch341_set_handshake(port->serial->dev, priv->line_control);
-       wake_up_interruptible(&priv->delta_msr_wait);
+       wake_up_interruptible(&port->delta_msr_wait);
 }
 
 static void ch341_close(struct usb_serial_port *port)
@@ -491,7 +489,7 @@ static void ch341_read_int_callback(struct urb *urb)
                        tty_kref_put(tty);
                }
 
-               wake_up_interruptible(&priv->delta_msr_wait);
+               wake_up_interruptible(&port->delta_msr_wait);
        }
 
 exit:
@@ -517,11 +515,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
        spin_unlock_irqrestore(&priv->lock, flags);
 
        while (!multi_change) {
-               interruptible_sleep_on(&priv->delta_msr_wait);
+               interruptible_sleep_on(&port->delta_msr_wait);
                /* see if a signal did it */
                if (signal_pending(current))
                        return -ERESTARTSYS;
 
+               if (port->serial->disconnected)
+                       return -EIO;
+
                spin_lock_irqsave(&priv->lock, flags);
                status = priv->line_status;
                multi_change = priv->multi_status_change;
index 8efa19d0e9fba756d37f7ea1f0b0d7e813ab9610..ba7352e4187efbd1ef73a09297d1cbb1825a4b08 100644 (file)
@@ -111,7 +111,6 @@ struct cypress_private {
        int baud_rate;                     /* stores current baud rate in
                                              integer form */
        int isthrottled;                   /* if throttled, discard reads */
-       wait_queue_head_t delta_msr_wait;  /* used for TIOCMIWAIT */
        char prev_status, diff_status;     /* used for TIOCMIWAIT */
        /* we pass a pointer to this as the argument sent to
           cypress_set_termios old_termios */
@@ -449,7 +448,6 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
                kfree(priv);
                return -ENOMEM;
        }
-       init_waitqueue_head(&priv->delta_msr_wait);
 
        usb_reset_configuration(serial->dev);
 
@@ -868,12 +866,16 @@ static int cypress_ioctl(struct tty_struct *tty,
        switch (cmd) {
        /* This code comes from drivers/char/serial.c and ftdi_sio.c */
        case TIOCMIWAIT:
-               while (priv != NULL) {
-                       interruptible_sleep_on(&priv->delta_msr_wait);
+               for (;;) {
+                       interruptible_sleep_on(&port->delta_msr_wait);
                        /* see if a signal did it */
                        if (signal_pending(current))
                                return -ERESTARTSYS;
-                       else {
+
+                       if (port->serial->disconnected)
+                               return -EIO;
+
+                       {
                                char diff = priv->diff_status;
                                if (diff == 0)
                                        return -EIO; /* no change => error */
@@ -1187,7 +1189,7 @@ static void cypress_read_int_callback(struct urb *urb)
        if (priv->current_status != priv->prev_status) {
                priv->diff_status |= priv->current_status ^
                        priv->prev_status;
-               wake_up_interruptible(&priv->delta_msr_wait);
+               wake_up_interruptible(&port->delta_msr_wait);
                priv->prev_status = priv->current_status;
        }
        spin_unlock_irqrestore(&priv->lock, flags);
index b1b2dc64b50be013eab09a2dd5ce535875aeeb5a..a172ad5c5ce80c1c00983960a04339ac3c13fb24 100644 (file)
@@ -47,7 +47,6 @@ MODULE_DEVICE_TABLE(usb, id_table);
 
 struct f81232_private {
        spinlock_t lock;
-       wait_queue_head_t delta_msr_wait;
        u8 line_control;
        u8 line_status;
 };
@@ -111,7 +110,7 @@ static void f81232_process_read_urb(struct urb *urb)
        line_status = priv->line_status;
        priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
        spin_unlock_irqrestore(&priv->lock, flags);
-       wake_up_interruptible(&priv->delta_msr_wait);
+       wake_up_interruptible(&port->delta_msr_wait);
 
        if (!urb->actual_length)
                return;
@@ -256,11 +255,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
        spin_unlock_irqrestore(&priv->lock, flags);
 
        while (1) {
-               interruptible_sleep_on(&priv->delta_msr_wait);
+               interruptible_sleep_on(&port->delta_msr_wait);
                /* see if a signal did it */
                if (signal_pending(current))
                        return -ERESTARTSYS;
 
+               if (port->serial->disconnected)
+                       return -EIO;
+
                spin_lock_irqsave(&priv->lock, flags);
                status = priv->line_status;
                spin_unlock_irqrestore(&priv->lock, flags);
@@ -322,7 +324,6 @@ static int f81232_port_probe(struct usb_serial_port *port)
                return -ENOMEM;
 
        spin_lock_init(&priv->lock);
-       init_waitqueue_head(&priv->delta_msr_wait);
 
        usb_set_serial_port_data(port, priv);
 
index edd162df49caed8b02a1c0823a497c6c5d9d180e..9886180e45f1b5d73ddc1903f2e13eecb6297013 100644 (file)
@@ -69,9 +69,7 @@ struct ftdi_private {
        int flags;              /* some ASYNC_xxxx flags are supported */
        unsigned long last_dtr_rts;     /* saved modem control outputs */
        struct async_icount     icount;
-       wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
        char prev_status;        /* Used for TIOCMIWAIT */
-       bool dev_gone;        /* Used to abort TIOCMIWAIT */
        char transmit_empty;    /* If transmitter is empty or not */
        __u16 interface;        /* FT2232C, FT2232H or FT4232H port interface
                                   (0 for FT232/245) */
@@ -642,6 +640,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
        { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
        { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
+       { USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) },
        { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
        { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
        { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
@@ -1691,10 +1690,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
 
        kref_init(&priv->kref);
        mutex_init(&priv->cfg_lock);
-       init_waitqueue_head(&priv->delta_msr_wait);
 
        priv->flags = ASYNC_LOW_LATENCY;
-       priv->dev_gone = false;
 
        if (quirk && quirk->port_probe)
                quirk->port_probe(priv);
@@ -1840,8 +1837,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
 {
        struct ftdi_private *priv = usb_get_serial_port_data(port);
 
-       priv->dev_gone = true;
-       wake_up_interruptible_all(&priv->delta_msr_wait);
+       wake_up_interruptible(&port->delta_msr_wait);
 
        remove_sysfs_attrs(port);
 
@@ -1989,7 +1985,7 @@ static int ftdi_process_packet(struct usb_serial_port *port,
                if (diff_status & FTDI_RS0_RLSD)
                        priv->icount.dcd++;
 
-               wake_up_interruptible_all(&priv->delta_msr_wait);
+               wake_up_interruptible(&port->delta_msr_wait);
                priv->prev_status = status;
        }
 
@@ -2440,11 +2436,15 @@ static int ftdi_ioctl(struct tty_struct *tty,
         */
        case TIOCMIWAIT:
                cprev = priv->icount;
-               while (!priv->dev_gone) {
-                       interruptible_sleep_on(&priv->delta_msr_wait);
+               for (;;) {
+                       interruptible_sleep_on(&port->delta_msr_wait);
                        /* see if a signal did it */
                        if (signal_pending(current))
                                return -ERESTARTSYS;
+
+                       if (port->serial->disconnected)
+                               return -EIO;
+
                        cnow = priv->icount;
                        if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
                            ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
@@ -2454,8 +2454,6 @@ static int ftdi_ioctl(struct tty_struct *tty,
                        }
                        cprev = cnow;
                }
-               return -EIO;
-               break;
        case TIOCSERGETLSR:
                return get_lsr_info(port, (struct serial_struct __user *)arg);
                break;
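
Two details in the ftdi_sio hunks are worth noting: the driver-private dev_gone flag becomes redundant once ftdi_sio_port_remove() wakes port->delta_msr_wait and the TIOCMIWAIT loop checks port->serial->disconnected, and the now-unreachable "return -EIO; break;" after the endless for (;;) loop is dropped. The disconnect-side half of that handshake amounts to the sketch below; the helper name is hypothetical, the usb_serial members (disconnected, num_ports, port[]) are real, and the ftdi_sio_port_remove() hunk above does exactly this for its single port.

#include <linux/usb/serial.h>
#include <linux/wait.h>

/*
 * Hypothetical helper: wake every TIOCMIWAIT sleeper once the device is
 * gone.  Callers are assumed to have set serial->disconnected already
 * (usb_serial_disconnect() does so under disc_mutex), so the woken loops
 * observe the flag and return -EIO.
 */
static void example_wake_tiocmiwait_sleepers(struct usb_serial *serial)
{
	int i;

	for (i = 0; i < serial->num_ports; ++i)
		wake_up_interruptible(&serial->port[i]->delta_msr_wait);
}
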
index 9d359e189a645f7dcd13222433b9377787f81a34..e79861eeed4cca1471e4531ff727f0f8425e7892 100644 (file)
 #define CONTEC_VID             0x06CE  /* Vendor ID */
 #define CONTEC_COM1USBH_PID    0x8311  /* COM-1(USB)H */
 
+/*
+ * Mitsubishi Electric Corp. (http://www.meau.com)
+ * Submitted by Konstantin Holoborodko
+ */
+#define MITSUBISHI_VID         0x06D3
+#define MITSUBISHI_FXUSB_PID   0x0284 /* USB/RS422 converters: FX-USB-AW/-BD */
+
 /*
  * Definitions for B&B Electronics products.
  */
index 1a07b12ef341a08e2bdb880d3d446054dccd5339..81caf5623ee295d609acce00a1e06a20ef34c20f 100644 (file)
@@ -956,10 +956,7 @@ static void garmin_close(struct usb_serial_port *port)
        if (!serial)
                return;
 
-       mutex_lock(&port->serial->disc_mutex);
-
-       if (!port->serial->disconnected)
-               garmin_clear(garmin_data_p);
+       garmin_clear(garmin_data_p);
 
        /* shutdown our urbs */
        usb_kill_urb(port->read_urb);
@@ -968,8 +965,6 @@ static void garmin_close(struct usb_serial_port *port)
        /* keep reset state so we know that we must start a new session */
        if (garmin_data_p->state != STATE_RESET)
                garmin_data_p->state = STATE_DISCONNECTED;
-
-       mutex_unlock(&port->serial->disc_mutex);
 }
 
 
index b00e5cbf741f16b63071c8273c7f69eaa279ca6d..efd8b978128c1d86412322f0c37e9da32b00610a 100644 (file)
@@ -110,7 +110,6 @@ struct edgeport_port {
        wait_queue_head_t       wait_chase;             /* for handling sleeping while waiting for chase to finish */
        wait_queue_head_t       wait_open;              /* for handling sleeping while waiting for open to finish */
        wait_queue_head_t       wait_command;           /* for handling sleeping while waiting for command to finish */
-       wait_queue_head_t       delta_msr_wait;         /* for handling sleeping while waiting for msr change to happen */
 
        struct async_icount     icount;
        struct usb_serial_port  *port;                  /* loop back to the owner of this object */
@@ -884,7 +883,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
        /* initialize our wait queues */
        init_waitqueue_head(&edge_port->wait_open);
        init_waitqueue_head(&edge_port->wait_chase);
-       init_waitqueue_head(&edge_port->delta_msr_wait);
        init_waitqueue_head(&edge_port->wait_command);
 
        /* initialize our icount structure */
@@ -1669,13 +1667,17 @@ static int edge_ioctl(struct tty_struct *tty,
                dev_dbg(&port->dev, "%s (%d) TIOCMIWAIT\n", __func__,  port->number);
                cprev = edge_port->icount;
                while (1) {
-                       prepare_to_wait(&edge_port->delta_msr_wait,
+                       prepare_to_wait(&port->delta_msr_wait,
                                                &wait, TASK_INTERRUPTIBLE);
                        schedule();
-                       finish_wait(&edge_port->delta_msr_wait, &wait);
+                       finish_wait(&port->delta_msr_wait, &wait);
                        /* see if a signal did it */
                        if (signal_pending(current))
                                return -ERESTARTSYS;
+
+                       if (port->serial->disconnected)
+                               return -EIO;
+
                        cnow = edge_port->icount;
                        if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
                            cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
@@ -2051,7 +2053,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr)
                        icount->dcd++;
                if (newMsr & EDGEPORT_MSR_DELTA_RI)
                        icount->rng++;
-               wake_up_interruptible(&edge_port->delta_msr_wait);
+               wake_up_interruptible(&edge_port->port->delta_msr_wait);
        }
 
        /* Save the new modem status */
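
The edgeport TIOCMIWAIT loop above (prepare_to_wait()/schedule()/finish_wait()) and the interruptible_sleep_on() loops elsewhere in this series test their wake-up condition only after sleeping, so a modem-status change that lands between snapshotting the counters and going to sleep can in principle be missed. The quatech2, ssu100, oti6858 and mos7840 hunks further down avoid that window by folding the condition, including port->serial->disconnected, into wait_event_interruptible(). A minimal sketch of that form follows; example_msr_changed() and example_tiocmiwait() are hypothetical names, and the unlocked reads of the icount fields (which the real drivers either protect with their private spinlock or tolerate, as quatech2 does) are a simplification.

#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/types.h>
#include <linux/usb/serial.h>
#include <linux/wait.h>

/* Hypothetical helper: did any line the caller asked about change state? */
static bool example_msr_changed(const struct async_icount *prev,
				const struct async_icount *now,
				unsigned int arg)
{
	return ((arg & TIOCM_RNG) && (now->rng != prev->rng)) ||
	       ((arg & TIOCM_DSR) && (now->dsr != prev->dsr)) ||
	       ((arg & TIOCM_CD)  && (now->dcd != prev->dcd)) ||
	       ((arg & TIOCM_CTS) && (now->cts != prev->cts));
}

static int example_tiocmiwait(struct usb_serial_port *port,
			      struct async_icount *icount,
			      unsigned int arg)
{
	struct async_icount prev = *icount;
	int ret;

	/*
	 * wait_event_interruptible() re-checks the condition after setting
	 * the task state, so a wake-up that races with the snapshot above
	 * is not lost.
	 */
	ret = wait_event_interruptible(port->delta_msr_wait,
				       port->serial->disconnected ||
				       example_msr_changed(&prev, icount, arg));
	if (ret)
		return ret;	/* -ERESTARTSYS on signal */

	if (port->serial->disconnected)
		return -EIO;

	return 0;
}
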
index c23776679f7029a37598e5cea826060670be7e64..7777172206de1b42a971acb0747d97180230d670 100644 (file)
@@ -87,9 +87,6 @@ struct edgeport_port {
        int close_pending;
        int lsr_event;
        struct async_icount     icount;
-       wait_queue_head_t       delta_msr_wait; /* for handling sleeping while
-                                                  waiting for msr change to
-                                                  happen */
        struct edgeport_serial  *edge_serial;
        struct usb_serial_port  *port;
        __u8 bUartMode;         /* Port type, 0: RS232, etc. */
@@ -1459,7 +1456,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr)
                        icount->dcd++;
                if (msr & EDGEPORT_MSR_DELTA_RI)
                        icount->rng++;
-               wake_up_interruptible(&edge_port->delta_msr_wait);
+               wake_up_interruptible(&edge_port->port->delta_msr_wait);
        }
 
        /* Save the new modem status */
@@ -1754,7 +1751,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
        dev = port->serial->dev;
 
        memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount));
-       init_waitqueue_head(&edge_port->delta_msr_wait);
 
        /* turn off loopback */
        status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0);
@@ -2434,10 +2430,14 @@ static int edge_ioctl(struct tty_struct *tty,
                dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__);
                cprev = edge_port->icount;
                while (1) {
-                       interruptible_sleep_on(&edge_port->delta_msr_wait);
+                       interruptible_sleep_on(&port->delta_msr_wait);
                        /* see if a signal did it */
                        if (signal_pending(current))
                                return -ERESTARTSYS;
+
+                       if (port->serial->disconnected)
+                               return -EIO;
+
                        cnow = edge_port->icount;
                        if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
                            cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
@@ -2649,6 +2649,7 @@ static struct usb_serial_driver edgeport_2port_device = {
        .set_termios            = edge_set_termios,
        .tiocmget               = edge_tiocmget,
        .tiocmset               = edge_tiocmset,
+       .get_icount             = edge_get_icount,
        .write                  = edge_write,
        .write_room             = edge_write_room,
        .chars_in_buffer        = edge_chars_in_buffer,
index a64d420f687b0a198ded7bac0cd38a17b89494c0..06d5a60be2c4cb87279744372edef4a41864b74b 100644 (file)
@@ -114,8 +114,6 @@ struct mct_u232_private {
        unsigned char        last_msr;      /* Modem Status Register */
        unsigned int         rx_flags;      /* Throttling flags */
        struct async_icount  icount;
-       wait_queue_head_t    msr_wait;  /* for handling sleeping while waiting
-                                               for msr change to happen */
 };
 
 #define THROTTLED              0x01
@@ -409,7 +407,6 @@ static int mct_u232_port_probe(struct usb_serial_port *port)
                return -ENOMEM;
 
        spin_lock_init(&priv->lock);
-       init_waitqueue_head(&priv->msr_wait);
 
        usb_set_serial_port_data(port, priv);
 
@@ -601,7 +598,7 @@ static void mct_u232_read_int_callback(struct urb *urb)
                tty_kref_put(tty);
        }
 #endif
-       wake_up_interruptible(&priv->msr_wait);
+       wake_up_interruptible(&port->delta_msr_wait);
        spin_unlock_irqrestore(&priv->lock, flags);
 exit:
        retval = usb_submit_urb(urb, GFP_ATOMIC);
@@ -810,13 +807,17 @@ static int  mct_u232_ioctl(struct tty_struct *tty,
                cprev = mct_u232_port->icount;
                spin_unlock_irqrestore(&mct_u232_port->lock, flags);
                for ( ; ; ) {
-                       prepare_to_wait(&mct_u232_port->msr_wait,
+                       prepare_to_wait(&port->delta_msr_wait,
                                        &wait, TASK_INTERRUPTIBLE);
                        schedule();
-                       finish_wait(&mct_u232_port->msr_wait, &wait);
+                       finish_wait(&port->delta_msr_wait, &wait);
                        /* see if a signal did it */
                        if (signal_pending(current))
                                return -ERESTARTSYS;
+
+                       if (port->serial->disconnected)
+                               return -EIO;
+
                        spin_lock_irqsave(&mct_u232_port->lock, flags);
                        cnow = mct_u232_port->icount;
                        spin_unlock_irqrestore(&mct_u232_port->lock, flags);
index 809fb329eca5f945d13c25e75f74bf79145389df..b8051fa61911a54a23de8f9a1662518181b6c084 100644 (file)
@@ -219,7 +219,6 @@ struct moschip_port {
        char open;
        char open_ports;
        wait_queue_head_t wait_chase;   /* for handling sleeping while waiting for chase to finish */
-       wait_queue_head_t delta_msr_wait;       /* for handling sleeping while waiting for msr change to happen */
        int delta_msr_cond;
        struct async_icount icount;
        struct usb_serial_port *port;   /* loop back to the owner of this object */
@@ -423,6 +422,9 @@ static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr)
                        icount->rng++;
                        smp_wmb();
                }
+
+               mos7840_port->delta_msr_cond = 1;
+               wake_up_interruptible(&port->port->delta_msr_wait);
        }
 }
 
@@ -1127,7 +1129,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
 
        /* initialize our wait queues */
        init_waitqueue_head(&mos7840_port->wait_chase);
-       init_waitqueue_head(&mos7840_port->delta_msr_wait);
 
        /* initialize our icount structure */
        memset(&(mos7840_port->icount), 0x00, sizeof(mos7840_port->icount));
@@ -2017,8 +2018,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
                        mos7840_port->read_urb_busy = false;
                }
        }
-       wake_up(&mos7840_port->delta_msr_wait);
-       mos7840_port->delta_msr_cond = 1;
        dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__,
                mos7840_port->shadowLCR);
 }
@@ -2219,13 +2218,18 @@ static int mos7840_ioctl(struct tty_struct *tty,
                while (1) {
                        /* interruptible_sleep_on(&mos7840_port->delta_msr_wait); */
                        mos7840_port->delta_msr_cond = 0;
-                       wait_event_interruptible(mos7840_port->delta_msr_wait,
-                                                (mos7840_port->
+                       wait_event_interruptible(port->delta_msr_wait,
+                                                (port->serial->disconnected ||
+                                                 mos7840_port->
                                                  delta_msr_cond == 1));
 
                        /* see if a signal did it */
                        if (signal_pending(current))
                                return -ERESTARTSYS;
+
+                       if (port->serial->disconnected)
+                               return -EIO;
+
                        cnow = mos7840_port->icount;
                        smp_rmb();
                        if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
index a958fd41b5b312a699b5d963d8db1a49ea56810b..87c71ccfee87bfd06d41034a52a6e5dc27a21c7f 100644 (file)
@@ -188,7 +188,6 @@ struct oti6858_private {
        u8 setup_done;
        struct delayed_work delayed_setup_work;
 
-       wait_queue_head_t intr_wait;
        struct usb_serial_port *port;   /* USB port with which associated */
 };
 
@@ -339,7 +338,6 @@ static int oti6858_port_probe(struct usb_serial_port *port)
                return -ENOMEM;
 
        spin_lock_init(&priv->lock);
-       init_waitqueue_head(&priv->intr_wait);
        priv->port = port;
        INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line);
        INIT_DELAYED_WORK(&priv->delayed_write_work, send_data);
@@ -664,11 +662,15 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
        spin_unlock_irqrestore(&priv->lock, flags);
 
        while (1) {
-               wait_event_interruptible(priv->intr_wait,
+               wait_event_interruptible(port->delta_msr_wait,
+                                       port->serial->disconnected ||
                                        priv->status.pin_state != prev);
                if (signal_pending(current))
                        return -ERESTARTSYS;
 
+               if (port->serial->disconnected)
+                       return -EIO;
+
                spin_lock_irqsave(&priv->lock, flags);
                status = priv->status.pin_state & PIN_MASK;
                spin_unlock_irqrestore(&priv->lock, flags);
@@ -763,7 +765,7 @@ static void oti6858_read_int_callback(struct urb *urb)
 
                if (!priv->transient) {
                        if (xs->pin_state != priv->status.pin_state)
-                               wake_up_interruptible(&priv->intr_wait);
+                               wake_up_interruptible(&port->delta_msr_wait);
                        memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE);
                }
 
index 54adc9125e5c1e9eb29b37573433e0d6324dce0e..3b10018d89a34e44e03b5ffb5562c34c7e4e6b85 100644 (file)
@@ -139,7 +139,6 @@ struct pl2303_serial_private {
 
 struct pl2303_private {
        spinlock_t lock;
-       wait_queue_head_t delta_msr_wait;
        u8 line_control;
        u8 line_status;
 };
@@ -233,7 +232,6 @@ static int pl2303_port_probe(struct usb_serial_port *port)
                return -ENOMEM;
 
        spin_lock_init(&priv->lock);
-       init_waitqueue_head(&priv->delta_msr_wait);
 
        usb_set_serial_port_data(port, priv);
 
@@ -607,11 +605,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
        spin_unlock_irqrestore(&priv->lock, flags);
 
        while (1) {
-               interruptible_sleep_on(&priv->delta_msr_wait);
+               interruptible_sleep_on(&port->delta_msr_wait);
                /* see if a signal did it */
                if (signal_pending(current))
                        return -ERESTARTSYS;
 
+               if (port->serial->disconnected)
+                       return -EIO;
+
                spin_lock_irqsave(&priv->lock, flags);
                status = priv->line_status;
                spin_unlock_irqrestore(&priv->lock, flags);
@@ -719,7 +720,7 @@ static void pl2303_update_line_status(struct usb_serial_port *port,
        spin_unlock_irqrestore(&priv->lock, flags);
        if (priv->line_status & UART_BREAK_ERROR)
                usb_serial_handle_break(port);
-       wake_up_interruptible(&priv->delta_msr_wait);
+       wake_up_interruptible(&port->delta_msr_wait);
 
        tty = tty_port_tty_get(&port->port);
        if (!tty)
@@ -783,7 +784,7 @@ static void pl2303_process_read_urb(struct urb *urb)
        line_status = priv->line_status;
        priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
        spin_unlock_irqrestore(&priv->lock, flags);
-       wake_up_interruptible(&priv->delta_msr_wait);
+       wake_up_interruptible(&port->delta_msr_wait);
 
        if (!urb->actual_length)
                return;
index d643a4d4d770819a68cd32f9402b5d2e8839c436..75f125ddb0c94136da28ebf5fa5d4e4200a0b410 100644 (file)
@@ -128,7 +128,6 @@ struct qt2_port_private {
        u8          shadowLSR;
        u8          shadowMSR;
 
-       wait_queue_head_t   delta_msr_wait; /* Used for TIOCMIWAIT */
        struct async_icount icount;
 
        struct usb_serial_port *port;
@@ -506,8 +505,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
        spin_unlock_irqrestore(&priv->lock, flags);
 
        while (1) {
-               wait_event_interruptible(priv->delta_msr_wait,
-                                        ((priv->icount.rng != prev.rng) ||
+               wait_event_interruptible(port->delta_msr_wait,
+                                        (port->serial->disconnected ||
+                                         (priv->icount.rng != prev.rng) ||
                                          (priv->icount.dsr != prev.dsr) ||
                                          (priv->icount.dcd != prev.dcd) ||
                                          (priv->icount.cts != prev.cts)));
@@ -515,6 +515,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
                if (signal_pending(current))
                        return -ERESTARTSYS;
 
+               if (port->serial->disconnected)
+                       return -EIO;
+
                spin_lock_irqsave(&priv->lock, flags);
                cur = priv->icount;
                spin_unlock_irqrestore(&priv->lock, flags);
@@ -827,7 +830,6 @@ static int qt2_port_probe(struct usb_serial_port *port)
 
        spin_lock_init(&port_priv->lock);
        spin_lock_init(&port_priv->urb_lock);
-       init_waitqueue_head(&port_priv->delta_msr_wait);
        port_priv->port = port;
 
        port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -970,7 +972,7 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch)
                if (newMSR & UART_MSR_TERI)
                        port_priv->icount.rng++;
 
-               wake_up_interruptible(&port_priv->delta_msr_wait);
+               wake_up_interruptible(&port->delta_msr_wait);
        }
 }
 
index 91ff8e3bddbd76a4daa3502d17dd2aaab96ac154..549ef68ff5fa2ea156fa31ea65f3b823353b1cfa 100644 (file)
@@ -149,7 +149,6 @@ enum spcp8x5_type {
 struct spcp8x5_private {
        spinlock_t      lock;
        enum spcp8x5_type       type;
-       wait_queue_head_t       delta_msr_wait;
        u8                      line_control;
        u8                      line_status;
 };
@@ -179,7 +178,6 @@ static int spcp8x5_port_probe(struct usb_serial_port *port)
                return -ENOMEM;
 
        spin_lock_init(&priv->lock);
-       init_waitqueue_head(&priv->delta_msr_wait);
        priv->type = type;
 
        usb_set_serial_port_data(port , priv);
@@ -475,7 +473,7 @@ static void spcp8x5_process_read_urb(struct urb *urb)
        priv->line_status &= ~UART_STATE_TRANSIENT_MASK;
        spin_unlock_irqrestore(&priv->lock, flags);
        /* wake up the wait for termios */
-       wake_up_interruptible(&priv->delta_msr_wait);
+       wake_up_interruptible(&port->delta_msr_wait);
 
        if (!urb->actual_length)
                return;
@@ -526,12 +524,15 @@ static int spcp8x5_wait_modem_info(struct usb_serial_port *port,
 
        while (1) {
                /* wake up in bulk read */
-               interruptible_sleep_on(&priv->delta_msr_wait);
+               interruptible_sleep_on(&port->delta_msr_wait);
 
                /* see if a signal did it */
                if (signal_pending(current))
                        return -ERESTARTSYS;
 
+               if (port->serial->disconnected)
+                       return -EIO;
+
                spin_lock_irqsave(&priv->lock, flags);
                status = priv->line_status;
                spin_unlock_irqrestore(&priv->lock, flags);
index b57cf841c5b6aa2ab1c21e13c12e063d51a2a9e7..4b2a19757b4d531baa8dfb48c8009517d2d21cb5 100644 (file)
@@ -61,7 +61,6 @@ struct ssu100_port_private {
        spinlock_t status_lock;
        u8 shadowLSR;
        u8 shadowMSR;
-       wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
        struct async_icount icount;
 };
 
@@ -355,8 +354,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
        spin_unlock_irqrestore(&priv->status_lock, flags);
 
        while (1) {
-               wait_event_interruptible(priv->delta_msr_wait,
-                                        ((priv->icount.rng != prev.rng) ||
+               wait_event_interruptible(port->delta_msr_wait,
+                                        (port->serial->disconnected ||
+                                         (priv->icount.rng != prev.rng) ||
                                          (priv->icount.dsr != prev.dsr) ||
                                          (priv->icount.dcd != prev.dcd) ||
                                          (priv->icount.cts != prev.cts)));
@@ -364,6 +364,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
                if (signal_pending(current))
                        return -ERESTARTSYS;
 
+               if (port->serial->disconnected)
+                       return -EIO;
+
                spin_lock_irqsave(&priv->status_lock, flags);
                cur = priv->icount;
                spin_unlock_irqrestore(&priv->status_lock, flags);
@@ -445,7 +448,6 @@ static int ssu100_port_probe(struct usb_serial_port *port)
                return -ENOMEM;
 
        spin_lock_init(&priv->status_lock);
-       init_waitqueue_head(&priv->delta_msr_wait);
 
        usb_set_serial_port_data(port, priv);
 
@@ -537,7 +539,7 @@ static void ssu100_update_msr(struct usb_serial_port *port, u8 msr)
                        priv->icount.dcd++;
                if (msr & UART_MSR_TERI)
                        priv->icount.rng++;
-               wake_up_interruptible(&priv->delta_msr_wait);
+               wake_up_interruptible(&port->delta_msr_wait);
        }
 }
 
index 39cb9b807c3cf998842c39267e9d217b82460e59..73deb029fc05d3d9c9b728f1fae1616a8efee6e4 100644 (file)
@@ -74,7 +74,6 @@ struct ti_port {
        int                     tp_flags;
        int                     tp_closing_wait;/* in .01 secs */
        struct async_icount     tp_icount;
-       wait_queue_head_t       tp_msr_wait;    /* wait for msr change */
        wait_queue_head_t       tp_write_wait;
        struct ti_device        *tp_tdev;
        struct usb_serial_port  *tp_port;
@@ -432,7 +431,6 @@ static int ti_port_probe(struct usb_serial_port *port)
        else
                tport->tp_uart_base_addr = TI_UART2_BASE_ADDR;
        tport->tp_closing_wait = closing_wait;
-       init_waitqueue_head(&tport->tp_msr_wait);
        init_waitqueue_head(&tport->tp_write_wait);
        if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) {
                kfree(tport);
@@ -784,9 +782,13 @@ static int ti_ioctl(struct tty_struct *tty,
                dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__);
                cprev = tport->tp_icount;
                while (1) {
-                       interruptible_sleep_on(&tport->tp_msr_wait);
+                       interruptible_sleep_on(&port->delta_msr_wait);
                        if (signal_pending(current))
                                return -ERESTARTSYS;
+
+                       if (port->serial->disconnected)
+                               return -EIO;
+
                        cnow = tport->tp_icount;
                        if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
                            cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
@@ -1392,7 +1394,7 @@ static void ti_handle_new_msr(struct ti_port *tport, __u8 msr)
                        icount->dcd++;
                if (msr & TI_MSR_DELTA_RI)
                        icount->rng++;
-               wake_up_interruptible(&tport->tp_msr_wait);
+               wake_up_interruptible(&tport->tp_port->delta_msr_wait);
                spin_unlock_irqrestore(&tport->tp_lock, flags);
        }
 
index a19ed74d770d73b016e69070cb27e282bbc21fad..5d9b178484fdf805bfd9a6e264732d5082216088 100644 (file)
@@ -151,6 +151,7 @@ static void destroy_serial(struct kref *kref)
                }
        }
 
+       usb_put_intf(serial->interface);
        usb_put_dev(serial->dev);
        kfree(serial);
 }
@@ -620,7 +621,7 @@ static struct usb_serial *create_serial(struct usb_device *dev,
        }
        serial->dev = usb_get_dev(dev);
        serial->type = driver;
-       serial->interface = interface;
+       serial->interface = usb_get_intf(interface);
        kref_init(&serial->kref);
        mutex_init(&serial->disc_mutex);
        serial->minor = SERIAL_TTY_NO_MINOR;
@@ -902,6 +903,7 @@ static int usb_serial_probe(struct usb_interface *interface,
                port->port.ops = &serial_port_ops;
                port->serial = serial;
                spin_lock_init(&port->lock);
+               init_waitqueue_head(&port->delta_msr_wait);
                /* Keep this for private driver use for the moment but
                   should probably go away */
                INIT_WORK(&port->work, usb_serial_port_work);
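
The usb-serial core hunks above make two changes: usb_serial_probe() now initialises the per-port delta_msr_wait queue that every TIOCMIWAIT conversion in this series sleeps on, and create_serial()/destroy_serial() take and drop a reference on the USB interface with usb_get_intf()/usb_put_intf() so serial->interface cannot be freed while the usb_serial structure still points at it. The get/put pairing is the usual reference-counting idiom, sketched below with a hypothetical holder structure.

#include <linux/usb.h>

/* Hypothetical holder; the usb_get_intf()/usb_put_intf() pairing is the point. */
struct example_holder {
	struct usb_interface *interface;
};

static void example_holder_bind(struct example_holder *h,
				struct usb_interface *intf)
{
	/* Pin the interface for as long as the holder may dereference it. */
	h->interface = usb_get_intf(intf);
}

static void example_holder_release(struct example_holder *h)
{
	/* Drop the reference taken in example_holder_bind(). */
	usb_put_intf(h->interface);
	h->interface = NULL;
}
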
index da04a074e7900c8dcc413bf761d5f471e29218a3..1799335288bd5e60522a60def1c311f07df62286 100644 (file)
@@ -496,6 +496,13 @@ UNUSUAL_DEV(  0x04e8, 0x5122, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG),
 
+/* Added by Dmitry Artamonow <mad_soft@inbox.ru> */
+UNUSUAL_DEV(  0x04e8, 0x5136, 0x0000, 0x9999,
+               "Samsung",
+               "YP-Z3",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_MAX_SECTORS_64),
+
 /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>.
  * Device uses standards-violating 32-byte Bulk Command Block Wrappers and
  * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011.
index 964ff22bf2819e5de87786e517fefe2bc50956af..aeb00fc2d3bea2caf19a650d30e4517caf89a76d 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/pci.h>
 #include <linux/uaccess.h>
 #include <linux/vfio.h>
+#include <linux/slab.h>
 
 #include "vfio_pci_private.h"
 
index 3639371fa697e63e9bf6d3e5ccc49453e5ecff76..a96509187deb677a03e0a7e4902d76fe1f3c81a5 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/vfio.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <linux/slab.h>
 
 #include "vfio_pci_private.h"
 
index 959b1cd89e6a5be5a402a79089077609f8e30716..ec6fb3fa59bb5962281bb6277220941b91df053c 100644 (file)
@@ -339,7 +339,8 @@ static void handle_tx(struct vhost_net *net)
                                msg.msg_controllen = 0;
                                ubufs = NULL;
                        } else {
-                               struct ubuf_info *ubuf = &vq->ubuf_info[head];
+                               struct ubuf_info *ubuf;
+                               ubuf = vq->ubuf_info + vq->upend_idx;
 
                                vq->heads[vq->upend_idx].len =
                                        VHOST_DMA_IN_PROGRESS;
index 9951297b24279105aa118c1e9bba1c0320972a15..2968b4934659aab01dd626334a9ab7fc0e607383 100644 (file)
@@ -60,6 +60,15 @@ enum {
        VHOST_SCSI_VQ_IO = 2,
 };
 
+/*
+ * VIRTIO_RING_F_EVENT_IDX seems broken. Not sure the bug is in
+ * kernel but disabling it helps.
+ * TODO: debug and remove the workaround.
+ */
+enum {
+       VHOST_SCSI_FEATURES = VHOST_FEATURES & (~VIRTIO_RING_F_EVENT_IDX)
+};
+
 #define VHOST_SCSI_MAX_TARGET  256
 #define VHOST_SCSI_MAX_VQ      128
 
@@ -850,7 +859,7 @@ static int vhost_scsi_clear_endpoint(
        for (index = 0; index < vs->dev.nvqs; ++index) {
                if (!vhost_vq_access_ok(&vs->vqs[index])) {
                        ret = -EFAULT;
-                       goto err;
+                       goto err_dev;
                }
        }
        for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
@@ -860,10 +869,11 @@ static int vhost_scsi_clear_endpoint(
                if (!tv_tpg)
                        continue;
 
+               mutex_lock(&tv_tpg->tv_tpg_mutex);
                tv_tport = tv_tpg->tport;
                if (!tv_tport) {
                        ret = -ENODEV;
-                       goto err;
+                       goto err_tpg;
                }
 
                if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
@@ -872,16 +882,19 @@ static int vhost_scsi_clear_endpoint(
                                tv_tport->tport_name, tv_tpg->tport_tpgt,
                                t->vhost_wwpn, t->vhost_tpgt);
                        ret = -EINVAL;
-                       goto err;
+                       goto err_tpg;
                }
                tv_tpg->tv_tpg_vhost_count--;
                vs->vs_tpg[target] = NULL;
                vs->vs_endpoint = false;
+               mutex_unlock(&tv_tpg->tv_tpg_mutex);
        }
        mutex_unlock(&vs->dev.mutex);
        return 0;
 
-err:
+err_tpg:
+       mutex_unlock(&tv_tpg->tv_tpg_mutex);
+err_dev:
        mutex_unlock(&vs->dev.mutex);
        return ret;
 }
@@ -937,11 +950,12 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                vhost_scsi_flush_vq(vs, i);
+       vhost_work_flush(&vs->dev, &vs->vs_completion_work);
 }
 
 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 {
-       if (features & ~VHOST_FEATURES)
+       if (features & ~VHOST_SCSI_FEATURES)
                return -EOPNOTSUPP;
 
        mutex_lock(&vs->dev.mutex);
@@ -987,7 +1001,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
                        return -EFAULT;
                return 0;
        case VHOST_GET_FEATURES:
-               features = VHOST_FEATURES;
+               features = VHOST_SCSI_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
                        return -EFAULT;
                return 0;
index 12cf5f31ee8f008158dfbee172b077185f3ef241..025428e04c33c7bb99a80f2310b5dfdf8c936c2b 100644 (file)
@@ -422,17 +422,22 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
                        = var->bits_per_pixel;
                break;
        case 16:
+               /* Older SOCs use IBGR:555 rather than BGR:565. */
+               if (sinfo->have_intensity_bit)
+                       var->green.length = 5;
+               else
+                       var->green.length = 6;
+
                if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
-                       /* RGB:565 mode */
-                       var->red.offset = 11;
+                       /* RGB:5X5 mode */
+                       var->red.offset = var->green.length + 5;
                        var->blue.offset = 0;
                } else {
-                       /* BGR:565 mode */
+                       /* BGR:5X5 mode */
                        var->red.offset = 0;
-                       var->blue.offset = 11;
+                       var->blue.offset = var->green.length + 5;
                }
                var->green.offset = 5;
-               var->green.length = 6;
                var->red.length = var->blue.length = 5;
                break;
        case 32:
@@ -679,8 +684,7 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red,
 
        case FB_VISUAL_PSEUDOCOLOR:
                if (regno < 256) {
-                       if (cpu_is_at91sam9261() || cpu_is_at91sam9263()
-                           || cpu_is_at91sam9rl()) {
+                       if (sinfo->have_intensity_bit) {
                                /* old style I+BGR:555 */
                                val  = ((red   >> 11) & 0x001f);
                                val |= ((green >>  6) & 0x03e0);
@@ -870,6 +874,10 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
        }
        sinfo->info = info;
        sinfo->pdev = pdev;
+       if (cpu_is_at91sam9261() || cpu_is_at91sam9263() ||
+                                                       cpu_is_at91sam9rl()) {
+               sinfo->have_intensity_bit = true;
+       }
 
        strcpy(info->fix.id, sinfo->pdev->name);
        info->flags = ATMEL_LCDFB_FBINFO_DEFAULT;
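
For reference, the 16bpp pixel layouts produced by the atmel_lcdfb hunk above can be read off the new offsets and lengths (green.offset stays 5, red and blue stay 5 bits wide); this summary is derived from the hunk, not stated in the patch, and bit 15 in the 15-bit modes is left for the intensity bit:

    older SoCs (have_intensity_bit), RGB wiring:      I at bit 15, R 14-10, G 9-5, B 4-0
    older SoCs (have_intensity_bit), default wiring:  I at bit 15, B 14-10, G 9-5, R 4-0
    newer SoCs, RGB wiring:                           R 15-11, G 10-5, B 4-0
    newer SoCs, default wiring:                       B 15-11, G 10-5, R 4-0
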
index 3f2519d3071574bd42dbe646afbf24dd1ba3d034..e06cd5d90c97806cb38c76f458c004a20b4acca0 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/clk.h>
 #include <linux/fb.h>
+#include <linux/io.h>
 
 #include <linux/platform_data/video-ep93xx.h>
 
index 755556ca5b2d988ec68b8cc3c1f9bbe7aadf6a34..45169cbaba6e288e714e2ce414dff1061a4569af 100644 (file)
@@ -169,6 +169,7 @@ struct mxsfb_info {
        unsigned dotclk_delay;
        const struct mxsfb_devdata *devdata;
        int mapped;
+       u32 sync;
 };
 
 #define mxsfb_is_v3(host) (host->devdata->ipversion == 3)
@@ -456,9 +457,9 @@ static int mxsfb_set_par(struct fb_info *fb_info)
                vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
        if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT)
                vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
-       if (fb_info->var.sync & FB_SYNC_DATA_ENABLE_HIGH_ACT)
+       if (host->sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT)
                vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
-       if (fb_info->var.sync & FB_SYNC_DOTCLK_FAILING_ACT)
+       if (host->sync & MXSFB_SYNC_DOTCLK_FAILING_ACT)
                vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING;
 
        writel(vdctrl0, host->base + LCDC_VDCTRL0);
@@ -861,6 +862,8 @@ static int mxsfb_probe(struct platform_device *pdev)
 
        INIT_LIST_HEAD(&fb_info->modelist);
 
+       host->sync = pdata->sync;
+
        ret = mxsfb_init_fbinfo(host);
        if (ret != 0)
                goto error_init_fb;
index e31f5b33b501e155fed50e9a5463614e9cdc1690..d40612c31a989d9437e482496ff52c6f451404d8 100644 (file)
@@ -32,6 +32,8 @@
 
 #include <linux/omap-dma.h>
 
+#include <mach/hardware.h>
+
 #include "omapfb.h"
 #include "lcdc.h"
 
index 6b6643911d296c707e7eaa29c30412081378cc5f..048c98381ef6affd36417518881b890ec1f170dd 100644 (file)
@@ -63,6 +63,9 @@ struct tpo_td043_device {
        u32 power_on_resume:1;
 };
 
+/* used to pass spi_device from SPI to DSS portion of the driver */
+static struct tpo_td043_device *g_tpo_td043;
+
 static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data)
 {
        struct spi_message      m;
@@ -403,7 +406,7 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
 
 static int tpo_td043_probe(struct omap_dss_device *dssdev)
 {
-       struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+       struct tpo_td043_device *tpo_td043 = g_tpo_td043;
        int nreset_gpio = dssdev->reset_gpio;
        int ret = 0;
 
@@ -440,6 +443,8 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev)
        if (ret)
                dev_warn(&dssdev->dev, "failed to create sysfs files\n");
 
+       dev_set_drvdata(&dssdev->dev, tpo_td043);
+
        return 0;
 
 fail_gpio_req:
@@ -505,6 +510,9 @@ static int tpo_td043_spi_probe(struct spi_device *spi)
                return -ENODEV;
        }
 
+       if (g_tpo_td043 != NULL)
+               return -EBUSY;
+
        spi->bits_per_word = 16;
        spi->mode = SPI_MODE_0;
 
@@ -521,7 +529,7 @@ static int tpo_td043_spi_probe(struct spi_device *spi)
        tpo_td043->spi = spi;
        tpo_td043->nreset_gpio = dssdev->reset_gpio;
        dev_set_drvdata(&spi->dev, tpo_td043);
-       dev_set_drvdata(&dssdev->dev, tpo_td043);
+       g_tpo_td043 = tpo_td043;
 
        omap_dss_register_driver(&tpo_td043_driver);
 
@@ -534,6 +542,7 @@ static int tpo_td043_spi_remove(struct spi_device *spi)
 
        omap_dss_unregister_driver(&tpo_td043_driver);
        kfree(tpo_td043);
+       g_tpo_td043 = NULL;
 
        return 0;
 }
index d7d66ef5cb58098c6f80268651c0a277957fc2e0..7f791aeda4d24a1025e4d712f11e1b828cdb8b24 100644 (file)
@@ -202,12 +202,10 @@ static const enum omap_dss_output_id omap3630_dss_supported_outputs[] = {
 
 static const enum omap_dss_output_id omap4_dss_supported_outputs[] = {
        /* OMAP_DSS_CHANNEL_LCD */
-       OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
-       OMAP_DSS_OUTPUT_DSI1,
+       OMAP_DSS_OUTPUT_DBI | OMAP_DSS_OUTPUT_DSI1,
 
        /* OMAP_DSS_CHANNEL_DIGIT */
-       OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI |
-       OMAP_DSS_OUTPUT_DPI,
+       OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI,
 
        /* OMAP_DSS_CHANNEL_LCD2 */
        OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI |
index e3b8f757d2d3f641810cece7a0423fa268cf2b88..0e9d8c479c3551e2bb5a51cb9bfcd055f2bcb3e7 100644 (file)
 #include "sp5100_tco.h"
 
 /* Module and version information */
-#define TCO_VERSION "0.03"
+#define TCO_VERSION "0.05"
 #define TCO_MODULE_NAME "SP5100 TCO timer"
 #define TCO_DRIVER_NAME   TCO_MODULE_NAME ", v" TCO_VERSION
 
 /* internal variables */
 static u32 tcobase_phys;
-static u32 resbase_phys;
 static u32 tco_wdt_fired;
 static void __iomem *tcobase;
 static unsigned int pm_iobase;
@@ -54,10 +53,6 @@ static DEFINE_SPINLOCK(tco_lock);    /* Guards the hardware */
 static unsigned long timer_alive;
 static char tco_expect_close;
 static struct pci_dev *sp5100_tco_pci;
-static struct resource wdt_res = {
-       .name = "Watchdog Timer",
-       .flags = IORESOURCE_MEM,
-};
 
 /* the watchdog platform device */
 static struct platform_device *sp5100_tco_platform_device;
@@ -75,12 +70,6 @@ module_param(nowayout, bool, 0);
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started."
                " (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static unsigned int force_addr;
-module_param(force_addr, uint, 0);
-MODULE_PARM_DESC(force_addr, "Force the use of specified MMIO address."
-               " ONLY USE THIS PARAMETER IF YOU REALLY KNOW"
-               " WHAT YOU ARE DOING (default=none)");
-
 /*
  * Some TCO specific functions
  */
@@ -176,39 +165,6 @@ static void tco_timer_enable(void)
        }
 }
 
-static void tco_timer_disable(void)
-{
-       int val;
-
-       if (sp5100_tco_pci->revision >= 0x40) {
-               /* For SB800 or later */
-               /* Enable watchdog decode bit and Disable watchdog timer */
-               outb(SB800_PM_WATCHDOG_CONTROL, SB800_IO_PM_INDEX_REG);
-               val = inb(SB800_IO_PM_DATA_REG);
-               val |= SB800_PCI_WATCHDOG_DECODE_EN;
-               val |= SB800_PM_WATCHDOG_DISABLE;
-               outb(val, SB800_IO_PM_DATA_REG);
-       } else {
-               /* For SP5100 or SB7x0 */
-               /* Enable watchdog decode bit */
-               pci_read_config_dword(sp5100_tco_pci,
-                                     SP5100_PCI_WATCHDOG_MISC_REG,
-                                     &val);
-
-               val |= SP5100_PCI_WATCHDOG_DECODE_EN;
-
-               pci_write_config_dword(sp5100_tco_pci,
-                                      SP5100_PCI_WATCHDOG_MISC_REG,
-                                      val);
-
-               /* Disable Watchdog timer */
-               outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
-               val = inb(SP5100_IO_PM_DATA_REG);
-               val |= SP5100_PM_WATCHDOG_DISABLE;
-               outb(val, SP5100_IO_PM_DATA_REG);
-       }
-}
-
 /*
  *     /dev/watchdog handling
  */
@@ -361,7 +317,7 @@ static unsigned char sp5100_tco_setupdevice(void)
 {
        struct pci_dev *dev = NULL;
        const char *dev_name = NULL;
-       u32 val, tmp_val;
+       u32 val;
        u32 index_reg, data_reg, base_addr;
 
        /* Match the PCI device */
@@ -459,63 +415,8 @@ static unsigned char sp5100_tco_setupdevice(void)
        } else
                pr_debug("SBResource_MMIO is disabled(0x%04x)\n", val);
 
-       /*
-        * Lastly re-programming the watchdog timer MMIO address,
-        * This method is a last resort...
-        *
-        * Before re-programming, to ensure that the watchdog timer
-        * is disabled, disable the watchdog timer.
-        */
-       tco_timer_disable();
-
-       if (force_addr) {
-               /*
-                * Force the use of watchdog timer MMIO address, and aligned to
-                * 8byte boundary.
-                */
-               force_addr &= ~0x7;
-               val = force_addr;
-
-               pr_info("Force the use of 0x%04x as MMIO address\n", val);
-       } else {
-               /*
-                * Get empty slot into the resource tree for watchdog timer.
-                */
-               if (allocate_resource(&iomem_resource,
-                                     &wdt_res,
-                                     SP5100_WDT_MEM_MAP_SIZE,
-                                     0xf0000000,
-                                     0xfffffff8,
-                                     0x8,
-                                     NULL,
-                                     NULL)) {
-                       pr_err("MMIO allocation failed\n");
-                       goto unreg_region;
-               }
-
-               val = resbase_phys = wdt_res.start;
-               pr_debug("Got 0x%04x from resource tree\n", val);
-       }
-
-       /* Restore to the low three bits */
-       outb(base_addr+0, index_reg);
-       tmp_val = val | (inb(data_reg) & 0x7);
-
-       /* Re-programming the watchdog timer base address */
-       outb(base_addr+0, index_reg);
-       outb((tmp_val >>  0) & 0xff, data_reg);
-       outb(base_addr+1, index_reg);
-       outb((tmp_val >>  8) & 0xff, data_reg);
-       outb(base_addr+2, index_reg);
-       outb((tmp_val >> 16) & 0xff, data_reg);
-       outb(base_addr+3, index_reg);
-       outb((tmp_val >> 24) & 0xff, data_reg);
-
-       if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE,
-                                                                  dev_name)) {
-               pr_err("MMIO address 0x%04x already in use\n", val);
-               goto unreg_resource;
-       }
+       pr_notice("failed to find MMIO address, giving up.\n");
+       goto  unreg_region;
 
 setup_wdt:
        tcobase_phys = val;
@@ -555,9 +456,6 @@ setup_wdt:
 
 unreg_mem_region:
        release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
-unreg_resource:
-       if (resbase_phys)
-               release_resource(&wdt_res);
 unreg_region:
        release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
 exit:
@@ -567,7 +465,6 @@ exit:
 static int sp5100_tco_init(struct platform_device *dev)
 {
        int ret;
-       char addr_str[16];
 
        /*
         * Check whether or not the hardware watchdog is there. If found, then
@@ -599,23 +496,14 @@ static int sp5100_tco_init(struct platform_device *dev)
        clear_bit(0, &timer_alive);
 
        /* Show module parameters */
-       if (force_addr == tcobase_phys)
-               /* The force_addr is vaild */
-               sprintf(addr_str, "0x%04x", force_addr);
-       else
-               strcpy(addr_str, "none");
-
-       pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d, "
-               "force_addr=%s)\n",
-               tcobase, heartbeat, nowayout, addr_str);
+       pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n",
+               tcobase, heartbeat, nowayout);
 
        return 0;
 
 exit:
        iounmap(tcobase);
        release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
-       if (resbase_phys)
-               release_resource(&wdt_res);
        release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
        return ret;
 }
@@ -630,8 +518,6 @@ static void sp5100_tco_cleanup(void)
        misc_deregister(&sp5100_tco_miscdev);
        iounmap(tcobase);
        release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE);
-       if (resbase_phys)
-               release_resource(&wdt_res);
        release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
 }
 
index 71594a0c14b735f56decde5e1ac5e01543fbda6c..2b28c00da0df00d2f99a8c5283b2d2b7c84a9ffa 100644 (file)
@@ -57,7 +57,7 @@
 #define SB800_PM_WATCHDOG_DISABLE      (1 << 2)
 #define SB800_PM_WATCHDOG_SECOND_RES   (3 << 0)
 #define SB800_ACPI_MMIO_DECODE_EN      (1 << 0)
-#define SB800_ACPI_MMIO_SEL            (1 << 2)
+#define SB800_ACPI_MMIO_SEL            (1 << 1)
 
 
 #define SB800_PM_WDT_MMIO_OFFSET       0xB00
index 5a32232cf7c15cdc05536ecb157eeb86048154e0..67af155cf60286f87c6dc1c576d72008d6e5c4fb 100644 (file)
@@ -182,7 +182,7 @@ config XEN_PRIVCMD
 
 config XEN_STUB
        bool "Xen stub drivers"
-       depends on XEN && X86_64
+       depends on XEN && X86_64 && BROKEN
        default n
        help
          Allow kernel to install stub drivers, to reserve space for Xen drivers,
index d17aa41a9041428fbff17a890b2af18b64422ef4..aa85881d17b23f7ff6425be0140b6aed0c63c942 100644 (file)
@@ -403,11 +403,23 @@ static void unmask_evtchn(int port)
 
        if (unlikely((cpu != cpu_from_evtchn(port))))
                do_hypercall = 1;
-       else
+       else {
+               /*
+                * Need to clear the mask before checking pending to
+                * avoid a race with an event becoming pending.
+                *
+                * EVTCHNOP_unmask will only trigger an upcall if the
+                * mask bit was set, so if a hypercall is needed
+                * remask the event.
+                */
+               sync_clear_bit(port, BM(&s->evtchn_mask[0]));
                evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));
 
-       if (unlikely(evtchn_pending && xen_hvm_domain()))
-               do_hypercall = 1;
+               if (unlikely(evtchn_pending && xen_hvm_domain())) {
+                       sync_set_bit(port, BM(&s->evtchn_mask[0]));
+                       do_hypercall = 1;
+               }
+       }
 
        /* Slow path (hypercall) if this is a non-local port or if this is
         * an hvm domain and an event is pending (hvm domains don't have
@@ -418,8 +430,6 @@ static void unmask_evtchn(int port)
        } else {
                struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
 
-               sync_clear_bit(port, BM(&s->evtchn_mask[0]));
-
                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
index 0ef7c4d40f86b9018ed2ae2ed0eda2071d4d7bfd..b04fb64c5a91ee2613b21c8d9c146ab16f82a941 100644 (file)
@@ -44,7 +44,7 @@ int xen_event_channel_op_compat(int cmd, void *arg)
 }
 EXPORT_SYMBOL_GPL(xen_event_channel_op_compat);
 
-int HYPERVISOR_physdev_op_compat(int cmd, void *arg)
+int xen_physdev_op_compat(int cmd, void *arg)
 {
        struct physdev_op op;
        int rc;
@@ -78,3 +78,4 @@ int HYPERVISOR_physdev_op_compat(int cmd, void *arg)
 
        return rc;
 }
+EXPORT_SYMBOL_GPL(xen_physdev_op_compat);
index f3278a6603ca3b0913280f4bfa5c7e07c0eff47a..90e34ac7e522dfcf2c5af269639f5f877d95ac3b 100644 (file)
@@ -505,6 +505,9 @@ static int __init xen_acpi_processor_init(void)
 
                pr = per_cpu(processors, i);
                perf = per_cpu_ptr(acpi_perf_data, i);
+               if (!pr)
+                       continue;
+
                pr->performance = perf;
                rc = acpi_processor_get_performance_info(pr);
                if (rc)
index 9204126f1560ced72321b267b96099acfe71b8dd..a2278ba7fb273a523476e0504388e49e48511093 100644 (file)
@@ -17,6 +17,7 @@
 #include <xen/events.h>
 #include <asm/xen/pci.h>
 #include <asm/xen/hypervisor.h>
+#include <xen/interface/physdev.h>
 #include "pciback.h"
 #include "conf_space.h"
 #include "conf_space_quirks.h"
@@ -85,37 +86,52 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
 static void pcistub_device_release(struct kref *kref)
 {
        struct pcistub_device *psdev;
+       struct pci_dev *dev;
        struct xen_pcibk_dev_data *dev_data;
 
        psdev = container_of(kref, struct pcistub_device, kref);
-       dev_data = pci_get_drvdata(psdev->dev);
+       dev = psdev->dev;
+       dev_data = pci_get_drvdata(dev);
 
-       dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
+       dev_dbg(&dev->dev, "pcistub_device_release\n");
 
-       xen_unregister_device_domain_owner(psdev->dev);
+       xen_unregister_device_domain_owner(dev);
 
        /* Call the reset function which does not take lock as this
         * is called from "unbind" which takes a device_lock mutex.
         */
-       __pci_reset_function_locked(psdev->dev);
-       if (pci_load_and_free_saved_state(psdev->dev,
-                                         &dev_data->pci_saved_state)) {
-               dev_dbg(&psdev->dev->dev, "Could not reload PCI state\n");
-       } else
-               pci_restore_state(psdev->dev);
+       __pci_reset_function_locked(dev);
+       if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
+               dev_dbg(&dev->dev, "Could not reload PCI state\n");
+       else
+               pci_restore_state(dev);
+
+       if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+               struct physdev_pci_device ppdev = {
+                       .seg = pci_domain_nr(dev->bus),
+                       .bus = dev->bus->number,
+                       .devfn = dev->devfn
+               };
+               int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
+                                               &ppdev);
+
+               if (err)
+                       dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
+                                err);
+       }
 
        /* Disable the device */
-       xen_pcibk_reset_device(psdev->dev);
+       xen_pcibk_reset_device(dev);
 
        kfree(dev_data);
-       pci_set_drvdata(psdev->dev, NULL);
+       pci_set_drvdata(dev, NULL);
 
        /* Clean-up the device */
-       xen_pcibk_config_free_dyn_fields(psdev->dev);
-       xen_pcibk_config_free_dev(psdev->dev);
+       xen_pcibk_config_free_dyn_fields(dev);
+       xen_pcibk_config_free_dev(dev);
 
-       psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
-       pci_dev_put(psdev->dev);
+       dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+       pci_dev_put(dev);
 
        kfree(psdev);
 }
@@ -355,6 +371,19 @@ static int pcistub_init_device(struct pci_dev *dev)
        if (err)
                goto config_release;
 
+       if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
+               struct physdev_pci_device ppdev = {
+                       .seg = pci_domain_nr(dev->bus),
+                       .bus = dev->bus->number,
+                       .devfn = dev->devfn
+               };
+
+               err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
+               if (err)
+                       dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
+                               err);
+       }
+
        /* We need the device active to save the state. */
        dev_dbg(&dev->dev, "save state of device\n");
        pci_save_state(dev);
index cbb09ce9730ac0494dc43fbef5d7d6f2ddd05b0b..5d8ee1319b5c1878c5b82f82bb150dfece7b2cf8 100644 (file)
@@ -82,7 +82,7 @@ fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
 fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \
                                         qlogic/12160.bin
 fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin
-fw-shipped-$(CONFIG_INFINIBAND_QIB) += qlogic/sd7220.fw
+fw-shipped-$(CONFIG_INFINIBAND_QIB) += intel/sd7220.fw
 fw-shipped-$(CONFIG_SND_KORG1212) += korg/k1212.dsp
 fw-shipped-$(CONFIG_SND_MAESTRO3) += ess/maestro3_assp_kernel.fw \
                                     ess/maestro3_assp_minisrc.fw
index ecd25a1b4e519562ff874112515052403877fbc4..ca9d8f1a3bb67968dab36b8df98c0c2093a2300f 100644 (file)
@@ -651,6 +651,8 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
        if (tree_mod_dont_log(fs_info, NULL))
                return 0;
 
+       __tree_mod_log_free_eb(fs_info, old_root);
+
        ret = tree_mod_alloc(fs_info, flags, &tm);
        if (ret < 0)
                goto out;
@@ -736,7 +738,7 @@ tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
 static noinline void
 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
                     struct extent_buffer *src, unsigned long dst_offset,
-                    unsigned long src_offset, int nr_items)
+                    unsigned long src_offset, int nr_items, int log_removal)
 {
        int ret;
        int i;
@@ -750,10 +752,12 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
        }
 
        for (i = 0; i < nr_items; i++) {
-               ret = tree_mod_log_insert_key_locked(fs_info, src,
-                                                    i + src_offset,
-                                                    MOD_LOG_KEY_REMOVE);
-               BUG_ON(ret < 0);
+               if (log_removal) {
+                       ret = tree_mod_log_insert_key_locked(fs_info, src,
+                                                       i + src_offset,
+                                                       MOD_LOG_KEY_REMOVE);
+                       BUG_ON(ret < 0);
+               }
                ret = tree_mod_log_insert_key_locked(fs_info, dst,
                                                     i + dst_offset,
                                                     MOD_LOG_KEY_ADD);
@@ -927,7 +931,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                        ret = btrfs_dec_ref(trans, root, buf, 1, 1);
                        BUG_ON(ret); /* -ENOMEM */
                }
-               tree_mod_log_free_eb(root->fs_info, buf);
                clean_tree_block(trans, root, buf);
                *last_ref = 1;
        }
@@ -1046,6 +1049,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                btrfs_set_node_ptr_generation(parent, parent_slot,
                                              trans->transid);
                btrfs_mark_buffer_dirty(parent);
+               tree_mod_log_free_eb(root->fs_info, buf);
                btrfs_free_tree_block(trans, root, buf, parent_start,
                                      last_ref);
        }
@@ -1750,7 +1754,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                        goto enospc;
                }
 
-               tree_mod_log_free_eb(root->fs_info, root->node);
                tree_mod_log_set_root_pointer(root, child);
                rcu_assign_pointer(root->node, child);
 
@@ -2995,7 +2998,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
                push_items = min(src_nritems - 8, push_items);
 
        tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
-                            push_items);
+                            push_items, 1);
        copy_extent_buffer(dst, src,
                           btrfs_node_key_ptr_offset(dst_nritems),
                           btrfs_node_key_ptr_offset(0),
@@ -3066,7 +3069,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
                                      sizeof(struct btrfs_key_ptr));
 
        tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
-                            src_nritems - push_items, push_items);
+                            src_nritems - push_items, push_items, 1);
        copy_extent_buffer(dst, src,
                           btrfs_node_key_ptr_offset(0),
                           btrfs_node_key_ptr_offset(src_nritems - push_items),
@@ -3218,12 +3221,18 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
        int mid;
        int ret;
        u32 c_nritems;
+       int tree_mod_log_removal = 1;
 
        c = path->nodes[level];
        WARN_ON(btrfs_header_generation(c) != trans->transid);
        if (c == root->node) {
                /* trying to split the root, lets make a new one */
                ret = insert_new_root(trans, root, path, level + 1);
+               /*
+                * removal of root nodes has been logged by
+                * tree_mod_log_set_root_pointer due to locking
+                */
+               tree_mod_log_removal = 0;
                if (ret)
                        return ret;
        } else {
@@ -3261,7 +3270,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
                            (unsigned long)btrfs_header_chunk_tree_uuid(split),
                            BTRFS_UUID_SIZE);
 
-       tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
+       tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid,
+                            tree_mod_log_removal);
        copy_extent_buffer(split, c,
                           btrfs_node_key_ptr_offset(0),
                           btrfs_node_key_ptr_offset(mid),
index 7d84651e850b22cb937b29c68d34c1119af1e4a0..6d19a0a554aadc3aeadbe1df6eff03acd489375e 100644 (file)
@@ -1291,6 +1291,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
                                      0, objectid, NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
+               leaf = NULL;
                goto fail;
        }
 
@@ -1334,11 +1335,16 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 
        btrfs_tree_unlock(leaf);
 
+       return root;
+
 fail:
-       if (ret)
-               return ERR_PTR(ret);
+       if (leaf) {
+               btrfs_tree_unlock(leaf);
+               free_extent_buffer(leaf);
+       }
+       kfree(root);
 
-       return root;
+       return ERR_PTR(ret);
 }
 
 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
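
The reworked btrfs_create_tree() error path above follows the usual kernel convention: return the object on success, and on failure unwind whatever was acquired, in reverse order, before returning ERR_PTR(ret). A minimal userspace sketch of that shape, with err_ptr()/is_err()/ptr_err() written out as hypothetical stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR():

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() */
static inline void *err_ptr(long err) { return (void *)(intptr_t)err; }
static inline int is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }
static inline long ptr_err(const void *p) { return (long)(intptr_t)p; }

struct tree { int *leaf; };

static struct tree *create_tree(int fail_leaf)
{
	struct tree *root;
	int *leaf = NULL;
	long ret;

	root = malloc(sizeof(*root));
	if (!root)
		return err_ptr(-ENOMEM);

	leaf = fail_leaf ? NULL : malloc(sizeof(*leaf));
	if (!leaf) {
		ret = -ENOMEM;
		goto fail;		/* only root acquired so far */
	}

	root->leaf = leaf;
	return root;			/* success path returns the object */

fail:
	free(leaf);			/* undo in reverse order of acquisition */
	free(root);
	return err_ptr(ret);
}

int main(void)
{
	struct tree *t = create_tree(1);

	if (is_err(t)) {
		printf("create_tree failed: %ld\n", ptr_err(t));
	} else {
		free(t->leaf);
		free(t);
	}
	return 0;
}
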
@@ -3253,7 +3259,7 @@ void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
        if (btrfs_root_refs(&root->root_item) == 0)
                synchronize_srcu(&fs_info->subvol_srcu);
 
-       if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                btrfs_free_log(NULL, root);
                btrfs_free_log_root_tree(NULL, fs_info);
        }
index 9ac2eca681ebb09c605612dd563bc4417e4fc587..3d551231cabae9e43c7a1e90385f22fd25891239 100644 (file)
@@ -257,7 +257,8 @@ static int exclude_super_stripes(struct btrfs_root *root,
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
-               BUG_ON(ret); /* -ENOMEM */
+               if (ret)
+                       return ret;
        }
 
        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
@@ -265,13 +266,17 @@ static int exclude_super_stripes(struct btrfs_root *root,
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
-               BUG_ON(ret); /* -ENOMEM */
+               if (ret)
+                       return ret;
 
                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
-                       BUG_ON(ret); /* -ENOMEM */
+                       if (ret) {
+                               kfree(logical);
+                               return ret;
+                       }
                }
 
                kfree(logical);
@@ -4438,7 +4443,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
        spin_lock(&sinfo->lock);
        spin_lock(&block_rsv->lock);
 
-       block_rsv->size = num_bytes;
+       block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
 
        num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
                    sinfo->bytes_reserved + sinfo->bytes_readonly +
@@ -4793,14 +4798,49 @@ out_fail:
         * If the inodes csum_bytes is the same as the original
         * csum_bytes then we know we haven't raced with any free()ers
         * so we can just reduce our inodes csum bytes and carry on.
-        * Otherwise we have to do the normal free thing to account for
-        * the case that the free side didn't free up its reserve
-        * because of this outstanding reservation.
         */
-       if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+       if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
                calc_csum_metadata_size(inode, num_bytes, 0);
-       else
-               to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+       } else {
+               u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
+               u64 bytes;
+
+               /*
+                * This is tricky, but first we need to figure out how much was
+                * freed by any free()ers that occurred during this
+                * reservation, so we reset ->csum_bytes to the csum_bytes
+                * before we dropped our lock, and then call the free for the
+                * number of bytes that were freed while we were trying our
+                * reservation.
+                */
+               bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
+               BTRFS_I(inode)->csum_bytes = csum_bytes;
+               to_free = calc_csum_metadata_size(inode, bytes, 0);
+
+
+               /*
+                * Now we need to see how much we would have freed had we not
+                * been making this reservation and our ->csum_bytes were not
+                * artificially inflated.
+                */
+               BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
+               bytes = csum_bytes - orig_csum_bytes;
+               bytes = calc_csum_metadata_size(inode, bytes, 0);
+
+               /*
+                * Now reset ->csum_bytes to what it should be.  If bytes is
+                * more than to_free then we would have freed more space had we
+                * not had an artificially high ->csum_bytes, so we need to free
+                * the remainder.  If bytes is the same or less then we don't
+                * need to do anything; the other free()ers did the correct
+                * thing.
+                */
+               BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
+               if (bytes > to_free)
+                       to_free = bytes - to_free;
+               else
+                       to_free = 0;
+       }
        spin_unlock(&BTRFS_I(inode)->lock);
        if (dropped)
                to_free += btrfs_calc_trans_metadata_size(root, dropped);
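
The reconciliation spelled out in the comments above is easier to follow with concrete numbers. Below is a minimal userspace sketch of the same three steps; calc_free(), leaves() and CSUMS_PER_LEAF are illustrative stand-ins only, built on the assumption (implied by the way the patch resets ->csum_bytes around each call) that calc_csum_metadata_size() consumes the passed bytes from the inode's tracked csum_bytes and returns an amount that depends on the current value:

#include <stdint.h>
#include <stdio.h>

#define CSUMS_PER_LEAF	64	/* arbitrary illustrative value */

/* stands in for BTRFS_I(inode)->csum_bytes */
static uint64_t csum_bytes_tracked;

static uint64_t leaves(uint64_t bytes)
{
	return (bytes + CSUMS_PER_LEAF - 1) / CSUMS_PER_LEAF;
}

/*
 * Stand-in for calc_csum_metadata_size(inode, bytes, 0): drop 'bytes' from
 * the tracked total and report how many csum leaves are no longer needed,
 * which depends on the current tracked value.
 */
static uint64_t calc_free(uint64_t bytes)
{
	uint64_t before = leaves(csum_bytes_tracked);

	csum_bytes_tracked -= bytes;
	return before - leaves(csum_bytes_tracked);
}

int main(void)
{
	uint64_t csum_bytes = 128;	/* value sampled before the lock was dropped */
	uint64_t num_bytes = 4;		/* our reservation being undone */
	uint64_t orig_csum_bytes, bytes, to_free;

	csum_bytes_tracked = 66;	/* racers freed 62 bytes meanwhile */
	orig_csum_bytes = csum_bytes_tracked;

	/* 1. replay the racers' frees against the pre-drop value */
	bytes = csum_bytes - csum_bytes_tracked;
	csum_bytes_tracked = csum_bytes;
	to_free = calc_free(bytes);

	/* 2. what would have been freed without our inflated csum_bytes */
	csum_bytes_tracked = csum_bytes - num_bytes;
	bytes = csum_bytes - orig_csum_bytes;
	bytes = calc_free(bytes);

	/* 3. free only the remainder, then settle on the correct value */
	csum_bytes_tracked = orig_csum_bytes - num_bytes;
	to_free = (bytes > to_free) ? bytes - to_free : 0;

	printf("leaves to free: %llu (tracked csum_bytes now %llu)\n",
	       (unsigned long long)to_free,
	       (unsigned long long)csum_bytes_tracked);
	return 0;
}

With these numbers the inflated csum_bytes hid one leaf boundary, so one extra unit of metadata has to be freed at the end, which is exactly the case the comment calls tricky.
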
@@ -7947,7 +7987,17 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                 * info has super bytes accounted for, otherwise we'll think
                 * we have more space than we actually do.
                 */
-               exclude_super_stripes(root, cache);
+               ret = exclude_super_stripes(root, cache);
+               if (ret) {
+                       /*
+                        * We may have excluded something, so call this just in
+                        * case.
+                        */
+                       free_excluded_extents(root, cache);
+                       kfree(cache->free_space_ctl);
+                       kfree(cache);
+                       goto error;
+               }
 
                /*
                 * check for two cases, either we are full, and therefore
@@ -8089,7 +8139,17 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 
        cache->last_byte_to_unpin = (u64)-1;
        cache->cached = BTRFS_CACHE_FINISHED;
-       exclude_super_stripes(root, cache);
+       ret = exclude_super_stripes(root, cache);
+       if (ret) {
+               /*
+                * We may have excluded something, so call this just in
+                * case.
+                */
+               free_excluded_extents(root, cache);
+               kfree(cache->free_space_ctl);
+               kfree(cache);
+               return ret;
+       }
 
        add_new_free_space(cache, root->fs_info, chunk_offset,
                           chunk_offset + size);
index f173c5af64610de66597cce1e38ac493729d6125..cdee391fc7bfd57c596204a8474142e7afe60335 100644 (file)
@@ -1257,6 +1257,39 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
                                GFP_NOFS);
 }
 
+int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
+{
+       unsigned long index = start >> PAGE_CACHE_SHIFT;
+       unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+       struct page *page;
+
+       while (index <= end_index) {
+               page = find_get_page(inode->i_mapping, index);
+               BUG_ON(!page); /* Pages should be in the extent_io_tree */
+               clear_page_dirty_for_io(page);
+               page_cache_release(page);
+               index++;
+       }
+       return 0;
+}
+
+int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
+{
+       unsigned long index = start >> PAGE_CACHE_SHIFT;
+       unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+       struct page *page;
+
+       while (index <= end_index) {
+               page = find_get_page(inode->i_mapping, index);
+               BUG_ON(!page); /* Pages should be in the extent_io_tree */
+               account_page_redirty(page);
+               __set_page_dirty_nobuffers(page);
+               page_cache_release(page);
+               index++;
+       }
+       return 0;
+}
+
 /*
  * helper function to set both pages and extents in the tree writeback
  */
index 6068a1985560eed72a8df42fa528377c3584c93a..258c92156857f1d1e9e525a89d7a706d6c126c66 100644 (file)
@@ -325,6 +325,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
                      unsigned long *map_len);
 int extent_range_uptodate(struct extent_io_tree *tree,
                          u64 start, u64 end);
+int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
+int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
 int extent_clear_unlock_delalloc(struct inode *inode,
                                struct extent_io_tree *tree,
                                u64 start, u64 end, struct page *locked_page,
index ec160202be3e38057c26498558e2f309af7859b7..c4628a201cb30fe6e2daa09eff83bd1fec79ecf1 100644 (file)
@@ -118,9 +118,11 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
                csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
                csums_in_item /= csum_size;
 
-               if (csum_offset >= csums_in_item) {
+               if (csum_offset == csums_in_item) {
                        ret = -EFBIG;
                        goto fail;
+               } else if (csum_offset > csums_in_item) {
+                       goto fail;
                }
        }
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
@@ -728,7 +730,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                return -ENOMEM;
 
        sector_sum = sums->sums;
-       trans->adding_csums = 1;
 again:
        next_offset = (u64)-1;
        found_next = 0;
@@ -899,7 +900,6 @@ next_sector:
                goto again;
        }
 out:
-       trans->adding_csums = 0;
        btrfs_free_path(path);
        return ret;
 
index 5b4ea5f55b8f47d0bf9a90992dd9da2d33880e14..ade03e6f7bd2706bbaac3406f1bcc31cf70cb578 100644 (file)
@@ -2142,6 +2142,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 {
        struct inode *inode = file_inode(file);
        struct extent_state *cached_state = NULL;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 cur_offset;
        u64 last_byte;
        u64 alloc_start;
@@ -2169,6 +2170,11 @@ static long btrfs_fallocate(struct file *file, int mode,
        ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
        if (ret)
                return ret;
+       if (root->fs_info->quota_enabled) {
+               ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start);
+               if (ret)
+                       goto out_reserve_fail;
+       }
 
        /*
         * wait for ordered IO before we have any locks.  We'll loop again
@@ -2272,6 +2278,9 @@ static long btrfs_fallocate(struct file *file, int mode,
                             &cached_state, GFP_NOFS);
 out:
        mutex_unlock(&inode->i_mutex);
+       if (root->fs_info->quota_enabled)
+               btrfs_qgroup_free(root, alloc_end - alloc_start);
+out_reserve_fail:
        /* Let go of our reservation. */
        btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
        return ret;
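
The fallocate change above layers a second reservation (qgroup) on top of the data-space one and unwinds them with stacked labels, so a failed qgroup reservation never gets double-freed. A small userspace sketch of that label layering, where reserve_data()/reserve_quota() and their release counterparts are hypothetical stand-ins for btrfs_check_data_free_space()/btrfs_qgroup_reserve() and friends:

#include <errno.h>
#include <stdio.h>

static int data_reserved, quota_reserved;
static int quota_enabled = 1;

static int reserve_data(long bytes)  { (void)bytes; data_reserved = 1;  return 0; }
static int reserve_quota(long bytes) { (void)bytes; quota_reserved = 1; return 0; }
static void free_quota(long bytes)   { (void)bytes; quota_reserved = 0; }
static void free_data(long bytes)    { (void)bytes; data_reserved = 0; }

static int do_fallocate(long bytes, int fail_mid)
{
	int ret;

	ret = reserve_data(bytes);
	if (ret)
		return ret;
	if (quota_enabled) {
		ret = reserve_quota(bytes);
		if (ret)
			goto out_reserve_fail;	/* quota not held: skip its release */
	}

	if (fail_mid) {				/* some later step failed */
		ret = -ENOSPC;
		goto out;
	}
	ret = 0;
out:
	if (quota_enabled)
		free_quota(bytes);		/* release in reverse order of acquisition */
out_reserve_fail:
	free_data(bytes);
	return ret;
}

int main(void)
{
	int ret = do_fallocate(4096, 1);

	printf("ret=%d data_reserved=%d quota_reserved=%d\n",
	       ret, data_reserved, quota_reserved);
	return 0;
}
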
index ca1b767d51f760672f8de72c48efbf3df8e58a75..09c58a35b429d6a82dc261014df2875e7c15f920 100644 (file)
@@ -353,6 +353,7 @@ static noinline int compress_file_range(struct inode *inode,
        int i;
        int will_compress;
        int compress_type = root->fs_info->compress_type;
+       int redirty = 0;
 
        /* if this is a small write inside eof, kick off a defrag */
        if ((end - start + 1) < 16 * 1024 &&
@@ -415,6 +416,17 @@ again:
                if (BTRFS_I(inode)->force_compress)
                        compress_type = BTRFS_I(inode)->force_compress;
 
+               /*
+                * we need to call clear_page_dirty_for_io on each
+                * page in the range.  Otherwise applications with the file
+                * mmap'd can wander in and change the page contents while
+                * we are compressing them.
+                *
+                * If the compression fails for any reason, we set the pages
+                * dirty again later on.
+                */
+               extent_range_clear_dirty_for_io(inode, start, end);
+               redirty = 1;
                ret = btrfs_compress_pages(compress_type,
                                           inode->i_mapping, start,
                                           total_compressed, pages,
@@ -554,6 +566,8 @@ cleanup_and_bail_uncompressed:
                        __set_page_dirty_nobuffers(locked_page);
                        /* unlocked later on in the async handlers */
                }
+               if (redirty)
+                       extent_range_redirty_for_io(inode, start, end);
                add_async_extent(async_cow, start, end - start + 1,
                                 0, NULL, 0, BTRFS_COMPRESS_NONE);
                *num_added += 1;
@@ -1743,8 +1757,10 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
        struct btrfs_ordered_sum *sum;
 
        list_for_each_entry(sum, list, list) {
+               trans->adding_csums = 1;
                btrfs_csum_file_blocks(trans,
                       BTRFS_I(inode)->root->fs_info->csum_root, sum);
+               trans->adding_csums = 0;
        }
        return 0;
 }
@@ -3679,11 +3695,9 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
         * 1 for the dir item
         * 1 for the dir index
         * 1 for the inode ref
-        * 1 for the inode ref in the tree log
-        * 2 for the dir entries in the log
         * 1 for the inode
         */
-       trans = btrfs_start_transaction(root, 8);
+       trans = btrfs_start_transaction(root, 5);
        if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
                return trans;
 
@@ -8127,7 +8141,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
         * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
         * should cover the worst case number of items we'll modify.
         */
-       trans = btrfs_start_transaction(root, 20);
+       trans = btrfs_start_transaction(root, 11);
        if (IS_ERR(trans)) {
                 ret = PTR_ERR(trans);
                 goto out_notrans;
index dc08d77b717ea47f0eb7d43e351153f556c75eaa..005c45db699eecc0fef93fca0832b91633dd5d8a 100644 (file)
@@ -557,6 +557,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
        INIT_LIST_HEAD(&splice);
        INIT_LIST_HEAD(&works);
 
+       mutex_lock(&root->fs_info->ordered_operations_mutex);
        spin_lock(&root->fs_info->ordered_extent_lock);
        list_splice_init(&root->fs_info->ordered_extents, &splice);
        while (!list_empty(&splice)) {
@@ -600,6 +601,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
 
                cond_resched();
        }
+       mutex_unlock(&root->fs_info->ordered_operations_mutex);
 }
 
 /*
index 5471e47d6559eafeb36e55035cb427710c5ee4ba..b44124dd2370ea726e66a099400163c342d8fa12 100644 (file)
@@ -1153,7 +1153,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
        ret = btrfs_find_all_roots(trans, fs_info, node->bytenr,
                                   sgn > 0 ? node->seq - 1 : node->seq, &roots);
        if (ret < 0)
-               goto out;
+               return ret;
 
        spin_lock(&fs_info->qgroup_lock);
        quota_root = fs_info->quota_root;
@@ -1275,7 +1275,6 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
        ret = 0;
 unlock:
        spin_unlock(&fs_info->qgroup_lock);
-out:
        ulist_free(roots);
        ulist_free(tmp);
 
index 53c3501fa4ca348f13cb17617b363643a59b1f6f..85e072b956d564d64c527c1b58d3b5b26019c34f 100644 (file)
@@ -542,7 +542,6 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
        eb = path->nodes[0];
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        item_size = btrfs_item_size_nr(eb, path->slots[0]);
-       btrfs_release_path(path);
 
        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                do {
@@ -558,7 +557,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
                                ret < 0 ? -1 : ref_level,
                                ret < 0 ? -1 : ref_root);
                } while (ret != 1);
+               btrfs_release_path(path);
        } else {
+               btrfs_release_path(path);
                swarn.path = path;
                swarn.dev = dev;
                iterate_extent_inodes(fs_info, found_key.objectid,
index f7a8b861058b5234094d24e9fc35a49d9e78fe1c..c85e7c6b4598af950d240582f648224570889ca4 100644 (file)
@@ -3945,12 +3945,10 @@ static int is_extent_unchanged(struct send_ctx *sctx,
                    found_key.type != key.type) {
                        key.offset += right_len;
                        break;
-               } else {
-                       if (found_key.offset != key.offset + right_len) {
-                               /* Should really not happen */
-                               ret = -EIO;
-                               goto out;
-                       }
+               }
+               if (found_key.offset != key.offset + right_len) {
+                       ret = 0;
+                       goto out;
                }
                key = found_key;
        }
index 5989a92236f7f1b578ec3ccf88078bb69c1093d3..2854c824ab6443d04eac19cf0b540f79c257cfa0 100644 (file)
@@ -4935,7 +4935,18 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
        em = lookup_extent_mapping(em_tree, chunk_start, 1);
        read_unlock(&em_tree->lock);
 
-       BUG_ON(!em || em->start != chunk_start);
+       if (!em) {
+               printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n",
+                      chunk_start);
+               return -EIO;
+       }
+
+       if (em->start != chunk_start) {
+               printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n",
+                      em->start, chunk_start);
+               free_extent_map(em);
+               return -EIO;
+       }
        map = (struct map_lookup *)em->bdev;
 
        length = em->len;
index cfd1ce34e0bc7b8c4794c81e1aa937e7f009ca75..1d36db114772b953065555cd7e80aa5abca36a7d 100644 (file)
@@ -614,53 +614,10 @@ decode_negTokenInit(unsigned char *security_blob, int length,
                }
        }
 
-       /* mechlistMIC */
-       if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
-               /* Check if we have reached the end of the blob, but with
-                  no mechListMic (e.g. NTLMSSP instead of KRB5) */
-               if (ctx.error == ASN1_ERR_DEC_EMPTY)
-                       goto decode_negtoken_exit;
-               cFYI(1, "Error decoding last part negTokenInit exit3");
-               return 0;
-       } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
-               /* tag = 3 indicating mechListMIC */
-               cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)",
-                       cls, con, tag, end, *end);
-               return 0;
-       }
-
-       /* sequence */
-       if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
-               cFYI(1, "Error decoding last part negTokenInit exit5");
-               return 0;
-       } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
-                  || (tag != ASN1_SEQ)) {
-               cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)",
-                       cls, con, tag, end, *end);
-       }
-
-       /* sequence of */
-       if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
-               cFYI(1, "Error decoding last part negTokenInit exit 7");
-               return 0;
-       } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) {
-               cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)",
-                       cls, con, tag, end, *end);
-               return 0;
-       }
-
-       /* general string */
-       if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
-               cFYI(1, "Error decoding last part negTokenInit exit9");
-               return 0;
-       } else if ((cls != ASN1_UNI) || (con != ASN1_PRI)
-                  || (tag != ASN1_GENSTR)) {
-               cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)",
-                       cls, con, tag, end, *end);
-               return 0;
-       }
-       cFYI(1, "Need to call asn1_octets_decode() function for %s",
-               ctx.pointer);   /* is this UTF-8 or ASCII? */
-decode_negtoken_exit:
+       /*
+        * We currently ignore anything at the end of the SPNEGO blob after
+        * the mechTypes have been parsed, since none of that info is
+        * used at the moment.
+        */
        return 1;
 }
index 3cf8a15af91607bf000467e3c7c166337a8148f0..345fc89c4286985ed55cd972410c287a09c440f3 100644 (file)
@@ -91,6 +91,30 @@ struct workqueue_struct      *cifsiod_wq;
 __u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE];
 #endif
 
+/*
+ * Bumps the refcount for the cifs super block.
+ * Note that it should only be called if a reference to the VFS super block is
+ * already held, e.g. in the context of an open-type syscall. Otherwise it can race with
+ * atomic_dec_and_test in deactivate_locked_super.
+ */
+void
+cifs_sb_active(struct super_block *sb)
+{
+       struct cifs_sb_info *server = CIFS_SB(sb);
+
+       if (atomic_inc_return(&server->active) == 1)
+               atomic_inc(&sb->s_active);
+}
+
+void
+cifs_sb_deactive(struct super_block *sb)
+{
+       struct cifs_sb_info *server = CIFS_SB(sb);
+
+       if (atomic_dec_and_test(&server->active))
+               deactivate_super(sb);
+}
+
 static int
 cifs_read_super(struct super_block *sb)
 {
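
The cifs_sb_active()/cifs_sb_deactive() pair above implements a nested reference count: the first cifs-side user pins the VFS superblock's s_active reference and the last one drops it, which is why the comment insists a VFS reference must already be held when cifs_sb_active() is called. A toy userspace model of the same 0->1 and 1->0 transitions, using C11 atomics and hypothetical sb_pin()/sb_unpin() helpers in place of the s_active manipulation:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int outer_refs = 1;	/* the "superblock" itself */
static atomic_int inner_active;		/* the cifs-side use count */

static void sb_pin(void)
{
	atomic_fetch_add(&outer_refs, 1);
}

static void sb_unpin(void)
{
	if (atomic_fetch_sub(&outer_refs, 1) == 1)
		printf("outer object torn down\n");
}

static void sb_active(void)
{
	if (atomic_fetch_add(&inner_active, 1) == 0)	/* 0 -> 1: first user */
		sb_pin();
}

static void sb_deactive(void)
{
	if (atomic_fetch_sub(&inner_active, 1) == 1)	/* 1 -> 0: last user */
		sb_unpin();
}

int main(void)
{
	sb_active();	/* e.g. an open() creating a cifsFileInfo */
	sb_active();
	sb_deactive();
	sb_deactive();	/* last close drops the outer pin again */
	printf("outer_refs=%d\n", atomic_load(&outer_refs));
	return 0;
}
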
index 7163419cecd95186f912522ba1736eb4e8c95454..0e32c3446ce9330b21898c59e72bd5430c47e48b 100644 (file)
@@ -41,6 +41,10 @@ extern struct file_system_type cifs_fs_type;
 extern const struct address_space_operations cifs_addr_ops;
 extern const struct address_space_operations cifs_addr_ops_smallbuf;
 
+/* Functions related to super block operations */
+extern void cifs_sb_active(struct super_block *sb);
+extern void cifs_sb_deactive(struct super_block *sb);
+
 /* Functions related to inodes */
 extern const struct inode_operations cifs_dir_inode_ops;
 extern struct inode *cifs_root_iget(struct super_block *);
index 8c0d8557731443ffb564905bb1ad0a9c64b8169b..7a0dd99e45077dfd2f7f780c140954575c9145a6 100644 (file)
@@ -300,6 +300,8 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
        INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
        mutex_init(&cfile->fh_mutex);
 
+       cifs_sb_active(inode->i_sb);
+
        /*
         * If the server returned a read oplock and we have mandatory brlocks,
         * set oplock level to None.
@@ -349,7 +351,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
        struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
-       struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+       struct super_block *sb = inode->i_sb;
+       struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsLockInfo *li, *tmp;
        struct cifs_fid fid;
        struct cifs_pending_open open;
@@ -414,6 +417,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 
        cifs_put_tlink(cifs_file->tlink);
        dput(cifs_file->dentry);
+       cifs_sb_deactive(sb);
        kfree(cifs_file);
 }
 
index 0079696305c980def5c17cc1906f75d20f3565c4..20887bf63121286e3d52f582db922ec64f4a71e5 100644 (file)
@@ -1043,7 +1043,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
                                   cifs_sb->mnt_cifs_flags &
                                            CIFS_MOUNT_MAP_SPECIAL_CHR);
        if (rc != 0) {
-               rc = -ETXTBSY;
+               rc = -EBUSY;
                goto undo_setattr;
        }
 
@@ -1062,7 +1062,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry,
                if (rc == -ENOENT)
                        rc = 0;
                else if (rc != 0) {
-                       rc = -ETXTBSY;
+                       rc = -EBUSY;
                        goto undo_rename;
                }
                cifsInode->delete_pending = true;
@@ -1169,15 +1169,13 @@ psx_del_no_retry:
                        cifs_drop_nlink(inode);
        } else if (rc == -ENOENT) {
                d_drop(dentry);
-       } else if (rc == -ETXTBSY) {
+       } else if (rc == -EBUSY) {
                if (server->ops->rename_pending_delete) {
                        rc = server->ops->rename_pending_delete(full_path,
                                                                dentry, xid);
                        if (rc == 0)
                                cifs_drop_nlink(inode);
                }
-               if (rc == -ETXTBSY)
-                       rc = -EBUSY;
        } else if ((rc == -EACCES) && (dosattr == 0) && inode) {
                attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
                if (attrs == NULL) {
@@ -1518,7 +1516,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
         * source. Note that cross directory moves do not work with
         * rename by filehandle to various Windows servers.
         */
-       if (rc == 0 || rc != -ETXTBSY)
+       if (rc == 0 || rc != -EBUSY)
                goto do_rename_exit;
 
        /* open-file renames don't work across directories */
index a82bc51fdc82abad102b286399fd037ef10428c5..c0b25b28be6ccc1079185bc3effd6672c56976de 100644 (file)
@@ -62,7 +62,7 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
        {ERRdiffdevice, -EXDEV},
        {ERRnofiles, -ENOENT},
        {ERRwriteprot, -EROFS},
-       {ERRbadshare, -ETXTBSY},
+       {ERRbadshare, -EBUSY},
        {ERRlock, -EACCES},
        {ERRunsup, -EINVAL},
        {ERRnosuchshare, -ENXIO},
index fbfae008ba44e3e8223f42d4823cfeae2a673ac9..e8bc3420d63edcc28f0992217c860082fe424261 100644 (file)
@@ -2542,7 +2542,6 @@ static int prepend_path(const struct path *path,
        bool slash = false;
        int error = 0;
 
-       br_read_lock(&vfsmount_lock);
        while (dentry != root->dentry || vfsmnt != root->mnt) {
                struct dentry * parent;
 
@@ -2572,8 +2571,6 @@ static int prepend_path(const struct path *path,
        if (!error && !slash)
                error = prepend(buffer, buflen, "/", 1);
 
-out:
-       br_read_unlock(&vfsmount_lock);
        return error;
 
 global_root:
@@ -2590,7 +2587,7 @@ global_root:
                error = prepend(buffer, buflen, "/", 1);
        if (!error)
                error = is_mounted(vfsmnt) ? 1 : 2;
-       goto out;
+       return error;
 }
 
 /**
@@ -2617,9 +2614,11 @@ char *__d_path(const struct path *path,
        int error;
 
        prepend(&res, &buflen, "\0", 1);
+       br_read_lock(&vfsmount_lock);
        write_seqlock(&rename_lock);
        error = prepend_path(path, root, &res, &buflen);
        write_sequnlock(&rename_lock);
+       br_read_unlock(&vfsmount_lock);
 
        if (error < 0)
                return ERR_PTR(error);
@@ -2636,9 +2635,11 @@ char *d_absolute_path(const struct path *path,
        int error;
 
        prepend(&res, &buflen, "\0", 1);
+       br_read_lock(&vfsmount_lock);
        write_seqlock(&rename_lock);
        error = prepend_path(path, &root, &res, &buflen);
        write_sequnlock(&rename_lock);
+       br_read_unlock(&vfsmount_lock);
 
        if (error > 1)
                error = -EINVAL;
@@ -2702,11 +2703,13 @@ char *d_path(const struct path *path, char *buf, int buflen)
                return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
 
        get_fs_root(current->fs, &root);
+       br_read_lock(&vfsmount_lock);
        write_seqlock(&rename_lock);
        error = path_with_deleted(path, &root, &res, &buflen);
+       write_sequnlock(&rename_lock);
+       br_read_unlock(&vfsmount_lock);
        if (error < 0)
                res = ERR_PTR(error);
-       write_sequnlock(&rename_lock);
        path_put(&root);
        return res;
 }
@@ -2830,6 +2833,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
        get_fs_root_and_pwd(current->fs, &root, &pwd);
 
        error = -ENOENT;
+       br_read_lock(&vfsmount_lock);
        write_seqlock(&rename_lock);
        if (!d_unlinked(pwd.dentry)) {
                unsigned long len;
@@ -2839,6 +2843,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
                prepend(&cwd, &buflen, "\0", 1);
                error = prepend_path(&pwd, &root, &cwd, &buflen);
                write_sequnlock(&rename_lock);
+               br_read_unlock(&vfsmount_lock);
 
                if (error < 0)
                        goto out;
@@ -2859,6 +2864,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
                }
        } else {
                write_sequnlock(&rename_lock);
+               br_read_unlock(&vfsmount_lock);
        }
 
 out:
index 4a01ba315262259241081ec695c1f1d3d179eb32..3b83cd6047964ab7218aeb26ab73bce7be472430 100644 (file)
@@ -335,9 +335,9 @@ struct ext4_group_desc
  */
 
 struct flex_groups {
-       atomic_t free_inodes;
-       atomic_t free_clusters;
-       atomic_t used_dirs;
+       atomic64_t      free_clusters;
+       atomic_t        free_inodes;
+       atomic_t        used_dirs;
 };
 
 #define EXT4_BG_INODE_UNINIT   0x0001 /* Inode table/bitmap not in use */
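
The switch of free_clusters to atomic64_t above is, as far as the arithmetic goes, about signed 32-bit overflow: with 4 KiB blocks a block group holds 32768 clusters, and a large flex_bg factor aggregates many block groups under one counter. A quick check of the numbers, using illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clusters_per_group = 32768;	/* 4 KiB blocks, no bigalloc */
	uint64_t groups_per_flex    = 1 << 16;	/* flex_bg size of 65536 */
	uint64_t max_free = clusters_per_group * groups_per_flex;

	printf("worst-case free clusters per flex group: %llu\n",
	       (unsigned long long)max_free);
	printf("INT32_MAX:                               %lld\n",
	       (long long)INT32_MAX);
	return 0;
}

With a flex_bg size of 65536 the worst case is exactly 2^31, one more than a signed 32-bit atomic_t can represent.
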
@@ -2617,7 +2617,7 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
 extern int __init ext4_init_pageio(void);
 extern void ext4_add_complete_io(ext4_io_end_t *io_end);
 extern void ext4_exit_pageio(void);
-extern void ext4_ioend_wait(struct inode *);
+extern void ext4_ioend_shutdown(struct inode *);
 extern void ext4_free_io_end(ext4_io_end_t *io);
 extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
 extern void ext4_end_io_work(struct work_struct *work);
index 28dd8eeea6a93f2ce5ae727777ede13582e2038f..56efcaadf8485a5887ae04493b9cacf28b5c73ef 100644 (file)
@@ -1584,10 +1584,12 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
        unsigned short ext1_ee_len, ext2_ee_len, max_len;
 
        /*
-        * Make sure that either both extents are uninitialized, or
-        * both are _not_.
+        * Make sure that both extents are initialized. We don't merge
+        * uninitialized extents, so that the end_io code can be sure that the
+        * written extent is properly split out and that its conversion to an
+        * initialized extent is trivial.
         */
-       if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
+       if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2))
                return 0;
 
        if (ext4_ext_is_uninitialized(ex1))
@@ -2923,7 +2925,7 @@ static int ext4_split_extent_at(handle_t *handle,
 {
        ext4_fsblk_t newblock;
        ext4_lblk_t ee_block;
-       struct ext4_extent *ex, newex, orig_ex;
+       struct ext4_extent *ex, newex, orig_ex, zero_ex;
        struct ext4_extent *ex2 = NULL;
        unsigned int ee_len, depth;
        int err = 0;
@@ -2943,6 +2945,10 @@ static int ext4_split_extent_at(handle_t *handle,
        newblock = split - ee_block + ext4_ext_pblock(ex);
 
        BUG_ON(split < ee_block || split >= (ee_block + ee_len));
+       BUG_ON(!ext4_ext_is_uninitialized(ex) &&
+              split_flag & (EXT4_EXT_MAY_ZEROOUT |
+                            EXT4_EXT_MARK_UNINIT1 |
+                            EXT4_EXT_MARK_UNINIT2));
 
        err = ext4_ext_get_access(handle, inode, path + depth);
        if (err)
@@ -2990,12 +2996,26 @@ static int ext4_split_extent_at(handle_t *handle,
        err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
        if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
                if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
-                       if (split_flag & EXT4_EXT_DATA_VALID1)
+                       if (split_flag & EXT4_EXT_DATA_VALID1) {
                                err = ext4_ext_zeroout(inode, ex2);
-                       else
+                               zero_ex.ee_block = ex2->ee_block;
+                               zero_ex.ee_len = ext4_ext_get_actual_len(ex2);
+                               ext4_ext_store_pblock(&zero_ex,
+                                                     ext4_ext_pblock(ex2));
+                       } else {
                                err = ext4_ext_zeroout(inode, ex);
-               } else
+                               zero_ex.ee_block = ex->ee_block;
+                               zero_ex.ee_len = ext4_ext_get_actual_len(ex);
+                               ext4_ext_store_pblock(&zero_ex,
+                                                     ext4_ext_pblock(ex));
+                       }
+               } else {
                        err = ext4_ext_zeroout(inode, &orig_ex);
+                       zero_ex.ee_block = orig_ex.ee_block;
+                       zero_ex.ee_len = ext4_ext_get_actual_len(&orig_ex);
+                       ext4_ext_store_pblock(&zero_ex,
+                                             ext4_ext_pblock(&orig_ex));
+               }
 
                if (err)
                        goto fix_extent_len;
@@ -3003,6 +3023,12 @@ static int ext4_split_extent_at(handle_t *handle,
                ex->ee_len = cpu_to_le16(ee_len);
                ext4_ext_try_to_merge(handle, inode, path, ex);
                err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+               if (err)
+                       goto fix_extent_len;
+
+               /* update extent status tree */
+               err = ext4_es_zeroout(inode, &zero_ex);
+
                goto out;
        } else if (err)
                goto fix_extent_len;
@@ -3041,6 +3067,7 @@ static int ext4_split_extent(handle_t *handle,
        int err = 0;
        int uninitialized;
        int split_flag1, flags1;
+       int allocated = map->m_len;
 
        depth = ext_depth(inode);
        ex = path[depth].p_ext;
@@ -3060,20 +3087,29 @@ static int ext4_split_extent(handle_t *handle,
                                map->m_lblk + map->m_len, split_flag1, flags1);
                if (err)
                        goto out;
+       } else {
+               allocated = ee_len - (map->m_lblk - ee_block);
        }
-
+       /*
+        * Updating the path is required because the previous ext4_split_extent_at()
+        * may result in a split of the original leaf or an extent zeroout.
+        */
        ext4_ext_drop_refs(path);
        path = ext4_ext_find_extent(inode, map->m_lblk, path);
        if (IS_ERR(path))
                return PTR_ERR(path);
+       depth = ext_depth(inode);
+       ex = path[depth].p_ext;
+       uninitialized = ext4_ext_is_uninitialized(ex);
+       split_flag1 = 0;
 
        if (map->m_lblk >= ee_block) {
-               split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
-                                           EXT4_EXT_DATA_VALID2);
-               if (uninitialized)
+               split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
+               if (uninitialized) {
                        split_flag1 |= EXT4_EXT_MARK_UNINIT1;
-               if (split_flag & EXT4_EXT_MARK_UNINIT2)
-                       split_flag1 |= EXT4_EXT_MARK_UNINIT2;
+                       split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
+                                                    EXT4_EXT_MARK_UNINIT2);
+               }
                err = ext4_split_extent_at(handle, inode, path,
                                map->m_lblk, split_flag1, flags);
                if (err)
@@ -3082,7 +3118,7 @@ static int ext4_split_extent(handle_t *handle,
 
        ext4_ext_show_leaf(inode, path);
 out:
-       return err ? err : map->m_len;
+       return err ? err : allocated;
 }
 
 /*
@@ -3137,6 +3173,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        ee_block = le32_to_cpu(ex->ee_block);
        ee_len = ext4_ext_get_actual_len(ex);
        allocated = ee_len - (map->m_lblk - ee_block);
+       zero_ex.ee_len = 0;
 
        trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
 
@@ -3227,13 +3264,16 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 
        if (EXT4_EXT_MAY_ZEROOUT & split_flag)
                max_zeroout = sbi->s_extent_max_zeroout_kb >>
-                       inode->i_sb->s_blocksize_bits;
+                       (inode->i_sb->s_blocksize_bits - 10);
 
        /* If extent is less than s_max_zeroout_kb, zeroout directly */
        if (max_zeroout && (ee_len <= max_zeroout)) {
                err = ext4_ext_zeroout(inode, ex);
                if (err)
                        goto out;
+               zero_ex.ee_block = ex->ee_block;
+               zero_ex.ee_len = ext4_ext_get_actual_len(ex);
+               ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
 
                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
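
The shift-count fix above converts the s_extent_max_zeroout_kb tunable from KiB into filesystem blocks: a block of 2^blocksize_bits bytes is 2^(blocksize_bits - 10) KiB, so the KiB value must be shifted right by (blocksize_bits - 10). Shifting by the full blocksize_bits, as the old code did, divides by the block size in bytes and rounds any realistic tunable down to zero. A worked example, assuming 4 KiB blocks and a 32 KiB tunable:

#include <stdio.h>

int main(void)
{
	unsigned int max_zeroout_kb = 32;	/* example s_extent_max_zeroout_kb */
	unsigned int blocksize_bits = 12;	/* 4 KiB blocks */

	unsigned int old_code = max_zeroout_kb >> blocksize_bits;	  /* 0 blocks  */
	unsigned int fixed    = max_zeroout_kb >> (blocksize_bits - 10); /* 8 blocks  */

	printf("old: %u blocks, fixed: %u blocks\n", old_code, fixed);
	return 0;
}
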
@@ -3292,6 +3332,9 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                err = allocated;
 
 out:
+       /* If we have gotten a failure, don't zero out status tree */
+       if (!err)
+               err = ext4_es_zeroout(inode, &zero_ex);
        return err ? err : allocated;
 }
 
@@ -3374,8 +3417,19 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
                "block %llu, max_blocks %u\n", inode->i_ino,
                  (unsigned long long)ee_block, ee_len);
 
-       /* If extent is larger than requested then split is required */
+       /* If the extent is larger than requested, it is a clear sign that we
+        * still have some extent state machine issues left, so the extent
+        * split is still required.
+        * TODO: once all related issues are fixed, this situation should
+        * be illegal.
+        */
        if (ee_block != map->m_lblk || ee_len > map->m_len) {
+#ifdef EXT4_DEBUG
+               ext4_warning("Inode (%ld) finished: extent logical block %llu,"
+                            " len %u; IO logical block %llu, len %u\n",
+                            inode->i_ino, (unsigned long long)ee_block, ee_len,
+                            (unsigned long long)map->m_lblk, map->m_len);
+#endif
                err = ext4_split_unwritten_extents(handle, inode, map, path,
                                                   EXT4_GET_BLOCKS_CONVERT);
                if (err < 0)
@@ -3626,6 +3680,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
                                                 path, map->m_len);
                } else
                        err = ret;
+               map->m_flags |= EXT4_MAP_MAPPED;
+               if (allocated > map->m_len)
+                       allocated = map->m_len;
+               map->m_len = allocated;
                goto out2;
        }
        /* buffered IO case */
@@ -3675,6 +3733,7 @@ out:
                                        allocated - map->m_len);
                allocated = map->m_len;
        }
+       map->m_len = allocated;
 
        /*
         * If we have done fallocate with the offset that is already
@@ -4106,9 +4165,6 @@ got_allocated_blocks:
                        }
                } else {
                        BUG_ON(allocated_clusters < reserved_clusters);
-                       /* We will claim quota for all newly allocated blocks.*/
-                       ext4_da_update_reserve_space(inode, allocated_clusters,
-                                                       1);
                        if (reserved_clusters < allocated_clusters) {
                                struct ext4_inode_info *ei = EXT4_I(inode);
                                int reservation = allocated_clusters -
@@ -4159,6 +4215,15 @@ got_allocated_blocks:
                                ei->i_reserved_data_blocks += reservation;
                                spin_unlock(&ei->i_block_reservation_lock);
                        }
+                       /*
+                        * We will claim quota for all newly allocated blocks.
+                        * We're updating the reserved space *after* the
+                        * correction above so we do not accidentally free
+                        * all the metadata reservation because we might
+                        * actually need it later on.
+                        */
+                       ext4_da_update_reserve_space(inode, allocated_clusters,
+                                                       1);
                }
        }
 
@@ -4368,8 +4433,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        if (len <= EXT_UNINIT_MAX_LEN << blkbits)
                flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
 
-       /* Prevent race condition between unwritten */
-       ext4_flush_unwritten_io(inode);
 retry:
        while (ret >= 0 && ret < max_blocks) {
                map.m_lblk = map.m_lblk + ret;
index 95796a1b7522b7e02dd72dfab15fbf2cebdb6b13..fe3337a85edeaecd6135333759173d8c6fbc0e78 100644 (file)
@@ -333,17 +333,27 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
 static int ext4_es_can_be_merged(struct extent_status *es1,
                                 struct extent_status *es2)
 {
-       if (es1->es_lblk + es1->es_len != es2->es_lblk)
+       if (ext4_es_status(es1) != ext4_es_status(es2))
                return 0;
 
-       if (ext4_es_status(es1) != ext4_es_status(es2))
+       if (((__u64) es1->es_len) + es2->es_len > 0xFFFFFFFFULL)
                return 0;
 
-       if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
-           (ext4_es_pblock(es1) + es1->es_len != ext4_es_pblock(es2)))
+       if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk)
                return 0;
 
-       return 1;
+       if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) &&
+           (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2)))
+               return 1;
+
+       if (ext4_es_is_hole(es1))
+               return 1;
+
+       /* we need to check that the delayed extent has no unwritten status */
+       if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1))
+               return 1;
+
+       return 0;
 }
 
 static struct extent_status *
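
The rewritten merge test above adds an explicit overflow guard (the combined length must still fit the 32-bit es_len field) on top of the status, logical-contiguity and physical-contiguity checks. A simplified, self-contained model of that predicate; the struct, flag names and can_be_merged() are illustrative, not the real extents_status API:

#include <stdint.h>
#include <stdio.h>

struct ext_status {
	uint32_t lblk;		/* first logical block */
	uint32_t len;		/* number of blocks */
	uint64_t pblk;		/* first physical block (mapped extents) */
	unsigned int flags;	/* WRITTEN / UNWRITTEN / DELAYED / HOLE */
};

#define ES_WRITTEN	0x1
#define ES_UNWRITTEN	0x2
#define ES_DELAYED	0x4
#define ES_HOLE		0x8

static int can_be_merged(const struct ext_status *a, const struct ext_status *b)
{
	if (a->flags != b->flags)
		return 0;
	if ((uint64_t)a->len + b->len > 0xFFFFFFFFULL)	/* es_len would overflow */
		return 0;
	if ((uint64_t)a->lblk + a->len != b->lblk)	/* not logically adjacent */
		return 0;
	if ((a->flags & (ES_WRITTEN | ES_UNWRITTEN)) &&
	    a->pblk + a->len == b->pblk)
		return 1;
	if (a->flags & ES_HOLE)
		return 1;
	/* delayed extents merge only while they carry no unwritten state */
	if ((a->flags & ES_DELAYED) && !(a->flags & ES_UNWRITTEN))
		return 1;
	return 0;
}

int main(void)
{
	struct ext_status a = { 0, 0xFFFFFFF0u, 100, ES_WRITTEN };
	struct ext_status b = { 0xFFFFFFF0u, 0x20, 100 + 0xFFFFFFF0ull, ES_WRITTEN };

	printf("merge? %d (combined length would overflow es_len)\n",
	       can_be_merged(&a, &b));
	return 0;
}
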
@@ -389,6 +399,179 @@ ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
        return es;
 }
 
+#ifdef ES_AGGRESSIVE_TEST
+static void ext4_es_insert_extent_ext_check(struct inode *inode,
+                                           struct extent_status *es)
+{
+       struct ext4_ext_path *path = NULL;
+       struct ext4_extent *ex;
+       ext4_lblk_t ee_block;
+       ext4_fsblk_t ee_start;
+       unsigned short ee_len;
+       int depth, ee_status, es_status;
+
+       path = ext4_ext_find_extent(inode, es->es_lblk, NULL);
+       if (IS_ERR(path))
+               return;
+
+       depth = ext_depth(inode);
+       ex = path[depth].p_ext;
+
+       if (ex) {
+
+               ee_block = le32_to_cpu(ex->ee_block);
+               ee_start = ext4_ext_pblock(ex);
+               ee_len = ext4_ext_get_actual_len(ex);
+
+               ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0;
+               es_status = ext4_es_is_unwritten(es) ? 1 : 0;
+
+                * Make sure ex and es do not overlap when we try to insert
+                * Make sure ex and es are not overlap when we try to insert
+                * a delayed/hole extent.
+                */
+               if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) {
+                       if (in_range(es->es_lblk, ee_block, ee_len)) {
+                               pr_warn("ES insert assertion failed for "
+                                       "inode: %lu we can find an extent "
+                                       "at block [%d/%d/%llu/%c], but we "
+                                       "want to add a delayed/hole extent "
+                                       "[%d/%d/%llu/%llx]\n",
+                                       inode->i_ino, ee_block, ee_len,
+                                       ee_start, ee_status ? 'u' : 'w',
+                                       es->es_lblk, es->es_len,
+                                       ext4_es_pblock(es), ext4_es_status(es));
+                       }
+                       goto out;
+               }
+
+               /*
+                * We don't check ee_block == es->es_lblk, etc. because es
+                * might be a part of whole extent, vice versa.
+                */
+               if (es->es_lblk < ee_block ||
+                   ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) {
+                       pr_warn("ES insert assertion failed for inode: %lu "
+                               "ex_status [%d/%d/%llu/%c] != "
+                               "es_status [%d/%d/%llu/%c]\n", inode->i_ino,
+                               ee_block, ee_len, ee_start,
+                               ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
+                               ext4_es_pblock(es), es_status ? 'u' : 'w');
+                       goto out;
+               }
+
+               if (ee_status ^ es_status) {
+                       pr_warn("ES insert assertion failed for inode: %lu "
+                               "ex_status [%d/%d/%llu/%c] != "
+                               "es_status [%d/%d/%llu/%c]\n", inode->i_ino,
+                               ee_block, ee_len, ee_start,
+                               ee_status ? 'u' : 'w', es->es_lblk, es->es_len,
+                               ext4_es_pblock(es), es_status ? 'u' : 'w');
+               }
+       } else {
+               /*
+                * We can't find an extent on disk.  So we need to make sure
+                * that we are not trying to add a written/unwritten extent.
+                */
+               if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) {
+                       pr_warn("ES insert assertion failed for inode: %lu "
+                               "can't find an extent at block %d but we want "
+                               "to add a written/unwritten extent "
+                               "[%d/%d/%llu/%llx]\n", inode->i_ino,
+                               es->es_lblk, es->es_lblk, es->es_len,
+                               ext4_es_pblock(es), ext4_es_status(es));
+               }
+       }
+out:
+       if (path) {
+               ext4_ext_drop_refs(path);
+               kfree(path);
+       }
+}
+
+static void ext4_es_insert_extent_ind_check(struct inode *inode,
+                                           struct extent_status *es)
+{
+       struct ext4_map_blocks map;
+       int retval;
+
+       /*
+        * Here we call ext4_ind_map_blocks to look up a block mapping because
+        * the 'Indirect' structure is defined in indirect.c, so we cannot
+        * access the direct/indirect tree from outside it.  It is too dirty to
+        * define this function in the indirect.c file.
+        */
+
+       map.m_lblk = es->es_lblk;
+       map.m_len = es->es_len;
+
+       retval = ext4_ind_map_blocks(NULL, inode, &map, 0);
+       if (retval > 0) {
+               if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) {
+                       /*
+                        * We want to add a delayed/hole extent but this
+                        * block has been allocated.
+                        */
+                       pr_warn("ES insert assertion failed for inode: %lu "
+                               "We can find blocks but we want to add a "
+                               "delayed/hole extent [%d/%d/%llu/%llx]\n",
+                               inode->i_ino, es->es_lblk, es->es_len,
+                               ext4_es_pblock(es), ext4_es_status(es));
+                       return;
+               } else if (ext4_es_is_written(es)) {
+                       if (retval != es->es_len) {
+                               pr_warn("ES insert assertion failed for "
+                                       "inode: %lu retval %d != es_len %d\n",
+                                       inode->i_ino, retval, es->es_len);
+                               return;
+                       }
+                       if (map.m_pblk != ext4_es_pblock(es)) {
+                               pr_warn("ES insert assertion failed for "
+                                       "inode: %lu m_pblk %llu != "
+                                       "es_pblk %llu\n",
+                                       inode->i_ino, map.m_pblk,
+                                       ext4_es_pblock(es));
+                               return;
+                       }
+               } else {
+                       /*
+                        * We don't need to check unwritten extents because
+                        * indirect-based files don't have them.
+                        */
+                       BUG_ON(1);
+               }
+       } else if (retval == 0) {
+               if (ext4_es_is_written(es)) {
+                       pr_warn("ES insert assertion failed for inode: %lu "
+                               "We can't find the block but we want to add "
+                               "a written extent [%d/%d/%llu/%llx]\n",
+                               inode->i_ino, es->es_lblk, es->es_len,
+                               ext4_es_pblock(es), ext4_es_status(es));
+                       return;
+               }
+       }
+}
+
+static inline void ext4_es_insert_extent_check(struct inode *inode,
+                                              struct extent_status *es)
+{
+       /*
+        * We don't need to worry about the race condition because
+        * the caller holds the i_data_sem lock.
+        */
+       BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
+       if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+               ext4_es_insert_extent_ext_check(inode, es);
+       else
+               ext4_es_insert_extent_ind_check(inode, es);
+}
+#else
+static inline void ext4_es_insert_extent_check(struct inode *inode,
+                                              struct extent_status *es)
+{
+}
+#endif
+
 static int __es_insert_extent(struct inode *inode, struct extent_status *newes)
 {
        struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree;
@@ -471,6 +654,8 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
        ext4_es_store_status(&newes, status);
        trace_ext4_es_insert_extent(inode, &newes);
 
+       ext4_es_insert_extent_check(inode, &newes);
+
        write_lock(&EXT4_I(inode)->i_es_lock);
        err = __es_remove_extent(inode, lblk, end);
        if (err != 0)
@@ -669,6 +854,23 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
        return err;
 }
 
+int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex)
+{
+       ext4_lblk_t  ee_block;
+       ext4_fsblk_t ee_pblock;
+       unsigned int ee_len;
+
+       ee_block  = le32_to_cpu(ex->ee_block);
+       ee_len    = ext4_ext_get_actual_len(ex);
+       ee_pblock = ext4_ext_pblock(ex);
+
+       if (ee_len == 0)
+               return 0;
+
+       return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
+                                    EXTENT_STATUS_WRITTEN);
+}
+
 static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
 {
        struct ext4_sb_info *sbi = container_of(shrink,
index f190dfe969dacc59e34d15f03e2a7c5e3c5ef5e2..d8e2d4dc311e62c99843fa16c7fc8d4a0798bddb 100644 (file)
 #define es_debug(fmt, ...)     no_printk(fmt, ##__VA_ARGS__)
 #endif
 
+/*
+ * With ES_AGGRESSIVE_TEST defined, the result of extent status caching
+ * will be checked against the result of the old map_blocks path.
+ */
+#define ES_AGGRESSIVE_TEST__
+
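A minimal illustration (not part of the patch itself): the trailing "__" above leaves ES_AGGRESSIVE_TEST undefined, so the extra consistency checks compile out by default. Turning them on for local debugging would simply mean dropping the suffix:

    #define ES_AGGRESSIVE_TEST      /* enable the extent-status self-consistency checks */
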
 /*
  * These flags live in the high bits of extent_status.es_pblk
  */
@@ -33,6 +39,8 @@
                                 EXTENT_STATUS_DELAYED | \
                                 EXTENT_STATUS_HOLE)
 
+struct ext4_extent;
+
 struct extent_status {
        struct rb_node rb_node;
        ext4_lblk_t es_lblk;    /* first logical block extent covers */
@@ -58,6 +66,7 @@ extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
                                        struct extent_status *es);
 extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
                                 struct extent_status *es);
+extern int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex);
 
 static inline int ext4_es_is_written(struct extent_status *es)
 {
index 32fd2b9075dd4029925d11adb0b84df1a399a8b1..6c5bb8d993fe8ebb07ae48dab2697e247c78ea01 100644 (file)
@@ -324,8 +324,8 @@ error_return:
 }
 
 struct orlov_stats {
+       __u64 free_clusters;
        __u32 free_inodes;
-       __u32 free_clusters;
        __u32 used_dirs;
 };
 
@@ -342,7 +342,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
 
        if (flex_size > 1) {
                stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
-               stats->free_clusters = atomic_read(&flex_group[g].free_clusters);
+               stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
                stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
                return;
        }
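The free_clusters change in the hunk above (and the matching atomic64 conversions in mballoc.c, resize.c and super.c further down) widens the per-flex-group counter from 32 to 64 bits so that cluster counts on very large filesystems cannot overflow. A self-contained sketch of the atomic64 API in use, with a hypothetical counter standing in for flex_group[g].free_clusters:

    #include <linux/atomic.h>

    /* Hypothetical stand-in for sbi->s_flex_groups[g].free_clusters. */
    static atomic64_t demo_free_clusters = ATOMIC64_INIT(0);

    static void demo_clusters_freed(long long nr)
    {
            atomic64_add(nr, &demo_free_clusters);          /* clusters returned */
    }

    static void demo_clusters_allocated(long long nr)
    {
            atomic64_sub(nr, &demo_free_clusters);          /* clusters handed out */
    }

    static long long demo_clusters_left(void)
    {
            return atomic64_read(&demo_free_clusters);      /* full 64-bit count */
    }
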
index 9ea0cde3fa9e0ffe7aebc28940293c422ae75a63..b3a5213bc73eac2082cbfa16bad66bba89c32ed4 100644 (file)
@@ -185,8 +185,6 @@ void ext4_evict_inode(struct inode *inode)
 
        trace_ext4_evict_inode(inode);
 
-       ext4_ioend_wait(inode);
-
        if (inode->i_nlink) {
                /*
                 * When journalling data dirty buffers are tracked only in the
@@ -207,7 +205,8 @@ void ext4_evict_inode(struct inode *inode)
                 * don't use page cache.
                 */
                if (ext4_should_journal_data(inode) &&
-                   (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
+                   (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
+                   inode->i_ino != EXT4_JOURNAL_INO) {
                        journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
                        tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
 
@@ -216,6 +215,7 @@ void ext4_evict_inode(struct inode *inode)
                        filemap_write_and_wait(&inode->i_data);
                }
                truncate_inode_pages(&inode->i_data, 0);
+               ext4_ioend_shutdown(inode);
                goto no_delete;
        }
 
@@ -225,6 +225,7 @@ void ext4_evict_inode(struct inode *inode)
        if (ext4_should_order_data(inode))
                ext4_begin_ordered_truncate(inode, 0);
        truncate_inode_pages(&inode->i_data, 0);
+       ext4_ioend_shutdown(inode);
 
        if (is_bad_inode(inode))
                goto no_delete;
@@ -482,6 +483,58 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
        return num;
 }
 
+#ifdef ES_AGGRESSIVE_TEST
+static void ext4_map_blocks_es_recheck(handle_t *handle,
+                                      struct inode *inode,
+                                      struct ext4_map_blocks *es_map,
+                                      struct ext4_map_blocks *map,
+                                      int flags)
+{
+       int retval;
+
+       map->m_flags = 0;
+       /*
+        * There is a race window in which the results differ, e.g.
+        * xfstests #223 when dioread_nolock is enabled.  The reason is
+        * that we look up the block mapping in the extent status tree
+        * without taking i_data_sem, so the unwritten extent could have
+        * been converted in the meantime.
+        */
+       if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
+               down_read((&EXT4_I(inode)->i_data_sem));
+       if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+               retval = ext4_ext_map_blocks(handle, inode, map, flags &
+                                            EXT4_GET_BLOCKS_KEEP_SIZE);
+       } else {
+               retval = ext4_ind_map_blocks(handle, inode, map, flags &
+                                            EXT4_GET_BLOCKS_KEEP_SIZE);
+       }
+       if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
+               up_read((&EXT4_I(inode)->i_data_sem));
+       /*
+        * Clear the EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flags
+        * because they shouldn't be set in es_map->m_flags.
+        */
+       map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY);
+
+       /*
+        * We don't check m_len because the extent will be collapsed in the
+        * status tree, so m_len might not be equal.
+        */
+       if (es_map->m_lblk != map->m_lblk ||
+           es_map->m_flags != map->m_flags ||
+           es_map->m_pblk != map->m_pblk) {
+               printk("ES cache assertion failed for inode: %lu "
+                      "es_cached ex [%d/%d/%llu/%x] != "
+                      "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
+                      inode->i_ino, es_map->m_lblk, es_map->m_len,
+                      es_map->m_pblk, es_map->m_flags, map->m_lblk,
+                      map->m_len, map->m_pblk, map->m_flags,
+                      retval, flags);
+       }
+}
+#endif /* ES_AGGRESSIVE_TEST */
+
 /*
  * The ext4_map_blocks() function tries to look up the requested blocks,
  * and returns if the blocks are already mapped.
@@ -509,6 +562,11 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 {
        struct extent_status es;
        int retval;
+#ifdef ES_AGGRESSIVE_TEST
+       struct ext4_map_blocks orig_map;
+
+       memcpy(&orig_map, map, sizeof(*map));
+#endif
 
        map->m_flags = 0;
        ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
@@ -531,6 +589,10 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
                } else {
                        BUG_ON(1);
                }
+#ifdef ES_AGGRESSIVE_TEST
+               ext4_map_blocks_es_recheck(handle, inode, map,
+                                          &orig_map, flags);
+#endif
                goto found;
        }
 
@@ -551,6 +613,15 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
                int ret;
                unsigned long long status;
 
+#ifdef ES_AGGRESSIVE_TEST
+               if (retval != map->m_len) {
+                       printk("ES len assertion failed for inode: %lu "
+                              "retval %d != map->m_len %d "
+                              "in %s (lookup)\n", inode->i_ino, retval,
+                              map->m_len, __func__);
+               }
+#endif
+
                status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
                if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
@@ -643,6 +714,24 @@ found:
                int ret;
                unsigned long long status;
 
+#ifdef ES_AGGRESSIVE_TEST
+               if (retval != map->m_len) {
+                       printk("ES len assertion failed for inode: %lu "
+                              "retval %d != map->m_len %d "
+                              "in %s (allocation)\n", inode->i_ino, retval,
+                              map->m_len, __func__);
+               }
+#endif
+
+               /*
+                * If the extent has been zeroed out, we don't need to update
+                * the extent status tree.
+                */
+               if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
+                   ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
+                       if (ext4_es_is_written(&es))
+                               goto has_zeroout;
+               }
                status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
                if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
@@ -655,6 +744,7 @@ found:
                        retval = ret;
        }
 
+has_zeroout:
        up_write((&EXT4_I(inode)->i_data_sem));
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
                int ret = check_block_validity(inode, map);
@@ -1215,6 +1305,55 @@ static int ext4_journalled_write_end(struct file *file,
        return ret ? ret : copied;
 }
 
+/*
+ * Reserve metadata for a single block located at lblock
+ */
+static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
+{
+       int retries = 0;
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       struct ext4_inode_info *ei = EXT4_I(inode);
+       unsigned int md_needed;
+       ext4_lblk_t save_last_lblock;
+       int save_len;
+
+       /*
+        * Recalculate the number of metadata blocks to reserve in order
+        * to allocate nrblocks; the worst case is one extent per block.
+        */
+repeat:
+       spin_lock(&ei->i_block_reservation_lock);
+       /*
+        * ext4_calc_metadata_amount() has side effects, which we have
+        * to be prepared to undo if we fail to claim space.
+        */
+       save_len = ei->i_da_metadata_calc_len;
+       save_last_lblock = ei->i_da_metadata_calc_last_lblock;
+       md_needed = EXT4_NUM_B2C(sbi,
+                                ext4_calc_metadata_amount(inode, lblock));
+       trace_ext4_da_reserve_space(inode, md_needed);
+
+       /*
+        * We do still charge estimated metadata to the sb though;
+        * we cannot afford to run out of free blocks.
+        */
+       if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
+               ei->i_da_metadata_calc_len = save_len;
+               ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+               spin_unlock(&ei->i_block_reservation_lock);
+               if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+                       cond_resched();
+                       goto repeat;
+               }
+               return -ENOSPC;
+       }
+       ei->i_reserved_meta_blocks += md_needed;
+       spin_unlock(&ei->i_block_reservation_lock);
+
+       return 0;       /* success */
+}
+
 /*
  * Reserve a single cluster located at lblock
  */
@@ -1263,7 +1402,7 @@ repeat:
                ei->i_da_metadata_calc_last_lblock = save_last_lblock;
                spin_unlock(&ei->i_block_reservation_lock);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
-                       yield();
+                       cond_resched();
                        goto repeat;
                }
                dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
@@ -1768,6 +1907,11 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
        struct extent_status es;
        int retval;
        sector_t invalid_block = ~((sector_t) 0xffff);
+#ifdef ES_AGGRESSIVE_TEST
+       struct ext4_map_blocks orig_map;
+
+       memcpy(&orig_map, map, sizeof(*map));
+#endif
 
        if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
                invalid_block = ~0;
@@ -1809,6 +1953,9 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
                else
                        BUG_ON(1);
 
+#ifdef ES_AGGRESSIVE_TEST
+               ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
+#endif
                return retval;
        }
 
@@ -1843,8 +1990,11 @@ add_delayed:
                 * XXX: __block_prepare_write() unmaps passed block,
                 * is it OK?
                 */
-               /* If the block was allocated from previously allocated cluster,
-                * then we dont need to reserve it again. */
+               /*
+                * If the block was allocated from a previously allocated
+                * cluster, then we don't need to reserve it again. However,
+                * we still need to reserve metadata for every block we're
+                * going to write.
+                */
                if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
                        ret = ext4_da_reserve_space(inode, iblock);
                        if (ret) {
@@ -1852,6 +2002,13 @@ add_delayed:
                                retval = ret;
                                goto out_unlock;
                        }
+               } else {
+                       ret = ext4_da_reserve_metadata(inode, iblock);
+                       if (ret) {
+                               /* not enough space to reserve */
+                               retval = ret;
+                               goto out_unlock;
+                       }
                }
 
                ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
@@ -1873,6 +2030,15 @@ add_delayed:
                int ret;
                unsigned long long status;
 
+#ifdef ES_AGGRESSIVE_TEST
+               if (retval != map->m_len) {
+                       printk("ES len assertion failed for inode: %lu "
+                              "retval %d != map->m_len %d "
+                              "in %s (lookup)\n", inode->i_ino, retval,
+                              map->m_len, __func__);
+               }
+#endif
+
                status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
                ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
@@ -2908,8 +3074,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
 
        trace_ext4_releasepage(page);
 
-       WARN_ON(PageChecked(page));
-       if (!page_has_buffers(page))
+       /* Page has dirty journalled data -> cannot release */
+       if (PageChecked(page))
                return 0;
        if (journal)
                return jbd2_journal_try_to_free_buffers(journal, page, wait);
index 7bb713a46fe4c3f85ae9d35bd15bea73aa7957f5..ee6614bdb63950b7481443f944ca3c87681903be 100644 (file)
@@ -2804,8 +2804,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi,
                                                          ac->ac_b_ex.fe_group);
-               atomic_sub(ac->ac_b_ex.fe_len,
-                          &sbi->s_flex_groups[flex_group].free_clusters);
+               atomic64_sub(ac->ac_b_ex.fe_len,
+                            &sbi->s_flex_groups[flex_group].free_clusters);
        }
 
        err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
@@ -3692,11 +3692,7 @@ repeat:
        if (free < needed && busy) {
                busy = 0;
                ext4_unlock_group(sb, group);
-               /*
-                * Yield the CPU here so that we don't get soft lockup
-                * in non preempt case.
-                */
-               yield();
+               cond_resched();
                goto repeat;
        }
 
@@ -4246,7 +4242,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
                        ext4_claim_free_clusters(sbi, ar->len, ar->flags)) {
 
                        /* let others to free the space */
-                       yield();
+                       cond_resched();
                        ar->len = ar->len >> 1;
                }
                if (!ar->len) {
@@ -4464,7 +4460,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
        struct buffer_head *bitmap_bh = NULL;
        struct super_block *sb = inode->i_sb;
        struct ext4_group_desc *gdp;
-       unsigned long freed = 0;
        unsigned int overflow;
        ext4_grpblk_t bit;
        struct buffer_head *gd_bh;
@@ -4666,14 +4661,12 @@ do_more:
 
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
-               atomic_add(count_clusters,
-                          &sbi->s_flex_groups[flex_group].free_clusters);
+               atomic64_add(count_clusters,
+                            &sbi->s_flex_groups[flex_group].free_clusters);
        }
 
        ext4_mb_unload_buddy(&e4b);
 
-       freed += count;
-
        if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
                dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
 
@@ -4811,8 +4804,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
 
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
-               atomic_add(EXT4_NUM_B2C(sbi, blocks_freed),
-                          &sbi->s_flex_groups[flex_group].free_clusters);
+               atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed),
+                            &sbi->s_flex_groups[flex_group].free_clusters);
        }
 
        ext4_mb_unload_buddy(&e4b);
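The yield()-to-cond_resched() conversions in the hunks above change how these allocation paths back off when space is tight: cond_resched() offers the scheduler a preemption point, whereas yield() gives weaker guarantees and can burn CPU under contention. A short illustrative retry loop, built around a hypothetical allocator stub:

    #include <linux/errno.h>
    #include <linux/sched.h>

    #define DEMO_MAX_RETRIES 3

    /* Hypothetical allocation stub, used only to show the retry pattern. */
    static int demo_try_alloc(void)
    {
            return -ENOSPC;
    }

    static int demo_alloc_with_retry(void)
    {
            int retries = 0;
            int err;

            while ((err = demo_try_alloc()) == -ENOSPC &&
                   retries++ < DEMO_MAX_RETRIES)
                    cond_resched();     /* let other tasks run and free space */

            return err;
    }
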
index 4e81d47aa8cb8af0553b93c1cb7dcbbea2d5d0a8..33e1c086858b54ad704e80345c92b946ae3c565b 100644 (file)
  */
 static inline int
 get_ext_path(struct inode *inode, ext4_lblk_t lblock,
-               struct ext4_ext_path **path)
+               struct ext4_ext_path **orig_path)
 {
        int ret = 0;
+       struct ext4_ext_path *path;
 
-       *path = ext4_ext_find_extent(inode, lblock, *path);
-       if (IS_ERR(*path)) {
-               ret = PTR_ERR(*path);
-               *path = NULL;
-       } else if ((*path)[ext_depth(inode)].p_ext == NULL)
+       path = ext4_ext_find_extent(inode, lblock, *orig_path);
+       if (IS_ERR(path))
+               ret = PTR_ERR(path);
+       else if (path[ext_depth(inode)].p_ext == NULL)
                ret = -ENODATA;
+       else
+               *orig_path = path;
 
        return ret;
 }
@@ -611,24 +613,25 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
 {
        struct ext4_ext_path *path = NULL;
        struct ext4_extent *ext;
+       int ret = 0;
        ext4_lblk_t last = from + count;
        while (from < last) {
                *err = get_ext_path(inode, from, &path);
                if (*err)
-                       return 0;
+                       goto out;
                ext = path[ext_depth(inode)].p_ext;
-               if (!ext) {
-                       ext4_ext_drop_refs(path);
-                       return 0;
-               }
-               if (uninit != ext4_ext_is_uninitialized(ext)) {
-                       ext4_ext_drop_refs(path);
-                       return 0;
-               }
+               if (uninit != ext4_ext_is_uninitialized(ext))
+                       goto out;
                from += ext4_ext_get_actual_len(ext);
                ext4_ext_drop_refs(path);
        }
-       return 1;
+       ret = 1;
+out:
+       if (path) {
+               ext4_ext_drop_refs(path);
+               kfree(path);
+       }
+       return ret;
 }
 
 /**
@@ -666,6 +669,14 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
        int replaced_count = 0;
        int dext_alen;
 
+       *err = ext4_es_remove_extent(orig_inode, from, count);
+       if (*err)
+               goto out;
+
+       *err = ext4_es_remove_extent(donor_inode, from, count);
+       if (*err)
+               goto out;
+
        /* Get the original extent for the block "orig_off" */
        *err = get_ext_path(orig_inode, orig_off, &orig_path);
        if (*err)
index 809b31003ecc0fedd35d71e8abed4047bbe18beb..047a6de04a0ac8195d5453de514a8e95526fcce4 100644 (file)
@@ -50,11 +50,21 @@ void ext4_exit_pageio(void)
        kmem_cache_destroy(io_page_cachep);
 }
 
-void ext4_ioend_wait(struct inode *inode)
+/*
+ * This function is called by ext4_evict_inode() to make sure there is
+ * no more pending I/O completion work left to do.
+ */
+void ext4_ioend_shutdown(struct inode *inode)
 {
        wait_queue_head_t *wq = ext4_ioend_wq(inode);
 
        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
+       /*
+        * We need to make sure the work structure is finished being
+        * used before we let the inode get destroyed.
+        */
+       if (work_pending(&EXT4_I(inode)->i_unwritten_work))
+               cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
 }
 
 static void put_io_page(struct ext4_io_page *io_page)
index b2c8ee56eb98744ab7481e4d0849e2fb5ca71365..c169477a62c987a1ba2d3362dde6bd8d3cd0f260 100644 (file)
@@ -1360,8 +1360,8 @@ static void ext4_update_super(struct super_block *sb,
            sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group;
                flex_group = ext4_flex_group(sbi, group_data[0].group);
-               atomic_add(EXT4_NUM_B2C(sbi, free_blocks),
-                          &sbi->s_flex_groups[flex_group].free_clusters);
+               atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
+                            &sbi->s_flex_groups[flex_group].free_clusters);
                atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
                           &sbi->s_flex_groups[flex_group].free_inodes);
        }
index b3818b48f418577ad5a7abeb979e1b7d1acd836b..5d6d53578124dda01132a6545100a5acb2025f73 100644 (file)
@@ -1927,8 +1927,8 @@ static int ext4_fill_flex_info(struct super_block *sb)
                flex_group = ext4_flex_group(sbi, i);
                atomic_add(ext4_free_inodes_count(sb, gdp),
                           &sbi->s_flex_groups[flex_group].free_inodes);
-               atomic_add(ext4_free_group_clusters(sb, gdp),
-                          &sbi->s_flex_groups[flex_group].free_clusters);
+               atomic64_add(ext4_free_group_clusters(sb, gdp),
+                            &sbi->s_flex_groups[flex_group].free_clusters);
                atomic_add(ext4_used_dirs_count(sb, gdp),
                           &sbi->s_flex_groups[flex_group].used_dirs);
        }
index 507141fceb99669f7554e330d81605d2fff248b2..4be78237d896d759229eaf300f583288e1a28086 100644 (file)
@@ -125,3 +125,8 @@ extern int invalidate_inodes(struct super_block *, bool);
  * dcache.c
  */
 extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
+
+/*
+ * read_write.c
+ */
+extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
index d6ee5aed56b178ef98850b3a9abc75f6afc54f7f..325bc019ed8813ea00321594405e86c739dad5fb 100644 (file)
@@ -1065,9 +1065,12 @@ out:
 void jbd2_journal_set_triggers(struct buffer_head *bh,
                               struct jbd2_buffer_trigger_type *type)
 {
-       struct journal_head *jh = bh2jh(bh);
+       struct journal_head *jh = jbd2_journal_grab_journal_head(bh);
 
+       if (WARN_ON(!jh))
+               return;
        jh->b_triggers = type;
+       jbd2_journal_put_journal_head(jh);
 }
 
 void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
@@ -1119,17 +1122,18 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
 {
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
-       struct journal_head *jh = bh2jh(bh);
+       struct journal_head *jh;
        int ret = 0;
 
-       jbd_debug(5, "journal_head %p\n", jh);
-       JBUFFER_TRACE(jh, "entry");
        if (is_handle_aborted(handle))
                goto out;
-       if (!buffer_jbd(bh)) {
+       jh = jbd2_journal_grab_journal_head(bh);
+       if (!jh) {
                ret = -EUCLEAN;
                goto out;
        }
+       jbd_debug(5, "journal_head %p\n", jh);
+       JBUFFER_TRACE(jh, "entry");
 
        jbd_lock_bh_state(bh);
 
@@ -1220,6 +1224,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
        spin_unlock(&journal->j_list_lock);
 out_unlock_bh:
        jbd_unlock_bh_state(bh);
+       jbd2_journal_put_journal_head(jh);
 out:
        JBUFFER_TRACE(jh, "exit");
        WARN_ON(ret);   /* All errors are bugs, so dump the stack */
index 50ca17d3cb4506de87465bb4d62f3da5f00553a5..d581e45c0a9fd6ada94edd2bb6ea4fe4c7fc9e23 100644 (file)
@@ -798,6 +798,10 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
        }
 
        mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
+       /* Don't allow unprivileged users to change mount flags */
+       if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
+               mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
+
        atomic_inc(&sb->s_active);
        mnt->mnt.mnt_sb = sb;
        mnt->mnt.mnt_root = dget(root);
@@ -1713,6 +1717,9 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
        if (readonly_request == __mnt_is_readonly(mnt))
                return 0;
 
+       if (mnt->mnt_flags & MNT_LOCK_READONLY)
+               return -EPERM;
+
        if (readonly_request)
                error = mnt_make_readonly(real_mount(mnt));
        else
@@ -2339,7 +2346,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
        /* First pass: copy the tree topology */
        copy_flags = CL_COPY_ALL | CL_EXPIRE;
        if (user_ns != mnt_ns->user_ns)
-               copy_flags |= CL_SHARED_TO_SLAVE;
+               copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
        new = copy_tree(old, old->mnt.mnt_root, copy_flags);
        if (IS_ERR(new)) {
                up_write(&namespace_sem);
@@ -2732,6 +2739,51 @@ bool our_mnt(struct vfsmount *mnt)
        return check_mnt(real_mount(mnt));
 }
 
+bool current_chrooted(void)
+{
+       /* Does the current process have a non-standard root */
+       struct path ns_root;
+       struct path fs_root;
+       bool chrooted;
+
+       /* Find the namespace root */
+       ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
+       ns_root.dentry = ns_root.mnt->mnt_root;
+       path_get(&ns_root);
+       while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
+               ;
+
+       get_fs_root(current->fs, &fs_root);
+
+       chrooted = !path_equal(&fs_root, &ns_root);
+
+       path_put(&fs_root);
+       path_put(&ns_root);
+
+       return chrooted;
+}
+
+void update_mnt_policy(struct user_namespace *userns)
+{
+       struct mnt_namespace *ns = current->nsproxy->mnt_ns;
+       struct mount *mnt;
+
+       down_read(&namespace_sem);
+       list_for_each_entry(mnt, &ns->list, mnt_list) {
+               switch (mnt->mnt.mnt_sb->s_magic) {
+               case SYSFS_MAGIC:
+                       userns->may_mount_sysfs = true;
+                       break;
+               case PROC_SUPER_MAGIC:
+                       userns->may_mount_proc = true;
+                       break;
+               }
+               if (userns->may_mount_sysfs && userns->may_mount_proc)
+                       break;
+       }
+       up_read(&namespace_sem);
+}
+
 static void *mntns_get(struct task_struct *task)
 {
        struct mnt_namespace *ns = NULL;
index 737d839bc17b5aa0ae58e5350a235af1f8adfb2b..6fc7b5cae92bf6526bee07696f12977322f2cb5e 100644 (file)
@@ -55,7 +55,8 @@ static void dev_remove(struct net *net, dev_t dev)
 
        bl_pipe_msg.bl_wq = &nn->bl_wq;
        memset(msg, 0, sizeof(*msg));
-       msg->data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS);
+       msg->len = sizeof(bl_msg) + bl_msg.totallen;
+       msg->data = kzalloc(msg->len, GFP_NOFS);
        if (!msg->data)
                goto out;
 
@@ -66,7 +67,6 @@ static void dev_remove(struct net *net, dev_t dev)
        memcpy(msg->data, &bl_msg, sizeof(bl_msg));
        dataptr = (uint8_t *) msg->data;
        memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request));
-       msg->len = sizeof(bl_msg) + bl_msg.totallen;
 
        add_wait_queue(&nn->bl_wq, &wq);
        if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) {
index dc0f98dfa71773bd467a1b6891b2f916b696b734..c516da5873fd12df0d3c9877b2c3189423c0dec4 100644 (file)
@@ -726,9 +726,9 @@ out1:
        return ret;
 }
 
-static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data)
+static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data, size_t datalen)
 {
-       return key_instantiate_and_link(key, data, strlen(data) + 1,
+       return key_instantiate_and_link(key, data, datalen,
                                        id_resolver_cache->thread_keyring,
                                        authkey);
 }
@@ -738,6 +738,7 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
                struct key *key, struct key *authkey)
 {
        char id_str[NFS_UINT_MAXLEN];
+       size_t len;
        int ret = -ENOKEY;
 
        /* ret = -ENOKEY */
@@ -747,13 +748,15 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
        case IDMAP_CONV_NAMETOID:
                if (strcmp(upcall->im_name, im->im_name) != 0)
                        break;
-               sprintf(id_str, "%d", im->im_id);
-               ret = nfs_idmap_instantiate(key, authkey, id_str);
+               /* Note: here we store the NUL terminator too */
+               len = sprintf(id_str, "%d", im->im_id) + 1;
+               ret = nfs_idmap_instantiate(key, authkey, id_str, len);
                break;
        case IDMAP_CONV_IDTONAME:
                if (upcall->im_id != im->im_id)
                        break;
-               ret = nfs_idmap_instantiate(key, authkey, im->im_name);
+               len = strlen(im->im_name);
+               ret = nfs_idmap_instantiate(key, authkey, im->im_name, len);
                break;
        default:
                ret = -EINVAL;
index 49eeb044c109c0bb3c3878db7b8b60a780b50f97..4fb234d3aefb240f3d067523bda6df467336d2d0 100644 (file)
@@ -129,7 +129,6 @@ static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo)
 {
        if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
                return;
-       clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags);
        pnfs_return_layout(inode);
 }
 
index b2671cb0f901b8e904ee4b91961d2029e12fb2dc..26431cf62ddbc393fd5fe1e432742be37d06e12e 100644 (file)
@@ -2632,7 +2632,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
        int status;
 
        if (pnfs_ld_layoutret_on_setattr(inode))
-               pnfs_return_layout(inode);
+               pnfs_commit_and_return_layout(inode);
 
        nfs_fattr_init(fattr);
        
@@ -6416,22 +6416,8 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
 static void nfs4_layoutcommit_release(void *calldata)
 {
        struct nfs4_layoutcommit_data *data = calldata;
-       struct pnfs_layout_segment *lseg, *tmp;
-       unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
 
        pnfs_cleanup_layoutcommit(data);
-       /* Matched by references in pnfs_set_layoutcommit */
-       list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
-               list_del_init(&lseg->pls_lc_list);
-               if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
-                                      &lseg->pls_flags))
-                       pnfs_put_lseg(lseg);
-       }
-
-       clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
-       smp_mb__after_clear_bit();
-       wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
-
        put_rpccred(data->cred);
        kfree(data);
 }
index 48ac5aad62589cd7140f55a52d5ee63f31e743b5..4bdffe0ba025228803b65d4fe54ab5cb0adda0ed 100644 (file)
@@ -417,6 +417,16 @@ should_free_lseg(struct pnfs_layout_range *lseg_range,
               lo_seg_intersecting(lseg_range, recall_range);
 }
 
+static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
+               struct list_head *tmp_list)
+{
+       if (!atomic_dec_and_test(&lseg->pls_refcount))
+               return false;
+       pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
+       list_add(&lseg->pls_list, tmp_list);
+       return true;
+}
+
 /* Returns 1 if lseg is removed from list, 0 otherwise */
 static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
                             struct list_head *tmp_list)
@@ -430,11 +440,8 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
                 */
                dprintk("%s: lseg %p ref %d\n", __func__, lseg,
                        atomic_read(&lseg->pls_refcount));
-               if (atomic_dec_and_test(&lseg->pls_refcount)) {
-                       pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
-                       list_add(&lseg->pls_list, tmp_list);
+               if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
                        rv = 1;
-               }
        }
        return rv;
 }
@@ -777,6 +784,21 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        return lseg;
 }
 
+static void pnfs_clear_layoutcommit(struct inode *inode,
+               struct list_head *head)
+{
+       struct nfs_inode *nfsi = NFS_I(inode);
+       struct pnfs_layout_segment *lseg, *tmp;
+
+       if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
+               return;
+       list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
+               if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
+                       continue;
+               pnfs_lseg_dec_and_remove_zero(lseg, head);
+       }
+}
+
 /*
  * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
  * when the layout segment list is empty.
@@ -808,6 +830,7 @@ _pnfs_return_layout(struct inode *ino)
        /* Reference matched in nfs4_layoutreturn_release */
        pnfs_get_layout_hdr(lo);
        empty = list_empty(&lo->plh_segs);
+       pnfs_clear_layoutcommit(ino, &tmp_list);
        pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (empty) {
@@ -820,8 +843,6 @@ _pnfs_return_layout(struct inode *ino)
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);
 
-       WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));
-
        lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
        if (unlikely(lrp == NULL)) {
                status = -ENOMEM;
@@ -845,6 +866,33 @@ out:
 }
 EXPORT_SYMBOL_GPL(_pnfs_return_layout);
 
+int
+pnfs_commit_and_return_layout(struct inode *inode)
+{
+       struct pnfs_layout_hdr *lo;
+       int ret;
+
+       spin_lock(&inode->i_lock);
+       lo = NFS_I(inode)->layout;
+       if (lo == NULL) {
+               spin_unlock(&inode->i_lock);
+               return 0;
+       }
+       pnfs_get_layout_hdr(lo);
+       /* Block new layoutgets and read/write to ds */
+       lo->plh_block_lgets++;
+       spin_unlock(&inode->i_lock);
+       filemap_fdatawait(inode->i_mapping);
+       ret = pnfs_layoutcommit_inode(inode, true);
+       if (ret == 0)
+               ret = _pnfs_return_layout(inode);
+       spin_lock(&inode->i_lock);
+       lo->plh_block_lgets--;
+       spin_unlock(&inode->i_lock);
+       pnfs_put_layout_hdr(lo);
+       return ret;
+}
+
 bool pnfs_roc(struct inode *ino)
 {
        struct pnfs_layout_hdr *lo;
@@ -1458,7 +1506,6 @@ static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
        dprintk("pnfs write error = %d\n", hdr->pnfs_error);
        if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
            PNFS_LAYOUTRET_ON_ERROR) {
-               clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
                pnfs_return_layout(hdr->inode);
        }
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
@@ -1613,7 +1660,6 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
        dprintk("pnfs read error = %d\n", hdr->pnfs_error);
        if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
            PNFS_LAYOUTRET_ON_ERROR) {
-               clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
                pnfs_return_layout(hdr->inode);
        }
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
@@ -1746,11 +1792,27 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
 
        list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
                if (lseg->pls_range.iomode == IOMODE_RW &&
-                   test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
+                   test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                        list_add(&lseg->pls_lc_list, listp);
        }
 }
 
+static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
+{
+       struct pnfs_layout_segment *lseg, *tmp;
+       unsigned long *bitlock = &NFS_I(inode)->flags;
+
+       /* Matched by references in pnfs_set_layoutcommit */
+       list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
+               list_del_init(&lseg->pls_lc_list);
+               pnfs_put_lseg(lseg);
+       }
+
+       clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
+       smp_mb__after_clear_bit();
+       wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
+}
+
 void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
 {
        pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
@@ -1795,6 +1857,7 @@ void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
 
        if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
                nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
+       pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
 }
 
 /*
index 94ba804177483d3c78694ae1708e95a2d34048d5..f5f8a470a647c7dc2f3a475e3e852d1aa7c97f5e 100644 (file)
@@ -219,6 +219,7 @@ void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
 int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
 int _pnfs_return_layout(struct inode *);
+int pnfs_commit_and_return_layout(struct inode *);
 void pnfs_ld_write_done(struct nfs_write_data *);
 void pnfs_ld_read_done(struct nfs_read_data *);
 struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
@@ -407,6 +408,11 @@ static inline int pnfs_return_layout(struct inode *ino)
        return 0;
 }
 
+static inline int pnfs_commit_and_return_layout(struct inode *inode)
+{
+       return 0;
+}
+
 static inline bool
 pnfs_ld_layoutret_on_setattr(struct inode *inode)
 {
index 62c1ee128aebafbd82e70863e57caa74de804e13..ca05f6dc3544b4c216eb369d9e01fe7a48f0e90a 100644 (file)
@@ -102,7 +102,8 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
 {
        if (rp->c_type == RC_REPLBUFF)
                kfree(rp->c_replvec.iov_base);
-       hlist_del(&rp->c_hash);
+       if (!hlist_unhashed(&rp->c_hash))
+               hlist_del(&rp->c_hash);
        list_del(&rp->c_lru);
        --num_drc_entries;
        kmem_cache_free(drc_slab, rp);
@@ -118,6 +119,10 @@ nfsd_reply_cache_free(struct svc_cacherep *rp)
 
 int nfsd_reply_cache_init(void)
 {
+       INIT_LIST_HEAD(&lru_head);
+       max_drc_entries = nfsd_cache_size_limit();
+       num_drc_entries = 0;
+
        register_shrinker(&nfsd_reply_cache_shrinker);
        drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
                                        0, 0, NULL);
@@ -128,10 +133,6 @@ int nfsd_reply_cache_init(void)
        if (!cache_hash)
                goto out_nomem;
 
-       INIT_LIST_HEAD(&lru_head);
-       max_drc_entries = nfsd_cache_size_limit();
-       num_drc_entries = 0;
-
        return 0;
 out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
index 2a7eb536de0bec80dfbfd7d981139f2e925e644e..2b2e2396a86913b4d4e69c376852ded82dd9be3a 100644 (file)
@@ -1013,6 +1013,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
        int                     host_err;
        int                     stable = *stablep;
        int                     use_wgather;
+       loff_t                  pos = offset;
 
        dentry = file->f_path.dentry;
        inode = dentry->d_inode;
@@ -1025,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
 
        /* Write the data. */
        oldfs = get_fs(); set_fs(KERNEL_DS);
-       host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
+       host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
        set_fs(oldfs);
        if (host_err < 0)
                goto out_nfserr;
index 3e000a51ac0d09556d184d26422a91d0bc4c0ff9..8b29d2164da6aef6edb88b9872368c849e658530 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mnt_namespace.h>
 #include <linux/mount.h>
 #include <linux/fs.h>
+#include <linux/nsproxy.h>
 #include "internal.h"
 #include "pnode.h"
 
@@ -220,6 +221,7 @@ static struct mount *get_source(struct mount *dest,
 int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
                    struct mount *source_mnt, struct list_head *tree_list)
 {
+       struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
        struct mount *m, *child;
        int ret = 0;
        struct mount *prev_dest_mnt = dest_mnt;
@@ -237,6 +239,10 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
 
                source =  get_source(m, prev_dest_mnt, prev_src_mnt, &type);
 
+               /* Notice when we are propagating across user namespaces */
+               if (m->mnt_ns->user_ns != user_ns)
+                       type |= CL_UNPRIVILEGED;
+
                child = copy_tree(source, source->mnt.mnt_root, type);
                if (IS_ERR(child)) {
                        ret = PTR_ERR(child);
index 19b853a3445cb907665b4403484984a0c525af68..a0493d5ebfbf52be2eb07a794df459ab2a32cd6a 100644 (file)
@@ -23,6 +23,7 @@
 #define CL_MAKE_SHARED                 0x08
 #define CL_PRIVATE             0x10
 #define CL_SHARED_TO_SLAVE     0x20
+#define CL_UNPRIVILEGED                0x40
 
 static inline void set_mnt_shared(struct mount *mnt)
 {
index a86aebc9ba7c252bbfa73907612b59c177fca202..869116c2afbe4d192bee231541d141b8622cd6e3 100644 (file)
@@ -446,9 +446,10 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
 
 struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 {
-       struct inode *inode = iget_locked(sb, de->low_ino);
+       struct inode *inode = new_inode_pseudo(sb);
 
-       if (inode && (inode->i_state & I_NEW)) {
+       if (inode) {
+               inode->i_ino = de->low_ino;
                inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
                PROC_I(inode)->pde = de;
 
@@ -476,7 +477,6 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
                                inode->i_fop = de->proc_fops;
                        }
                }
-               unlock_new_inode(inode);
        } else
               pde_put(de);
        return inode;
index c6e9fac26bace4e9b63bd57dce624589dc67dfd7..9c7fab1d23f0d17d68446bf3d46ce1a906ae0907 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/bitops.h>
+#include <linux/user_namespace.h>
 #include <linux/mount.h>
 #include <linux/pid_namespace.h>
 #include <linux/parser.h>
@@ -108,6 +109,9 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
        } else {
                ns = task_active_pid_ns(current);
                options = data;
+
+               if (!current_user_ns()->may_mount_proc)
+                       return ERR_PTR(-EPERM);
        }
 
        sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns);
index a698eff457fb6e510c0543c9b762cc8c8d702f23..e6ddc8dceb96fc48a8fbe379799d0efed52d2b4a 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/splice.h>
 #include <linux/compat.h>
 #include "read_write.h"
+#include "internal.h"
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -417,6 +418,33 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
 
 EXPORT_SYMBOL(do_sync_write);
 
+ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos)
+{
+       mm_segment_t old_fs;
+       const char __user *p;
+       ssize_t ret;
+
+       if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+               return -EINVAL;
+
+       old_fs = get_fs();
+       set_fs(get_ds());
+       p = (__force const char __user *)buf;
+       if (count > MAX_RW_COUNT)
+               count =  MAX_RW_COUNT;
+       if (file->f_op->write)
+               ret = file->f_op->write(file, p, count, pos);
+       else
+               ret = do_sync_write(file, p, count, pos);
+       set_fs(old_fs);
+       if (ret > 0) {
+               fsnotify_modify(file);
+               add_wchar(current, ret);
+       }
+       inc_syscw(current);
+       return ret;
+}
+
 ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
 {
        ssize_t ret;
index 718bd0056384688af6ead056156574638c0be6e9..29e394e49ddda7c7721d3939d993632a29f4499a 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/security.h>
 #include <linux/gfp.h>
 #include <linux/socket.h>
+#include "internal.h"
 
 /*
  * Attempt to steal a page from a pipe buffer. This should perhaps go into
@@ -1048,9 +1049,10 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 {
        int ret;
        void *data;
+       loff_t tmp = sd->pos;
 
        data = buf->ops->map(pipe, buf, 0);
-       ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos);
+       ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
        buf->ops->unmap(pipe, buf, data);
 
        return ret;
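The hunk above passes __kernel_write() a scratch copy of sd->pos because the helper, added to fs/read_write.c earlier in this patch, takes a loff_t pointer and advances it as it writes. A brief sketch of that calling pattern, using a hypothetical wrapper name:

    #include <linux/fs.h>
    #include "internal.h"       /* declares __kernel_write(), as added above */

    /* Hypothetical helper: write at a fixed offset without disturbing the
     * caller's file position. */
    static ssize_t demo_write_at(struct file *file, const char *buf,
                                 size_t len, loff_t pos)
    {
            loff_t tmp = pos;   /* __kernel_write() advances *pos as it goes */

            return __kernel_write(file, buf, len, &tmp);
    }
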
index 2fbdff6be25ce3d546e17fa4e72783e9d3f03637..e14512678c9b7b8042f5ba55a329a0101849f606 100644 (file)
@@ -1020,6 +1020,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
                ino = parent_sd->s_ino;
                if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0)
                        filp->f_pos++;
+               else
+                       return 0;
        }
        if (filp->f_pos == 1) {
                if (parent_sd->s_parent)
@@ -1028,6 +1030,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
                        ino = parent_sd->s_ino;
                if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0)
                        filp->f_pos++;
+               else
+                       return 0;
        }
        mutex_lock(&sysfs_mutex);
        for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos);
@@ -1058,10 +1062,21 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir)
        return 0;
 }
 
+static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+       struct inode *inode = file_inode(file);
+       loff_t ret;
+
+       mutex_lock(&inode->i_mutex);
+       ret = generic_file_llseek(file, offset, whence);
+       mutex_unlock(&inode->i_mutex);
+
+       return ret;
+}
 
 const struct file_operations sysfs_dir_operations = {
        .read           = generic_read_dir,
        .readdir        = sysfs_readdir,
        .release        = sysfs_dir_release,
-       .llseek         = generic_file_llseek,
+       .llseek         = sysfs_dir_llseek,
 };
index 8d924b5ec733450e37599c8338ab69ea3b15027f..afd83273e6cea8112e1570c8835bfa829303b2f8 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/magic.h>
 #include <linux/slab.h>
+#include <linux/user_namespace.h>
 
 #include "sysfs.h"
 
@@ -111,6 +112,9 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
        struct super_block *sb;
        int error;
 
+       if (!(flags & MS_KERNMOUNT) && !current_user_ns()->may_mount_sysfs)
+               return ERR_PTR(-EPERM);
+
        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return ERR_PTR(-ENOMEM);
index 4e8f0df82d025e6bc38615a94d254bf003c24972..8459b5d8cb71ceaa98ff64990cae8d4daaf6f1b1 100644 (file)
@@ -1334,6 +1334,12 @@ _xfs_buf_ioapply(
        int             size;
        int             i;
 
+       /*
+        * Make sure we capture only current IO errors rather than stale errors
+        * left over from previous use of the buffer (e.g. failed readahead).
+        */
+       bp->b_error = 0;
+
        if (bp->b_flags & XBF_WRITE) {
                if (bp->b_flags & XBF_SYNCIO)
                        rw = WRITE_SYNC;
index 912d83d8860a984a938b604f3e2447c3e390ce89..5a30dd899d2b32c38dbc1fc552e323d233f33e50 100644 (file)
@@ -325,7 +325,7 @@ xfs_iomap_eof_want_preallocate(
  * rather than falling short due to things like stripe unit/width alignment of
  * real extents.
  */
-STATIC int
+STATIC xfs_fsblock_t
 xfs_iomap_eof_prealloc_initial_size(
        struct xfs_mount        *mp,
        struct xfs_inode        *ip,
@@ -413,7 +413,7 @@ xfs_iomap_prealloc_size(
                 * have a large file on a small filesystem and the above
                 * lowspace thresholds are smaller than MAXEXTLEN.
                 */
-               while (alloc_blocks >= freesp)
+               while (alloc_blocks && alloc_blocks >= freesp)
                        alloc_blocks >>= 4;
        }
 
index a386b0b654cced33e17eb6a7917a935c76ff1de5..918e8fe2f5e9b3f07d49fbf9f71252c60f318692 100644 (file)
        {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
-       {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9998, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
index a975de1ff59feaba6758afa9682ca9203bfc2c42..3bd46f766751caa9b143adcc5ce546441f6b868d 100644 (file)
@@ -51,7 +51,7 @@ struct task_struct;
 extern void debug_show_all_locks(void);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void debug_check_no_locks_held(void);
+extern void debug_check_no_locks_held(struct task_struct *task);
 #else
 static inline void debug_show_all_locks(void)
 {
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len)
 }
 
 static inline void
-debug_check_no_locks_held(void)
+debug_check_no_locks_held(struct task_struct *task)
 {
 }
 #endif
index 4fd4999ccb5bf3386db3c27fec3f0f1d4a048b42..0b763276f6199a255d304b572e0c414c90252056 100644 (file)
@@ -561,7 +561,6 @@ struct csrow_info {
 
        u32 ue_count;           /* Uncorrectable Errors for this csrow */
        u32 ce_count;           /* Correctable Errors for this csrow */
-       u32 nr_pages;           /* combined pages count of all channels */
 
        struct mem_ctl_info *mci;       /* the parent */
 
@@ -676,11 +675,11 @@ struct mem_ctl_info {
         * sees memory sticks ("dimms"), and the ones that sees memory ranks.
         * All old memory controllers enumerate memories per rank, but most
         * of the recent drivers enumerate memories per DIMM, instead.
-        * When the memory controller is per rank, mem_is_per_rank is true.
+        * When the memory controller is per rank, csbased is true.
         */
        unsigned n_layers;
        struct edac_mc_layer *layers;
-       bool mem_is_per_rank;
+       bool csbased;
 
        /*
         * DIMM info. Will eventually remove the entire csrows_info some day
@@ -741,8 +740,6 @@ struct mem_ctl_info {
        u32 fake_inject_ue;
        u16 fake_inject_count;
 #endif
-       __u8 csbased : 1,       /* csrow-based memory controller */
-            __resv  : 7;
 };
 
 #endif
index 043a5cf8b5baf3e917dd9ba265c365a334365216..e70df40d84f6fe83c72f732aa44b454148661b6e 100644 (file)
@@ -3,7 +3,6 @@
 #ifndef FREEZER_H_INCLUDED
 #define FREEZER_H_INCLUDED
 
-#include <linux/debug_locks.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/atomic.h>
@@ -49,8 +48,6 @@ extern void thaw_kernel_threads(void);
 
 static inline bool try_to_freeze(void)
 {
-       if (!(current->flags & PF_NOFREEZE))
-               debug_check_no_locks_held();
        might_sleep();
        if (likely(!freezing(current)))
                return false;
index 729eded4b24f09fa93b7f85b50aaca4b297fd2f9..2b93a9a5a1e6b8ef4a15a6aaf36ade3b48bd0d98 100644 (file)
@@ -50,4 +50,6 @@ static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root,
        spin_unlock(&fs->lock);
 }
 
+extern bool current_chrooted(void);
+
 #endif /* _LINUX_FS_STRUCT_H */
index 61c97ae22e01881d7e1744401e52705c7986fb99..f09a0ae4d858557c8bc7c071c34cd5ec46c29043 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <asm/types.h>
+#include <linux/compiler.h>
 
 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
 #define GOLDEN_RATIO_PRIME_32 0x9e370001UL
@@ -31,7 +32,7 @@
 #error Wordsize not 32 or 64
 #endif
 
-static inline u64 hash_64(u64 val, unsigned int bits)
+static __always_inline u64 hash_64(u64 val, unsigned int bits)
 {
        u64 hash = val;
 
index f5dbce50466e6546fc72f4084ed0d0c91d91fe1e..66017028dcb3d0aebf70aa7281e7758a42bcb3bf 100644 (file)
@@ -37,7 +37,7 @@ void irq_work_sync(struct irq_work *work);
 #ifdef CONFIG_IRQ_WORK
 bool irq_work_needs_cpu(void);
 #else
-static bool irq_work_needs_cpu(void) { return false; }
+static inline bool irq_work_needs_cpu(void) { return false; }
 #endif
 
 #endif /* _LINUX_IRQ_WORK_H */
index 80d36874689b40da26a3107ba509a8ca10dc74da..79fdd80a42d4f001157bc70b9570aa5eda0b8f6c 100644 (file)
@@ -390,7 +390,6 @@ extern struct pid *session_of_pgrp(struct pid *pgrp);
 unsigned long int_sqrt(unsigned long);
 
 extern void bust_spinlocks(int yes);
-extern void wake_up_klogd(void);
 extern int oops_in_progress;           /* If set, an oops, panic(), BUG() or die() is in progress */
 extern int panic_timeout;
 extern int panic_on_oops;
index 5b18ecde69b58be85c4fb39db00464de7d6885bf..1aa4f13cdfa6a4f2b0ceec56a689600f8d53626c 100644 (file)
@@ -106,6 +106,29 @@ enum max77693_muic_reg {
        MAX77693_MUIC_REG_END,
 };
 
+/* MAX77693 INTMASK1~2 Register */
+#define INTMASK1_ADC1K_SHIFT           3
+#define INTMASK1_ADCERR_SHIFT          2
+#define INTMASK1_ADCLOW_SHIFT          1
+#define INTMASK1_ADC_SHIFT             0
+#define INTMASK1_ADC1K_MASK            (1 << INTMASK1_ADC1K_SHIFT)
+#define INTMASK1_ADCERR_MASK           (1 << INTMASK1_ADCERR_SHIFT)
+#define INTMASK1_ADCLOW_MASK           (1 << INTMASK1_ADCLOW_SHIFT)
+#define INTMASK1_ADC_MASK              (1 << INTMASK1_ADC_SHIFT)
+
+#define INTMASK2_VIDRM_SHIFT           5
+#define INTMASK2_VBVOLT_SHIFT          4
+#define INTMASK2_DXOVP_SHIFT           3
+#define INTMASK2_DCDTMR_SHIFT          2
+#define INTMASK2_CHGDETRUN_SHIFT       1
+#define INTMASK2_CHGTYP_SHIFT          0
+#define INTMASK2_VIDRM_MASK            (1 << INTMASK2_VIDRM_SHIFT)
+#define INTMASK2_VBVOLT_MASK           (1 << INTMASK2_VBVOLT_SHIFT)
+#define INTMASK2_DXOVP_MASK            (1 << INTMASK2_DXOVP_SHIFT)
+#define INTMASK2_DCDTMR_MASK           (1 << INTMASK2_DCDTMR_SHIFT)
+#define INTMASK2_CHGDETRUN_MASK                (1 << INTMASK2_CHGDETRUN_SHIFT)
+#define INTMASK2_CHGTYP_MASK           (1 << INTMASK2_CHGTYP_SHIFT)
+
 /* MAX77693 MUIC - STATUS1~3 Register */
 #define STATUS1_ADC_SHIFT              (0)
 #define STATUS1_ADCLOW_SHIFT           (5)
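The new INTMASK1/INTMASK2 definitions above follow the common SHIFT/MASK pairing: the shift names a bit position once and the mask is derived from it, so driver code never repeats magic numbers. A tiny self-contained sketch with a hypothetical register bit (the layout below is invented, not MAX77693's):

#include <stdint.h>
#include <stdio.h>

#define DEMO_ADCLOW_SHIFT	1
#define DEMO_ADCLOW_MASK	(1 << DEMO_ADCLOW_SHIFT)

/* Set one interrupt-mask bit without disturbing the others. */
static uint8_t demo_mask_adclow(uint8_t reg)
{
	return reg | DEMO_ADCLOW_MASK;
}

int main(void)
{
	printf("0x%02x\n", demo_mask_adclow(0x80));	/* prints 0x82 */
	return 0;
}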
index 7acc9dc73c9f272bda990e741041a1b5c1237cc5..e19ff30ad0a21453cece0df7524d0b336a69078f 100644 (file)
@@ -87,7 +87,6 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_PFNMAP      0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE   0x00000800      /* ETXTBSY on write attempts.. */
 
-#define VM_POPULATE     0x00001000
 #define VM_LOCKED      0x00002000
 #define VM_IO           0x00004000     /* Memory mapped I/O or similar */
 
index 61c7a87e5d2b358484dc16ab1b0db79253fc93b3..9aa863da287fedf383f3c507c287aa220cbd6a86 100644 (file)
@@ -79,8 +79,6 @@ calc_vm_flag_bits(unsigned long flags)
 {
        return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
               _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
-              ((flags & MAP_LOCKED) ? (VM_LOCKED | VM_POPULATE) : 0) |
-              (((flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE) ?
-                                                       VM_POPULATE : 0);
+              _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    );
 }
 #endif /* _LINUX_MMAN_H */
index ede274957e05b59e61725f85f0c4acff7b482f33..c74092eebf5c9da5a052168b8885c75290efa2d2 100644 (file)
@@ -527,7 +527,7 @@ static inline int zone_is_oom_locked(const struct zone *zone)
        return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
-static inline unsigned zone_end_pfn(const struct zone *zone)
+static inline unsigned long zone_end_pfn(const struct zone *zone)
 {
        return zone->zone_start_pfn + zone->spanned_pages;
 }
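Widening the return type above matters once a zone ends beyond PFN 2^32: funneling the sum through unsigned int silently truncates it on 64-bit machines. A hedged standalone sketch of the failure mode (demo functions, LP64 assumed):

#include <stdio.h>

static unsigned int narrow_zone_end_pfn(unsigned long start, unsigned long spanned)
{
	return start + spanned;		/* truncated to 32 bits */
}

static unsigned long wide_zone_end_pfn(unsigned long start, unsigned long spanned)
{
	return start + spanned;		/* full value preserved */
}

int main(void)
{
	unsigned long start = 0x100000000UL;	/* a zone starting above 4G PFNs */
	unsigned long spanned = 0x1000;

	printf("narrow: 0x%x\n", narrow_zone_end_pfn(start, spanned));
	printf("wide:   0x%lx\n", wide_zone_end_pfn(start, spanned));
	return 0;
}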
index d7029f4a191a093ef9294104c75756b6d3b7faee..73005f9957ead2b95adb4107329434afbbf482a7 100644 (file)
@@ -47,6 +47,8 @@ struct mnt_namespace;
 
 #define MNT_INTERNAL   0x4000
 
+#define MNT_LOCK_READONLY      0x400000
+
 struct vfsmount {
        struct dentry *mnt_root;        /* root of the mounted tree */
        struct super_block *mnt_sb;     /* pointer to superblock */
index 7ccb3c59ed605d592fcd0663680b41427a218e7d..ef52d9c91459e0204e31c98483922b7ab95b1ebb 100644 (file)
@@ -187,6 +187,13 @@ typedef enum {
  * This happens with the Renesas AG-AND chips, possibly others.
  */
 #define BBT_AUTO_REFRESH       0x00000080
+/*
+ * Chip requires ready check on read (for auto-incremented sequential read).
+ * True only for small page devices; large page devices do not support
+ * autoincrement.
+ */
+#define NAND_NEED_READRDY      0x00000100
+
 /* Chip does not allow subpage writes */
 #define NAND_NO_SUBPAGE_WRITE  0x00000200
 
index f14943d55315695e36186a75c5db56f07407dd20..f80af86743425ca6219c8c2cfc5b180f2325c85b 100644 (file)
@@ -24,8 +24,8 @@
 #define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */
 #define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */
 
-#define FB_SYNC_DATA_ENABLE_HIGH_ACT   (1 << 6)
-#define FB_SYNC_DOTCLK_FAILING_ACT     (1 << 7) /* failing/negtive edge sampling */
+#define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT        (1 << 6)
+#define MXSFB_SYNC_DOTCLK_FAILING_ACT  (1 << 7) /* failing/negtive edge sampling */
 
 struct mxsfb_platform_data {
        struct fb_videomode *mode_list;
@@ -44,6 +44,9 @@ struct mxsfb_platform_data {
                                 * allocated. If specified,fb_size must also be specified.
                                 * fb_phys must be unused by Linux.
                                 */
+       u32 sync;               /* sync mask, contains MXSFB specifics not
+                                * carried in fb_info->var.sync
+                                */
 };
 
 #endif /* __LINUX_MXSFB_H */
index b3d00fa4b3149b614365dd215a611ed638fdb924..8bfa95600e48bd11e2196571a39296a2fd7c0d5a 100644 (file)
@@ -895,7 +895,7 @@ struct netdev_fcoe_hbainfo {
  *
  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
- *                          struct net_device *dev)
+ *                          struct net_device *dev, u32 filter_mask)
  *
  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
  *     Called to change device carrier. Soft-devices (like dummy, team, etc)
index c25cccaa555a420b5cc6fd26d5e83a89937470a6..4fa3b0b9b071358a779c4448d874e3dffe57900d 100644 (file)
@@ -137,6 +137,34 @@ enum {
        NVME_LBAF_RP_DEGRADED   = 3,
 };
 
+struct nvme_smart_log {
+       __u8                    critical_warning;
+       __u8                    temperature[2];
+       __u8                    avail_spare;
+       __u8                    spare_thresh;
+       __u8                    percent_used;
+       __u8                    rsvd6[26];
+       __u8                    data_units_read[16];
+       __u8                    data_units_written[16];
+       __u8                    host_reads[16];
+       __u8                    host_writes[16];
+       __u8                    ctrl_busy_time[16];
+       __u8                    power_cycles[16];
+       __u8                    power_on_hours[16];
+       __u8                    unsafe_shutdowns[16];
+       __u8                    media_errors[16];
+       __u8                    num_err_log_entries[16];
+       __u8                    rsvd192[320];
+};
+
+enum {
+       NVME_SMART_CRIT_SPARE           = 1 << 0,
+       NVME_SMART_CRIT_TEMPERATURE     = 1 << 1,
+       NVME_SMART_CRIT_RELIABILITY     = 1 << 2,
+       NVME_SMART_CRIT_MEDIA           = 1 << 3,
+       NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
+};
+
 struct nvme_lba_range_type {
        __u8                    type;
        __u8                    attributes;
index 1249a54d17e0ab471501919dbdc8d63e27658c35..822171fcb1c82bd110472975d63fd4c2ed5fe8f2 100644 (file)
@@ -134,6 +134,8 @@ extern int printk_delay_msec;
 extern int dmesg_restrict;
 extern int kptr_restrict;
 
+extern void wake_up_klogd(void);
+
 void log_buf_kexec_setup(void);
 void __init setup_log_buf(int early);
 #else
@@ -162,6 +164,10 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies,
        return false;
 }
 
+static inline void wake_up_klogd(void)
+{
+}
+
 static inline void log_buf_kexec_setup(void)
 {
 }
index 821c7f45d2a7357085c757c2309177e443b34990..441f5bfdab8ea476749bb512d2c7deeed7399a83 100644 (file)
@@ -500,7 +500,7 @@ struct sk_buff {
        union {
                __u32           mark;
                __u32           dropcount;
-               __u32           avail_size;
+               __u32           reserved_tailroom;
        };
 
        sk_buff_data_t          inner_transport_header;
@@ -1288,11 +1288,13 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
         * do not lose pfmemalloc information as the pages would not be
         * allocated using __GFP_MEMALLOC.
         */
-       if (page->pfmemalloc && !page->mapping)
-               skb->pfmemalloc = true;
        frag->page.p              = page;
        frag->page_offset         = off;
        skb_frag_size_set(frag, size);
+
+       page = compound_head(page);
+       if (page->pfmemalloc && !page->mapping)
+               skb->pfmemalloc = true;
 }
 
 /**
@@ -1447,7 +1449,10 @@ static inline int skb_tailroom(const struct sk_buff *skb)
  */
 static inline int skb_availroom(const struct sk_buff *skb)
 {
-       return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
+       if (skb_is_nonlinear(skb))
+               return 0;
+
+       return skb->end - skb->tail - skb->reserved_tailroom;
 }
 
 /**
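With avail_size replaced by reserved_tailroom above, skb_availroom() derives the usable space from the buffer geometry itself: end minus tail, minus whatever tailroom was explicitly set aside. A toy sketch of that arithmetic (demo_buf is an invented structure, not struct sk_buff):

#include <stdio.h>

struct demo_buf {
	unsigned int tail;			/* offset where data currently ends */
	unsigned int end;			/* offset of the buffer's end */
	unsigned int reserved_tailroom;		/* held back for later additions */
};

static int demo_availroom(const struct demo_buf *b)
{
	return b->end - b->tail - b->reserved_tailroom;
}

int main(void)
{
	struct demo_buf b = { .tail = 100, .end = 256, .reserved_tailroom = 16 };

	printf("%d bytes usable now\n", demo_availroom(&b));	/* 140 */
	return 0;
}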
index f0bd7f90a90d45d3aeeb3aed8bc2d2f8295c8be0..e3c0ae9bb1faf876afca191701e481ecc30a4f1b 100644 (file)
@@ -44,7 +44,7 @@
 /* Adding event notification support elements */
 #define THERMAL_GENL_FAMILY_NAME                "thermal_event"
 #define THERMAL_GENL_VERSION                    0x01
-#define THERMAL_GENL_MCAST_GROUP_NAME           "thermal_mc_group"
+#define THERMAL_GENL_MCAST_GROUP_NAME           "thermal_mc_grp"
 
 /* Default Thermal Governor */
 #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
index 9d81de123c9017f4463ba4fa09a61b8b7425a8bf..42278bbf7a882d90360d141db9c8cc1b1eca1719 100644 (file)
@@ -68,6 +68,7 @@ struct udp_sock {
         * For encapsulation sockets.
         */
        int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+       void (*encap_destroy)(struct sock *sk);
 };
 
 static inline struct udp_sock *udp_sk(const struct sock *sk)
index 3b8f9d4fc3fe7df42af02fda48f83379b24e77ca..cc25b70af33c0b641caa53422c5faacc929809a7 100644 (file)
@@ -127,6 +127,7 @@ struct cdc_ncm_ctx {
        u16 connected;
 };
 
+extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf);
 extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
 extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
 extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign);
index 0a78df5f6cfd23d616ecd4572b6079d1f8605dd2..59694b5e5e906821a873d52a1a33b9c5d67af5d4 100644 (file)
@@ -357,6 +357,7 @@ struct hc_driver {
                 */
        int     (*disable_usb3_lpm_timeout)(struct usb_hcd *,
                        struct usb_device *, enum usb3_link_state state);
+       int     (*find_raw_port_number)(struct usb_hcd *, int);
 };
 
 extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
@@ -396,6 +397,7 @@ extern int usb_hcd_is_primary_hcd(struct usb_hcd *hcd);
 extern int usb_add_hcd(struct usb_hcd *hcd,
                unsigned int irqnum, unsigned long irqflags);
 extern void usb_remove_hcd(struct usb_hcd *hcd);
+extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1);
 
 struct platform_device;
 extern void usb_hcd_platform_shutdown(struct platform_device *dev);
index ef9be7e1e1906ea05d1cfef7c6fe58f7ef4ceab3..1819b59aab2a697aa573cbf8fe4e1081292dc713 100644 (file)
@@ -66,6 +66,7 @@
  *     port.
  * @flags: usb serial port flags
  * @write_wait: a wait_queue_head_t used by the port.
+ * @delta_msr_wait: modem-status-change wait queue
  * @work: work queue entry for the line discipline waking up.
  * @throttled: nonzero if the read urb is inactive to throttle the device
  * @throttle_req: nonzero if the tty wants to throttle us
@@ -112,6 +113,7 @@ struct usb_serial_port {
 
        unsigned long           flags;
        wait_queue_head_t       write_wait;
+       wait_queue_head_t       delta_msr_wait;
        struct work_struct      work;
        char                    throttled;
        char                    throttle_req;
index 6f033a415ecb85caa0291a58940922b5d6304419..5c295c26ad37c8d743020ca4bab5996bc52f4df2 100644 (file)
 
 /*-------------------------------------------------------------------------*/
 
+#if IS_ENABLED(CONFIG_USB_ULPI)
 struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
                                        unsigned int flags);
+#else
+static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
+                                             unsigned int flags)
+{
+       return NULL;
+}
+#endif
 
 #ifdef CONFIG_USB_ULPI_VIEWPORT
 /* access ops for controllers with a viewport register */
index 4ce009324933ebc2b048ebd5f834b0c1bf44e52d..b6b215f13b453091ff2be0fae11b37a5a51c0c84 100644 (file)
@@ -26,6 +26,8 @@ struct user_namespace {
        kuid_t                  owner;
        kgid_t                  group;
        unsigned int            proc_inum;
+       bool                    may_mount_sysfs;
+       bool                    may_mount_proc;
 };
 
 extern struct user_namespace init_user_ns;
@@ -82,4 +84,6 @@ static inline void put_user_ns(struct user_namespace *ns)
 
 #endif
 
+void update_mnt_policy(struct user_namespace *userns);
+
 #endif /* _LINUX_USER_H */
index 853cda11e518be98cbcf0d4bd5fa9270c804d2f5..1f8fd109e2254f10118929fe96e073bbceb13f5a 100644 (file)
@@ -413,13 +413,15 @@ static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
 
 static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
 {
-       return dst->ops->neigh_lookup(dst, NULL, daddr);
+       struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr);
+       return IS_ERR(n) ? NULL : n;
 }
 
 static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
                                                     struct sk_buff *skb)
 {
-       return dst->ops->neigh_lookup(dst, skb, NULL);
+       struct neighbour *n =  dst->ops->neigh_lookup(dst, skb, NULL);
+       return IS_ERR(n) ? NULL : n;
 }
 
 static inline void dst_link_failure(struct sk_buff *skb)
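The two wrappers above stop handing ERR_PTR-encoded errors to callers that only test for NULL. A standalone sketch of that convention; DEMO_ERR_PTR/DEMO_IS_ERR are simplified stand-ins for the kernel macros and demo_lookup() is invented:

#include <stdio.h>

#define DEMO_MAX_ERRNO		4095UL
#define DEMO_ERR_PTR(err)	((void *)(long)(err))
#define DEMO_IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-DEMO_MAX_ERRNO)

static int lookup_target = 42;

/* Backend that may fail: returns a valid pointer or an encoded -errno. */
static void *demo_lookup(int fail)
{
	return fail ? DEMO_ERR_PTR(-12) : (void *)&lookup_target;
}

/* Error pointers are mapped to NULL so a NULL-only check stays safe. */
static void *demo_lookup_or_null(int fail)
{
	void *n = demo_lookup(fail);

	return DEMO_IS_ERR(n) ? NULL : n;
}

int main(void)
{
	printf("ok:   %s\n", demo_lookup_or_null(0) ? "pointer" : "NULL");
	printf("fail: %s\n", demo_lookup_or_null(1) ? "pointer" : "NULL");
	return 0;
}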
index 80461c1ae9efcceea9875000629f62608c6e699e..bb8271d487b7bea10861c6548dde50d0964c4a20 100644 (file)
@@ -9,6 +9,7 @@ struct flow_keys {
                __be32 ports;
                __be16 port16[2];
        };
+       u16 thoff;
        u8 ip_proto;
 };
 
index 76c3fe5ecc2e6ed9fcf1b81be7dbd3d3dc258d73..0a1dcc2fa2f58c5be1f177f579fa4fa57c138dfd 100644 (file)
@@ -43,6 +43,13 @@ struct inet_frag_queue {
 
 #define INETFRAGS_HASHSZ               64
 
+/* averaged:
+ * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
+ *            rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
+ *            struct frag_queue))
+ */
+#define INETFRAGS_MAXDEPTH             128
+
 struct inet_frags {
        struct hlist_head       hash[INETFRAGS_HASHSZ];
        /* This rwlock is a global lock (seperate per IPv4, IPv6 and
@@ -76,6 +83,8 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
                struct inet_frags *f, void *key, unsigned int hash)
        __releases(&f->lock);
+void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+                                  const char *prefix);
 
 static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
 {
index 9497be1ad4c0c5cdf4fe2df49b79fa5f66a255f8..e49db91593a953422b0ce0a15c0eb0902ee33c56 100644 (file)
@@ -152,18 +152,16 @@ struct fib_result_nl {
 };
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-
 #define FIB_RES_NH(res)                ((res).fi->fib_nh[(res).nh_sel])
-
-#define FIB_TABLE_HASHSZ 2
-
 #else /* CONFIG_IP_ROUTE_MULTIPATH */
-
 #define FIB_RES_NH(res)                ((res).fi->fib_nh[0])
+#endif /* CONFIG_IP_ROUTE_MULTIPATH */
 
+#ifdef CONFIG_IP_MULTIPLE_TABLES
 #define FIB_TABLE_HASHSZ 256
-
-#endif /* CONFIG_IP_ROUTE_MULTIPATH */
+#else
+#define FIB_TABLE_HASHSZ 2
+#endif
 
 extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
 
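The restructured #ifdefs above key the table hash size to the option that actually multiplies tables (CONFIG_IP_MULTIPLE_TABLES) rather than the unrelated multipath option. A tiny sketch of sizing a compile-time hash from the right switch; DEMO_MULTIPLE_TABLES and the sizes are illustrative only:

#include <stdio.h>

#define DEMO_MULTIPLE_TABLES 1	/* stands in for the config option */

#if DEMO_MULTIPLE_TABLES
#define DEMO_TABLE_HASHSZ 256	/* many tables: a larger hash pays off */
#else
#define DEMO_TABLE_HASHSZ 2	/* only local/main: keep it tiny */
#endif

static void *demo_table_hash[DEMO_TABLE_HASHSZ];

int main(void)
{
	printf("hash buckets: %zu\n",
	       sizeof(demo_table_hash) / sizeof(demo_table_hash[0]));
	return 0;
}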
index 68c69d54d39281ee033d765a1b9b66567c55d06f..fce8e6b66d558d8ff8bb629c4da5e358113a675e 100644 (file)
@@ -976,6 +976,7 @@ struct netns_ipvs {
        int                     sysctl_sync_retries;
        int                     sysctl_nat_icmp_send;
        int                     sysctl_pmtu_disc;
+       int                     sysctl_backup_only;
 
        /* ip_vs_lblc */
        int                     sysctl_lblc_expiration;
@@ -1067,6 +1068,12 @@ static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
        return ipvs->sysctl_pmtu_disc;
 }
 
+static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
+{
+       return ipvs->sync_state & IP_VS_STATE_BACKUP &&
+              ipvs->sysctl_backup_only;
+}
+
 #else
 
 static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs)
@@ -1114,6 +1121,11 @@ static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs)
        return 1;
 }
 
+static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
+{
+       return 0;
+}
+
 #endif
 
 /*
index fd19625ff99db290fd289b3caf18f32c9b72ffc3..982141c15200d1fc60b144a2ca6ba53c0d320200 100644 (file)
@@ -77,15 +77,11 @@ static inline void tunnel_ip_select_ident(struct sk_buff *skb,
 {
        struct iphdr *iph = ip_hdr(skb);
 
-       if (iph->frag_off & htons(IP_DF))
-               iph->id = 0;
-       else {
-               /* Use inner packet iph-id if possible. */
-               if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
-                       iph->id = old_iph->id;
-               else
-                       __ip_select_ident(iph, dst,
-                                         (skb_shinfo(skb)->gso_segs ?: 1) - 1);
-       }
+       /* Use inner packet iph-id if possible. */
+       if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
+               iph->id = old_iph->id;
+       else
+               __ip_select_ident(iph, dst,
+                                 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 }
 #endif
index 93f5fa94a431c167d70902ce6ded5a2275215b53..afafd703ad92b1595a5c7c326883fe89816f920f 100644 (file)
@@ -33,9 +33,11 @@ enum {
        PACKET_DIAG_TX_RING,
        PACKET_DIAG_FANOUT,
 
-       PACKET_DIAG_MAX,
+       __PACKET_DIAG_MAX,
 };
 
+#define PACKET_DIAG_MAX (__PACKET_DIAG_MAX - 1)
+
 struct packet_diag_info {
        __u32   pdi_index;
        __u32   pdi_version;
index b8a24941db21e82b11d80d0912c057a54bcf7742..b9e2a6a7446f077e528dd15fac8e8d97f0d9a232 100644 (file)
@@ -39,9 +39,11 @@ enum {
        UNIX_DIAG_MEMINFO,
        UNIX_DIAG_SHUTDOWN,
 
-       UNIX_DIAG_MAX,
+       __UNIX_DIAG_MAX,
 };
 
+#define UNIX_DIAG_MAX (__UNIX_DIAG_MAX - 1)
+
 struct unix_diag_vfs {
        __u32   udiag_vfs_ino;
        __u32   udiag_vfs_dev;
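Both diag hunks above adopt the usual netlink idiom: the enum ends in a private __FOO_MAX sentinel and the exported FOO_MAX is defined as one less, so the public maximum always names the highest real attribute even as new ones are appended. A hedged sketch with invented attribute names:

#include <stdio.h>

enum {
	DEMO_DIAG_UNSPEC,
	DEMO_DIAG_INFO,
	DEMO_DIAG_MEMINFO,

	__DEMO_DIAG_MAX,	/* internal sentinel, never used on the wire */
};
#define DEMO_DIAG_MAX (__DEMO_DIAG_MAX - 1)

int main(void)
{
	/* Per-attribute arrays sized DEMO_DIAG_MAX + 1 stay correct as new
	 * attributes are appended before the sentinel. */
	const char *names[DEMO_DIAG_MAX + 1] = { "unspec", "info", "meminfo" };

	printf("highest attribute: %d (%s)\n", DEMO_DIAG_MAX, names[DEMO_DIAG_MAX]);
	return 0;
}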
index 28447f1594fa3fcf5a3a337cf34b4671528a424f..8deb22672ada5ea98247a3a2dbb7a6131dede1b2 100644 (file)
@@ -30,7 +30,6 @@
  */
 #define ATMEL_LCDC_WIRING_BGR  0
 #define ATMEL_LCDC_WIRING_RGB  1
-#define ATMEL_LCDC_WIRING_RGB555       2
 
 
  /* LCD Controller info data structure, stored in device platform_data */
@@ -62,6 +61,7 @@ struct atmel_lcdfb_info {
        void (*atmel_lcdfb_power_control)(int on);
        struct fb_monspecs      *default_monspecs;
        u32                     pseudo_palette[16];
+       bool                    have_intensity_bit;
 };
 
 #define ATMEL_LCDC_DMABADDR1   0x00
index 01c3d62436ef8c44c4d0deffc3393bd2d02b8116..ffd4652de91ca390fff537f18f524300bf470246 100644 (file)
@@ -138,11 +138,21 @@ struct blkif_request_discard {
        uint8_t        _pad3;
 } __attribute__((__packed__));
 
+struct blkif_request_other {
+       uint8_t      _pad1;
+       blkif_vdev_t _pad2;        /* only for read/write requests         */
+#ifdef CONFIG_X86_64
+       uint32_t     _pad3;        /* offsetof(blkif_req..,u.other.id)==8*/
+#endif
+       uint64_t     id;           /* private guest value, echoed in resp  */
+} __attribute__((__packed__));
+
 struct blkif_request {
        uint8_t        operation;    /* BLKIF_OP_???                         */
        union {
                struct blkif_request_rw rw;
                struct blkif_request_discard discard;
+               struct blkif_request_other other;
        } u;
 } __attribute__((__packed__));
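blkif_request_other above spells its padding out so that the id field sits at the same offset as in the read/write and discard members of the union, independent of guest ABI. A minimal sketch of pinning a wire-format offset with explicit pads; the struct below is hypothetical, not the Xen ABI:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_req_other {
	uint8_t  operation;
	uint8_t  _pad1;
	uint16_t _pad2;
	uint32_t _pad3;		/* keeps "id" at offset 8 on every ABI */
	uint64_t id;		/* private value, echoed in the response */
} __attribute__((__packed__));

int main(void)
{
	printf("id sits at offset %zu\n", offsetof(struct demo_req_other, id));
	return 0;
}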
 
index 1844d31f45520fef57c93be90ba7e247e1a0b042..7000bb1f6e96fb018fcc5360cf6266d567f6121e 100644 (file)
@@ -251,6 +251,12 @@ struct physdev_pci_device_add {
 
 #define PHYSDEVOP_pci_device_remove     26
 #define PHYSDEVOP_restore_msi_ext       27
+/*
+ * Dom0 should use these two to announce MMIO resources assigned to
+ * MSI-X capable devices won't (prepare) or may (release) change.
+ */
+#define PHYSDEVOP_prepare_msix          30
+#define PHYSDEVOP_release_msix          31
 struct physdev_pci_device {
     /* IN */
     uint16_t seg;
index e5c4f609f22c2a902789e710756aa117f40cf5cc..e4e47f64744635bbebe26fda491ff64eda8e4161 100644 (file)
@@ -330,8 +330,16 @@ static struct dentry *mqueue_mount(struct file_system_type *fs_type,
                         int flags, const char *dev_name,
                         void *data)
 {
-       if (!(flags & MS_KERNMOUNT))
-               data = current->nsproxy->ipc_ns;
+       if (!(flags & MS_KERNMOUNT)) {
+               struct ipc_namespace *ns = current->nsproxy->ipc_ns;
+               /* Don't allow mounting unless the caller has CAP_SYS_ADMIN
+                * over the ipc namespace.
+                */
+               if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+                       return ERR_PTR(-EPERM);
+
+               data = ns;
+       }
        return mount_ns(fs_type, flags, data, mqueue_fill_super);
 }
 
@@ -840,7 +848,8 @@ out_putfd:
                fd = error;
        }
        mutex_unlock(&root->d_inode->i_mutex);
-       mnt_drop_write(mnt);
+       if (!ro)
+               mnt_drop_write(mnt);
 out_putname:
        putname(name);
        return fd;
index b0cd86501c30db1a1320d284f2d52eb1ec499f2b..59412d037eed2a81163483b89fb38d9feb30623a 100644 (file)
@@ -4434,12 +4434,15 @@ static void perf_event_task_event(struct perf_task_event *task_event)
                        if (ctxn < 0)
                                goto next;
                        ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+                       if (ctx)
+                               perf_event_task_ctx(ctx, task_event);
                }
-               if (ctx)
-                       perf_event_task_ctx(ctx, task_event);
 next:
                put_cpu_ptr(pmu->pmu_cpu_context);
        }
+       if (task_event->task_ctx)
+               perf_event_task_ctx(task_event->task_ctx, task_event);
+
        rcu_read_unlock();
 }
 
@@ -5647,6 +5650,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
                event->attr.sample_period = NSEC_PER_SEC / freq;
                hwc->sample_period = event->attr.sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
+               hwc->last_period = hwc->sample_period;
                event->attr.freq = 0;
        }
 }
index 51e485ca993560015dbb56701fd249637afc721c..60bc027c61c3a3be8d5bc26b024ffac0a79f1af9 100644 (file)
@@ -835,7 +835,7 @@ void do_exit(long code)
        /*
         * Make sure we are holding no locks:
         */
-       debug_check_no_locks_held();
+       debug_check_no_locks_held(tsk);
        /*
         * We can do this unlocked here. The futex code uses this flag
         * just to verify whether the pi state cleanup has been done
index 259db207b5d90806536da88963e7d1b469e1b943..8a0efac4f99de7c5d0711bea6792db1f385f66eb 100644 (file)
@@ -4088,7 +4088,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
-static void print_held_locks_bug(void)
+static void print_held_locks_bug(struct task_struct *curr)
 {
        if (!debug_locks_off())
                return;
@@ -4097,21 +4097,22 @@ static void print_held_locks_bug(void)
 
        printk("\n");
        printk("=====================================\n");
-       printk("[ BUG: %s/%d still has locks held! ]\n",
-              current->comm, task_pid_nr(current));
+       printk("[ BUG: lock held at task exit time! ]\n");
        print_kernel_ident();
        printk("-------------------------------------\n");
-       lockdep_print_held_locks(current);
+       printk("%s/%d is exiting with locks still held!\n",
+               curr->comm, task_pid_nr(curr));
+       lockdep_print_held_locks(curr);
+
        printk("\nstack backtrace:\n");
        dump_stack();
 }
 
-void debug_check_no_locks_held(void)
+void debug_check_no_locks_held(struct task_struct *task)
 {
-       if (unlikely(current->lockdep_depth > 0))
-               print_held_locks_bug();
+       if (unlikely(task->lockdep_depth > 0))
+               print_held_locks_bug(task);
 }
-EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
 
 void debug_show_all_locks(void)
 {
index c1c3dc1c60233f337a01ff13587f1a5a7f57f1cd..bea15bdf82b04c28d2f5d8c1a32a46b2621cb295 100644 (file)
@@ -181,6 +181,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
        int nr;
        int rc;
        struct task_struct *task, *me = current;
+       int init_pids = thread_group_leader(me) ? 1 : 2;
 
        /* Don't allow any more processes into the pid namespace */
        disable_pid_allocation(pid_ns);
@@ -230,7 +231,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
         */
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
-               if (pid_ns->nr_hashed == 1)
+               if (pid_ns->nr_hashed == init_pids)
                        break;
                schedule();
        }
index 0b31715f335a7a8ba1a846fe3b93fe71d97cd8d7..abbdd9e2ac82c423a3dc6c778632c37887241b41 100644 (file)
@@ -63,8 +63,6 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
 #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */
 #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */
 
-DECLARE_WAIT_QUEUE_HEAD(log_wait);
-
 int console_printk[4] = {
        DEFAULT_CONSOLE_LOGLEVEL,       /* console_loglevel */
        DEFAULT_MESSAGE_LOGLEVEL,       /* default_message_loglevel */
@@ -224,6 +222,7 @@ struct log {
 static DEFINE_RAW_SPINLOCK(logbuf_lock);
 
 #ifdef CONFIG_PRINTK
+DECLARE_WAIT_QUEUE_HEAD(log_wait);
 /* the next printk record to read by syslog(READ) or /proc/kmsg */
 static u64 syslog_seq;
 static u32 syslog_idx;
@@ -1957,45 +1956,6 @@ int is_console_locked(void)
        return console_locked;
 }
 
-/*
- * Delayed printk version, for scheduler-internal messages:
- */
-#define PRINTK_BUF_SIZE                512
-
-#define PRINTK_PENDING_WAKEUP  0x01
-#define PRINTK_PENDING_SCHED   0x02
-
-static DEFINE_PER_CPU(int, printk_pending);
-static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
-
-static void wake_up_klogd_work_func(struct irq_work *irq_work)
-{
-       int pending = __this_cpu_xchg(printk_pending, 0);
-
-       if (pending & PRINTK_PENDING_SCHED) {
-               char *buf = __get_cpu_var(printk_sched_buf);
-               printk(KERN_WARNING "[sched_delayed] %s", buf);
-       }
-
-       if (pending & PRINTK_PENDING_WAKEUP)
-               wake_up_interruptible(&log_wait);
-}
-
-static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
-       .func = wake_up_klogd_work_func,
-       .flags = IRQ_WORK_LAZY,
-};
-
-void wake_up_klogd(void)
-{
-       preempt_disable();
-       if (waitqueue_active(&log_wait)) {
-               this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
-               irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
-       }
-       preempt_enable();
-}
-
 static void console_cont_flush(char *text, size_t size)
 {
        unsigned long flags;
@@ -2458,6 +2418,44 @@ static int __init printk_late_init(void)
 late_initcall(printk_late_init);
 
 #if defined CONFIG_PRINTK
+/*
+ * Delayed printk version, for scheduler-internal messages:
+ */
+#define PRINTK_BUF_SIZE                512
+
+#define PRINTK_PENDING_WAKEUP  0x01
+#define PRINTK_PENDING_SCHED   0x02
+
+static DEFINE_PER_CPU(int, printk_pending);
+static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
+
+static void wake_up_klogd_work_func(struct irq_work *irq_work)
+{
+       int pending = __this_cpu_xchg(printk_pending, 0);
+
+       if (pending & PRINTK_PENDING_SCHED) {
+               char *buf = __get_cpu_var(printk_sched_buf);
+               printk(KERN_WARNING "[sched_delayed] %s", buf);
+       }
+
+       if (pending & PRINTK_PENDING_WAKEUP)
+               wake_up_interruptible(&log_wait);
+}
+
+static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
+       .func = wake_up_klogd_work_func,
+       .flags = IRQ_WORK_LAZY,
+};
+
+void wake_up_klogd(void)
+{
+       preempt_disable();
+       if (waitqueue_active(&log_wait)) {
+               this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
+               irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
+       }
+       preempt_enable();
+}
 
 int printk_sched(const char *fmt, ...)
 {
index 81f56445fba949790b34b7bf3f97383555134672..39c9c4a2949f3166b114f11534957ff2f6060565 100644 (file)
@@ -2185,9 +2185,8 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
 
 char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
 
-static int __orderly_poweroff(void)
+static int __orderly_poweroff(bool force)
 {
-       int argc;
        char **argv;
        static char *envp[] = {
                "HOME=/",
@@ -2196,20 +2195,40 @@ static int __orderly_poweroff(void)
        };
        int ret;
 
-       argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc);
-       if (argv == NULL) {
+       argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL);
+       if (argv) {
+               ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+               argv_free(argv);
+       } else {
                printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
-                      __func__, poweroff_cmd);
-               return -ENOMEM;
+                                        __func__, poweroff_cmd);
+               ret = -ENOMEM;
        }
 
-       ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC,
-                                     NULL, NULL, NULL);
-       argv_free(argv);
+       if (ret && force) {
+               printk(KERN_WARNING "Failed to start orderly shutdown: "
+                                       "forcing the issue\n");
+               /*
+                * I guess this should try to kick off some daemon to sync and
+                * poweroff asap.  Or not even bother syncing if we're doing an
+                * emergency shutdown?
+                */
+               emergency_sync();
+               kernel_power_off();
+       }
 
        return ret;
 }
 
+static bool poweroff_force;
+
+static void poweroff_work_func(struct work_struct *work)
+{
+       __orderly_poweroff(poweroff_force);
+}
+
+static DECLARE_WORK(poweroff_work, poweroff_work_func);
+
 /**
  * orderly_poweroff - Trigger an orderly system poweroff
  * @force: force poweroff if command execution fails
@@ -2219,21 +2238,9 @@ static int __orderly_poweroff(void)
  */
 int orderly_poweroff(bool force)
 {
-       int ret = __orderly_poweroff();
-
-       if (ret && force) {
-               printk(KERN_WARNING "Failed to start orderly shutdown: "
-                      "forcing the issue\n");
-
-               /*
-                * I guess this should try to kick off some daemon to sync and
-                * poweroff asap.  Or not even bother syncing if we're doing an
-                * emergency shutdown?
-                */
-               emergency_sync();
-               kernel_power_off();
-       }
-
-       return ret;
+       if (force) /* do not override the pending "true" */
+               poweroff_force = true;
+       schedule_work(&poweroff_work);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(orderly_poweroff);
index 2fb8cb88df8d0296e655f320bc7d5cbce1a858c8..7f32fe0e52cd46489c8d90e4b85f9d74204aab16 100644 (file)
@@ -67,7 +67,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
  */
 int tick_check_broadcast_device(struct clock_event_device *dev)
 {
-       if ((tick_broadcast_device.evtdev &&
+       if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
+           (tick_broadcast_device.evtdev &&
             tick_broadcast_device.evtdev->rating >= dev->rating) ||
             (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;
index ab25b88aae56f4e22380766b02378669c8b05c57..6893d5a2bf086014f2919496ca7062c246935dad 100644 (file)
@@ -3104,8 +3104,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                        continue;
                        }
 
-                       hlist_del(&entry->node);
-                       call_rcu(&entry->rcu, ftrace_free_entry_rcu);
+                       hlist_del_rcu(&entry->node);
+                       call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
                }
        }
        __disable_ftrace_function_probe();
index 1f835a83cb2c4c74b13b0c8641ce648a75591ae6..4f1dade56981d6fe5d880e9f6caaf609ab1e9c02 100644 (file)
@@ -704,7 +704,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-       struct ring_buffer *buf = tr->buffer;
+       struct ring_buffer *buf;
 
        if (trace_stop_count)
                return;
@@ -719,6 +719,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        arch_spin_lock(&ftrace_max_lock);
 
+       buf = tr->buffer;
        tr->buffer = max_tr.buffer;
        max_tr.buffer = buf;
 
@@ -2880,11 +2881,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
        return -EINVAL;
 }
 
-static void set_tracer_flags(unsigned int mask, int enabled)
+/* Some tracers require overwrite to stay enabled */
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+{
+       if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
+               return -1;
+
+       return 0;
+}
+
+int set_tracer_flag(unsigned int mask, int enabled)
 {
        /* do nothing if flag is already set */
        if (!!(trace_flags & mask) == !!enabled)
-               return;
+               return 0;
+
+       /* Give the tracer a chance to approve the change */
+       if (current_trace->flag_changed)
+               if (current_trace->flag_changed(current_trace, mask, !!enabled))
+                       return -EINVAL;
 
        if (enabled)
                trace_flags |= mask;
@@ -2894,18 +2909,24 @@ static void set_tracer_flags(unsigned int mask, int enabled)
        if (mask == TRACE_ITER_RECORD_CMD)
                trace_event_enable_cmd_record(enabled);
 
-       if (mask == TRACE_ITER_OVERWRITE)
+       if (mask == TRACE_ITER_OVERWRITE) {
                ring_buffer_change_overwrite(global_trace.buffer, enabled);
+#ifdef CONFIG_TRACER_MAX_TRACE
+               ring_buffer_change_overwrite(max_tr.buffer, enabled);
+#endif
+       }
 
        if (mask == TRACE_ITER_PRINTK)
                trace_printk_start_stop_comm(enabled);
+
+       return 0;
 }
 
 static int trace_set_options(char *option)
 {
        char *cmp;
        int neg = 0;
-       int ret = 0;
+       int ret = -ENODEV;
        int i;
 
        cmp = strstrip(option);
@@ -2915,19 +2936,20 @@ static int trace_set_options(char *option)
                cmp += 2;
        }
 
+       mutex_lock(&trace_types_lock);
+
        for (i = 0; trace_options[i]; i++) {
                if (strcmp(cmp, trace_options[i]) == 0) {
-                       set_tracer_flags(1 << i, !neg);
+                       ret = set_tracer_flag(1 << i, !neg);
                        break;
                }
        }
 
        /* If no option could be set, test the specific tracer options */
-       if (!trace_options[i]) {
-               mutex_lock(&trace_types_lock);
+       if (!trace_options[i])
                ret = set_tracer_option(current_trace, cmp, neg);
-               mutex_unlock(&trace_types_lock);
-       }
+
+       mutex_unlock(&trace_types_lock);
 
        return ret;
 }
@@ -2937,6 +2959,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
                        size_t cnt, loff_t *ppos)
 {
        char buf[64];
+       int ret;
 
        if (cnt >= sizeof(buf))
                return -EINVAL;
@@ -2946,7 +2969,9 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 
        buf[cnt] = 0;
 
-       trace_set_options(buf);
+       ret = trace_set_options(buf);
+       if (ret < 0)
+               return ret;
 
        *ppos += cnt;
 
@@ -3250,6 +3275,9 @@ static int tracing_set_tracer(const char *buf)
                goto out;
 
        trace_branch_disable();
+
+       current_trace->enabled = false;
+
        if (current_trace->reset)
                current_trace->reset(tr);
 
@@ -3294,6 +3322,7 @@ static int tracing_set_tracer(const char *buf)
        }
 
        current_trace = t;
+       current_trace->enabled = true;
        trace_branch_enable(tr);
  out:
        mutex_unlock(&trace_types_lock);
@@ -4780,7 +4809,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
        if (val != 0 && val != 1)
                return -EINVAL;
-       set_tracer_flags(1 << index, val);
+
+       mutex_lock(&trace_types_lock);
+       ret = set_tracer_flag(1 << index, val);
+       mutex_unlock(&trace_types_lock);
+
+       if (ret < 0)
+               return ret;
 
        *ppos += cnt;
 
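The tracing changes above give the active tracer a flag_changed callback that can refuse a flag flip, and set_tracer_flag() now reports that refusal to the writer. A hedged sketch of the veto-callback pattern with invented demo types (not the ftrace structures):

#include <stdio.h>

#define DEMO_FLAG_OVERWRITE (1u << 0)

struct demo_tracer {
	const char *name;
	int enabled;
	/* Return 0 to accept the change, non-zero to refuse it. */
	int (*flag_changed)(struct demo_tracer *t, unsigned int mask, int set);
};

static unsigned int demo_flags = DEMO_FLAG_OVERWRITE;

/* Latency-style tracer: refuses to run with overwrite turned off. */
static int demo_keep_overwrite(struct demo_tracer *t, unsigned int mask, int set)
{
	if (t->enabled && (mask & DEMO_FLAG_OVERWRITE) && !set)
		return -1;
	return 0;
}

static int demo_set_flag(struct demo_tracer *t, unsigned int mask, int enabled)
{
	if (t->flag_changed && t->flag_changed(t, mask, enabled))
		return -1;		/* veto propagated to the caller */
	if (enabled)
		demo_flags |= mask;
	else
		demo_flags &= ~mask;
	return 0;
}

int main(void)
{
	struct demo_tracer t = { "demo_latency", 1, demo_keep_overwrite };

	printf("clear overwrite -> %d\n", demo_set_flag(&t, DEMO_FLAG_OVERWRITE, 0));
	printf("set overwrite   -> %d\n", demo_set_flag(&t, DEMO_FLAG_OVERWRITE, 1));
	return 0;
}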
index 57d7e5397d56da86f667e3139fb14703a99e8fcf..2081971367ea8e54d5b42e188c7aaaaee22c8f2a 100644 (file)
@@ -283,11 +283,15 @@ struct tracer {
        enum print_line_t       (*print_line)(struct trace_iterator *iter);
        /* If you handled the flag setting, return 0 */
        int                     (*set_flag)(u32 old_flags, u32 bit, int set);
+       /* Return 0 if OK with change, else return non-zero */
+       int                     (*flag_changed)(struct tracer *tracer,
+                                               u32 mask, int set);
        struct tracer           *next;
        struct tracer_flags     *flags;
        bool                    print_max;
        bool                    use_max_tr;
        bool                    allocated_snapshot;
+       bool                    enabled;
 };
 
 
@@ -943,6 +947,8 @@ extern const char *__stop___trace_bprintk_fmt[];
 
 void trace_printk_init_buffers(void);
 void trace_printk_start_comm(void);
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
+int set_tracer_flag(unsigned int mask, int enabled);
 
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)    \
index 713a2cac48816461c0c0a5b11d5a854ea5c4b3d3..443b25b43b4f60f3b58102daab06c6150f01ad64 100644 (file)
@@ -32,7 +32,7 @@ enum {
 
 static int trace_type __read_mostly;
 
-static int save_lat_flag;
+static int save_flags;
 
 static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
 static int start_irqsoff_tracer(struct trace_array *tr, int graph);
@@ -558,8 +558,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
 {
-       save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
-       trace_flags |= TRACE_ITER_LATENCY_FMT;
+       save_flags = trace_flags;
+
+       /* non overwrite screws up the latency tracers */
+       set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
+       set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
 
        tracing_max_latency = 0;
        irqsoff_trace = tr;
@@ -573,10 +576,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
+       int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+       int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+
        stop_irqsoff_tracer(tr, is_graph());
 
-       if (!save_lat_flag)
-               trace_flags &= ~TRACE_ITER_LATENCY_FMT;
+       set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
+       set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
 }
 
 static void irqsoff_tracer_start(struct trace_array *tr)
@@ -609,6 +615,7 @@ static struct tracer irqsoff_tracer __read_mostly =
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
+       .flag_changed   = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_irqsoff,
 #endif
@@ -642,6 +649,7 @@ static struct tracer preemptoff_tracer __read_mostly =
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
+       .flag_changed   = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptoff,
 #endif
@@ -677,6 +685,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
+       .flag_changed   = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_preemptirqsoff,
 #endif
index 75aa97fbe1a11120d93ed241e00ddbac5a51a55d..fde652c9a511c197d47822b66871ffb764f62539 100644 (file)
@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr);
 static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
 static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
-static int save_lat_flag;
+static int save_flags;
 
 #define TRACE_DISPLAY_GRAPH     1
 
@@ -540,8 +540,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
 
 static int __wakeup_tracer_init(struct trace_array *tr)
 {
-       save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
-       trace_flags |= TRACE_ITER_LATENCY_FMT;
+       save_flags = trace_flags;
+
+       /* non overwrite screws up the latency tracers */
+       set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
+       set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
 
        tracing_max_latency = 0;
        wakeup_trace = tr;
@@ -563,12 +566,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
 
 static void wakeup_tracer_reset(struct trace_array *tr)
 {
+       int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+       int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+
        stop_wakeup_tracer(tr);
        /* make sure we put back any tasks we are tracing */
        wakeup_reset(tr);
 
-       if (!save_lat_flag)
-               trace_flags &= ~TRACE_ITER_LATENCY_FMT;
+       set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
+       set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
 }
 
 static void wakeup_tracer_start(struct trace_array *tr)
@@ -594,6 +600,7 @@ static struct tracer wakeup_tracer __read_mostly =
        .print_line     = wakeup_print_line,
        .flags          = &tracer_flags,
        .set_flag       = wakeup_set_flag,
+       .flag_changed   = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
 #endif
@@ -615,6 +622,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
        .print_line     = wakeup_print_line,
        .flags          = &tracer_flags,
        .set_flag       = wakeup_set_flag,
+       .flag_changed   = trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
        .selftest    = trace_selftest_startup_wakeup,
 #endif
index e81978e8c03b2b5f1a38d62248d8408fbfcd2d13..8e635a18ab521e48cfc4195cfb9f199de5e0c3b3 100644 (file)
@@ -51,6 +51,8 @@ struct user_namespace init_user_ns = {
        .owner = GLOBAL_ROOT_UID,
        .group = GLOBAL_ROOT_GID,
        .proc_inum = PROC_USER_INIT_INO,
+       .may_mount_sysfs = true,
+       .may_mount_proc = true,
 };
 EXPORT_SYMBOL_GPL(init_user_ns);
 
index b14f4d3420439ca629be08494ef417e734513499..a54f26f82eb250a60c7f24ecc651c6c1bbf8cc88 100644 (file)
@@ -61,6 +61,15 @@ int create_user_ns(struct cred *new)
        kgid_t group = new->egid;
        int ret;
 
+       /*
+        * Verify that we can not violate the policy of which files
+        * may be accessed that is specified by the root directory,
+        * by verifing that the root directory is at the root of the
+        * mount namespace which allows all files to be accessed.
+        */
+       if (current_chrooted())
+               return -EPERM;
+
        /* The creator needs a mapping in the parent user namespace
         * or else we won't be able to reasonably tell userspace who
         * created a user_namespace.
@@ -87,6 +96,8 @@ int create_user_ns(struct cred *new)
 
        set_cred_user_ns(new, ns);
 
+       update_mnt_policy(ns);
+
        return 0;
 }
 
index 55fac5b991b768ef059a783fb2a85d7521154e60..b48cd597145dd007b503d22755c481256f48867e 100644 (file)
@@ -3447,28 +3447,34 @@ static void wq_unbind_fn(struct work_struct *work)
 
                spin_unlock_irq(&pool->lock);
                mutex_unlock(&pool->assoc_mutex);
-       }
 
-       /*
-        * Call schedule() so that we cross rq->lock and thus can guarantee
-        * sched callbacks see the %WORKER_UNBOUND flag.  This is necessary
-        * as scheduler callbacks may be invoked from other cpus.
-        */
-       schedule();
+               /*
+                * Call schedule() so that we cross rq->lock and thus can
+                * guarantee sched callbacks see the %WORKER_UNBOUND flag.
+                * This is necessary as scheduler callbacks may be invoked
+                * from other cpus.
+                */
+               schedule();
 
-       /*
-        * Sched callbacks are disabled now.  Zap nr_running.  After this,
-        * nr_running stays zero and need_more_worker() and keep_working()
-        * are always true as long as the worklist is not empty.  Pools on
-        * @cpu now behave as unbound (in terms of concurrency management)
-        * pools which are served by workers tied to the CPU.
-        *
-        * On return from this function, the current worker would trigger
-        * unbound chain execution of pending work items if other workers
-        * didn't already.
-        */
-       for_each_std_worker_pool(pool, cpu)
+               /*
+                * Sched callbacks are disabled now.  Zap nr_running.
+                * After this, nr_running stays zero and need_more_worker()
+                * and keep_working() are always true as long as the
+                * worklist is not empty.  This pool now behaves as an
+                * unbound (in terms of concurrency management) pool which
+                * are served by workers tied to the pool.
+                */
                atomic_set(&pool->nr_running, 0);
+
+               /*
+                * With concurrency management just turned off, a busy
+                * worker blocking could lead to lengthy stalls.  Kick off
+                * unbound chain execution of currently pending work items.
+                */
+               spin_lock_irq(&pool->lock);
+               wake_up_worker(pool);
+               spin_unlock_irq(&pool->lock);
+       }
 }
 
 /*
index 9681d54b95d127878405d5ff0afec6fcc575fbae..f8e0e536739832bdd05df68647e373788c060fda 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/printk.h>
 #include <linux/spinlock.h>
 #include <linux/tty.h>
 #include <linux/wait.h>
@@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes)
                        wake_up_klogd();
        }
 }
-
-
index 5e396accd3d02a8dce6d06cc2d6020e311b308b0..d87a17a819d07a58bc8f1dd40f2d41ae499935c8 100644 (file)
@@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref)
        entry = bucket_find_exact(bucket, ref);
 
        if (!entry) {
+               /* must drop lock before calling dma_mapping_error */
+               put_hash_bucket(bucket, &flags);
+
                if (dma_mapping_error(ref->dev, ref->dev_addr)) {
                        err_printk(ref->dev, NULL,
-                                  "DMA-API: device driver tries "
-                                  "to free an invalid DMA memory address\n");
-                       return;
+                                  "DMA-API: device driver tries to free an "
+                                  "invalid DMA memory address\n");
+               } else {
+                       err_printk(ref->dev, NULL,
+                                  "DMA-API: device driver tries to free DMA "
+                                  "memory it has not allocated [device "
+                                  "address=0x%016llx] [size=%llu bytes]\n",
+                                  ref->dev_addr, ref->size);
                }
-               err_printk(ref->dev, NULL, "DMA-API: device driver tries "
-                          "to free DMA memory it has not allocated "
-                          "[device address=0x%016llx] [size=%llu bytes]\n",
-                          ref->dev_addr, ref->size);
-               goto out;
+               return;
        }
 
        if (ref->size != entry->size) {
@@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref)
        hash_bucket_del(entry);
        dma_entry_free(entry);
 
-out:
        put_hash_bucket(bucket, &flags);
 }
 
@@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
        ref.dev = dev;
        ref.dev_addr = dma_addr;
        bucket = get_hash_bucket(&ref, &flags);
-       entry = bucket_find_exact(bucket, &ref);
 
-       if (!entry)
-               goto out;
+       list_for_each_entry(entry, &bucket->list, list) {
+               if (!exact_match(&ref, entry))
+                       continue;
+
+               /*
+                * The same physical address can be mapped multiple
+                * times. Without a hardware IOMMU this results in the
+                * same device addresses being put into the dma-debug
+                * hash multiple times too. This can result in false
+                * positives being reported. Therefore we implement a
+                * best-fit algorithm here which updates the first entry
+                * from the hash which fits the reference value and is
+                * not currently listed as being checked.
+                */
+               if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+                       entry->map_err_type = MAP_ERR_CHECKED;
+                       break;
+               }
+       }
 
-       entry->map_err_type = MAP_ERR_CHECKED;
-out:
        put_hash_bucket(bucket, &flags);
 }
 EXPORT_SYMBOL(debug_dma_mapping_error);
index 4723ac8d2fc200d5990f830b6f4c8c0fd2a9bbcf..87da3590c61e20287b4854e8212c178f96cb73c6 100644 (file)
@@ -204,10 +204,8 @@ get_write_lock:
                        unsigned long addr;
                        struct file *file = get_file(vma->vm_file);
 
-                       vm_flags = vma->vm_flags;
-                       if (!(flags & MAP_NONBLOCK))
-                               vm_flags |= VM_POPULATE;
-                       addr = mmap_region(file, start, size, vm_flags, pgoff);
+                       addr = mmap_region(file, start, size,
+                                       vma->vm_flags, pgoff);
                        fput(file);
                        if (IS_ERR_VALUE(addr)) {
                                err = addr;
@@ -226,12 +224,6 @@ get_write_lock:
                mutex_unlock(&mapping->i_mmap_mutex);
        }
 
-       if (!(flags & MAP_NONBLOCK) && !(vma->vm_flags & VM_POPULATE)) {
-               if (!has_write_lock)
-                       goto get_write_lock;
-               vma->vm_flags |= VM_POPULATE;
-       }
-
        if (vma->vm_flags & VM_LOCKED) {
                /*
                 * drop PG_Mlocked flag for over-mapped range
index 0a0be33bb1997f9b19d1800e906c97f7e5a18d38..ca9a7c6d7e973c5db4bbecd7e72139d5c9529b72 100644 (file)
@@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf)
 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
 unsigned long hugetlb_total_pages(void)
 {
-       struct hstate *h = &default_hstate;
-       return h->nr_huge_pages * pages_per_huge_page(h);
+       struct hstate *h;
+       unsigned long nr_total_pages = 0;
+
+       for_each_hstate(h)
+               nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
+       return nr_total_pages;
 }
 
 static int hugetlb_acct_memory(struct hstate *h, long delta)
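The hugetlb fix above totals pages over every hstate instead of only the default one. A toy sketch of the same walk across an array of pool descriptors (demo_pool and its sizes are invented):

#include <stdio.h>

struct demo_pool {
	unsigned long nr_huge_pages;
	unsigned long pages_per_huge_page;
};

static struct demo_pool demo_pools[] = {
	{ .nr_huge_pages = 4, .pages_per_huge_page = 512 },	/* 2 MB pages */
	{ .nr_huge_pages = 1, .pages_per_huge_page = 262144 },	/* 1 GB pages */
};

static unsigned long demo_total_pages(void)
{
	unsigned long total = 0;
	unsigned int i;

	/* Sum every pool, not just demo_pools[0]. */
	for (i = 0; i < sizeof(demo_pools) / sizeof(demo_pools[0]); i++)
		total += demo_pools[i].nr_huge_pages *
			 demo_pools[i].pages_per_huge_page;
	return total;
}

int main(void)
{
	printf("%lu base pages back huge pages\n", demo_total_pages());
	return 0;
}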
index 9597eec8239d3f2e3fc2052179ed588643be53e0..ee3765760818a6623f86e48a6d775d2d6c2a75a2 100644 (file)
@@ -1779,7 +1779,11 @@ void try_offline_node(int nid)
        for (i = 0; i < MAX_NR_ZONES; i++) {
                struct zone *zone = pgdat->node_zones + i;
 
-               if (zone->wait_table)
+               /*
+                * wait_table may be allocated from boot memory,
+                * here only free if it's allocated by vmalloc.
+                */
+               if (is_vmalloc_addr(zone->wait_table))
                        vfree(zone->wait_table);
        }
 
index 1c5e33fce6391ec1ea7569b7d324fc76aa9fa288..79b7cf7d1bca72cee9babfb60e21a38799c8eba1 100644 (file)
@@ -358,7 +358,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
 
                newflags = vma->vm_flags & ~VM_LOCKED;
                if (on)
-                       newflags |= VM_LOCKED | VM_POPULATE;
+                       newflags |= VM_LOCKED;
 
                tmp = vma->vm_end;
                if (tmp > end)
@@ -418,8 +418,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
                 * range with the first VMA. Also, skip undesirable VMA types.
                 */
                nend = min(end, vma->vm_end);
-               if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) !=
-                   VM_POPULATE)
+               if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                        continue;
                if (nstart < vma->vm_start)
                        nstart = vma->vm_start;
@@ -492,9 +491,9 @@ static int do_mlockall(int flags)
        struct vm_area_struct * vma, * prev = NULL;
 
        if (flags & MCL_FUTURE)
-               current->mm->def_flags |= VM_LOCKED | VM_POPULATE;
+               current->mm->def_flags |= VM_LOCKED;
        else
-               current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE);
+               current->mm->def_flags &= ~VM_LOCKED;
        if (flags == MCL_FUTURE)
                goto out;
 
@@ -503,7 +502,7 @@ static int do_mlockall(int flags)
 
                newflags = vma->vm_flags & ~VM_LOCKED;
                if (flags & MCL_CURRENT)
-                       newflags |= VM_LOCKED | VM_POPULATE;
+                       newflags |= VM_LOCKED;
 
                /* Ignore errors */
                mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
index 2664a47cec93721d6c73aafa85f32c11a5fb13e6..6466699b16cbca1a26c8792f9d484925a23df005 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1306,7 +1306,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        }
 
        addr = mmap_region(file, addr, len, vm_flags, pgoff);
-       if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE))
+       if (!IS_ERR_VALUE(addr) &&
+           ((vm_flags & VM_LOCKED) ||
+            (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
                *populate = len;
        return addr;
 }
index a18714469bf791299f0d60b1a38b9886fdf4dfff..85addcd9372b0d8abfbd277d332408900efdea1d 100644 (file)
@@ -86,13 +86,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
        grp = &vlan_info->grp;
 
-       /* Take it out of our own structures, but be sure to interlock with
-        * HW accelerating devices or SW vlan input packet processing if
-        * VLAN is not 0 (leave it there for 802.1p).
-        */
-       if (vlan_id)
-               vlan_vid_del(real_dev, vlan_id);
-
        grp->nr_vlan_devs--;
 
        if (vlan->flags & VLAN_FLAG_MVRP)
@@ -114,6 +107,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
                vlan_gvrp_uninit_applicant(real_dev);
        }
 
+       /* Take it out of our own structures, but be sure to interlock with
+        * HW accelerating devices or SW vlan input packet processing if
+        * VLAN is not 0 (leave it there for 802.1p).
+        */
+       if (vlan_id)
+               vlan_vid_del(real_dev, vlan_id);
+
        /* Get rid of the vlan's reference to real_dev */
        dev_put(real_dev);
 }
index a0b253ecadaf6805106e74c943ed726a81f172bb..a5bb0a769eb927efff0408fa7dd6afbffcdf2ec0 100644 (file)
@@ -1288,7 +1288,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
        batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
 
        /* unpack the aggregated packets and process them one by one */
-       do {
+       while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
+                                        batadv_ogm_packet->tt_num_changes)) {
                tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
 
                batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff,
@@ -1299,8 +1300,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
 
                packet_pos = packet_buff + buff_pos;
                batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
-       } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
-                                          batadv_ogm_packet->tt_num_changes));
+       }
 
        kfree_skb(skb);
        return NET_RX_SUCCESS;
index 79d87d8d4f514f5529b57f1b89119a735d068f69..fad0302bdb325e5df700edf4fe323c6226216599 100644 (file)
@@ -359,6 +359,7 @@ static void __sco_sock_close(struct sock *sk)
                        sco_chan_del(sk, ECONNRESET);
                break;
 
+       case BT_CONNECT2:
        case BT_CONNECT:
        case BT_DISCONN:
                sco_chan_del(sk, ECONNRESET);
index b0812c91c0f0ea6d4010722990a044c72d93ba36..bab338e6270df4604efe792f40025a40d7682347 100644 (file)
@@ -423,7 +423,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                        return 0;
                br_warn(br, "adding interface %s with same address "
                       "as a received packet\n",
-                      source->dev->name);
+                      source ? source->dev->name : br->dev->name);
                fdb_delete(br, fdb);
        }
 
index 27aa3ee517ce56101e28fd37602356f1f43843ff..299fc5f40a26c12ef39786d8bc3885c0c96ca7be 100644 (file)
@@ -29,6 +29,7 @@ static inline size_t br_port_info_size(void)
                + nla_total_size(1)     /* IFLA_BRPORT_MODE */
                + nla_total_size(1)     /* IFLA_BRPORT_GUARD */
                + nla_total_size(1)     /* IFLA_BRPORT_PROTECT */
+               + nla_total_size(1)     /* IFLA_BRPORT_FAST_LEAVE */
                + 0;
 }
 
@@ -329,6 +330,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
        br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
        br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
        br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
+       br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
 
        if (tb[IFLA_BRPORT_COST]) {
                err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
index dffbef70cd31253625da50819a171fbc59b386a5..13e6447f03987b49b32695bec2f4e50684e78115 100644 (file)
@@ -1545,7 +1545,6 @@ void net_enable_timestamp(void)
                return;
        }
 #endif
-       WARN_ON(in_interrupt());
        static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
@@ -1625,7 +1624,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
        }
 
        skb_orphan(skb);
-       nf_reset(skb);
 
        if (unlikely(!is_skb_forwardable(dev, skb))) {
                atomic_long_inc(&dev->rx_dropped);
@@ -2219,9 +2217,9 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
+       int vlan_depth = ETH_HLEN;
 
        while (type == htons(ETH_P_8021Q)) {
-               int vlan_depth = ETH_HLEN;
                struct vlan_hdr *vh;
 
                if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
@@ -3315,6 +3313,7 @@ int netdev_rx_handler_register(struct net_device *dev,
        if (dev->rx_handler)
                return -EBUSY;
 
+       /* Note: rx_handler_data must be set before rx_handler */
        rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
        rcu_assign_pointer(dev->rx_handler, rx_handler);
 
@@ -3335,6 +3334,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 
        ASSERT_RTNL();
        RCU_INIT_POINTER(dev->rx_handler, NULL);
+       /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
+        * section has a guarantee to see a non NULL rx_handler_data
+        * as well.
+        */
+       synchronize_net();
        RCU_INIT_POINTER(dev->rx_handler_data, NULL);
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
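
For context only (not part of the patch): a sketch of a hypothetical rx_handler showing the guarantee the new synchronize_net() call preserves. The handler name and its private data are made up; netdev_rx_handler_register() is the API from the hunk above.

#include <linux/netdevice.h>

/* Runs under rcu_read_lock() held by the core receive path. Because the
 * writer publishes rx_handler_data before rx_handler, and on unregister
 * clears rx_handler and waits a grace period before clearing
 * rx_handler_data, seeing a non-NULL handler here implies the private data
 * is still valid.
 */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	void *priv = rcu_dereference(skb->dev->rx_handler_data);

	if (!priv)
		return RX_HANDLER_PASS;

	/* ... inspect the frame via priv, possibly steal it ... */
	return RX_HANDLER_CONSUMED;
}

/* Pairing the handler with its data (under RTNL):
 *	err = netdev_rx_handler_register(dev, example_rx_handler, priv);
 */
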
index c56ea6f7f6c7c88acbe602a3470c979622345713..2bfd081c59f7706cbd9d8a5ba8a5780aaaae23d1 100644 (file)
@@ -328,7 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)
        struct flow_flush_info *info = data;
        struct tasklet_struct *tasklet;
 
-       tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet);
+       tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
 }
index 9d4c7201400d154ab2fd00ccfab4f22f8bddb5ff..e187bf06d673b0ec37d401a8389bba372a8fb3d9 100644 (file)
@@ -140,6 +140,8 @@ ipv6:
                        flow->ports = *ports;
        }
 
+       flow->thoff = (u16) nhoff;
+
        return true;
 }
 EXPORT_SYMBOL(skb_flow_dissect);
index a585d45cc9d9faefbc51fde485971a1336065e58..b65441da74abd85cc8deacc7650c583013e9dcfb 100644 (file)
@@ -496,8 +496,10 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
        }
        if (ops->fill_info) {
                data = nla_nest_start(skb, IFLA_INFO_DATA);
-               if (data == NULL)
+               if (data == NULL) {
+                       err = -EMSGSIZE;
                        goto err_cancel_link;
+               }
                err = ops->fill_info(skb, dev);
                if (err < 0)
                        goto err_cancel_data;
@@ -2621,7 +2623,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
 
                while (RTA_OK(attr, attrlen)) {
-                       unsigned int flavor = attr->rta_type;
+                       unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;
                        if (flavor) {
                                if (flavor > rta_max[sz_idx])
                                        return -EINVAL;
index 905dcc6ad1e3b480c01f87df5157f4e37de112a1..2dc6cdaaae8abc5f31afa57a7ccf765cc978b6ac 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/security.h>
+#include <linux/pid_namespace.h>
 #include <linux/pid.h>
 #include <linux/nsproxy.h>
 #include <linux/slab.h>
@@ -52,7 +53,8 @@ static __inline__ int scm_check_creds(struct ucred *creds)
        if (!uid_valid(uid) || !gid_valid(gid))
                return -EINVAL;
 
-       if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) &&
+       if ((creds->pid == task_tgid_vnr(current) ||
+            ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
            ((uid_eq(uid, cred->uid)   || uid_eq(uid, cred->euid) ||
              uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
            ((gid_eq(gid, cred->gid)   || gid_eq(gid, cred->egid) ||
index 68f6a94f7661999095d9a02539aee956cd52e8cc..c929d9c1c4b60d719d60066f7ca06e4b7d296f59 100644 (file)
@@ -1333,8 +1333,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                                iph->frag_off |= htons(IP_MF);
                        offset += (skb->len - skb->mac_len - iph->ihl * 4);
                } else  {
-                       if (!(iph->frag_off & htons(IP_DF)))
-                               iph->id = htons(id++);
+                       iph->id = htons(id++);
                }
                iph->tot_len = htons(skb->len - skb->mac_len);
                iph->check = 0;
index 245ae078a07fa3d991d6bba6147aab7970f36c9a..f4fd23de9b13103b10351310aea08c47d0429d38 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/slab.h>
 
+#include <net/sock.h>
 #include <net/inet_frag.h>
 
 static void inet_frag_secret_rebuild(unsigned long dummy)
@@ -277,6 +278,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
        __releases(&f->lock)
 {
        struct inet_frag_queue *q;
+       int depth = 0;
 
        hlist_for_each_entry(q, &f->hash[hash], list) {
                if (q->net == nf && f->match(q, key)) {
@@ -284,9 +286,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
                        read_unlock(&f->lock);
                        return q;
                }
+               depth++;
        }
        read_unlock(&f->lock);
 
-       return inet_frag_create(nf, f, key);
+       if (depth <= INETFRAGS_MAXDEPTH)
+               return inet_frag_create(nf, f, key);
+       else
+               return ERR_PTR(-ENOBUFS);
 }
 EXPORT_SYMBOL(inet_frag_find);
+
+void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
+                                  const char *prefix)
+{
+       static const char msg[] = "inet_frag_find: Fragment hash bucket"
+               " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
+               ". Dropping fragment.\n";
+
+       if (PTR_ERR(q) == -ENOBUFS)
+               LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
+}
+EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
index b6d30acb600c5e306ba825ed394c252e4fe27a8b..a6445b843ef40774f2bf5014e6800e1f4fd86b3e 100644 (file)
@@ -292,14 +292,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
        hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 
        q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
-       if (q == NULL)
-               goto out_nomem;
-
+       if (IS_ERR_OR_NULL(q)) {
+               inet_frag_maybe_warn_overflow(q, pr_fmt());
+               return NULL;
+       }
        return container_of(q, struct ipq, q);
-
-out_nomem:
-       LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
-       return NULL;
 }
 
 /* Is the fragment too far ahead to be part of ipq? */
index d0ef0e674ec5c083ad719bcd2b8201c82c1116d5..91d66dbde9c05cd05c4e8489a4abd317b378230e 100644 (file)
@@ -798,10 +798,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 
        if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
                gre_hlen = 0;
-               if (skb->protocol == htons(ETH_P_IP))
-                       tiph = (const struct iphdr *)skb->data;
-               else
-                       tiph = &tunnel->parms.iph;
+               tiph = (const struct iphdr *)skb->data;
        } else {
                gre_hlen = tunnel->hlen;
                tiph = &tunnel->parms.iph;
index 310a3647c83d948949e8c76ef8e89b68def338dd..ec7264514a82e0a130bf15ee01a9030596880921 100644 (file)
@@ -370,7 +370,6 @@ int ip_options_compile(struct net *net,
                                }
                                switch (optptr[3]&0xF) {
                                      case IPOPT_TS_TSONLY:
-                                       opt->ts = optptr - iph;
                                        if (skb)
                                                timeptr = &optptr[optptr[2]-1];
                                        opt->ts_needtime = 1;
@@ -381,7 +380,6 @@ int ip_options_compile(struct net *net,
                                                pp_ptr = optptr + 2;
                                                goto error;
                                        }
-                                       opt->ts = optptr - iph;
                                        if (rt)  {
                                                spec_dst_fill(&spec_dst, skb);
                                                memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
@@ -396,7 +394,6 @@ int ip_options_compile(struct net *net,
                                                pp_ptr = optptr + 2;
                                                goto error;
                                        }
-                                       opt->ts = optptr - iph;
                                        {
                                                __be32 addr;
                                                memcpy(&addr, &optptr[optptr[2]-1], 4);
@@ -429,12 +426,12 @@ int ip_options_compile(struct net *net,
                                        pp_ptr = optptr + 3;
                                        goto error;
                                }
-                               opt->ts = optptr - iph;
                                if (skb) {
                                        optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
                                        opt->is_changed = 1;
                                }
                        }
+                       opt->ts = optptr - iph;
                        break;
                      case IPOPT_RA:
                        if (optlen < 4) {
index 98cbc6877019428f7d3400632c1e81e50145a814..bf6c5cf31aed27ccba4aa25c4d049f0cc037e3cb 100644 (file)
@@ -1522,7 +1522,8 @@ static int __init ip_auto_config(void)
                }
        for (i++; i < CONF_NAMESERVERS_MAX; i++)
                if (ic_nameservers[i] != NONE)
-                       pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]);
+                       pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]);
+       pr_cont("\n");
 #endif /* !SILENT */
 
        return 0;
index ce2d43e1f09f6b4983e6635e6c2b48ca56fcf18b..0d755c50994b2ca01438158e033c020810a3e8af 100644 (file)
@@ -36,19 +36,6 @@ config NF_CONNTRACK_PROC_COMPAT
 
          If unsure, say Y.
 
-config IP_NF_QUEUE
-       tristate "IP Userspace queueing via NETLINK (OBSOLETE)"
-       depends on NETFILTER_ADVANCED
-       help
-         Netfilter has the ability to queue packets to user space: the
-         netlink device can be used to access them using this driver.
-
-         This option enables the old IPv4-only "ip_queue" implementation
-         which has been obsoleted by the new "nfnetlink_queue" code (see
-         CONFIG_NETFILTER_NETLINK_QUEUE).
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP_NF_IPTABLES
        tristate "IP tables support (required for filtering/masq/NAT)"
        default m if NETFILTER_ADVANCED=n
index 47e854fcae24dd8c108e93a2b5a22801a68d7232..e22020790709dbde58759c771f47a69e883892e4 100644 (file)
@@ -775,7 +775,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
                         * Make sure that we have exactly size bytes
                         * available to the caller, no more, no less.
                         */
-                       skb->avail_size = size;
+                       skb->reserved_tailroom = skb->end - skb->tail - size;
                        return skb;
                }
                __kfree_skb(skb);
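
A small sketch of the bookkeeping change above (an interpretation, not code from the patch): instead of caching the number of bytes available (avail_size), the skb now records how much tailroom is reserved, so the usable room can always be recomputed from the current tail pointer.

#include <linux/skbuff.h>

/* Hypothetical helper mirroring the new representation: tailroom minus the
 * reserved part yields the "size" sk_stream_alloc_skb() promised the caller.
 */
static inline int example_stream_availroom(const struct sk_buff *skb)
{
	return skb_tailroom(skb) - skb->reserved_tailroom;
}
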
index 0d9bdacce99f46a77982d89c86a7764fe9ef15f8..3bd55bad230ac7f822f4ea75eb280bc8ce0c07fc 100644 (file)
@@ -2059,11 +2059,8 @@ void tcp_enter_loss(struct sock *sk, int how)
        if (tcp_is_reno(tp))
                tcp_reset_reno_sack(tp);
 
-       if (!how) {
-               /* Push undo marker, if it was plain RTO and nothing
-                * was retransmitted. */
-               tp->undo_marker = tp->snd_una;
-       } else {
+       tp->undo_marker = tp->snd_una;
+       if (how) {
                tp->sacked_out = 0;
                tp->fackets_out = 0;
        }
index 4a8ec457310fbd57e6b62e0a0f8ae6c22f2463fa..d09203c63264d2d8c11f7b40d42200f82934213b 100644 (file)
@@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct sock *sk)
        struct inet_sock *inet = inet_sk(sk);
        u32 mtu = tcp_sk(sk)->mtu_info;
 
-       /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
-        * send out by Linux are always <576bytes so they should go through
-        * unfragmented).
-        */
-       if (sk->sk_state == TCP_LISTEN)
-               return;
-
        dst = inet_csk_update_pmtu(sk, mtu);
        if (!dst)
                return;
@@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                        goto out;
 
                if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
+                       /* We are not interested in TCP_LISTEN and open_requests
+                        * (SYN-ACKs send out by Linux are always <576bytes so
+                        * they should go through unfragmented).
+                        */
+                       if (sk->sk_state == TCP_LISTEN)
+                               goto out;
+
                        tp->mtu_info = info;
                        if (!sock_owned_by_user(sk)) {
                                tcp_v4_mtu_reduced(sk);
index e2b4461074dab715af9c328fd1767f4a5caf8c97..5d0b4387cba6df401166a48f1e4cf3800d6ce6ff 100644 (file)
@@ -1298,7 +1298,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
        eat = min_t(int, len, skb_headlen(skb));
        if (eat) {
                __skb_pull(skb, eat);
-               skb->avail_size -= eat;
                len -= eat;
                if (!len)
                        return;
@@ -1810,8 +1809,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
                        goto send_now;
        }
 
-       /* Ok, it looks like it is advisable to defer.  */
-       tp->tso_deferred = 1 | (jiffies << 1);
+       /* Ok, it looks like it is advisable to defer.
+        * Do not rearm the timer if already set to not break TCP ACK clocking.
+        */
+       if (!tp->tso_deferred)
+               tp->tso_deferred = 1 | (jiffies << 1);
 
        return true;
 
index 265c42cf963c30cce55016bbf4ef0a13f57e6ec5..0a073a263720c4eab823db2c09b1b18d7b510bcd 100644 (file)
@@ -1762,9 +1762,16 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
+       struct udp_sock *up = udp_sk(sk);
        bool slow = lock_sock_fast(sk);
        udp_flush_pending_frames(sk);
        unlock_sock_fast(sk, slow);
+       if (static_key_false(&udp_encap_needed) && up->encap_type) {
+               void (*encap_destroy)(struct sock *sk);
+               encap_destroy = ACCESS_ONCE(up->encap_destroy);
+               if (encap_destroy)
+                       encap_destroy(sk);
+       }
 }
 
 /*
index f2c7e615f902d861d99529536ecebff35fbd7fd5..26512250e095557e50fbee26cc5dcd65d12cc675 100644 (file)
@@ -4784,26 +4784,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)
 
 static int __net_init addrconf_init_net(struct net *net)
 {
-       int err;
+       int err = -ENOMEM;
        struct ipv6_devconf *all, *dflt;
 
-       err = -ENOMEM;
-       all = &ipv6_devconf;
-       dflt = &ipv6_devconf_dflt;
+       all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
+       if (all == NULL)
+               goto err_alloc_all;
 
-       if (!net_eq(net, &init_net)) {
-               all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL);
-               if (all == NULL)
-                       goto err_alloc_all;
+       dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
+       if (dflt == NULL)
+               goto err_alloc_dflt;
 
-               dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
-               if (dflt == NULL)
-                       goto err_alloc_dflt;
-       } else {
-               /* these will be inherited by all namespaces */
-               dflt->autoconf = ipv6_defaults.autoconf;
-               dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
-       }
+       /* these will be inherited by all namespaces */
+       dflt->autoconf = ipv6_defaults.autoconf;
+       dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
 
        net->ipv6.devconf_all = all;
        net->ipv6.devconf_dflt = dflt;
index e33fe0ab2568ec5a750e846457ff52a6a3395c86..2bab2aa597450813ae4bd60d362998830a7e7e3b 100644 (file)
@@ -118,6 +118,18 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
            ipv6_addr_loopback(&hdr->daddr))
                goto err;
 
+       /* RFC4291 Errata ID: 3480
+        * Interface-Local scope spans only a single interface on a
+        * node and is useful only for loopback transmission of
+        * multicast.  Packets with interface-local scope received
+        * from another node must be discarded.
+        */
+       if (!(skb->pkt_type == PACKET_LOOPBACK ||
+             dev->flags & IFF_LOOPBACK) &&
+           ipv6_addr_is_multicast(&hdr->daddr) &&
+           IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
+               goto err;
+
        /* RFC4291 2.7
         * Nodes must not originate a packet to a multicast address whose scope
         * field contains the reserved value 0; if such a packet is received, it
index 83acc1405a18dcef218625e8517431978393ac12..33608c610276d87e9845b75555fadc6771765606 100644 (file)
@@ -114,6 +114,7 @@ ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
 static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
        {
                .name           = "SNPT",
+               .table          = "mangle",
                .target         = ip6t_snpt_tg,
                .targetsize     = sizeof(struct ip6t_npt_tginfo),
                .checkentry     = ip6t_npt_checkentry,
@@ -124,6 +125,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
        },
        {
                .name           = "DNPT",
+               .table          = "mangle",
                .target         = ip6t_dnpt_tg,
                .targetsize     = sizeof(struct ip6t_npt_tginfo),
                .checkentry     = ip6t_npt_checkentry,
index 54087e96d7b8c5b45b52ce1f26fdd54a134506d2..6700069949ddc81b13f7f0c2bc2622258b6fb2a4 100644 (file)
@@ -14,6 +14,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) "IPv6-nf: " fmt
+
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -180,13 +182,11 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
 
        q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
        local_bh_enable();
-       if (q == NULL)
-               goto oom;
-
+       if (IS_ERR_OR_NULL(q)) {
+               inet_frag_maybe_warn_overflow(q, pr_fmt());
+               return NULL;
+       }
        return container_of(q, struct frag_queue, q);
-
-oom:
-       return NULL;
 }
 
 
index 3c6a77290c6e87573cb07d081d878d5c03f9760c..196ab9347ad1df2bab5a9c98720a51e81773985c 100644 (file)
@@ -26,6 +26,9 @@
  *     YOSHIFUJI,H. @USAGI     Always remove fragment header to
  *                             calculate ICV correctly.
  */
+
+#define pr_fmt(fmt) "IPv6: " fmt
+
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -185,9 +188,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6
        hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
 
        q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
-       if (q == NULL)
+       if (IS_ERR_OR_NULL(q)) {
+               inet_frag_maybe_warn_overflow(q, pr_fmt());
                return NULL;
-
+       }
        return container_of(q, struct frag_queue, q);
 }
 
index 9b6460055df5683d70dea9d3f09d24c325ee89e7..f6d629fd6aee152aaff674a03db7a3dd0f194134 100644 (file)
@@ -389,6 +389,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        }
 
        if (type == ICMPV6_PKT_TOOBIG) {
+               /* We are not interested in TCP_LISTEN and open_requests
+                * (SYN-ACKs send out by Linux are always <576bytes so
+                * they should go through unfragmented).
+                */
+               if (sk->sk_state == TCP_LISTEN)
+                       goto out;
+
                tp->mtu_info = ntohl(info);
                if (!sock_owned_by_user(sk))
                        tcp_v6_mtu_reduced(sk);
index 599e1ba6d1ceaa6766d53eda020a69a0aa234e5b..d8e5e852fc7a08b26879c81a4ecaad0be2cf2f90 100644 (file)
@@ -1285,10 +1285,18 @@ do_confirm:
 
 void udpv6_destroy_sock(struct sock *sk)
 {
+       struct udp_sock *up = udp_sk(sk);
        lock_sock(sk);
        udp_v6_flush_pending_frames(sk);
        release_sock(sk);
 
+       if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
+               void (*encap_destroy)(struct sock *sk);
+               encap_destroy = ACCESS_ONCE(up->encap_destroy);
+               if (encap_destroy)
+                       encap_destroy(sk);
+       }
+
        inet6_destroy_sock(sk);
 }
 
index d07e3a626446b3cab44e68b019f7870619699f75..d28e7f014cc639779a4557203b4b2ba2f7e63927 100644 (file)
@@ -2583,8 +2583,10 @@ bed:
                                    NULL, NULL, NULL);
 
                /* Check if the we got some results */
-               if (!self->cachedaddr)
-                       return -EAGAIN;         /* Didn't find any devices */
+               if (!self->cachedaddr) {
+                       err = -EAGAIN;          /* Didn't find any devices */
+                       goto out;
+               }
                daddr = self->cachedaddr;
                /* Cleanup */
                self->cachedaddr = 0;
index 8555f331ea60d4bca67cbe91390ed077f62a091e..5b1e5af257137e4c6a03a2c575f1adb5a949e25e 100644 (file)
@@ -2693,6 +2693,7 @@ static int key_notify_policy_flush(const struct km_event *c)
        hdr->sadb_msg_pid = c->portid;
        hdr->sadb_msg_version = PF_KEY_V2;
        hdr->sadb_msg_errno = (uint8_t) 0;
+       hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
        hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
        pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
        return 0;
index d36875f3427e82b7795012bebe8281318f983d22..8aecf5df66569a6ca0ae1b8921e20954d6812336 100644 (file)
@@ -114,7 +114,6 @@ struct l2tp_net {
 
 static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
-static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
 
 static inline struct l2tp_net *l2tp_pernet(struct net *net)
 {
@@ -192,6 +191,7 @@ struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
        } else {
                /* Socket is owned by kernelspace */
                sk = tunnel->sock;
+               sock_hold(sk);
        }
 
 out:
@@ -210,6 +210,7 @@ void l2tp_tunnel_sock_put(struct sock *sk)
                }
                sock_put(sk);
        }
+       sock_put(sk);
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put);
 
@@ -373,10 +374,8 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
        struct sk_buff *skbp;
        struct sk_buff *tmp;
        u32 ns = L2TP_SKB_CB(skb)->ns;
-       struct l2tp_stats *sstats;
 
        spin_lock_bh(&session->reorder_q.lock);
-       sstats = &session->stats;
        skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
                if (L2TP_SKB_CB(skbp)->ns > ns) {
                        __skb_queue_before(&session->reorder_q, skbp, skb);
@@ -384,9 +383,7 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
                                 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
                                 session->name, ns, L2TP_SKB_CB(skbp)->ns,
                                 skb_queue_len(&session->reorder_q));
-                       u64_stats_update_begin(&sstats->syncp);
-                       sstats->rx_oos_packets++;
-                       u64_stats_update_end(&sstats->syncp);
+                       atomic_long_inc(&session->stats.rx_oos_packets);
                        goto out;
                }
        }
@@ -403,23 +400,16 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
 {
        struct l2tp_tunnel *tunnel = session->tunnel;
        int length = L2TP_SKB_CB(skb)->length;
-       struct l2tp_stats *tstats, *sstats;
 
        /* We're about to requeue the skb, so return resources
         * to its current owner (a socket receive buffer).
         */
        skb_orphan(skb);
 
-       tstats = &tunnel->stats;
-       u64_stats_update_begin(&tstats->syncp);
-       sstats = &session->stats;
-       u64_stats_update_begin(&sstats->syncp);
-       tstats->rx_packets++;
-       tstats->rx_bytes += length;
-       sstats->rx_packets++;
-       sstats->rx_bytes += length;
-       u64_stats_update_end(&tstats->syncp);
-       u64_stats_update_end(&sstats->syncp);
+       atomic_long_inc(&tunnel->stats.rx_packets);
+       atomic_long_add(length, &tunnel->stats.rx_bytes);
+       atomic_long_inc(&session->stats.rx_packets);
+       atomic_long_add(length, &session->stats.rx_bytes);
 
        if (L2TP_SKB_CB(skb)->has_seq) {
                /* Bump our Nr */
@@ -450,7 +440,6 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
 {
        struct sk_buff *skb;
        struct sk_buff *tmp;
-       struct l2tp_stats *sstats;
 
        /* If the pkt at the head of the queue has the nr that we
         * expect to send up next, dequeue it and any other
@@ -458,13 +447,10 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
         */
 start:
        spin_lock_bh(&session->reorder_q.lock);
-       sstats = &session->stats;
        skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
                if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
-                       u64_stats_update_begin(&sstats->syncp);
-                       sstats->rx_seq_discards++;
-                       sstats->rx_errors++;
-                       u64_stats_update_end(&sstats->syncp);
+                       atomic_long_inc(&session->stats.rx_seq_discards);
+                       atomic_long_inc(&session->stats.rx_errors);
                        l2tp_dbg(session, L2TP_MSG_SEQ,
                                 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
                                 session->name, L2TP_SKB_CB(skb)->ns,
@@ -623,7 +609,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
        struct l2tp_tunnel *tunnel = session->tunnel;
        int offset;
        u32 ns, nr;
-       struct l2tp_stats *sstats = &session->stats;
 
        /* The ref count is increased since we now hold a pointer to
         * the session. Take care to decrement the refcnt when exiting
@@ -640,9 +625,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                                  "%s: cookie mismatch (%u/%u). Discarding.\n",
                                  tunnel->name, tunnel->tunnel_id,
                                  session->session_id);
-                       u64_stats_update_begin(&sstats->syncp);
-                       sstats->rx_cookie_discards++;
-                       u64_stats_update_end(&sstats->syncp);
+                       atomic_long_inc(&session->stats.rx_cookie_discards);
                        goto discard;
                }
                ptr += session->peer_cookie_len;
@@ -711,9 +694,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                        l2tp_warn(session, L2TP_MSG_SEQ,
                                  "%s: recv data has no seq numbers when required. Discarding.\n",
                                  session->name);
-                       u64_stats_update_begin(&sstats->syncp);
-                       sstats->rx_seq_discards++;
-                       u64_stats_update_end(&sstats->syncp);
+                       atomic_long_inc(&session->stats.rx_seq_discards);
                        goto discard;
                }
 
@@ -732,9 +713,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                        l2tp_warn(session, L2TP_MSG_SEQ,
                                  "%s: recv data has no seq numbers when required. Discarding.\n",
                                  session->name);
-                       u64_stats_update_begin(&sstats->syncp);
-                       sstats->rx_seq_discards++;
-                       u64_stats_update_end(&sstats->syncp);
+                       atomic_long_inc(&session->stats.rx_seq_discards);
                        goto discard;
                }
        }
@@ -788,9 +767,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                         * packets
                         */
                        if (L2TP_SKB_CB(skb)->ns != session->nr) {
-                               u64_stats_update_begin(&sstats->syncp);
-                               sstats->rx_seq_discards++;
-                               u64_stats_update_end(&sstats->syncp);
+                               atomic_long_inc(&session->stats.rx_seq_discards);
                                l2tp_dbg(session, L2TP_MSG_SEQ,
                                         "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
                                         session->name, L2TP_SKB_CB(skb)->ns,
@@ -816,9 +793,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
        return;
 
 discard:
-       u64_stats_update_begin(&sstats->syncp);
-       sstats->rx_errors++;
-       u64_stats_update_end(&sstats->syncp);
+       atomic_long_inc(&session->stats.rx_errors);
        kfree_skb(skb);
 
        if (session->deref)
@@ -828,6 +803,23 @@ discard:
 }
 EXPORT_SYMBOL(l2tp_recv_common);
 
+/* Drop skbs from the session's reorder_q
+ */
+int l2tp_session_queue_purge(struct l2tp_session *session)
+{
+       struct sk_buff *skb = NULL;
+       BUG_ON(!session);
+       BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+       while ((skb = skb_dequeue(&session->reorder_q))) {
+               atomic_long_inc(&session->stats.rx_errors);
+               kfree_skb(skb);
+               if (session->deref)
+                       (*session->deref)(session);
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
+
 /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
  * here. The skb is not on a list when we get here.
  * Returns 0 if the packet was a data packet and was successfully passed on.
@@ -843,7 +835,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
        u32 tunnel_id, session_id;
        u16 version;
        int length;
-       struct l2tp_stats *tstats;
 
        if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
                goto discard_bad_csum;
@@ -932,10 +923,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
 discard_bad_csum:
        LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
        UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
-       tstats = &tunnel->stats;
-       u64_stats_update_begin(&tstats->syncp);
-       tstats->rx_errors++;
-       u64_stats_update_end(&tstats->syncp);
+       atomic_long_inc(&tunnel->stats.rx_errors);
        kfree_skb(skb);
 
        return 0;
@@ -1062,7 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
        struct l2tp_tunnel *tunnel = session->tunnel;
        unsigned int len = skb->len;
        int error;
-       struct l2tp_stats *tstats, *sstats;
 
        /* Debug */
        if (session->send_seq)
@@ -1091,21 +1078,15 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
                error = ip_queue_xmit(skb, fl);
 
        /* Update stats */
-       tstats = &tunnel->stats;
-       u64_stats_update_begin(&tstats->syncp);
-       sstats = &session->stats;
-       u64_stats_update_begin(&sstats->syncp);
        if (error >= 0) {
-               tstats->tx_packets++;
-               tstats->tx_bytes += len;
-               sstats->tx_packets++;
-               sstats->tx_bytes += len;
+               atomic_long_inc(&tunnel->stats.tx_packets);
+               atomic_long_add(len, &tunnel->stats.tx_bytes);
+               atomic_long_inc(&session->stats.tx_packets);
+               atomic_long_add(len, &session->stats.tx_bytes);
        } else {
-               tstats->tx_errors++;
-               sstats->tx_errors++;
+               atomic_long_inc(&tunnel->stats.tx_errors);
+               atomic_long_inc(&session->stats.tx_errors);
        }
-       u64_stats_update_end(&tstats->syncp);
-       u64_stats_update_end(&sstats->syncp);
 
        return 0;
 }
@@ -1282,6 +1263,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)
                /* No longer an encapsulation socket. See net/ipv4/udp.c */
                (udp_sk(sk))->encap_type = 0;
                (udp_sk(sk))->encap_rcv = NULL;
+               (udp_sk(sk))->encap_destroy = NULL;
                break;
        case L2TP_ENCAPTYPE_IP:
                break;
@@ -1311,7 +1293,7 @@ end:
 
 /* When the tunnel is closed, all the attached sessions need to go too.
  */
-static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
+void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
 {
        int hash;
        struct hlist_node *walk;
@@ -1334,25 +1316,13 @@ again:
 
                        hlist_del_init(&session->hlist);
 
-                       /* Since we should hold the sock lock while
-                        * doing any unbinding, we need to release the
-                        * lock we're holding before taking that lock.
-                        * Hold a reference to the sock so it doesn't
-                        * disappear as we're jumping between locks.
-                        */
                        if (session->ref != NULL)
                                (*session->ref)(session);
 
                        write_unlock_bh(&tunnel->hlist_lock);
 
-                       if (tunnel->version != L2TP_HDR_VER_2) {
-                               struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
-                               spin_lock_bh(&pn->l2tp_session_hlist_lock);
-                               hlist_del_init_rcu(&session->global_hlist);
-                               spin_unlock_bh(&pn->l2tp_session_hlist_lock);
-                               synchronize_rcu();
-                       }
+                       __l2tp_session_unhash(session);
+                       l2tp_session_queue_purge(session);
 
                        if (session->session_close != NULL)
                                (*session->session_close)(session);
@@ -1360,6 +1330,8 @@ again:
                        if (session->deref != NULL)
                                (*session->deref)(session);
 
+                       l2tp_session_dec_refcount(session);
+
                        write_lock_bh(&tunnel->hlist_lock);
 
                        /* Now restart from the beginning of this hash
@@ -1372,6 +1344,17 @@ again:
        }
        write_unlock_bh(&tunnel->hlist_lock);
 }
+EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
+
+/* Tunnel socket destroy hook for UDP encapsulation */
+static void l2tp_udp_encap_destroy(struct sock *sk)
+{
+       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+       if (tunnel) {
+               l2tp_tunnel_closeall(tunnel);
+               sock_put(sk);
+       }
+}
 
 /* Really kill the tunnel.
  * Come here only when all sessions have been cleared from the tunnel.
@@ -1397,19 +1380,21 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
                return;
 
        sock = sk->sk_socket;
-       BUG_ON(!sock);
 
-       /* If the tunnel socket was created directly by the kernel, use the
-        * sk_* API to release the socket now.  Otherwise go through the
-        * inet_* layer to shut the socket down, and let userspace close it.
+       /* If the tunnel socket was created by userspace, then go through the
+        * inet layer to shut the socket down, and let userspace close it.
+        * Otherwise, if we created the socket directly within the kernel, use
+        * the sk API to release it here.
         * In either case the tunnel resources are freed in the socket
         * destructor when the tunnel socket goes away.
         */
-       if (sock->file == NULL) {
-               kernel_sock_shutdown(sock, SHUT_RDWR);
-               sk_release_kernel(sk);
+       if (tunnel->fd >= 0) {
+               if (sock)
+                       inet_shutdown(sock, 2);
        } else {
-               inet_shutdown(sock, 2);
+               if (sock)
+                       kernel_sock_shutdown(sock, SHUT_RDWR);
+               sk_release_kernel(sk);
        }
 
        l2tp_tunnel_sock_put(sk);
@@ -1668,6 +1653,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
                /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
                udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
                udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
+               udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
 #if IS_ENABLED(CONFIG_IPV6)
                if (sk->sk_family == PF_INET6)
                        udpv6_encap_enable();
@@ -1723,6 +1709,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
  */
 int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
 {
+       l2tp_tunnel_closeall(tunnel);
        return (false == queue_work(l2tp_wq, &tunnel->del_work));
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
@@ -1731,62 +1718,71 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
  */
 void l2tp_session_free(struct l2tp_session *session)
 {
-       struct l2tp_tunnel *tunnel;
+       struct l2tp_tunnel *tunnel = session->tunnel;
 
        BUG_ON(atomic_read(&session->ref_count) != 0);
 
-       tunnel = session->tunnel;
-       if (tunnel != NULL) {
+       if (tunnel) {
                BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+               if (session->session_id != 0)
+                       atomic_dec(&l2tp_session_count);
+               sock_put(tunnel->sock);
+               session->tunnel = NULL;
+               l2tp_tunnel_dec_refcount(tunnel);
+       }
+
+       kfree(session);
 
-               /* Delete the session from the hash */
+       return;
+}
+EXPORT_SYMBOL_GPL(l2tp_session_free);
+
+/* Remove an l2tp session from l2tp_core's hash lists.
+ * Provides a tidyup interface for pseudowire code which can't just route all
+ * shutdown via. l2tp_session_delete and a pseudowire-specific session_close
+ * callback.
+ */
+void __l2tp_session_unhash(struct l2tp_session *session)
+{
+       struct l2tp_tunnel *tunnel = session->tunnel;
+
+       /* Remove the session from core hashes */
+       if (tunnel) {
+               /* Remove from the per-tunnel hash */
                write_lock_bh(&tunnel->hlist_lock);
                hlist_del_init(&session->hlist);
                write_unlock_bh(&tunnel->hlist_lock);
 
-               /* Unlink from the global hash if not L2TPv2 */
+               /* For L2TPv3 we have a per-net hash: remove from there, too */
                if (tunnel->version != L2TP_HDR_VER_2) {
                        struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
-
                        spin_lock_bh(&pn->l2tp_session_hlist_lock);
                        hlist_del_init_rcu(&session->global_hlist);
                        spin_unlock_bh(&pn->l2tp_session_hlist_lock);
                        synchronize_rcu();
                }
-
-               if (session->session_id != 0)
-                       atomic_dec(&l2tp_session_count);
-
-               sock_put(tunnel->sock);
-
-               /* This will delete the tunnel context if this
-                * is the last session on the tunnel.
-                */
-               session->tunnel = NULL;
-               l2tp_tunnel_dec_refcount(tunnel);
        }
-
-       kfree(session);
-
-       return;
 }
-EXPORT_SYMBOL_GPL(l2tp_session_free);
+EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
 
 /* This function is used by the netlink SESSION_DELETE command and by
    pseudowire modules.
  */
 int l2tp_session_delete(struct l2tp_session *session)
 {
+       if (session->ref)
+               (*session->ref)(session);
+       __l2tp_session_unhash(session);
+       l2tp_session_queue_purge(session);
        if (session->session_close != NULL)
                (*session->session_close)(session);
-
+       if (session->deref)
+               (*session->deref)(session);
        l2tp_session_dec_refcount(session);
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(l2tp_session_delete);
 
-
 /* We come here whenever a session's send_seq, cookie_len or
  * l2specific_len parameters are set.
  */
index 8eb8f1d47f3ac2d6ce32ae287e120a93aa931b40..485a490fd990eeb91a3577e65844c17d1350f720 100644 (file)
@@ -36,16 +36,15 @@ enum {
 struct sk_buff;
 
 struct l2tp_stats {
-       u64                     tx_packets;
-       u64                     tx_bytes;
-       u64                     tx_errors;
-       u64                     rx_packets;
-       u64                     rx_bytes;
-       u64                     rx_seq_discards;
-       u64                     rx_oos_packets;
-       u64                     rx_errors;
-       u64                     rx_cookie_discards;
-       struct u64_stats_sync   syncp;
+       atomic_long_t           tx_packets;
+       atomic_long_t           tx_bytes;
+       atomic_long_t           tx_errors;
+       atomic_long_t           rx_packets;
+       atomic_long_t           rx_bytes;
+       atomic_long_t           rx_seq_discards;
+       atomic_long_t           rx_oos_packets;
+       atomic_long_t           rx_errors;
+       atomic_long_t           rx_cookie_discards;
 };
 
 struct l2tp_tunnel;
@@ -240,11 +239,14 @@ extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
 extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
 
 extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
+extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
 extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
 extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
+extern void __l2tp_session_unhash(struct l2tp_session *session);
 extern int l2tp_session_delete(struct l2tp_session *session);
 extern void l2tp_session_free(struct l2tp_session *session);
 extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
+extern int l2tp_session_queue_purge(struct l2tp_session *session);
 extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
 
 extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
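
A side note on the counter conversion above (illustration only, not part of the patch): atomic_long_t counters can be bumped from any context without a u64_stats_sync writer section, which is the point of the change. The helpers below are hypothetical.

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical wrappers showing the update/read pattern used throughout the
 * l2tp hunks above.
 */
static inline void example_l2tp_count_rx(struct l2tp_stats *stats,
					 unsigned int len)
{
	atomic_long_inc(&stats->rx_packets);
	atomic_long_add(len, &stats->rx_bytes);
}

static inline u64 example_l2tp_read_rx_bytes(struct l2tp_stats *stats)
{
	/* atomic_long_read() returns long; counters may wrap on 32-bit. */
	return (u64)atomic_long_read(&stats->rx_bytes);
}
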
index c3813bc84552cebc56691cae4eae3097b29d2160..072d7202e182ffa2125fe7de9c54c584626fdeb0 100644 (file)
@@ -146,14 +146,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
                   tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
                   atomic_read(&tunnel->ref_count));
 
-       seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+       seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n",
                   tunnel->debug,
-                  (unsigned long long)tunnel->stats.tx_packets,
-                  (unsigned long long)tunnel->stats.tx_bytes,
-                  (unsigned long long)tunnel->stats.tx_errors,
-                  (unsigned long long)tunnel->stats.rx_packets,
-                  (unsigned long long)tunnel->stats.rx_bytes,
-                  (unsigned long long)tunnel->stats.rx_errors);
+                  atomic_long_read(&tunnel->stats.tx_packets),
+                  atomic_long_read(&tunnel->stats.tx_bytes),
+                  atomic_long_read(&tunnel->stats.tx_errors),
+                  atomic_long_read(&tunnel->stats.rx_packets),
+                  atomic_long_read(&tunnel->stats.rx_bytes),
+                  atomic_long_read(&tunnel->stats.rx_errors));
 
        if (tunnel->show != NULL)
                tunnel->show(m, tunnel);
@@ -203,14 +203,14 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
                seq_printf(m, "\n");
        }
 
-       seq_printf(m, "   %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n",
+       seq_printf(m, "   %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
                   session->nr, session->ns,
-                  (unsigned long long)session->stats.tx_packets,
-                  (unsigned long long)session->stats.tx_bytes,
-                  (unsigned long long)session->stats.tx_errors,
-                  (unsigned long long)session->stats.rx_packets,
-                  (unsigned long long)session->stats.rx_bytes,
-                  (unsigned long long)session->stats.rx_errors);
+                  atomic_long_read(&session->stats.tx_packets),
+                  atomic_long_read(&session->stats.tx_bytes),
+                  atomic_long_read(&session->stats.tx_errors),
+                  atomic_long_read(&session->stats.rx_packets),
+                  atomic_long_read(&session->stats.rx_bytes),
+                  atomic_long_read(&session->stats.rx_errors));
 
        if (session->show != NULL)
                session->show(m, session);
index 7f41b7051269539efa0780b618be0b60e7788e76..571db8dd2292a7c5b1f2e940073edce370b13699 100644 (file)
@@ -228,10 +228,16 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
 static void l2tp_ip_destroy_sock(struct sock *sk)
 {
        struct sk_buff *skb;
+       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
 
        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);
 
+       if (tunnel) {
+               l2tp_tunnel_closeall(tunnel);
+               sock_put(sk);
+       }
+
        sk_refcnt_debug_dec(sk);
 }
 
index 41f2f8126ebc720933638f67eb800f17797987e7..c74f5a91ff6a3213209c23ff488f0b475a552857 100644 (file)
@@ -241,10 +241,17 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)
 
 static void l2tp_ip6_destroy_sock(struct sock *sk)
 {
+       struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
+
        lock_sock(sk);
        ip6_flush_pending_frames(sk);
        release_sock(sk);
 
+       if (tunnel) {
+               l2tp_tunnel_closeall(tunnel);
+               sock_put(sk);
+       }
+
        inet6_destroy_sock(sk);
 }
 
index c1bab22db85e79765581079468a36231e5f3d608..0825ff26e113f3e2acdbbe2b88bd0f4fbfd92410 100644 (file)
@@ -246,8 +246,6 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
 #if IS_ENABLED(CONFIG_IPV6)
        struct ipv6_pinfo *np = NULL;
 #endif
-       struct l2tp_stats stats;
-       unsigned int start;
 
        hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags,
                          L2TP_CMD_TUNNEL_GET);
@@ -265,28 +263,22 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
        if (nest == NULL)
                goto nla_put_failure;
 
-       do {
-               start = u64_stats_fetch_begin(&tunnel->stats.syncp);
-               stats.tx_packets = tunnel->stats.tx_packets;
-               stats.tx_bytes = tunnel->stats.tx_bytes;
-               stats.tx_errors = tunnel->stats.tx_errors;
-               stats.rx_packets = tunnel->stats.rx_packets;
-               stats.rx_bytes = tunnel->stats.rx_bytes;
-               stats.rx_errors = tunnel->stats.rx_errors;
-               stats.rx_seq_discards = tunnel->stats.rx_seq_discards;
-               stats.rx_oos_packets = tunnel->stats.rx_oos_packets;
-       } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start));
-
-       if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
-           nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
-           nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+       if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
+                   atomic_long_read(&tunnel->stats.tx_packets)) ||
+           nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
+                   atomic_long_read(&tunnel->stats.tx_bytes)) ||
+           nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
+                   atomic_long_read(&tunnel->stats.tx_errors)) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
+                   atomic_long_read(&tunnel->stats.rx_packets)) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
+                   atomic_long_read(&tunnel->stats.rx_bytes)) ||
            nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
-                       stats.rx_seq_discards) ||
+                   atomic_long_read(&tunnel->stats.rx_seq_discards)) ||
            nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
-                       stats.rx_oos_packets) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+                   atomic_long_read(&tunnel->stats.rx_oos_packets)) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
+                   atomic_long_read(&tunnel->stats.rx_errors)))
                goto nla_put_failure;
        nla_nest_end(skb, nest);
 
@@ -612,8 +604,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
        struct nlattr *nest;
        struct l2tp_tunnel *tunnel = session->tunnel;
        struct sock *sk = NULL;
-       struct l2tp_stats stats;
-       unsigned int start;
 
        sk = tunnel->sock;
 
@@ -656,28 +646,22 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
        if (nest == NULL)
                goto nla_put_failure;
 
-       do {
-               start = u64_stats_fetch_begin(&session->stats.syncp);
-               stats.tx_packets = session->stats.tx_packets;
-               stats.tx_bytes = session->stats.tx_bytes;
-               stats.tx_errors = session->stats.tx_errors;
-               stats.rx_packets = session->stats.rx_packets;
-               stats.rx_bytes = session->stats.rx_bytes;
-               stats.rx_errors = session->stats.rx_errors;
-               stats.rx_seq_discards = session->stats.rx_seq_discards;
-               stats.rx_oos_packets = session->stats.rx_oos_packets;
-       } while (u64_stats_fetch_retry(&session->stats.syncp, start));
-
-       if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
-           nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
-           nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
+       if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS,
+               atomic_long_read(&session->stats.tx_packets)) ||
+           nla_put_u64(skb, L2TP_ATTR_TX_BYTES,
+               atomic_long_read(&session->stats.tx_bytes)) ||
+           nla_put_u64(skb, L2TP_ATTR_TX_ERRORS,
+               atomic_long_read(&session->stats.tx_errors)) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_PACKETS,
+               atomic_long_read(&session->stats.rx_packets)) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_BYTES,
+               atomic_long_read(&session->stats.rx_bytes)) ||
            nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
-                       stats.rx_seq_discards) ||
+               atomic_long_read(&session->stats.rx_seq_discards)) ||
            nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
-                       stats.rx_oos_packets) ||
-           nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
+               atomic_long_read(&session->stats.rx_oos_packets)) ||
+           nla_put_u64(skb, L2TP_ATTR_RX_ERRORS,
+               atomic_long_read(&session->stats.rx_errors)))
                goto nla_put_failure;
        nla_nest_end(skb, nest);
 
index 6a53371dba1f1b357f9cd4f34b7af166ed324eab..637a341c1e2d1a466c68efcb50d7fc201198a15b 100644 (file)
@@ -97,6 +97,7 @@
 #include <net/ip.h>
 #include <net/udp.h>
 #include <net/xfrm.h>
+#include <net/inet_common.h>
 
 #include <asm/byteorder.h>
 #include <linux/atomic.h>
@@ -259,7 +260,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
                          session->name);
 
                /* Not bound. Nothing we can do, so discard. */
-               session->stats.rx_errors++;
+               atomic_long_inc(&session->stats.rx_errors);
                kfree_skb(skb);
        }
 
@@ -447,34 +448,16 @@ static void pppol2tp_session_close(struct l2tp_session *session)
 {
        struct pppol2tp_session *ps = l2tp_session_priv(session);
        struct sock *sk = ps->sock;
-       struct sk_buff *skb;
+       struct socket *sock = sk->sk_socket;
 
        BUG_ON(session->magic != L2TP_SESSION_MAGIC);
 
-       if (session->session_id == 0)
-               goto out;
-
-       if (sk != NULL) {
-               lock_sock(sk);
-
-               if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
-                       pppox_unbind_sock(sk);
-                       sk->sk_state = PPPOX_DEAD;
-                       sk->sk_state_change(sk);
-               }
-
-               /* Purge any queued data */
-               skb_queue_purge(&sk->sk_receive_queue);
-               skb_queue_purge(&sk->sk_write_queue);
-               while ((skb = skb_dequeue(&session->reorder_q))) {
-                       kfree_skb(skb);
-                       sock_put(sk);
-               }
 
-               release_sock(sk);
+       if (sock) {
+               inet_shutdown(sock, 2);
+               /* Don't let the session go away before our socket does */
+               l2tp_session_inc_refcount(session);
        }
-
-out:
        return;
 }
 
@@ -483,19 +466,12 @@ out:
  */
 static void pppol2tp_session_destruct(struct sock *sk)
 {
-       struct l2tp_session *session;
-
-       if (sk->sk_user_data != NULL) {
-               session = sk->sk_user_data;
-               if (session == NULL)
-                       goto out;
-
+       struct l2tp_session *session = sk->sk_user_data;
+       if (session) {
                sk->sk_user_data = NULL;
                BUG_ON(session->magic != L2TP_SESSION_MAGIC);
                l2tp_session_dec_refcount(session);
        }
-
-out:
        return;
 }
 
@@ -525,16 +501,13 @@ static int pppol2tp_release(struct socket *sock)
        session = pppol2tp_sock_to_session(sk);
 
        /* Purge any queued data */
-       skb_queue_purge(&sk->sk_receive_queue);
-       skb_queue_purge(&sk->sk_write_queue);
        if (session != NULL) {
-               struct sk_buff *skb;
-               while ((skb = skb_dequeue(&session->reorder_q))) {
-                       kfree_skb(skb);
-                       sock_put(sk);
-               }
+               __l2tp_session_unhash(session);
+               l2tp_session_queue_purge(session);
                sock_put(sk);
        }
+       skb_queue_purge(&sk->sk_receive_queue);
+       skb_queue_purge(&sk->sk_write_queue);
 
        release_sock(sk);
 
@@ -880,18 +853,6 @@ out:
        return error;
 }
 
-/* Called when deleting sessions via the netlink interface.
- */
-static int pppol2tp_session_delete(struct l2tp_session *session)
-{
-       struct pppol2tp_session *ps = l2tp_session_priv(session);
-
-       if (ps->sock == NULL)
-               l2tp_session_dec_refcount(session);
-
-       return 0;
-}
-
 #endif /* CONFIG_L2TP_V3 */
 
 /* getname() support.
@@ -1025,14 +986,14 @@ end:
 static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
                                struct l2tp_stats *stats)
 {
-       dest->tx_packets = stats->tx_packets;
-       dest->tx_bytes = stats->tx_bytes;
-       dest->tx_errors = stats->tx_errors;
-       dest->rx_packets = stats->rx_packets;
-       dest->rx_bytes = stats->rx_bytes;
-       dest->rx_seq_discards = stats->rx_seq_discards;
-       dest->rx_oos_packets = stats->rx_oos_packets;
-       dest->rx_errors = stats->rx_errors;
+       dest->tx_packets = atomic_long_read(&stats->tx_packets);
+       dest->tx_bytes = atomic_long_read(&stats->tx_bytes);
+       dest->tx_errors = atomic_long_read(&stats->tx_errors);
+       dest->rx_packets = atomic_long_read(&stats->rx_packets);
+       dest->rx_bytes = atomic_long_read(&stats->rx_bytes);
+       dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards);
+       dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets);
+       dest->rx_errors = atomic_long_read(&stats->rx_errors);
 }
 
 /* Session ioctl helper.
@@ -1666,14 +1627,14 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
                   tunnel->name,
                   (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
                   atomic_read(&tunnel->ref_count) - 1);
-       seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
+       seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n",
                   tunnel->debug,
-                  (unsigned long long)tunnel->stats.tx_packets,
-                  (unsigned long long)tunnel->stats.tx_bytes,
-                  (unsigned long long)tunnel->stats.tx_errors,
-                  (unsigned long long)tunnel->stats.rx_packets,
-                  (unsigned long long)tunnel->stats.rx_bytes,
-                  (unsigned long long)tunnel->stats.rx_errors);
+                  atomic_long_read(&tunnel->stats.tx_packets),
+                  atomic_long_read(&tunnel->stats.tx_bytes),
+                  atomic_long_read(&tunnel->stats.tx_errors),
+                  atomic_long_read(&tunnel->stats.rx_packets),
+                  atomic_long_read(&tunnel->stats.rx_bytes),
+                  atomic_long_read(&tunnel->stats.rx_errors));
 }
 
 static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
@@ -1708,14 +1669,14 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
                   session->lns_mode ? "LNS" : "LAC",
                   session->debug,
                   jiffies_to_msecs(session->reorder_timeout));
-       seq_printf(m, "   %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
+       seq_printf(m, "   %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n",
                   session->nr, session->ns,
-                  (unsigned long long)session->stats.tx_packets,
-                  (unsigned long long)session->stats.tx_bytes,
-                  (unsigned long long)session->stats.tx_errors,
-                  (unsigned long long)session->stats.rx_packets,
-                  (unsigned long long)session->stats.rx_bytes,
-                  (unsigned long long)session->stats.rx_errors);
+                  atomic_long_read(&session->stats.tx_packets),
+                  atomic_long_read(&session->stats.tx_bytes),
+                  atomic_long_read(&session->stats.tx_errors),
+                  atomic_long_read(&session->stats.rx_packets),
+                  atomic_long_read(&session->stats.rx_bytes),
+                  atomic_long_read(&session->stats.rx_errors));
 
        if (po)
                seq_printf(m, "   interface %s\n", ppp_dev_name(&po->chan));
@@ -1839,7 +1800,7 @@ static const struct pppox_proto pppol2tp_proto = {
 
 static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
        .session_create = pppol2tp_session_create,
-       .session_delete = pppol2tp_session_delete,
+       .session_delete = l2tp_session_delete,
 };
 
 #endif /* CONFIG_L2TP_V3 */
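
Note on the l2tp hunks above: the u64_stats_fetch_begin()/u64_stats_fetch_retry() snapshot loops are gone, and every counter is now an atomic_long_t that the hot paths bump with atomic_long_inc() while the netlink, ioctl and /proc dump paths sample it with atomic_long_read(). A minimal userspace sketch of the same pattern, using C11 atomics rather than the kernel's atomic_long API (the struct and function names below are illustrative only, not part of the patch):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the reworked struct l2tp_stats: each counter is an
 * independent atomic word, so readers need no seqcount retry loop. */
struct demo_stats {
	atomic_long rx_packets;
	atomic_long rx_errors;
};

static void demo_rx(struct demo_stats *s, int bad)
{
	/* mirrors atomic_long_inc(&session->stats.rx_packets) */
	atomic_fetch_add_explicit(&s->rx_packets, 1, memory_order_relaxed);
	if (bad)
		atomic_fetch_add_explicit(&s->rx_errors, 1, memory_order_relaxed);
}

int main(void)
{
	static struct demo_stats s;	/* zero-initialized counters */

	demo_rx(&s, 0);
	demo_rx(&s, 1);
	/* mirrors the atomic_long_read() calls in the dump paths above */
	printf("rx_packets=%ld rx_errors=%ld\n",
	       atomic_load_explicit(&s.rx_packets, memory_order_relaxed),
	       atomic_load_explicit(&s.rx_errors, memory_order_relaxed));
	return 0;
}

The trade-off accepted here is that a dump is no longer one consistent snapshot across all counters; each counter is only read atomically on its own, which is adequate for statistics.
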
index baaa8608e52de8d9d504f14a254bb7e6631b5f2e..3bfe2612c8c2e64f19a0843875b470eb72c7325b 100644 (file)
@@ -349,21 +349,19 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
 static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
 {
        struct ieee80211_sub_if_data *sdata;
-       int ret = 0;
+       int ret;
 
        if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
                return 0;
 
-       mutex_lock(&local->iflist_mtx);
+       ASSERT_RTNL();
 
        if (local->monitor_sdata)
-               goto out_unlock;
+               return 0;
 
        sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
-       if (!sdata) {
-               ret = -ENOMEM;
-               goto out_unlock;
-       }
+       if (!sdata)
+               return -ENOMEM;
 
        /* set up data */
        sdata->local = local;
@@ -377,13 +375,13 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
        if (WARN_ON(ret)) {
                /* ok .. stupid driver, it asked for this! */
                kfree(sdata);
-               goto out_unlock;
+               return ret;
        }
 
        ret = ieee80211_check_queues(sdata);
        if (ret) {
                kfree(sdata);
-               goto out_unlock;
+               return ret;
        }
 
        ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
@@ -391,13 +389,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
        if (ret) {
                drv_remove_interface(local, sdata);
                kfree(sdata);
-               goto out_unlock;
+               return ret;
        }
 
+       mutex_lock(&local->iflist_mtx);
        rcu_assign_pointer(local->monitor_sdata, sdata);
- out_unlock:
        mutex_unlock(&local->iflist_mtx);
-       return ret;
+
+       return 0;
 }
 
 static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
@@ -407,14 +406,20 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
        if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
                return;
 
+       ASSERT_RTNL();
+
        mutex_lock(&local->iflist_mtx);
 
        sdata = rcu_dereference_protected(local->monitor_sdata,
                                          lockdep_is_held(&local->iflist_mtx));
-       if (!sdata)
-               goto out_unlock;
+       if (!sdata) {
+               mutex_unlock(&local->iflist_mtx);
+               return;
+       }
 
        rcu_assign_pointer(local->monitor_sdata, NULL);
+       mutex_unlock(&local->iflist_mtx);
+
        synchronize_net();
 
        ieee80211_vif_release_channel(sdata);
@@ -422,8 +427,6 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
        drv_remove_interface(local, sdata);
 
        kfree(sdata);
- out_unlock:
-       mutex_unlock(&local->iflist_mtx);
 }
 
 /*
index 29ce2aa87e7b60fdd344f7fd8acb1148cbc477fd..4749b3858695e6c2aeae983146d6a055be562a28 100644 (file)
@@ -1060,7 +1060,8 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
 
        rcu_read_lock();
        list_for_each_entry_rcu(sdata, &local->interfaces, list)
-               if (ieee80211_vif_is_mesh(&sdata->vif))
+               if (ieee80211_vif_is_mesh(&sdata->vif) &&
+                   ieee80211_sdata_running(sdata))
                        ieee80211_queue_work(&local->hw, &sdata->work);
        rcu_read_unlock();
 }
index 141577412d8407fc8b18ad354a34ad8de105f154..82cc30318a86f66c4a8f23341ea6ed3f6b62d578 100644 (file)
@@ -3608,8 +3608,10 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
 
        /* Restart STA timers */
        rcu_read_lock();
-       list_for_each_entry_rcu(sdata, &local->interfaces, list)
-               ieee80211_restart_sta_timer(sdata);
+       list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+               if (ieee80211_sdata_running(sdata))
+                       ieee80211_restart_sta_timer(sdata);
+       }
        rcu_read_unlock();
 }
 
index bb73ed2d20b90e8ece75bdd41a331fecb9c9d7ac..c6844ad080beee0123969c3aca15b7ca349eddda 100644 (file)
@@ -2675,7 +2675,19 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
 
                memset(nskb->cb, 0, sizeof(nskb->cb));
 
-               ieee80211_tx_skb(rx->sdata, nskb);
+               if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
+                       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
+
+                       info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
+                                     IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
+                                     IEEE80211_TX_CTL_NO_CCK_RATE;
+                       if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
+                               info->hw_queue =
+                                       local->hw.offchannel_tx_hw_queue;
+               }
+
+               __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
+                                           status->band);
        }
        dev_kfree_skb(rx->skb);
        return RX_QUEUED;
index a79ce820cb50cf01e5cff62a47b974332c3fe8d6..238a0cca320e621dad86cbca1830e4867459cb6d 100644 (file)
@@ -766,6 +766,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
        struct ieee80211_local *local;
        struct ieee80211_sub_if_data *sdata;
        int ret, i;
+       bool have_key = false;
 
        might_sleep();
 
@@ -793,12 +794,19 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
        list_del_rcu(&sta->list);
 
        mutex_lock(&local->key_mtx);
-       for (i = 0; i < NUM_DEFAULT_KEYS; i++)
+       for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
                __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]));
-       if (sta->ptk)
+               have_key = true;
+       }
+       if (sta->ptk) {
                __ieee80211_key_free(key_mtx_dereference(local, sta->ptk));
+               have_key = true;
+       }
        mutex_unlock(&local->key_mtx);
 
+       if (!have_key)
+               synchronize_net();
+
        sta->dead = true;
 
        local->num_sta--;
index 47edf5a40a5939d2401dc11baa89142b9bcd64b6..61f49d241712a77797829a294ee94fc10161306e 100644 (file)
@@ -1394,10 +1394,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
                        skb_reset_network_header(skb);
                        IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",
                                &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu);
-                       rcu_read_lock();
                        ipv4_update_pmtu(skb, dev_net(skb->dev),
                                         mtu, 0, 0, 0, 0);
-                       rcu_read_unlock();
                        /* Client uses PMTUD? */
                        if (!(cih->frag_off & htons(IP_DF)))
                                goto ignore_ipip;
@@ -1577,7 +1575,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
        }
        /* ipvs enabled in this netns ? */
        net = skb_net(skb);
-       if (!net_ipvs(net)->enable)
+       ipvs = net_ipvs(net);
+       if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
                return NF_ACCEPT;
 
        ip_vs_fill_iph_skb(af, skb, &iph);
@@ -1654,7 +1653,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
        }
 
        IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
-       ipvs = net_ipvs(net);
        /* Check the server status */
        if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                /* the destination server is not available */
@@ -1815,13 +1813,15 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
 {
        int r;
        struct net *net;
+       struct netns_ipvs *ipvs;
 
        if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
                return NF_ACCEPT;
 
        /* ipvs enabled in this netns ? */
        net = skb_net(skb);
-       if (!net_ipvs(net)->enable)
+       ipvs = net_ipvs(net);
+       if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
                return NF_ACCEPT;
 
        return ip_vs_in_icmp(skb, &r, hooknum);
@@ -1835,6 +1835,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
 {
        int r;
        struct net *net;
+       struct netns_ipvs *ipvs;
        struct ip_vs_iphdr iphdr;
 
        ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr);
@@ -1843,7 +1844,8 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
 
        /* ipvs enabled in this netns ? */
        net = skb_net(skb);
-       if (!net_ipvs(net)->enable)
+       ipvs = net_ipvs(net);
+       if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
                return NF_ACCEPT;
 
        return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
index c68198bf9128abfb695571faed1e0390b036a4bb..9e2d1cccd1eb4c647b2ece3b2c3985edc0b5b32d 100644 (file)
@@ -1808,6 +1808,12 @@ static struct ctl_table vs_vars[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "backup_only",
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
 #ifdef CONFIG_IP_VS_DEBUG
        {
                .procname       = "debug_level",
@@ -3741,6 +3747,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
        tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
        ipvs->sysctl_pmtu_disc = 1;
        tbl[idx++].data = &ipvs->sysctl_pmtu_disc;
+       tbl[idx++].data = &ipvs->sysctl_backup_only;
 
 
        ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
index ae8ec6f2768888eec1fb8b8c68b4242eff033525..cd1d7298f7ba779de3e6654b85fafaf7cec58f8e 100644 (file)
@@ -906,7 +906,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
        sctp_chunkhdr_t _sctpch, *sch;
        unsigned char chunk_type;
        int event, next_state;
-       int ihl;
+       int ihl, cofs;
 
 #ifdef CONFIG_IP_VS_IPV6
        ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr);
@@ -914,8 +914,8 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
        ihl = ip_hdrlen(skb);
 #endif
 
-       sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t),
-                               sizeof(_sctpch), &_sctpch);
+       cofs = ihl + sizeof(sctp_sctphdr_t);
+       sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch);
        if (sch == NULL)
                return;
 
@@ -933,10 +933,12 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
         */
        if ((sch->type == SCTP_CID_COOKIE_ECHO) ||
            (sch->type == SCTP_CID_COOKIE_ACK)) {
-               sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) +
-                               sch->length), sizeof(_sctpch), &_sctpch);
-               if (sch) {
-                       if (sch->type == SCTP_CID_ABORT)
+               int clen = ntohs(sch->length);
+
+               if (clen >= sizeof(sctp_chunkhdr_t)) {
+                       sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4),
+                                                sizeof(_sctpch), &_sctpch);
+                       if (sch && sch->type == SCTP_CID_ABORT)
                                chunk_type = sch->type;
                }
        }
index 432f95780003f2e36a4d0fb08aaa10b900e052ff..ba65b2041eb4b7f747f7df605a049e7922b32a55 100644 (file)
@@ -969,6 +969,10 @@ static int __init nf_conntrack_proto_dccp_init(void)
 {
        int ret;
 
+       ret = register_pernet_subsys(&dccp_net_ops);
+       if (ret < 0)
+               goto out_pernet;
+
        ret = nf_ct_l4proto_register(&dccp_proto4);
        if (ret < 0)
                goto out_dccp4;
@@ -977,16 +981,12 @@ static int __init nf_conntrack_proto_dccp_init(void)
        if (ret < 0)
                goto out_dccp6;
 
-       ret = register_pernet_subsys(&dccp_net_ops);
-       if (ret < 0)
-               goto out_pernet;
-
        return 0;
-out_pernet:
-       nf_ct_l4proto_unregister(&dccp_proto6);
 out_dccp6:
        nf_ct_l4proto_unregister(&dccp_proto4);
 out_dccp4:
+       unregister_pernet_subsys(&dccp_net_ops);
+out_pernet:
        return ret;
 }
 
index bd7d01d9c7e77d0e8a3a1e50116f854bb4925e14..155ce9f8a0db047d0e100b1d0283b8b36cd6e082 100644 (file)
@@ -420,18 +420,18 @@ static int __init nf_ct_proto_gre_init(void)
 {
        int ret;
 
-       ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4);
-       if (ret < 0)
-               goto out_gre4;
-
        ret = register_pernet_subsys(&proto_gre_net_ops);
        if (ret < 0)
                goto out_pernet;
 
+       ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4);
+       if (ret < 0)
+               goto out_gre4;
+
        return 0;
-out_pernet:
-       nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4);
 out_gre4:
+       unregister_pernet_subsys(&proto_gre_net_ops);
+out_pernet:
        return ret;
 }
 
index 480f616d59361e0fff4abf182dc9f35974606187..ec83536def9ab89aca0678ac7b7e02a5509b4f01 100644 (file)
@@ -888,6 +888,10 @@ static int __init nf_conntrack_proto_sctp_init(void)
 {
        int ret;
 
+       ret = register_pernet_subsys(&sctp_net_ops);
+       if (ret < 0)
+               goto out_pernet;
+
        ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4);
        if (ret < 0)
                goto out_sctp4;
@@ -896,16 +900,12 @@ static int __init nf_conntrack_proto_sctp_init(void)
        if (ret < 0)
                goto out_sctp6;
 
-       ret = register_pernet_subsys(&sctp_net_ops);
-       if (ret < 0)
-               goto out_pernet;
-
        return 0;
-out_pernet:
-       nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
 out_sctp6:
        nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
 out_sctp4:
+       unregister_pernet_subsys(&sctp_net_ops);
+out_pernet:
        return ret;
 }
 
index 157489581c313b02456b18d0e3e886746a60671f..ca969f6273f77a58851858357c25285b9f22e10e 100644 (file)
@@ -371,6 +371,10 @@ static int __init nf_conntrack_proto_udplite_init(void)
 {
        int ret;
 
+       ret = register_pernet_subsys(&udplite_net_ops);
+       if (ret < 0)
+               goto out_pernet;
+
        ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4);
        if (ret < 0)
                goto out_udplite4;
@@ -379,16 +383,12 @@ static int __init nf_conntrack_proto_udplite_init(void)
        if (ret < 0)
                goto out_udplite6;
 
-       ret = register_pernet_subsys(&udplite_net_ops);
-       if (ret < 0)
-               goto out_pernet;
-
        return 0;
-out_pernet:
-       nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6);
 out_udplite6:
        nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
 out_udplite4:
+       unregister_pernet_subsys(&udplite_net_ops);
+out_pernet:
        return ret;
 }
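
Note on the conntrack protocol hunks above (dccp, gre, sctp, udplite): each one moves register_pernet_subsys() ahead of nf_ct_l4proto_register() and rewrites the error labels so teardown runs in exact reverse order of setup, closing the window where a protocol handler could fire before its per-netns data exists. A generic sketch of that init/unwind ladder, with placeholder setup functions rather than the real netfilter calls:

#include <stdio.h>

/* Placeholder setup/teardown steps standing in for
 * register_pernet_subsys()/nf_ct_l4proto_register() and their inverses. */
static int setup_pernet(void)     { puts("pernet up");   return 0; }
static void teardown_pernet(void) { puts("pernet down"); }
static int setup_proto(void)      { puts("proto up");    return 0; }
static void teardown_proto(void)  { puts("proto down"); }

static int demo_init(void)
{
	int ret;

	ret = setup_pernet();		/* the dependency comes up first */
	if (ret < 0)
		goto out_pernet;

	ret = setup_proto();		/* its user comes up second */
	if (ret < 0)
		goto out_proto;

	return 0;

out_proto:
	teardown_pernet();		/* unwind strictly in reverse order */
out_pernet:
	return ret;
}

int main(void)
{
	if (demo_init())
		return 1;
	/* normal exit tears down in reverse order as well */
	teardown_proto();
	teardown_pernet();
	return 0;
}
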
 
index 858fd52c10408393a901a9afb686226638f32f07..1cb48540f86a96df1e1b26371e5d75589861562d 100644 (file)
@@ -112,7 +112,7 @@ instance_create(u_int16_t queue_num, int portid)
        inst->queue_num = queue_num;
        inst->peer_portid = portid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
-       inst->copy_range = 0xfffff;
+       inst->copy_range = 0xffff;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);
index f2aabb6f410582439604d7a3c0f379a5cd798621..5a55be3f17a54aa2acac3ce109c412c86c652b36 100644 (file)
@@ -142,6 +142,7 @@ int genl_register_mc_group(struct genl_family *family,
        int err = 0;
 
        BUG_ON(grp->name[0] == '\0');
+       BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL);
 
        genl_lock();
 
index 7f8266dd14cbc164a278aa3bbc0438c845fa0bf5..b530afadd76c6168a5374d27935b7d95180f8884 100644 (file)
@@ -68,7 +68,8 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)
        }
 }
 
-static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
+static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen,
+                                   int err)
 {
        struct sock *sk;
        struct hlist_node *tmp;
@@ -100,7 +101,10 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
 
                                nfc_llcp_accept_unlink(accept_sk);
 
+                               if (err)
+                                       accept_sk->sk_err = err;
                                accept_sk->sk_state = LLCP_CLOSED;
+                               accept_sk->sk_state_change(sk);
 
                                bh_unlock_sock(accept_sk);
 
@@ -123,7 +127,10 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
                        continue;
                }
 
+               if (err)
+                       sk->sk_err = err;
                sk->sk_state = LLCP_CLOSED;
+               sk->sk_state_change(sk);
 
                bh_unlock_sock(sk);
 
@@ -133,6 +140,36 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
        }
 
        write_unlock(&local->sockets.lock);
+
+       /*
+        * If we want to keep the listening sockets alive,
+        * we don't touch the RAW ones.
+        */
+       if (listen == true)
+               return;
+
+       write_lock(&local->raw_sockets.lock);
+
+       sk_for_each_safe(sk, tmp, &local->raw_sockets.head) {
+               llcp_sock = nfc_llcp_sock(sk);
+
+               bh_lock_sock(sk);
+
+               nfc_llcp_socket_purge(llcp_sock);
+
+               if (err)
+                       sk->sk_err = err;
+               sk->sk_state = LLCP_CLOSED;
+               sk->sk_state_change(sk);
+
+               bh_unlock_sock(sk);
+
+               sock_orphan(sk);
+
+               sk_del_node_init(sk);
+       }
+
+       write_unlock(&local->raw_sockets.lock);
 }
 
 struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
@@ -142,20 +179,25 @@ struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
        return local;
 }
 
-static void local_release(struct kref *ref)
+static void local_cleanup(struct nfc_llcp_local *local, bool listen)
 {
-       struct nfc_llcp_local *local;
-
-       local = container_of(ref, struct nfc_llcp_local, ref);
-
-       list_del(&local->list);
-       nfc_llcp_socket_release(local, false);
+       nfc_llcp_socket_release(local, listen, ENXIO);
        del_timer_sync(&local->link_timer);
        skb_queue_purge(&local->tx_queue);
        cancel_work_sync(&local->tx_work);
        cancel_work_sync(&local->rx_work);
        cancel_work_sync(&local->timeout_work);
        kfree_skb(local->rx_pending);
+}
+
+static void local_release(struct kref *ref)
+{
+       struct nfc_llcp_local *local;
+
+       local = container_of(ref, struct nfc_llcp_local, ref);
+
+       list_del(&local->list);
+       local_cleanup(local, false);
        kfree(local);
 }
 
@@ -1348,7 +1390,7 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
                return;
 
        /* Close and purge all existing sockets */
-       nfc_llcp_socket_release(local, true);
+       nfc_llcp_socket_release(local, true, 0);
 }
 
 void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
@@ -1427,6 +1469,8 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev)
                return;
        }
 
+       local_cleanup(local, false);
+
        nfc_llcp_local_put(local);
 }
 
index 5332751943a9ec564befadda63a5a3749d7434c1..5c7cdf3f2a83b5a6a85033744121727ecddba70d 100644 (file)
@@ -278,6 +278,8 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,
 
                        pr_debug("Returning sk state %d\n", sk->sk_state);
 
+                       sk_acceptq_removed(parent);
+
                        return sk;
                }
 
index ac2defeeba83ddbbb1ffcacb54deec0714fb85e0..d4d5363c7ba7868731cb9188ad9bc5ccd28b8304 100644 (file)
@@ -58,7 +58,7 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
 
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(skb->data
-                                       + ETH_HLEN, VLAN_HLEN, 0));
+                                       + (2 * ETH_ALEN), VLAN_HLEN, 0));
 
        vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
        *current_tci = vhdr->h_vlan_TCI;
@@ -115,7 +115,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla
 
                if (skb->ip_summed == CHECKSUM_COMPLETE)
                        skb->csum = csum_add(skb->csum, csum_partial(skb->data
-                                       + ETH_HLEN, VLAN_HLEN, 0));
+                                       + (2 * ETH_ALEN), VLAN_HLEN, 0));
 
        }
        __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
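
Note on the two openvswitch VLAN hunks above: the 802.1Q tag sits immediately after the destination and source MAC addresses, 2 * ETH_ALEN = 12 bytes into the frame, not after the full 14-byte ETH_HLEN header, so that is where the four tag bytes have to be folded out of (or into) skb->csum. A plain-C reminder of the offsets:

#include <stdio.h>

#define ETH_ALEN   6	/* bytes in one MAC address */
#define ETH_HLEN  14	/* dst MAC + src MAC + EtherType */
#define VLAN_HLEN  4	/* 802.1Q TPID + TCI */

int main(void)
{
	int tag_off = 2 * ETH_ALEN;	/* where the tag actually sits */

	printf("VLAN tag occupies bytes %d..%d, not %d..%d\n",
	       tag_off, tag_off + VLAN_HLEN - 1,
	       ETH_HLEN, ETH_HLEN + VLAN_HLEN - 1);
	return 0;
}
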
index e87a26506dbaecc3e09af9cc5c75a767bb303e95..a4b724708a1ab0b7d4aedffc07606b8fbfdac0a9 100644 (file)
@@ -394,6 +394,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
 
        skb_copy_and_csum_dev(skb, nla_data(nla));
 
+       genlmsg_end(user_skb, upcall);
        err = genlmsg_unicast(net, user_skb, upcall_info->portid);
 
 out:
@@ -1690,6 +1691,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
        if (IS_ERR(vport))
                goto exit_unlock;
 
+       err = 0;
        reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
                                         OVS_VPORT_CMD_NEW);
        if (IS_ERR(reply)) {
@@ -1771,6 +1773,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
        if (IS_ERR(reply))
                goto exit_unlock;
 
+       err = 0;
        ovs_dp_detach_port(vport);
 
        genl_notify(reply, genl_info_net(info), info->snd_portid,
index 20605ecf100bda104dd5fa7c536787149f0b2ea8..fe0e4215c73d6045469ba006722ef223d43ebd66 100644 (file)
@@ -482,7 +482,11 @@ static __be16 parse_ethertype(struct sk_buff *skb)
                return htons(ETH_P_802_2);
 
        __skb_pull(skb, sizeof(struct llc_snap_hdr));
-       return llc->ethertype;
+
+       if (ntohs(llc->ethertype) >= 1536)
+               return llc->ethertype;
+
+       return htons(ETH_P_802_2);
 }
 
 static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
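
Note on the parse_ethertype() hunk above: values below 1536 (0x600) in the Ethernet type/length field are 802.3 frame lengths rather than protocol numbers, so they must be mapped back to ETH_P_802_2 instead of being returned as an EtherType. A standalone illustration of that cutoff (the macro name below is local to the sketch):

#include <stdint.h>
#include <stdio.h>

#define TYPE_LEN_CUTOFF 1536	/* smallest value that is a real EtherType */

/* Values below the cutoff are 802.3 frame lengths, not protocol numbers,
 * so report "no EtherType" (0 here stands in for ETH_P_802_2). */
static unsigned classify(uint16_t host_order_field)
{
	return host_order_field >= TYPE_LEN_CUTOFF ? host_order_field : 0;
}

int main(void)
{
	printf("0x0800 -> 0x%04x (IPv4 EtherType)\n", classify(0x0800));
	printf("0x0100 -> 0x%04x (a length, maps to LLC)\n", classify(0x0100));
	return 0;
}
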
index 670cbc3518ded5930f900eaca7e0677835cf750b..2130d61c384a227e0d7f5093e285054497e8fb40 100644 (file)
@@ -43,8 +43,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
 
        /* Make our own copy of the packet.  Otherwise we will mangle the
         * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
-        * (No one comes after us, since we tell handle_bridge() that we took
-        * the packet.) */
+        */
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return;
index ba717cc038b3183fc24692e4b76cae8da6cbfbe4..f6b8132ce4cb7380a6bca860b4e50a2aa6756772 100644 (file)
@@ -325,8 +325,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
  * @skb: skb that was received
  *
  * Must be called with rcu_read_lock.  The packet cannot be shared and
- * skb->data should point to the Ethernet header.  The caller must have already
- * called compute_ip_summed() to initialize the checksumming fields.
+ * skb->data should point to the Ethernet header.
  */
 void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
 {
index 4e606fcb2534929a2470e807816ac1089d81b7b2..55786283a3dfe613ee6deac5756b4726a66925ee 100644 (file)
@@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                flow->deficit = q->quantum;
                flow->dropped = 0;
        }
-       if (++sch->q.qlen < sch->limit)
+       if (++sch->q.qlen <= sch->limit)
                return NET_XMIT_SUCCESS;
 
        q->drop_overlimit++;
index ffad48109a228c66ccbd33124525dee83f9158df..eac7e0ee23c18708354e3cef8c237d289b81235f 100644 (file)
@@ -904,7 +904,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
        u64 mult;
        int shift;
 
-       r->rate_bps = rate << 3;
+       r->rate_bps = (u64)rate << 3;
        r->shift = 0;
        r->mult = 1;
        /*
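
Note on the psched_ratecfg_precompute() hunk above: rate is a 32-bit byte rate, and without the cast the expression rate << 3 is evaluated in 32-bit arithmetic, wrapping for byte rates of 2^29 (about 4.3 Gbit/s) and up before the truncated result is widened into the u64 field. A standalone illustration with an arbitrary example value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rate = 600000000u;		/* 600 Mbyte/s, fits in u32 */

	uint64_t wrong = rate << 3;		/* shift done in 32 bits: wraps */
	uint64_t right = (uint64_t)rate << 3;	/* widen first, then shift */

	printf("wrong = %llu bit/s\n", (unsigned long long)wrong);
	printf("right = %llu bit/s\n", (unsigned long long)right);
	return 0;
}
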
index 43cd0dd9149d42e2739985bd8bb4ea15c977de06..d2709e2b7be6642d47a691d27c983a937c7e8648 100644 (file)
@@ -1079,7 +1079,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
                        transports) {
 
                if (transport == active)
-                       break;
+                       continue;
                list_for_each_entry(chunk, &transport->transmitted,
                                transmitted_list) {
                        if (key == chunk->subh.data_hdr->tsn) {
index 5131fcfedb03c0b09e8c7e451341cebd82e49692..de1a0138317f482c028ce583c335501b14d9f917 100644 (file)
@@ -2082,7 +2082,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
        }
 
 	/* Delete the temporary new association. */
-       sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
+       sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
        sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
 
 	/* Restore association pointer to provide SCTP command interpreter
index fb20f25ddec9b70c805ad65d675441d02b62055b..f8529fc8e54275c5b7b9809f0219f20608ffb472 100644 (file)
@@ -180,6 +180,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
                list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
        task->tk_waitqueue = queue;
        queue->qlen++;
+       /* barrier matches the read in rpc_wake_up_task_queue_locked() */
+       smp_wmb();
        rpc_set_queued(task);
 
        dprintk("RPC: %5u added to queue %p \"%s\"\n",
@@ -430,8 +432,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
  */
 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
 {
-       if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
-               __rpc_do_wake_up_task(queue, task);
+       if (RPC_IS_QUEUED(task)) {
+               smp_rmb();
+               if (task->tk_waitqueue == queue)
+                       __rpc_do_wake_up_task(queue, task);
+       }
 }
 
 /*
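
Note on the sunrpc hunk above: it is a publish/consume barrier pairing. The enqueue side stores task->tk_waitqueue, issues smp_wmb(), and only then sets the QUEUED bit; the wake-up side tests the bit, issues smp_rmb(), and only then trusts tk_waitqueue. A userspace sketch of the same ordering with C11 fences (the type and function names are illustrative, and the single-threaded main only demonstrates the call order):

#include <stdatomic.h>
#include <stddef.h>

struct task_demo {
	void *waitqueue;	/* plain field, published via the flag below */
	atomic_int queued;	/* stands in for the RPC_TASK_QUEUED bit */
};

/* enqueue side: write the payload, fence, then set the flag
 * (task->tk_waitqueue = queue; smp_wmb(); rpc_set_queued(task);) */
static void publish(struct task_demo *t, void *queue)
{
	t->waitqueue = queue;
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&t->queued, 1, memory_order_relaxed);
}

/* wake-up side: test the flag, fence, then trust the payload
 * (if (RPC_IS_QUEUED(task)) { smp_rmb(); if (task->tk_waitqueue == queue) ... }) */
static void *consume(struct task_demo *t)
{
	if (!atomic_load_explicit(&t->queued, memory_order_relaxed))
		return NULL;
	atomic_thread_fence(memory_order_acquire);
	return t->waitqueue;
}

int main(void)
{
	static struct task_demo t;
	int q;

	publish(&t, &q);
	return consume(&t) == &q ? 0 : 1;
}
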
index 51be64f163ec166812660db8de26105b1c92eb46..971282b6f6a38fd056dd65d127b85f044b7c6041 100644 (file)
@@ -382,7 +382,7 @@ static void unix_sock_destructor(struct sock *sk)
 #endif
 }
 
-static int unix_release_sock(struct sock *sk, int embrion)
+static void unix_release_sock(struct sock *sk, int embrion)
 {
        struct unix_sock *u = unix_sk(sk);
        struct path path;
@@ -451,8 +451,6 @@ static int unix_release_sock(struct sock *sk, int embrion)
 
        if (unix_tot_inflight)
                unix_gc();              /* Garbage collect fds */
-
-       return 0;
 }
 
 static void init_peercred(struct sock *sk)
@@ -699,9 +697,10 @@ static int unix_release(struct socket *sock)
        if (!sk)
                return 0;
 
+       unix_release_sock(sk, 0);
        sock->sk = NULL;
 
-       return unix_release_sock(sk, 0);
+       return 0;
 }
 
 static int unix_autobind(struct socket *sock)
@@ -1413,8 +1412,8 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
        if (UNIXCB(skb).cred)
                return;
        if (test_bit(SOCK_PASSCRED, &sock->flags) ||
-           !other->sk_socket ||
-           test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
+           (other->sk_socket &&
+           test_bit(SOCK_PASSCRED, &other->sk_socket->flags))) {
                UNIXCB(skb).pid  = get_pid(task_tgid(current));
                UNIXCB(skb).cred = get_current_cred();
        }
index ea4155fe97334f91794dbb7a55f0dab28219c2a3..6ddf74f0ae1e5ace4346f3c0cf6b4f1ce13fa580 100644 (file)
@@ -212,6 +212,39 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
        rdev_rfkill_poll(rdev);
 }
 
+void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
+                             struct wireless_dev *wdev)
+{
+       lockdep_assert_held(&rdev->devlist_mtx);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
+
+       if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE))
+               return;
+
+       if (!wdev->p2p_started)
+               return;
+
+       rdev_stop_p2p_device(rdev, wdev);
+       wdev->p2p_started = false;
+
+       rdev->opencount--;
+
+       if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
+               bool busy = work_busy(&rdev->scan_done_wk);
+
+               /*
+                * If the work isn't pending or running (in which case it would
+                * be waiting for the lock we hold) the driver didn't properly
+                * cancel the scan when the interface was removed. In this case
+                * warn and leak the scan request object to not crash later.
+                */
+               WARN_ON(!busy);
+
+               rdev->scan_req->aborted = true;
+               ___cfg80211_scan_done(rdev, !busy);
+       }
+}
+
 static int cfg80211_rfkill_set_block(void *data, bool blocked)
 {
        struct cfg80211_registered_device *rdev = data;
@@ -221,7 +254,8 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
                return 0;
 
        rtnl_lock();
-       mutex_lock(&rdev->devlist_mtx);
+
+       /* read-only iteration need not hold the devlist_mtx */
 
        list_for_each_entry(wdev, &rdev->wdev_list, list) {
                if (wdev->netdev) {
@@ -231,18 +265,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
                /* otherwise, check iftype */
                switch (wdev->iftype) {
                case NL80211_IFTYPE_P2P_DEVICE:
-                       if (!wdev->p2p_started)
-                               break;
-                       rdev_stop_p2p_device(rdev, wdev);
-                       wdev->p2p_started = false;
-                       rdev->opencount--;
+                       /* but this requires it */
+                       mutex_lock(&rdev->devlist_mtx);
+                       mutex_lock(&rdev->sched_scan_mtx);
+                       cfg80211_stop_p2p_device(rdev, wdev);
+                       mutex_unlock(&rdev->sched_scan_mtx);
+                       mutex_unlock(&rdev->devlist_mtx);
                        break;
                default:
                        break;
                }
        }
 
-       mutex_unlock(&rdev->devlist_mtx);
        rtnl_unlock();
 
        return 0;
@@ -745,17 +779,13 @@ static void wdev_cleanup_work(struct work_struct *work)
        wdev = container_of(work, struct wireless_dev, cleanup_work);
        rdev = wiphy_to_dev(wdev->wiphy);
 
-       cfg80211_lock_rdev(rdev);
+       mutex_lock(&rdev->sched_scan_mtx);
 
        if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
                rdev->scan_req->aborted = true;
                ___cfg80211_scan_done(rdev, true);
        }
 
-       cfg80211_unlock_rdev(rdev);
-
-       mutex_lock(&rdev->sched_scan_mtx);
-
        if (WARN_ON(rdev->sched_scan_req &&
                    rdev->sched_scan_req->dev == wdev->netdev)) {
                __cfg80211_stop_sched_scan(rdev, false);
@@ -781,21 +811,19 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
                return;
 
        mutex_lock(&rdev->devlist_mtx);
+       mutex_lock(&rdev->sched_scan_mtx);
        list_del_rcu(&wdev->list);
        rdev->devlist_generation++;
 
        switch (wdev->iftype) {
        case NL80211_IFTYPE_P2P_DEVICE:
-               if (!wdev->p2p_started)
-                       break;
-               rdev_stop_p2p_device(rdev, wdev);
-               wdev->p2p_started = false;
-               rdev->opencount--;
+               cfg80211_stop_p2p_device(rdev, wdev);
                break;
        default:
                WARN_ON_ONCE(1);
                break;
        }
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
 }
 EXPORT_SYMBOL(cfg80211_unregister_wdev);
@@ -936,6 +964,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                cfg80211_update_iface_num(rdev, wdev->iftype, 1);
                cfg80211_lock_rdev(rdev);
                mutex_lock(&rdev->devlist_mtx);
+               mutex_lock(&rdev->sched_scan_mtx);
                wdev_lock(wdev);
                switch (wdev->iftype) {
 #ifdef CONFIG_CFG80211_WEXT
@@ -967,6 +996,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                        break;
                }
                wdev_unlock(wdev);
+               mutex_unlock(&rdev->sched_scan_mtx);
                rdev->opencount++;
                mutex_unlock(&rdev->devlist_mtx);
                cfg80211_unlock_rdev(rdev);
index 3aec0e429d8adbf9d44c7fbc5bb7fa503e5b9818..5845c2b37aa832b444906ad4fb0e4e74f06c3ee3 100644 (file)
@@ -503,6 +503,9 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
 void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
                               enum nl80211_iftype iftype, int num);
 
+void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
+                             struct wireless_dev *wdev);
+
 #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
 
 #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
index d44ab216c0ecd8b01d4386edf62cd07c3f2071db..58e13a8c95f90a7b880a9d65530799fc02451f1e 100644 (file)
@@ -4702,14 +4702,19 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->scan)
                return -EOPNOTSUPP;
 
-       if (rdev->scan_req)
-               return -EBUSY;
+       mutex_lock(&rdev->sched_scan_mtx);
+       if (rdev->scan_req) {
+               err = -EBUSY;
+               goto unlock;
+       }
 
        if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
                n_channels = validate_scan_freqs(
                                info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
-               if (!n_channels)
-                       return -EINVAL;
+               if (!n_channels) {
+                       err = -EINVAL;
+                       goto unlock;
+               }
        } else {
                enum ieee80211_band band;
                n_channels = 0;
@@ -4723,23 +4728,29 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
                nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp)
                        n_ssids++;
 
-       if (n_ssids > wiphy->max_scan_ssids)
-               return -EINVAL;
+       if (n_ssids > wiphy->max_scan_ssids) {
+               err = -EINVAL;
+               goto unlock;
+       }
 
        if (info->attrs[NL80211_ATTR_IE])
                ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
        else
                ie_len = 0;
 
-       if (ie_len > wiphy->max_scan_ie_len)
-               return -EINVAL;
+       if (ie_len > wiphy->max_scan_ie_len) {
+               err = -EINVAL;
+               goto unlock;
+       }
 
        request = kzalloc(sizeof(*request)
                        + sizeof(*request->ssids) * n_ssids
                        + sizeof(*request->channels) * n_channels
                        + ie_len, GFP_KERNEL);
-       if (!request)
-               return -ENOMEM;
+       if (!request) {
+               err = -ENOMEM;
+               goto unlock;
+       }
 
        if (n_ssids)
                request->ssids = (void *)&request->channels[n_channels];
@@ -4876,6 +4887,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
                kfree(request);
        }
 
+ unlock:
+       mutex_unlock(&rdev->sched_scan_mtx);
        return err;
 }
 
@@ -7749,20 +7762,9 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->stop_p2p_device)
                return -EOPNOTSUPP;
 
-       if (!wdev->p2p_started)
-               return 0;
-
-       rdev_stop_p2p_device(rdev, wdev);
-       wdev->p2p_started = false;
-
-       mutex_lock(&rdev->devlist_mtx);
-       rdev->opencount--;
-       mutex_unlock(&rdev->devlist_mtx);
-
-       if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
-               rdev->scan_req->aborted = true;
-               ___cfg80211_scan_done(rdev, true);
-       }
+       mutex_lock(&rdev->sched_scan_mtx);
+       cfg80211_stop_p2p_device(rdev, wdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 
        return 0;
 }
@@ -8486,7 +8488,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
        struct nlattr *nest;
        int i;
 
-       ASSERT_RDEV_LOCK(rdev);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
 
        if (WARN_ON(!req))
                return 0;
index 674aadca007982257b14b3c68abbafc39ee01461..fd99ea495b7e68cd9e5265542a6aa059d0ac7760 100644 (file)
@@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
        union iwreq_data wrqu;
 #endif
 
-       ASSERT_RDEV_LOCK(rdev);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
 
        request = rdev->scan_req;
 
@@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk)
        rdev = container_of(wk, struct cfg80211_registered_device,
                            scan_done_wk);
 
-       cfg80211_lock_rdev(rdev);
+       mutex_lock(&rdev->sched_scan_mtx);
        ___cfg80211_scan_done(rdev, false);
-       cfg80211_unlock_rdev(rdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
 }
 
 void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
@@ -698,11 +698,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
        found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR);
 
        if (found) {
-               found->pub.beacon_interval = tmp->pub.beacon_interval;
-               found->pub.signal = tmp->pub.signal;
-               found->pub.capability = tmp->pub.capability;
-               found->ts = tmp->ts;
-
                /* Update IEs */
                if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
                        const struct cfg80211_bss_ies *old;
@@ -723,6 +718,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 
                        if (found->pub.hidden_beacon_bss &&
                            !list_empty(&found->hidden_list)) {
+                               const struct cfg80211_bss_ies *f;
+
                                /*
                                 * The found BSS struct is one of the probe
                                 * response members of a group, but we're
@@ -732,6 +729,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                                 * SSID to showing it, which is confusing so
                                 * drop this information.
                                 */
+
+                               f = rcu_access_pointer(tmp->pub.beacon_ies);
+                               kfree_rcu((struct cfg80211_bss_ies *)f,
+                                         rcu_head);
                                goto drop;
                        }
 
@@ -761,6 +762,11 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
                                kfree_rcu((struct cfg80211_bss_ies *)old,
                                          rcu_head);
                }
+
+               found->pub.beacon_interval = tmp->pub.beacon_interval;
+               found->pub.signal = tmp->pub.signal;
+               found->pub.capability = tmp->pub.capability;
+               found->ts = tmp->ts;
        } else {
                struct cfg80211_internal_bss *new;
                struct cfg80211_internal_bss *hidden;
@@ -1056,6 +1062,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
        if (IS_ERR(rdev))
                return PTR_ERR(rdev);
 
+       mutex_lock(&rdev->sched_scan_mtx);
        if (rdev->scan_req) {
                err = -EBUSY;
                goto out;
@@ -1162,6 +1169,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
                dev_hold(dev);
        }
  out:
+       mutex_unlock(&rdev->sched_scan_mtx);
        kfree(creq);
        cfg80211_unlock_rdev(rdev);
        return err;
index f432bd3755b19f0d865b1b2e9f2a132890f3b6a3..09d994d192ffa1c78b963933daad73e4240dfe4b 100644 (file)
@@ -85,6 +85,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
        ASSERT_RTNL();
        ASSERT_RDEV_LOCK(rdev);
        ASSERT_WDEV_LOCK(wdev);
+       lockdep_assert_held(&rdev->sched_scan_mtx);
 
        if (rdev->scan_req)
                return -EBUSY;
@@ -320,11 +321,9 @@ void cfg80211_sme_scan_done(struct net_device *dev)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
 
-       mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
        wdev_lock(wdev);
        __cfg80211_sme_scan_done(dev);
        wdev_unlock(wdev);
-       mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
 }
 
 void cfg80211_sme_rx_auth(struct net_device *dev,
@@ -924,9 +923,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
        int err;
 
        mutex_lock(&rdev->devlist_mtx);
+       /* might request scan - scan_mtx -> wdev_mtx dependency */
+       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(dev->ieee80211_ptr);
        err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL);
        wdev_unlock(dev->ieee80211_ptr);
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
 
        return err;
index b7a531380e19d5bf879f2395eee1176a1abfac93..7586de77a2f8b57ebb9b832b76563ab66a59111a 100644 (file)
@@ -27,7 +27,8 @@
 #define WIPHY_PR_ARG   __entry->wiphy_name
 
 #define WDEV_ENTRY     __field(u32, id)
-#define WDEV_ASSIGN    (__entry->id) = (wdev ? wdev->identifier : 0)
+#define WDEV_ASSIGN    (__entry->id) = (!IS_ERR_OR_NULL(wdev)  \
+                                        ? wdev->identifier : 0)
 #define WDEV_PR_FMT    "wdev(%u)"
 #define WDEV_PR_ARG    (__entry->id)
 
@@ -1778,7 +1779,7 @@ TRACE_EVENT(rdev_set_mac_acl,
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
-               WIPHY_ASSIGN;
+               NETDEV_ASSIGN;
                __entry->acl_policy = params->acl_policy;
        ),
        TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d",
index fb9622f6d99c4b6b6c87f8a8d8e2c88e4fb3705c..e79cb5c0655ad34ec0e8df85471e6c77ad856716 100644 (file)
@@ -89,6 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
 
        cfg80211_lock_rdev(rdev);
        mutex_lock(&rdev->devlist_mtx);
+       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(wdev);
 
        if (wdev->sme_state != CFG80211_SME_IDLE) {
@@ -135,6 +136,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
        err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
        wdev_unlock(wdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
        cfg80211_unlock_rdev(rdev);
        return err;
@@ -190,6 +192,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
 
        cfg80211_lock_rdev(rdev);
        mutex_lock(&rdev->devlist_mtx);
+       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(wdev);
 
        err = 0;
@@ -223,6 +226,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
        err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
        wdev_unlock(wdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
        cfg80211_unlock_rdev(rdev);
        return err;
@@ -285,6 +289,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
 
        cfg80211_lock_rdev(rdev);
        mutex_lock(&rdev->devlist_mtx);
+       mutex_lock(&rdev->sched_scan_mtx);
        wdev_lock(wdev);
 
        if (wdev->sme_state != CFG80211_SME_IDLE) {
@@ -313,6 +318,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
        err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
        wdev_unlock(wdev);
+       mutex_unlock(&rdev->sched_scan_mtx);
        mutex_unlock(&rdev->devlist_mtx);
        cfg80211_unlock_rdev(rdev);
        return err;
index 35754cc8a9e5b9c51cdaf52693128ee2098718f7..8dafe6d3c6e41ebd20e5d0347d4ab9b0e6326a34 100644 (file)
@@ -334,6 +334,70 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
                x->xflags &= ~XFRM_TIME_DEFER;
 }
 
+static void xfrm_replay_notify_esn(struct xfrm_state *x, int event)
+{
+       u32 seq_diff, oseq_diff;
+       struct km_event c;
+       struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
+       struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn;
+
+       /* we send notify messages in case
+	 *  1. we updated one of the sequence numbers, and the seqno difference
+        *     is at least x->replay_maxdiff, in this case we also update the
+        *     timeout of our timer function
+        *  2. if x->replay_maxage has elapsed since last update,
+        *     and there were changes
+        *
+        *  The state structure must be locked!
+        */
+
+       switch (event) {
+       case XFRM_REPLAY_UPDATE:
+               if (!x->replay_maxdiff)
+                       break;
+
+               if (replay_esn->seq_hi == preplay_esn->seq_hi)
+                       seq_diff = replay_esn->seq - preplay_esn->seq;
+               else
+                       seq_diff = ~preplay_esn->seq + replay_esn->seq + 1;
+
+               if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
+                       oseq_diff = replay_esn->oseq - preplay_esn->oseq;
+               else
+                       oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1;
+
+               if (seq_diff < x->replay_maxdiff &&
+                   oseq_diff < x->replay_maxdiff) {
+
+                       if (x->xflags & XFRM_TIME_DEFER)
+                               event = XFRM_REPLAY_TIMEOUT;
+                       else
+                               return;
+               }
+
+               break;
+
+       case XFRM_REPLAY_TIMEOUT:
+               if (memcmp(x->replay_esn, x->preplay_esn,
+                          xfrm_replay_state_esn_len(replay_esn)) == 0) {
+                       x->xflags |= XFRM_TIME_DEFER;
+                       return;
+               }
+
+               break;
+       }
+
+       memcpy(x->preplay_esn, x->replay_esn,
+              xfrm_replay_state_esn_len(replay_esn));
+       c.event = XFRM_MSG_NEWAE;
+       c.data.aevent = event;
+       km_state_notify(x, &c);
+
+       if (x->replay_maxage &&
+           !mod_timer(&x->rtimer, jiffies + x->replay_maxage))
+               x->xflags &= ~XFRM_TIME_DEFER;
+}
+
 static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err = 0;
@@ -510,7 +574,7 @@ static struct xfrm_replay xfrm_replay_esn = {
        .advance        = xfrm_replay_advance_esn,
        .check          = xfrm_replay_check_esn,
        .recheck        = xfrm_replay_recheck_esn,
-       .notify         = xfrm_replay_notify_bmp,
+       .notify         = xfrm_replay_notify_esn,
        .overflow       = xfrm_replay_overflow_esn,
 };
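
Note on the new xfrm_replay_notify_esn() above: the sequence advance is computed in a wraparound-safe way. When the low 32-bit word has wrapped (the seq_hi halves differ), ~old + new + 1 is used, which equals new - old in modulo-2^32 arithmetic. A small standalone check of that identity:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* cur - old, modulo 2^32, spelled the way the patch writes it */
static uint32_t seq_diff(uint32_t old, uint32_t cur)
{
	return ~old + cur + 1u;
}

int main(void)
{
	/* no wrap: 1000 -> 1500 advanced by 500 */
	printf("%" PRIu32 "\n", seq_diff(1000u, 1500u));
	/* wrapped: 0xfffffff0 -> 0x00000010 advanced by 32 */
	printf("%" PRIu32 "\n", seq_diff(0xfffffff0u, 0x10u));
	return 0;
}
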
 
index 48665ecd119715359cc4ffa624afee5c4332dd57..8ab2951545170e6e81fb71fc7ee1e8cad5c2dd72 100644 (file)
@@ -310,7 +310,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
 
        if (old_ctx) {
                new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len,
-                                 GFP_KERNEL);
+                                 GFP_ATOMIC);
                if (!new_ctx)
                        return -ENOMEM;
 
index 23414b93771f30ec82ccf76b6cfb49fbed27edef..13c88fbcf0371cc32340791e335eeb0b4758f875 100644 (file)
@@ -347,10 +347,8 @@ int yama_ptrace_traceme(struct task_struct *parent)
        /* Only disallow PTRACE_TRACEME on more aggressive settings. */
        switch (ptrace_scope) {
        case YAMA_SCOPE_CAPABILITY:
-               rcu_read_lock();
-               if (!ns_capable(__task_cred(parent)->user_ns, CAP_SYS_PTRACE))
+               if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
                        rc = -EPERM;
-               rcu_read_unlock();
                break;
        case YAMA_SCOPE_NO_ATTACH:
                rc = -EPERM;
index a9ebcf9e3710ece320bd656bd45e8ff615b0eb75..ecdf30eb5879ab4547e1e72b70bca366cb39ca78 100644 (file)
@@ -3144,7 +3144,7 @@ static unsigned int convert_to_spdif_status(unsigned short val)
        if (val & AC_DIG1_PROFESSIONAL)
                sbits |= IEC958_AES0_PROFESSIONAL;
        if (sbits & IEC958_AES0_PROFESSIONAL) {
-               if (sbits & AC_DIG1_EMPHASIS)
+               if (val & AC_DIG1_EMPHASIS)
                        sbits |= IEC958_AES0_PRO_EMPHASIS_5015;
        } else {
                if (val & AC_DIG1_EMPHASIS)
index 78897d05d80f281d9bc8427a09a4da426f90dbe4..43c2ea5395618f9ca1c166c9125b006839714f6d 100644 (file)
@@ -995,6 +995,8 @@ enum {
        BAD_NO_EXTRA_SURR_DAC = 0x101,
        /* Primary DAC shared with main surrounds */
        BAD_SHARED_SURROUND = 0x100,
+       /* No independent HP possible */
+       BAD_NO_INDEP_HP = 0x40,
        /* Primary DAC shared with main CLFE */
        BAD_SHARED_CLFE = 0x10,
        /* Primary DAC shared with extra surrounds */
@@ -1392,6 +1394,43 @@ static int check_aamix_out_path(struct hda_codec *codec, int path_idx)
        return snd_hda_get_path_idx(codec, path);
 }
 
+/* check whether the independent HP is available with the current config */
+static bool indep_hp_possible(struct hda_codec *codec)
+{
+       struct hda_gen_spec *spec = codec->spec;
+       struct auto_pin_cfg *cfg = &spec->autocfg;
+       struct nid_path *path;
+       int i, idx;
+
+       if (cfg->line_out_type == AUTO_PIN_HP_OUT)
+               idx = spec->out_paths[0];
+       else
+               idx = spec->hp_paths[0];
+       path = snd_hda_get_path_from_idx(codec, idx);
+       if (!path)
+               return false;
+
+       /* assume no path conflicts unless aamix is involved */
+       if (!spec->mixer_nid || !is_nid_contained(path, spec->mixer_nid))
+               return true;
+
+       /* check whether output paths contain aamix */
+       for (i = 0; i < cfg->line_outs; i++) {
+               if (spec->out_paths[i] == idx)
+                       break;
+               path = snd_hda_get_path_from_idx(codec, spec->out_paths[i]);
+               if (path && is_nid_contained(path, spec->mixer_nid))
+                       return false;
+       }
+       for (i = 0; i < cfg->speaker_outs; i++) {
+               path = snd_hda_get_path_from_idx(codec, spec->speaker_paths[i]);
+               if (path && is_nid_contained(path, spec->mixer_nid))
+                       return false;
+       }
+
+       return true;
+}
+
 /* fill the empty entries in the dac array for speaker/hp with the
  * shared dac pointed by the paths
  */
@@ -1545,6 +1584,9 @@ static int fill_and_eval_dacs(struct hda_codec *codec,
                badness += BAD_MULTI_IO;
        }
 
+       if (spec->indep_hp && !indep_hp_possible(codec))
+               badness += BAD_NO_INDEP_HP;
+
        /* re-fill the shared DAC for speaker / headphone */
        if (cfg->line_out_type != AUTO_PIN_HP_OUT)
                refill_shared_dacs(codec, cfg->hp_outs,
@@ -1758,6 +1800,10 @@ static int parse_output_paths(struct hda_codec *codec)
                                cfg->speaker_pins, val);
        }
 
+       /* clear indep_hp flag if not available */
+       if (spec->indep_hp && !indep_hp_possible(codec))
+               spec->indep_hp = 0;
+
        kfree(best_cfg);
        return 0;
 }
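
The generic parser scores each candidate DAC routing with a cumulative "badness" and keeps the cheapest; an unrealizable independent-HP path now adds BAD_NO_INDEP_HP rather than aborting, and the flag is cleared afterwards if the winning config still cannot provide it. A simplified stand-alone sketch of that score-then-clear pattern, with invented struct and helper names:

#include <stdbool.h>
#include <stdio.h>

#define BAD_NO_INDEP_HP 0x40

struct candidate {
        bool want_indep_hp;
        bool indep_hp_possible;
        int  badness;
};

/* Penalize, but do not reject, a config that cannot do independent HP. */
static int score(struct candidate *c)
{
        if (c->want_indep_hp && !c->indep_hp_possible)
                c->badness += BAD_NO_INDEP_HP;
        return c->badness;
}

/* After the best config is chosen, drop the feature flag if it is
 * still not realizable, so no dangling user control is created. */
static void finalize(struct candidate *c)
{
        if (c->want_indep_hp && !c->indep_hp_possible)
                c->want_indep_hp = false;
}

int main(void)
{
        struct candidate c = { .want_indep_hp = true, .indep_hp_possible = false };

        printf("badness=%d\n", score(&c));
        finalize(&c);
        printf("indep_hp=%d\n", c.want_indep_hp);
        return 0;
}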
index 4cea6bb6fade09c1dfcfadd0ecd2cdab798a134a..418bfc0eb0a3127234899aa6f84586feaa19eeef 100644 (file)
@@ -415,6 +415,8 @@ struct azx_dev {
        unsigned int opened :1;
        unsigned int running :1;
        unsigned int irq_pending :1;
+       unsigned int prepared:1;
+       unsigned int locked:1;
        /*
         * For VIA:
         *  A flag to ensure DMA position is 0
@@ -426,8 +428,25 @@ struct azx_dev {
 
        struct timecounter  azx_tc;
        struct cyclecounter azx_cc;
+
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+       struct mutex dsp_mutex;
+#endif
 };
 
+/* DSP lock helpers */
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+#define dsp_lock_init(dev)     mutex_init(&(dev)->dsp_mutex)
+#define dsp_lock(dev)          mutex_lock(&(dev)->dsp_mutex)
+#define dsp_unlock(dev)                mutex_unlock(&(dev)->dsp_mutex)
+#define dsp_is_locked(dev)     ((dev)->locked)
+#else
+#define dsp_lock_init(dev)     do {} while (0)
+#define dsp_lock(dev)          do {} while (0)
+#define dsp_unlock(dev)                do {} while (0)
+#define dsp_is_locked(dev)     0
+#endif
+
 /* CORB/RIRB */
 struct azx_rb {
        u32 *buf;               /* CORB/RIRB buffer
@@ -527,6 +546,10 @@ struct azx {
 
        /* card list (for power_save trigger) */
        struct list_head list;
+
+#ifdef CONFIG_SND_HDA_DSP_LOADER
+       struct azx_dev saved_azx_dev;
+#endif
 };
 
 #define CREATE_TRACE_POINTS
@@ -1793,15 +1816,25 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
                dev = chip->capture_index_offset;
                nums = chip->capture_streams;
        }
-       for (i = 0; i < nums; i++, dev++)
-               if (!chip->azx_dev[dev].opened) {
-                       res = &chip->azx_dev[dev];
-                       if (res->assigned_key == key)
-                               break;
+       for (i = 0; i < nums; i++, dev++) {
+               struct azx_dev *azx_dev = &chip->azx_dev[dev];
+               dsp_lock(azx_dev);
+               if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
+                       res = azx_dev;
+                       if (res->assigned_key == key) {
+                               res->opened = 1;
+                               res->assigned_key = key;
+                               dsp_unlock(azx_dev);
+                               return azx_dev;
+                       }
                }
+               dsp_unlock(azx_dev);
+       }
        if (res) {
+               dsp_lock(res);
                res->opened = 1;
                res->assigned_key = key;
+               dsp_unlock(res);
        }
        return res;
 }
@@ -2009,6 +2042,12 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
        struct azx_dev *azx_dev = get_azx_dev(substream);
        int ret;
 
+       dsp_lock(azx_dev);
+       if (dsp_is_locked(azx_dev)) {
+               ret = -EBUSY;
+               goto unlock;
+       }
+
        mark_runtime_wc(chip, azx_dev, substream, false);
        azx_dev->bufsize = 0;
        azx_dev->period_bytes = 0;
@@ -2016,8 +2055,10 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
        ret = snd_pcm_lib_malloc_pages(substream,
                                        params_buffer_bytes(hw_params));
        if (ret < 0)
-               return ret;
+               goto unlock;
        mark_runtime_wc(chip, azx_dev, substream, true);
+ unlock:
+       dsp_unlock(azx_dev);
        return ret;
 }
 
@@ -2029,16 +2070,21 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
        struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream];
 
        /* reset BDL address */
-       azx_sd_writel(azx_dev, SD_BDLPL, 0);
-       azx_sd_writel(azx_dev, SD_BDLPU, 0);
-       azx_sd_writel(azx_dev, SD_CTL, 0);
-       azx_dev->bufsize = 0;
-       azx_dev->period_bytes = 0;
-       azx_dev->format_val = 0;
+       dsp_lock(azx_dev);
+       if (!dsp_is_locked(azx_dev)) {
+               azx_sd_writel(azx_dev, SD_BDLPL, 0);
+               azx_sd_writel(azx_dev, SD_BDLPU, 0);
+               azx_sd_writel(azx_dev, SD_CTL, 0);
+               azx_dev->bufsize = 0;
+               azx_dev->period_bytes = 0;
+               azx_dev->format_val = 0;
+       }
 
        snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
 
        mark_runtime_wc(chip, azx_dev, substream, false);
+       azx_dev->prepared = 0;
+       dsp_unlock(azx_dev);
        return snd_pcm_lib_free_pages(substream);
 }
 
@@ -2055,6 +2101,12 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
                snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid);
        unsigned short ctls = spdif ? spdif->ctls : 0;
 
+       dsp_lock(azx_dev);
+       if (dsp_is_locked(azx_dev)) {
+               err = -EBUSY;
+               goto unlock;
+       }
+
        azx_stream_reset(chip, azx_dev);
        format_val = snd_hda_calc_stream_format(runtime->rate,
                                                runtime->channels,
@@ -2065,7 +2117,8 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
                snd_printk(KERN_ERR SFX
                           "%s: invalid format_val, rate=%d, ch=%d, format=%d\n",
                           pci_name(chip->pci), runtime->rate, runtime->channels, runtime->format);
-               return -EINVAL;
+               err = -EINVAL;
+               goto unlock;
        }
 
        bufsize = snd_pcm_lib_buffer_bytes(substream);
@@ -2084,7 +2137,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
                azx_dev->no_period_wakeup = runtime->no_period_wakeup;
                err = azx_setup_periods(chip, substream, azx_dev);
                if (err < 0)
-                       return err;
+                       goto unlock;
        }
 
        /* wallclk has 24Mhz clock source */
@@ -2101,8 +2154,14 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
        if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
            stream_tag > chip->capture_streams)
                stream_tag -= chip->capture_streams;
-       return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
+       err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
                                     azx_dev->format_val, substream);
+
+ unlock:
+       if (!err)
+               azx_dev->prepared = 1;
+       dsp_unlock(azx_dev);
+       return err;
 }
 
 static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
@@ -2117,6 +2176,9 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        azx_dev = get_azx_dev(substream);
        trace_azx_pcm_trigger(chip, azx_dev, cmd);
 
+       if (dsp_is_locked(azx_dev) || !azx_dev->prepared)
+               return -EPIPE;
+
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
                rstart = 1;
@@ -2621,17 +2683,27 @@ static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
        struct azx_dev *azx_dev;
        int err;
 
-       if (snd_hda_lock_devices(bus))
-               return -EBUSY;
+       azx_dev = azx_get_dsp_loader_dev(chip);
+
+       dsp_lock(azx_dev);
+       spin_lock_irq(&chip->reg_lock);
+       if (azx_dev->running || azx_dev->locked) {
+               spin_unlock_irq(&chip->reg_lock);
+               err = -EBUSY;
+               goto unlock;
+       }
+       azx_dev->prepared = 0;
+       chip->saved_azx_dev = *azx_dev;
+       azx_dev->locked = 1;
+       spin_unlock_irq(&chip->reg_lock);
 
        err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG,
                                  snd_dma_pci_data(chip->pci),
                                  byte_size, bufp);
        if (err < 0)
-               goto unlock;
+               goto err_alloc;
 
        mark_pages_wc(chip, bufp, true);
-       azx_dev = azx_get_dsp_loader_dev(chip);
        azx_dev->bufsize = byte_size;
        azx_dev->period_bytes = byte_size;
        azx_dev->format_val = format;
@@ -2649,13 +2721,20 @@ static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format,
                goto error;
 
        azx_setup_controller(chip, azx_dev);
+       dsp_unlock(azx_dev);
        return azx_dev->stream_tag;
 
  error:
        mark_pages_wc(chip, bufp, false);
        snd_dma_free_pages(bufp);
-unlock:
-       snd_hda_unlock_devices(bus);
+ err_alloc:
+       spin_lock_irq(&chip->reg_lock);
+       if (azx_dev->opened)
+               *azx_dev = chip->saved_azx_dev;
+       azx_dev->locked = 0;
+       spin_unlock_irq(&chip->reg_lock);
+ unlock:
+       dsp_unlock(azx_dev);
        return err;
 }
 
@@ -2677,9 +2756,10 @@ static void azx_load_dsp_cleanup(struct hda_bus *bus,
        struct azx *chip = bus->private_data;
        struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip);
 
-       if (!dmab->area)
+       if (!dmab->area || !azx_dev->locked)
                return;
 
+       dsp_lock(azx_dev);
        /* reset BDL address */
        azx_sd_writel(azx_dev, SD_BDLPL, 0);
        azx_sd_writel(azx_dev, SD_BDLPU, 0);
@@ -2692,7 +2772,12 @@ static void azx_load_dsp_cleanup(struct hda_bus *bus,
        snd_dma_free_pages(dmab);
        dmab->area = NULL;
 
-       snd_hda_unlock_devices(bus);
+       spin_lock_irq(&chip->reg_lock);
+       if (azx_dev->opened)
+               *azx_dev = chip->saved_azx_dev;
+       azx_dev->locked = 0;
+       spin_unlock_irq(&chip->reg_lock);
+       dsp_unlock(azx_dev);
 }
 #endif /* CONFIG_SND_HDA_DSP_LOADER */
 
@@ -3481,6 +3566,7 @@ static int azx_first_init(struct azx *chip)
        }
 
        for (i = 0; i < chip->num_streams; i++) {
+               dsp_lock_init(&chip->azx_dev[i]);
                /* allocate memory for the BDL for each stream */
                err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
                                          snd_dma_pci_data(chip->pci),
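
The pattern introduced above gives every stream a DSP mutex plus a "locked" flag; the helpers compile away when the DSP loader is configured out, and each PCM entry point bails out with -EBUSY (or -EPIPE from trigger) when the firmware loader currently owns the stream. A hedged caller-side sketch with simplified types and a stand-in config symbol:

#include <linux/errno.h>
#include <linux/mutex.h>

struct example_stream {
        unsigned int locked:1;
#ifdef CONFIG_EXAMPLE_DSP_LOADER        /* stand-in config symbol */
        struct mutex dsp_mutex;
#endif
};

#ifdef CONFIG_EXAMPLE_DSP_LOADER
#define ex_dsp_lock_init(s)     mutex_init(&(s)->dsp_mutex)
#define ex_dsp_lock(s)          mutex_lock(&(s)->dsp_mutex)
#define ex_dsp_unlock(s)        mutex_unlock(&(s)->dsp_mutex)
#define ex_dsp_is_locked(s)     ((s)->locked)
#else
#define ex_dsp_lock_init(s)     do {} while (0)
#define ex_dsp_lock(s)          do {} while (0)
#define ex_dsp_unlock(s)        do {} while (0)
#define ex_dsp_is_locked(s)     0
#endif

/* Typical PCM callback shape: refuse to touch a stream the DSP
 * loader has claimed, then do the normal work under the lock. */
static int example_hw_params(struct example_stream *s)
{
        int ret = 0;

        ex_dsp_lock(s);
        if (ex_dsp_is_locked(s)) {
                ret = -EBUSY;
                goto unlock;
        }
        /* ... buffer allocation and setup would go here ... */
unlock:
        ex_dsp_unlock(s);
        return ret;
}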
index 60d08f669f0c1dec9c4a165437f466dec73071bb..0d9c58f135606f8d422d5422d1742e76cf7f6572 100644 (file)
@@ -168,10 +168,10 @@ static void cs_automute(struct hda_codec *codec)
        snd_hda_gen_update_outputs(codec);
 
        if (spec->gpio_eapd_hp) {
-               unsigned int gpio = spec->gen.hp_jack_present ?
+               spec->gpio_data = spec->gen.hp_jack_present ?
                        spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
                snd_hda_codec_write(codec, 0x01, 0,
-                                   AC_VERB_SET_GPIO_DATA, gpio);
+                                   AC_VERB_SET_GPIO_DATA, spec->gpio_data);
        }
 }
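
The fix above stores the value that actually went to the GPIO pins in spec->gpio_data instead of a local temporary, so later writers (for example the resume path) replay the current state rather than a stale one. A tiny sketch of the cache-what-you-wrote idea, with invented names:

struct example_spec {
        unsigned int gpio_data;         /* last value written to the pins */
        unsigned int gpio_eapd_hp;
        unsigned int gpio_eapd_speaker;
};

static void example_automute(struct example_spec *spec, int hp_present,
                             void (*write_gpio)(unsigned int val))
{
        spec->gpio_data = hp_present ? spec->gpio_eapd_hp
                                     : spec->gpio_eapd_speaker;
        write_gpio(spec->gpio_data);    /* cached copy stays in sync */
}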
 
index 941bf6c766ecf5f6ff2aa35e28e33563066c58ef..2a89d1eefeb6059623a985fea7cdb464dc4c7390 100644 (file)
@@ -1142,7 +1142,7 @@ static int patch_cxt5045(struct hda_codec *codec)
        }
 
        if (spec->beep_amp)
-               snd_hda_attach_beep_device(codec, spec->beep_amp);
+               snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
 
        return 0;
 }
@@ -1921,7 +1921,7 @@ static int patch_cxt5051(struct hda_codec *codec)
        }
 
        if (spec->beep_amp)
-               snd_hda_attach_beep_device(codec, spec->beep_amp);
+               snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
 
        return 0;
 }
@@ -3099,7 +3099,7 @@ static int patch_cxt5066(struct hda_codec *codec)
        }
 
        if (spec->beep_amp)
-               snd_hda_attach_beep_device(codec, spec->beep_amp);
+               snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
 
        return 0;
 }
@@ -3191,11 +3191,17 @@ static int cx_auto_build_controls(struct hda_codec *codec)
        return 0;
 }
 
+static void cx_auto_free(struct hda_codec *codec)
+{
+       snd_hda_detach_beep_device(codec);
+       snd_hda_gen_free(codec);
+}
+
 static const struct hda_codec_ops cx_auto_patch_ops = {
        .build_controls = cx_auto_build_controls,
        .build_pcms = snd_hda_gen_build_pcms,
        .init = snd_hda_gen_init,
-       .free = snd_hda_gen_free,
+       .free = cx_auto_free,
        .unsol_event = snd_hda_jack_unsol_event,
 #ifdef CONFIG_PM
        .check_power_status = snd_hda_gen_check_power_status,
@@ -3391,7 +3397,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
 
        codec->patch_ops = cx_auto_patch_ops;
        if (spec->beep_amp)
-               snd_hda_attach_beep_device(codec, spec->beep_amp);
+               snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
 
        /* Some laptops with Conexant chips show stalls in S3 resume,
         * which falls into the single-cmd mode.
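
spec->beep_amp holds a packed amp value (widget NID plus extra control bits), while snd_hda_attach_beep_device() expects a bare NID, hence the get_amp_nid_() unwrap repeated in the hunks above; the new cx_auto_free() then detaches the beep device before the generic free. A rough illustration of the pack/unpack mismatch, using made-up macros rather than the driver's real bit layout:

#include <stdio.h>

typedef unsigned short ex_nid_t;

/* Illustrative packing only: assume the NID lives in the low 16 bits. */
#define EX_COMPOSE_AMP_VAL(nid, dir)    (((unsigned int)(nid)) | ((dir) << 16))
#define EX_GET_AMP_NID(val)             ((ex_nid_t)((val) & 0xffff))

static int attach_beep(ex_nid_t nid)
{
        printf("attach beep on NID 0x%02x\n", nid);
        return 0;
}

int main(void)
{
        unsigned int beep_amp = EX_COMPOSE_AMP_VAL(0x13, 1);

        /* pass the unpacked NID, not the packed amp value */
        return beep_amp ? attach_beep(EX_GET_AMP_NID(beep_amp)) : 0;
}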
index 638e7f738018bf155d55b8ffecc1af5f9a1fc15b..ca4739c3f65021fdef6d8493fb11225ad2746fb1 100644 (file)
@@ -715,8 +715,9 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
                case UAC2_CLOCK_SELECTOR: {
                        struct uac_selector_unit_descriptor *d = p1;
                        /* call recursively to retrieve the channel info */
-                       if (check_input_term(state, d->baSourceID[0], term) < 0)
-                               return -ENODEV;
+                       err = check_input_term(state, d->baSourceID[0], term);
+                       if (err < 0)
+                               return err;
                        term->type = d->bDescriptorSubtype << 16; /* virtual type */
                        term->id = id;
                        term->name = uac_selector_unit_iSelector(d);
@@ -725,7 +726,8 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_
                case UAC1_PROCESSING_UNIT:
                case UAC1_EXTENSION_UNIT:
                /* UAC2_PROCESSING_UNIT_V2 */
-               /* UAC2_EFFECT_UNIT */ {
+               /* UAC2_EFFECT_UNIT */
+               case UAC2_EXTENSION_UNIT_V2: {
                        struct uac_processing_unit_descriptor *d = p1;
 
                        if (state->mixer->protocol == UAC_VERSION_2 &&
@@ -1356,8 +1358,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
                return err;
 
        /* determine the input source type and name */
-       if (check_input_term(state, hdr->bSourceID, &iterm) < 0)
-               return -EINVAL;
+       err = check_input_term(state, hdr->bSourceID, &iterm);
+       if (err < 0)
+               return err;
 
        master_bits = snd_usb_combine_bytes(bmaControls, csize);
        /* master configuration quirks */
@@ -2052,6 +2055,8 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
                        return parse_audio_extension_unit(state, unitid, p1);
                else /* UAC_VERSION_2 */
                        return parse_audio_processing_unit(state, unitid, p1);
+       case UAC2_EXTENSION_UNIT_V2:
+               return parse_audio_extension_unit(state, unitid, p1);
        default:
                snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]);
                return -EINVAL;
@@ -2118,7 +2123,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
                        state.oterm.type = le16_to_cpu(desc->wTerminalType);
                        state.oterm.name = desc->iTerminal;
                        err = parse_audio_unit(&state, desc->bSourceID);
-                       if (err < 0)
+                       if (err < 0 && err != -EINVAL)
                                return err;
                } else { /* UAC_VERSION_2 */
                        struct uac2_output_terminal_descriptor *desc = p;
@@ -2130,12 +2135,12 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
                        state.oterm.type = le16_to_cpu(desc->wTerminalType);
                        state.oterm.name = desc->iTerminal;
                        err = parse_audio_unit(&state, desc->bSourceID);
-                       if (err < 0)
+                       if (err < 0 && err != -EINVAL)
                                return err;
 
                        /* for UAC2, use the same approach to also add the clock selectors */
                        err = parse_audio_unit(&state, desc->bCSourceID);
-                       if (err < 0)
+                       if (err < 0 && err != -EINVAL)
                                return err;
                }
        }
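
Two related changes above: the inner parsers now propagate the callee's error code unchanged instead of flattening it to -ENODEV/-EINVAL, and the top-level walk treats -EINVAL (an unknown or unsupported unit, such as the newly handled UAC2 extension units on unusual firmware) as non-fatal so the rest of the mixer still gets built. A compact userspace sketch of that policy; parse_unit() is a hypothetical stand-in:

#include <errno.h>

/* Hypothetical inner parser: unknown ids are reported as -EINVAL. */
static int parse_unit(int id)
{
        return (id >= 0) ? 0 : -EINVAL;
}

static int parse_term(int id)
{
        int err = parse_unit(id);

        if (err < 0)
                return err;     /* propagate, don't rewrite to -ENODEV */
        return 0;
}

static int parse_all(const int *ids, int n)
{
        int i, err;

        for (i = 0; i < n; i++) {
                err = parse_unit(ids[i]);
                if (err < 0 && err != -EINVAL)
                        return err;     /* skip unsupported units, keep going */
        }
        return 0;
}

int main(void)
{
        int ids[] = { 1, -1, 2 };

        return parse_all(ids, 3) || parse_term(1);
}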
index a20e32033431a38bc752e2d3476ecafe49e3c78f..0b0a90787db64b010277005c5a7980f60d8fbdbc 100644 (file)
@@ -122,7 +122,7 @@ export Q VERBOSE
 
 EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
 
-INCLUDES = -I. -I/usr/local/include $(CONFIG_INCLUDES)
+INCLUDES = -I. $(CONFIG_INCLUDES)
 
 # Set compile option CFLAGS if not set elsewhere
 CFLAGS ?= -g -Wall
index a2108ca1cc17ad02e331aa02adf1870cc56a07ba..bb74c79cd16ed2ad02dbd33ee5f791fb9de2b3c2 100644 (file)
@@ -95,7 +95,7 @@ ifeq ("$(origin DEBUG)", "command line")
   PERF_DEBUG = $(DEBUG)
 endif
 ifndef PERF_DEBUG
-  CFLAGS_OPTIMIZE = -O6 -D_FORTIFY_SOURCE=2
+  CFLAGS_OPTIMIZE = -O6
 endif
 
 ifdef PARSER_DEBUG
@@ -180,6 +180,12 @@ ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -Wvolatile-register-var,-W
        CFLAGS := $(CFLAGS) -Wvolatile-register-var
 endif
 
+ifndef PERF_DEBUG
+       ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -D_FORTIFY_SOURCE=2,-D_FORTIFY_SOURCE=2),y)
+               CFLAGS := $(CFLAGS) -D_FORTIFY_SOURCE=2
+       endif
+endif
+
 ### --- END CONFIGURATION SECTION ---
 
 ifeq ($(srctree),)
index a5223e6a7b432a0b04faf5ba5dfc130cd9ed150e..0fdc85269c4dc4c2bd72220526879a07e76679be 100644 (file)
@@ -1,6 +1,30 @@
 #ifndef BENCH_H
 #define BENCH_H
 
+/*
+ * The madvise transparent hugepage constants were added in glibc
+ * 2.13. For compatibility with older versions of glibc, define these
+ * tokens if they are not already defined.
+ *
+ * PA-RISC uses different madvise values from other architectures and
+ * needs to be special-cased.
+ */
+#ifdef __hppa__
+# ifndef MADV_HUGEPAGE
+#  define MADV_HUGEPAGE                67
+# endif
+# ifndef MADV_NOHUGEPAGE
+#  define MADV_NOHUGEPAGE      68
+# endif
+#else
+# ifndef MADV_HUGEPAGE
+#  define MADV_HUGEPAGE                14
+# endif
+# ifndef MADV_NOHUGEPAGE
+#  define MADV_NOHUGEPAGE      15
+# endif
+#endif
+
 extern int bench_numa(int argc, const char **argv, const char *prefix);
 extern int bench_sched_messaging(int argc, const char **argv, const char *prefix);
 extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
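
The fallback defines above exist because glibc only gained MADV_HUGEPAGE/MADV_NOHUGEPAGE in 2.13, while the kernel accepts the values regardless (PA-RISC using its own numbering). A small stand-alone example of the same compatibility trick around a madvise() call; the 2 MiB size is arbitrary:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14        /* non-PA-RISC value, as in bench.h above */
#endif

int main(void)
{
        size_t len = 2 * 1024 * 1024;   /* one typical huge page */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return EXIT_FAILURE;
        }
        if (madvise(p, len, MADV_HUGEPAGE) != 0)
                perror("madvise(MADV_HUGEPAGE)");       /* advisory only */

        munmap(p, len);
        return 0;
}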
index 774c90713a53fe8a27f0561a8476d5c23748edab..f1a939ebc19c5d4aad0afbe1b96201aadbdb0511 100644 (file)
@@ -573,13 +573,15 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
                                         perf_event__synthesize_guest_os, tool);
        }
 
-       if (!opts->target.system_wide)
+       if (perf_target__has_task(&opts->target))
                err = perf_event__synthesize_thread_map(tool, evsel_list->threads,
                                                  process_synthesized_event,
                                                  machine);
-       else
+       else if (perf_target__has_cpu(&opts->target))
                err = perf_event__synthesize_threads(tool, process_synthesized_event,
                                               machine);
+       else /* command specified */
+               err = 0;
 
        if (err != 0)
                goto out_delete_session;
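
The rewritten branch above picks the synthesis strategy from the target type: an existing task target gets its thread map synthesized, a CPU-wide target gets all threads, and a plain command target needs nothing up front because fork events will describe the child. A schematic sketch of that three-way dispatch with hypothetical callbacks:

enum target_kind { TARGET_TASK, TARGET_CPU, TARGET_COMMAND };

static int synthesize_for(enum target_kind kind,
                          int (*synth_task_map)(void),
                          int (*synth_all_threads)(void))
{
        switch (kind) {
        case TARGET_TASK:
                return synth_task_map();        /* -p/-t style targets */
        case TARGET_CPU:
                return synth_all_threads();     /* -a/-C style targets */
        case TARGET_COMMAND:
        default:
                return 0;                       /* command specified */
        }
}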
index 38624686ee9a145ffaacbcfa063e5d985da750f0..226a4ae2f936ccaa228a5fad85bfd5318521ccd5 100644 (file)
@@ -208,8 +208,9 @@ static inline int script_browse(const char *script_opt __maybe_unused)
        return 0;
 }
 
-#define K_LEFT -1
-#define K_RIGHT -2
+#define K_LEFT  -1000
+#define K_RIGHT -2000
+#define K_SWITCH_INPUT_DATA -3000
 #endif
 
 #ifdef GTK2_SUPPORT
index 55433aa42c8f30cb8dbeeae537ce8ea0ad47965b..eabdce0a2daa0a0a2c5c882529617322ce6e8852 100644 (file)
@@ -143,7 +143,7 @@ struct strlist *strlist__new(bool dupstr, const char *list)
                slist->rblist.node_delete = strlist__node_delete;
 
                slist->dupstr    = dupstr;
-               if (slist && strlist__parse_list(slist, list) != 0)
+               if (list && strlist__parse_list(slist, list) != 0)
                        goto out_error;
        }
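
The one-liner above fixes a guard that tested the wrong pointer: slist is already known to be non-NULL at that point, while the optional list argument is what may legitimately be NULL. A minimal userspace sketch of the corrected check, with invented names:

#include <stdlib.h>

struct ex_strlist { int nr; };

/* Stand-in for strlist__parse_list(): cannot cope with list == NULL. */
static int ex_parse_list(struct ex_strlist *s, const char *list)
{
        s->nr = (list[0] != '\0');
        return 0;
}

static struct ex_strlist *ex_strlist_new(const char *list)
{
        struct ex_strlist *s = calloc(1, sizeof(*s));

        if (!s)
                return NULL;
        if (list && ex_parse_list(s, list) != 0) {      /* check list, not s */
                free(s);
                return NULL;
        }
        return s;
}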
 
index ce82b940195843827be4d22e710942b0683a4618..5ba005c00e2f76998694d202eca68452600ee5bf 100644 (file)
@@ -74,9 +74,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
                        u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
                        u64 redir_content;
 
-                       ASSERT(redir_index < IOAPIC_NUM_PINS);
+                       if (redir_index < IOAPIC_NUM_PINS)
+                               redir_content =
+                                       ioapic->redirtbl[redir_index].bits;
+                       else
+                               redir_content = ~0ULL;
 
-                       redir_content = ioapic->redirtbl[redir_index].bits;
                        result = (ioapic->ioregsel & 0x1) ?
                            (redir_content >> 32) & 0xffffffff :
                            redir_content & 0xffffffff;
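
The ASSERT above became a real range check because ioregsel is guest-writable: an out-of-range redirection-table index must not be used to read past the array, and invalid indexes now read back as all-ones. A stand-alone sketch of validating a guest-controlled index before the table lookup; sizes and names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define NUM_PINS 24

static uint64_t redirtbl[NUM_PINS];

static uint32_t read_redir(uint32_t ioregsel)
{
        uint32_t index = (ioregsel - 0x10) >> 1;
        uint64_t content;

        /* Guest-controlled index: bounds-check instead of asserting. */
        content = (index < NUM_PINS) ? redirtbl[index] : ~0ULL;

        return (ioregsel & 0x1) ? (uint32_t)(content >> 32)
                                : (uint32_t)(content & 0xffffffff);
}

int main(void)
{
        printf("0x%08x\n", read_redir(0xff));   /* out of range -> all-ones */
        return 0;
}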