Merge branch 'x86-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Feb 2015 01:53:53 +0000 (17:53 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Feb 2015 01:53:53 +0000 (17:53 -0800)
Pull EFI updates from Ingo Molnar:
 "Main changes:

   - Move efivarfs from the misc filesystem section to pseudo filesystem

   - Expose firmware platform size in sysfs

   - Improve robustness of get_memory_map() by removing assumptions on
     the size of efi_memory_desc_t.

   - Various cleanups and fixes

  The biggest risk is the get_memory_map() change, which changes the way
  that both the arm64 and x86 EFI boot stub build the early memory map.
  There are no known regressions with it at the moment, but YMMV"

* 'x86-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  efi: Don't look for chosen@0 node on DT platforms
  firmware: efi: Remove unneeded guid unparse
  efi/libstub: Call get_memory_map() to obtain map and desc sizes
  efi: Small leak on error in runtime map code
  efi: rtc-efi: Mark UIE as unsupported
  arm64/efi: efistub: Apply __init annotation
  efi: Expose underlying UEFI firmware platform size to userland
  efi: Rename efi_guid_unparse to efi_guid_to_str
  efi: Update the URLs for efibootmgr
  fs: Make efivarfs a pseudo filesystem, built by default with EFI
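
As a quick illustration of the new sysfs interface mentioned above, the
following minimal C sketch reads the firmware platform size. It assumes the
/sys/firmware/efi/fw_platform_size attribute added by the "Expose underlying
UEFI firmware platform size to userland" patch, which reports 64 or 32 on
EFI-booted systems:

    /* Minimal sketch: read the UEFI firmware platform size (64 or 32).
     * Assumes an EFI boot and a kernel carrying this series; the sysfs
     * path comes from the patch title above. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/firmware/efi/fw_platform_size", "r");
        int bits;

        if (!f || fscanf(f, "%d", &bits) != 1) {
            perror("fw_platform_size");
            return 1;
        }
        fclose(f);
        printf("UEFI firmware is %d-bit\n", bits);
        return 0;
    }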

617 files changed:
.mailmap
Documentation/ABI/testing/sysfs-bus-event_source-devices-events
Documentation/RCU/stallwarn.txt
Documentation/RCU/trace.txt
Documentation/devicetree/bindings/i2c/i2c-st.txt
Documentation/devicetree/bindings/i2c/trivial-devices.txt
Documentation/devicetree/bindings/mfd/max77686.txt
Documentation/devicetree/bindings/regulator/da9211.txt
Documentation/devicetree/bindings/regulator/isl9305.txt
Documentation/devicetree/bindings/regulator/mt6397-regulator.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/pfuze100.txt
Documentation/devicetree/bindings/spi/sh-msiof.txt
Documentation/devicetree/bindings/spi/spi-sirf.txt [new file with mode: 0644]
Documentation/devicetree/bindings/spi/spi-st-ssc.txt [new file with mode: 0644]
Documentation/futex-requeue-pi.txt
Documentation/hwmon/ina2xx
Documentation/locking/lockdep-design.txt
Documentation/memory-barriers.txt
Documentation/networking/netlink_mmap.txt
Documentation/x86/entry_64.txt
Documentation/x86/x86_64/kernel-stacks
MAINTAINERS
Makefile
arch/alpha/mm/fault.c
arch/arc/mm/fault.c
arch/arm/boot/compressed/head.S
arch/arm/boot/dts/exynos4.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
arch/arm/boot/dts/sun5i-a10s.dtsi
arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
arch/arm/boot/dts/sun5i-a13-olinuxino.dts
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20-bananapi.dts
arch/arm/boot/dts/sun7i-a20-hummingbird.dts
arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
arch/arm/boot/dts/sun8i-a23.dtsi
arch/arm/boot/dts/sun9i-a80-optimus.dts
arch/arm/boot/dts/sun9i-a80.dtsi
arch/arm/include/asm/kvm_emulate.h
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/kvm_mmu.h
arch/arm/kernel/entry-v7m.S
arch/arm/kvm/Kconfig
arch/arm/kvm/arm.c
arch/arm/kvm/coproc.c
arch/arm/kvm/coproc.h
arch/arm/kvm/coproc_a15.c
arch/arm/kvm/coproc_a7.c
arch/arm/kvm/mmu.c
arch/arm/kvm/trace.h
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-shmobile/board-ape6evm.c
arch/arm/mach-shmobile/board-lager.c
arch/arm/mach-shmobile/setup-rcar-gen2.c
arch/arm/mach-shmobile/timer.c
arch/arm/mm/Kconfig
arch/arm/mm/context.c
arch/arm/mm/dma-mapping.c
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kvm/Kconfig
arch/arm64/kvm/sys_regs.c
arch/avr32/mm/fault.c
arch/cris/mm/fault.c
arch/frv/mm/fault.c
arch/ia64/mm/fault.c
arch/m32r/mm/fault.c
arch/m68k/mm/fault.c
arch/metag/mm/fault.c
arch/microblaze/mm/fault.c
arch/mips/Kconfig
arch/mips/boot/elf2ecoff.c
arch/mips/cavium-octeon/smp.c
arch/mips/configs/malta_defconfig
arch/mips/include/asm/fpu.h
arch/mips/include/asm/fw/arc/hinv.h
arch/mips/include/asm/mips-cm.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/syscall.h
arch/mips/include/asm/thread_info.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/jz4740/irq.c
arch/mips/kernel/elf.c
arch/mips/kernel/irq_cpu.c
arch/mips/kernel/process.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/smp-cmp.c
arch/mips/kernel/smp-mt.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/kvm/Kconfig
arch/mips/mm/fault.c
arch/mips/mm/tlb-r4k.c
arch/mn10300/include/asm/cacheflush.h
arch/mn10300/mm/fault.c
arch/nios2/mm/fault.c
arch/openrisc/mm/fault.c
arch/parisc/mm/fault.c
arch/powerpc/include/asm/cacheflush.h
arch/powerpc/kvm/Kconfig
arch/powerpc/mm/copro_fault.c
arch/powerpc/mm/fault.c
arch/s390/include/asm/cacheflush.h
arch/s390/kvm/Kconfig
arch/s390/mm/fault.c
arch/score/mm/fault.c
arch/sh/mm/fault.c
arch/sparc/include/asm/cacheflush_64.h
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/tile/kvm/Kconfig
arch/tile/mm/fault.c
arch/um/kernel/trap.c
arch/x86/Kconfig
arch/x86/boot/ctype.h
arch/x86/boot/early_serial_console.c
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/apic.h
arch/x86/include/asm/calling.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/debugreg.h
arch/x86/include/asm/hw_breakpoint.h
arch/x86/include/asm/io_apic.h
arch/x86/include/asm/irq_remapping.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/smpboot_hooks.h [deleted file]
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/traps.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/p5.c
arch/x86/kernel/cpu/mcheck/winchip.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_rapl.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/cpu/perf_event_intel_uncore.h
arch/x86/kernel/entry_64.S
arch/x86/kernel/hw_breakpoint.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/rtc.c
arch/x86/kernel/signal.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/traps.c
arch/x86/kvm/Kconfig
arch/x86/kvm/lapic.c
arch/x86/mm/fault.c
arch/x86/pci/common.c
arch/x86/pci/intel_mid_pci.c
arch/x86/vdso/Makefile
arch/xtensa/mm/fault.c
block/blk-mq-sysfs.c
block/blk-mq.c
block/blk-mq.h
block/blk-sysfs.c
drivers/Kconfig
drivers/acpi/acpi_lpss.c
drivers/base/regmap/internal.h
drivers/base/regmap/regmap-ac97.c
drivers/base/regmap/regmap-i2c.c
drivers/base/regmap/regmap.c
drivers/block/rbd.c
drivers/char/random.c
drivers/clk/Kconfig
drivers/cpufreq/Kconfig
drivers/devfreq/Kconfig
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpiolib-sysfs.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_module.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/cirrus/cirrus_drv.c
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/cirrus/cirrus_mode.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/radeon/radeon_benchmark.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/hwmon/Kconfig
drivers/hwmon/abx500.c
drivers/hwmon/ad7314.c
drivers/hwmon/adc128d818.c
drivers/hwmon/ads7828.c
drivers/hwmon/ina2xx.c
drivers/hwmon/jc42.c
drivers/hwmon/nct7802.c
drivers/hwmon/tmp102.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-slave-eeprom.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/input/serio/i8042-x86ia64io.h
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_proto.h
drivers/iommu/intel_irq_remapping.c
drivers/iommu/irq_remapping.c
drivers/iommu/irq_remapping.h
drivers/iommu/tegra-gart.c
drivers/irqchip/irq-mips-gic.c
drivers/isdn/hardware/eicon/message.c
drivers/md/Kconfig
drivers/md/bitmap.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-thin.c
drivers/md/raid5.c
drivers/net/Kconfig
drivers/net/caif/caif_hsi.c
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/amd/nmclan_cs.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/cirrus/Kconfig
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/hyperv/netvsc.c
drivers/net/macvtap.c
drivers/net/ppp/ppp_deflate.c
drivers/net/tun.c
drivers/net/usb/sr9700.c
drivers/net/usb/sr9700.h
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/Kconfig
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/pci/host/pcie-designware.c
drivers/pci/quirks.c
drivers/pinctrl/pinctrl-at91.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/axp20x-regulator.c
drivers/regulator/core.c
drivers/regulator/da9211-regulator.c
drivers/regulator/fan53555.c
drivers/regulator/internal.h
drivers/regulator/isl9305.c
drivers/regulator/lp872x.c
drivers/regulator/max14577.c
drivers/regulator/max77686.c
drivers/regulator/max77843.c [new file with mode: 0644]
drivers/regulator/max8649.c
drivers/regulator/mt6397-regulator.c [new file with mode: 0644]
drivers/regulator/of_regulator.c
drivers/regulator/pfuze100-regulator.c
drivers/regulator/qcom_rpm-regulator.c
drivers/regulator/rk808-regulator.c
drivers/regulator/rt5033-regulator.c
drivers/regulator/tps65023-regulator.c
drivers/rtc/hctosys.c
drivers/rtc/interface.c
drivers/rtc/rtc-dev.c
drivers/rtc/systohc.c
drivers/scsi/device_handler/scsi_dh.c
drivers/scsi/sd.c
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/spi-atmel.c
drivers/spi/spi-au1550.c
drivers/spi/spi-bcm2835.c
drivers/spi/spi-bcm63xx.c
drivers/spi/spi-bitbang.c
drivers/spi/spi-butterfly.c
drivers/spi/spi-coldfire-qspi.c
drivers/spi/spi-davinci.c
drivers/spi/spi-dln2.c [new file with mode: 0644]
drivers/spi/spi-dw-mid.c
drivers/spi/spi-dw-pci.c
drivers/spi/spi-dw.c
drivers/spi/spi-falcon.c
drivers/spi/spi-fsl-cpm.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-fsl-lib.c
drivers/spi/spi-fsl-lib.h
drivers/spi/spi-gpio.c
drivers/spi/spi-img-spfi.c
drivers/spi/spi-imx.c
drivers/spi/spi-lm70llp.c
drivers/spi/spi-meson-spifc.c
drivers/spi/spi-mxs.c
drivers/spi/spi-omap-100k.c
drivers/spi/spi-omap-uwire.c
drivers/spi/spi-omap2-mcspi.c
drivers/spi/spi-orion.c
drivers/spi/spi-pxa2xx-dma.c
drivers/spi/spi-pxa2xx-pxadma.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-pxa2xx.h
drivers/spi/spi-qup.c
drivers/spi/spi-rockchip.c
drivers/spi/spi-rspi.c
drivers/spi/spi-s3c64xx.c
drivers/spi/spi-sc18is602.c
drivers/spi/spi-sh-hspi.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi-sh.c
drivers/spi/spi-sirf.c
drivers/spi/spi-st-ssc4.c [new file with mode: 0644]
drivers/spi/spi-ti-qspi.c
drivers/spi/spi-topcliff-pch.c
drivers/spi/spi-xilinx.c
drivers/spi/spi.c
drivers/spi/spidev.c
drivers/staging/lustre/lustre/llite/vvp_io.c
drivers/staging/nvec/nvec.c
drivers/usb/core/otg_whitelist.h
drivers/usb/core/quirks.c
drivers/usb/dwc2/core_intr.c
drivers/usb/phy/phy.c
drivers/usb/storage/unusual_devs.h
drivers/usb/storage/unusual_uas.h
drivers/vhost/net.c
fs/aio.c
fs/btrfs/Kconfig
fs/btrfs/scrub.c
fs/btrfs/tree-log.c
fs/cifs/cifs_debug.c
fs/cifs/file.c
fs/cifs/smbencrypt.c
fs/gfs2/quota.c
fs/nfs/direct.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4client.c
fs/nilfs2/nilfs.h
fs/nilfs2/segment.c
fs/nilfs2/segment.h
fs/notify/Kconfig
fs/quota/Kconfig
fs/quota/dquot.c
fs/quota/quota.c
fs/udf/file.c
fs/xfs/xfs_qm.h
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_quotaops.c
include/linux/compiler.h
include/linux/ftrace_event.h
include/linux/hrtimer.h
include/linux/i2c.h
include/linux/if_vlan.h
include/linux/kernel.h
include/linux/ktime.h
include/linux/mlx4/device.h
include/linux/mm.h
include/linux/osq_lock.h
include/linux/perf_event.h
include/linux/pxa2xx_ssp.h
include/linux/quota.h
include/linux/quotaops.h
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rcutree.h
include/linux/regmap.h
include/linux/regulator/da9211.h
include/linux/regulator/driver.h
include/linux/regulator/machine.h
include/linux/regulator/mt6397-regulator.h [new file with mode: 0644]
include/linux/regulator/pfuze100.h
include/linux/rtc.h
include/linux/smp.h
include/linux/spi/at86rf230.h
include/linux/spi/l4f00242t03.h
include/linux/spi/lms283gf05.h
include/linux/spi/mxs-spi.h
include/linux/spi/pxa2xx_spi.h
include/linux/spi/rspi.h
include/linux/spi/sh_hspi.h
include/linux/spi/sh_msiof.h
include/linux/spi/spi.h
include/linux/spi/tle62x0.h
include/linux/spi/tsc2005.h
include/linux/srcu.h
include/linux/timekeeping.h
include/linux/tracepoint.h
include/linux/wait.h
include/net/flow_keys.h
include/net/ip.h
include/net/ipv6.h
include/net/netfilter/nf_tables.h
include/net/netns/ipv4.h
include/net/sch_generic.h
include/net/tcp.h
include/rdma/ib_verbs.h
include/sound/ak4113.h
include/sound/ak4114.h
include/sound/soc.h
include/trace/events/tlb.h
include/trace/ftrace.h
include/uapi/rdma/ib_user_verbs.h
init/Kconfig
init/main.c
kernel/Kconfig.locks
kernel/cpu.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/futex.c
kernel/locking/Makefile
kernel/locking/mcs_spinlock.h
kernel/locking/mutex.c
kernel/locking/osq_lock.c [moved from kernel/locking/mcs_spinlock.c with 98% similarity]
kernel/locking/rtmutex.c
kernel/locking/rwsem-spinlock.c
kernel/locking/rwsem-xadd.c
kernel/notifier.c
kernel/power/Kconfig
kernel/rcu/Makefile
kernel/rcu/rcu.h
kernel/rcu/rcutorture.c
kernel/rcu/srcu.c
kernel/rcu/tiny.c
kernel/rcu/tiny_plugin.h
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/rcu/tree_trace.c
kernel/sched/completion.c
kernel/sched/core.c
kernel/sched/cpudeadline.c
kernel/sched/cpudeadline.h
kernel/sched/deadline.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/smpboot.c
kernel/softirq.c
kernel/time/hrtimer.c
kernel/time/ntp.c
kernel/time/timekeeping.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_syscalls.c
kernel/trace/trace_uprobe.c
lib/Kconfig.debug
lib/checksum.c
mm/Kconfig
mm/gup.c
mm/ksm.c
mm/memcontrol.c
mm/memory.c
mm/nommu.c
mm/pagewalk.c
mm/shmem.c
net/bridge/netfilter/nft_reject_bridge.c
net/caif/chnl_net.c
net/core/dev.c
net/core/rtnetlink.c
net/ipv4/ip_output.c
net/ipv4/route.c
net/ipv4/tcp_bic.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_scalable.c
net/ipv4/tcp_veno.c
net/ipv4/tcp_yeah.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/output_core.c
net/ipv6/sit.c
net/ipv6/udp_offload.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_masq.c
net/netfilter/nft_nat.c
net/netfilter/nft_redir.c
net/netlink/af_netlink.c
net/rds/sysctl.c
net/sched/cls_api.c
net/sched/sch_fq.c
net/sctp/sm_make_chunk.c
security/tomoyo/Kconfig
sound/core/seq/seq_dummy.c
sound/i2c/other/ak4113.c
sound/i2c/other/ak4114.c
sound/soc/adi/axi-i2s.c
sound/soc/atmel/atmel_ssc_dai.c
sound/soc/codecs/pcm512x.c
sound/soc/codecs/rt286.c
sound/soc/codecs/rt5640.c
sound/soc/codecs/rt5677.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/ts3a227e.c
sound/soc/codecs/wm8731.c
sound/soc/codecs/wm8904.c
sound/soc/codecs/wm8960.c
sound/soc/codecs/wm9705.c
sound/soc/codecs/wm9712.c
sound/soc/codecs/wm9713.c
sound/soc/fsl/fsl_esai.h
sound/soc/fsl/fsl_ssi.c
sound/soc/fsl/imx-wm8962.c
sound/soc/generic/simple-card.c
sound/soc/intel/sst-firmware.c
sound/soc/intel/sst-haswell-ipc.c
sound/soc/intel/sst/sst_acpi.c
sound/soc/omap/omap-mcbsp.c
sound/soc/rockchip/rockchip_i2s.c
sound/soc/soc-ac97.c
sound/soc/soc-compress.c
tools/lib/api/fs/debugfs.c
tools/lib/api/fs/debugfs.h
tools/lib/lockdep/.gitignore [new file with mode: 0644]
tools/lib/lockdep/Makefile
tools/lib/traceevent/event-parse.c
tools/perf/Documentation/perf-buildid-cache.txt
tools/perf/Documentation/perf-list.txt
tools/perf/Documentation/perf-mem.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-script.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/bench/futex.h
tools/perf/builtin-buildid-cache.c
tools/perf/builtin-diff.c
tools/perf/builtin-inject.c
tools/perf/builtin-mem.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/config/Makefile
tools/perf/config/feature-checks/Makefile
tools/perf/config/feature-checks/test-all.c
tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c [new file with mode: 0644]
tools/perf/scripts/perl/Perf-Trace-Util/Context.c
tools/perf/tests/attr.py
tools/perf/tests/hists_cumulate.c
tools/perf/tests/hists_output.c
tools/perf/tests/make
tools/perf/tests/parse-events.c
tools/perf/tests/sample-parsing.c
tools/perf/ui/browsers/annotate.c
tools/perf/ui/hist.c
tools/perf/ui/progress.h
tools/perf/ui/tui/helpline.c
tools/perf/ui/tui/setup.c
tools/perf/util/annotate.c
tools/perf/util/color.c
tools/perf/util/color.h
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/map.h
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.l
tools/perf/util/parse-events.y
tools/perf/util/parse-options.c
tools/perf/util/pmu.c
tools/perf/util/probe-event.c
tools/perf/util/python.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/sort.c
tools/perf/util/symbol-elf.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/unwind-libunwind.c
tools/testing/selftests/rcutorture/bin/cpus2use.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/parse-build.sh
tools/testing/selftests/rcutorture/bin/parse-console.sh

index d357e1bd2a434665ae545d9ed970edd77f15f7d9..0d971cfb07724828cde3e2fdb7bb184b649054fd 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -73,6 +73,7 @@ Juha Yrjola <juha.yrjola@nokia.com>
 Juha Yrjola <juha.yrjola@solidboot.com>
 Kay Sievers <kay.sievers@vrfy.org>
 Kenneth W Chen <kenneth.w.chen@intel.com>
+Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 Leonid I Ananiev <leonid.i.ananiev@intel.com>
index 20979f8b3edbc48645e4e274772b4ebd26987962..505f080d20a14c5d4d9b20c0088aa47b52345c56 100644 (file)
@@ -52,12 +52,18 @@ Description:        Per-pmu performance monitoring events specific to the running syste
                        event=0x2abc
                        event=0x423,inv,cmask=0x3
                        domain=0x1,offset=0x8,starting_index=0xffff
+                       domain=0x1,offset=0x8,core=?
 
                Each of the assignments indicates a value to be assigned to a
                particular set of bits (as defined by the format file
                corresponding to the <term>) in the perf_event structure passed
                to the perf_open syscall.
 
+               In the case of the last example, a value replacing "?" would
+               need to be provided by the user selecting the particular event.
+               This is referred to as "event parameterization". Event
+               parameters have the format 'param=?'.
+
 What: /sys/bus/event_source/devices/<pmu>/events/<event>.unit
 Date: 2014/02/24
 Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
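
For illustration, the "param=?" substitution described in the new text above
can be sketched in plain C. The format string below is the documentation's own
example; the substitution helper and the chosen value are hypothetical:

    /* Hypothetical sketch of event parameterization: replace each '?'
     * in a sysfs event string with a user-supplied value. The caller
     * must size 'out' generously; no real perf API is involved. */
    #include <stdio.h>
    #include <string.h>

    static void substitute(const char *fmt, const char *value, char *out)
    {
        while (*fmt) {
            if (*fmt == '?') {
                strcpy(out, value);
                out += strlen(value);
            } else {
                *out++ = *fmt;
            }
            fmt++;
        }
        *out = '\0';
    }

    int main(void)
    {
        char buf[64];

        substitute("domain=0x1,offset=0x8,core=?", "0x3", buf);
        printf("%s\n", buf);   /* domain=0x1,offset=0x8,core=0x3 */
        return 0;
    }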
index ed186a902d312a23913bb3e4ba0f272306fd1cb3..b57c0c1cdac609ca008001b9490b42c58b043574 100644 (file)
@@ -15,7 +15,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT
        21 seconds.
 
        This configuration parameter may be changed at runtime via the
-       /sys/module/rcutree/parameters/rcu_cpu_stall_timeout, however
+       /sys/module/rcupdate/parameters/rcu_cpu_stall_timeout, however
        this parameter is checked only at the beginning of a cycle.
        So if you are 10 seconds into a 40-second stall, setting this
        sysfs parameter to (say) five will shorten the timeout for the
@@ -152,6 +152,15 @@ no non-lazy callbacks ("." is printed otherwise, as shown above) and
 "D" indicates that dyntick-idle processing is enabled ("." is printed
 otherwise, for example, if disabled via the "nohz=" kernel boot parameter).
 
+If the relevant grace-period kthread has been unable to run prior to
+the stall warning, the following additional line is printed:
+
+       rcu_preempt kthread starved for 2023 jiffies!
+
+Starving the grace-period kthreads of CPU time can of course result in
+RCU CPU stall warnings even when all CPUs and tasks have passed through
+the required quiescent states.
+
 
 Multiple Warnings From One Stall
 
@@ -187,6 +196,11 @@ o  For !CONFIG_PREEMPT kernels, a CPU looping anywhere in the
        behavior, you might need to replace some of the cond_resched()
        calls with calls to cond_resched_rcu_qs().
 
+o      Anything that prevents RCU's grace-period kthreads from running.
+       This can result in the "All QSes seen" console-log message.
+       This message will include information on when the kthread last
+       ran and how often it should be expected to run.
+
 o      A CPU-bound real-time task in a CONFIG_PREEMPT kernel, which might
        happen to preempt a low-priority task in the middle of an RCU
        read-side critical section.   This is especially damaging if
index b63b9bb3bc0c4dc24064599055f976ac00e0ee85..08651da15448e27f8f2fb574c7cd51cde67815c6 100644 (file)
@@ -56,14 +56,14 @@ rcuboost:
 
 The output of "cat rcu/rcu_preempt/rcudata" looks as follows:
 
-  0!c=30455 g=30456 pq=1 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
-  1!c=30719 g=30720 pq=1 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
-  2!c=30150 g=30151 pq=1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
-  3 c=31249 g=31250 pq=1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
-  4!c=29502 g=29503 pq=1 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
-  5 c=31201 g=31202 pq=1 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
-  6!c=30253 g=30254 pq=1 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
-  7 c=31178 g=31178 pq=1 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
+  0!c=30455 g=30456 pq=1/0 qp=1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 nci=0 co=1131 ca=716
+  1!c=30719 g=30720 pq=1/0 qp=0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=123209 nci=0 co=685 ca=982
+  2!c=30150 g=30151 pq=1/1 qp=1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 nci=0 co=1328 ca=1458
+  3 c=31249 g=31250 pq=1/1 qp=0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=151700 nci=0 co=509 ca=622
+  4!c=29502 g=29503 pq=1/0 qp=1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nci=0 co=1373 ca=1521
+  5 c=31201 g=31202 pq=1/0 qp=1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=698
+  6!c=30253 g=30254 pq=1/0 qp=1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 nci=0 co=1414 ca=1353
+  7 c=31178 g=31178 pq=1/0 qp=0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 ca=969
 
 This file has one line per CPU, or eight for this 8-CPU system.
 The fields are as follows:
@@ -188,14 +188,14 @@ o "ca" is the number of RCU callbacks that have been adopted by this
 Kernels compiled with CONFIG_RCU_BOOST=y display the following from
 /debug/rcu/rcu_preempt/rcudata:
 
-  0!c=12865 g=12866 pq=1 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
-  1 c=14407 g=14408 pq=1 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
-  2 c=14407 g=14408 pq=1 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
-  3 c=14407 g=14408 pq=1 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
-  4 c=14405 g=14406 pq=1 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
-  5!c=14168 g=14169 pq=1 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
-  6 c=14404 g=14405 pq=1 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
-  7 c=14407 g=14408 pq=1 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
+  0!c=12865 g=12866 pq=1/0 qp=1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 b=10 ci=60709 nci=0 co=748 ca=871
+  1 c=14407 g=14408 pq=1/0 qp=0 dt=100679/140000000000000/0 df=378 of=7 ql=0/119 qs=NRW. kt=0/W ktl=9b6 b=10 ci=109740 nci=0 co=589 ca=485
+  2 c=14407 g=14408 pq=1/0 qp=0 dt=105486/0/0 df=90 of=9 ql=0/89 qs=NRW. kt=0/W ktl=c0c b=10 ci=83113 nci=0 co=533 ca=490
+  3 c=14407 g=14408 pq=1/0 qp=0 dt=107138/0/0 df=142 of=8 ql=0/188 qs=NRW. kt=0/W ktl=b96 b=10 ci=121114 nci=0 co=426 ca=290
+  4 c=14405 g=14406 pq=1/0 qp=1 dt=50238/0/0 df=706 of=7 ql=0/0 qs=.... kt=0/W ktl=812 b=10 ci=34929 nci=0 co=643 ca=114
+  5!c=14168 g=14169 pq=1/0 qp=0 dt=45465/140000000000000/0 df=161 of=11 ql=0/0 qs=N... kt=0/O ktl=b4d b=10 ci=47712 nci=0 co=677 ca=722
+  6 c=14404 g=14405 pq=1/0 qp=0 dt=59454/0/0 df=94 of=6 ql=0/0 qs=.... kt=0/W ktl=e57 b=10 ci=55597 nci=0 co=701 ca=811
+  7 c=14407 g=14408 pq=1/0 qp=1 dt=68850/0/0 df=31 of=8 ql=0/0 qs=.... kt=0/W ktl=14bd b=10 ci=77475 nci=0 co=508 ca=1042
 
 This is similar to the output discussed above, but contains the following
 additional fields:
index 437e0db3823cac05ec71702827159851d0806d03..4c26fda3844a7f06c05cdef710d7e08fef2b76b8 100644 (file)
@@ -31,7 +31,7 @@ i2c0: i2c@fed40000 {
        compatible      = "st,comms-ssc4-i2c";
        reg             = <0xfed40000 0x110>;
        interrupts      =  <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
-       clocks          = <&CLK_S_ICN_REG_0>;
+       clocks          = <&clk_s_a0_ls CLK_ICN_REG>;
        clock-names     = "ssc";
        clock-frequency = <400000>;
        pinctrl-names   = "default";
index 9f4e3824e71eb22bb825cfcd0e2c6d128242f6af..9f41d05be3be8676e307f37f00accdab12722bff 100644 (file)
@@ -47,6 +47,7 @@ dallas,ds3232         Extremely Accurate I²C RTC with Integrated Crystal and SRAM
 dallas,ds4510          CPU Supervisor with Nonvolatile Memory and Programmable I/O
 dallas,ds75            Digital Thermometer and Thermostat
 dlg,da9053             DA9053: flexible system level PMIC with multicore support
+dlg,da9063             DA9063: system PMIC for quad-core application processors
 epson,rx8025           High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
 epson,rx8581           I2C-BUS INTERFACE REAL TIME CLOCK MODULE
 fsl,mag3110            MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
index 75fdfaf41831d9fcd76be1b5706dadf1e2a2cb7d..e39f0bc1f55e02ccd705b85ae96714fd0a7e5263 100644 (file)
@@ -39,6 +39,12 @@ to get matched with their hardware counterparts as follow:
        -BUCKn  :       1-4.
   Use standard regulator bindings for it ('regulator-off-in-suspend').
 
+  LDO20, LDO21, LDO22, BUCK8 and BUCK9 can be configured to GPIO enable
+  control. To turn this feature on this property must be added to the regulator
+  sub-node:
+       - maxim,ena-gpios :     one GPIO specifier enable control (the gpio
+                               flags are actually ignored and always
+                               ACTIVE_HIGH is used)
 
 Example:
 
@@ -65,4 +71,12 @@ Example:
                                regulator-always-on;
                                regulator-boot-on;
                        };
+
+                       buck9_reg {
+                               regulator-compatible = "BUCK9";
+                               regulator-name = "CAM_ISP_CORE_1.2V";
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <1200000>;
+                               maxim,ena-gpios = <&gpm0 3 GPIO_ACTIVE_HIGH>;
+                       };
        }
index 240019a82f9a57083f4f06402250f2fbf41ec4ce..eb618907c7dee35446ae8b2153cf566e91cdb689 100644 (file)
@@ -11,6 +11,7 @@ Required properties:
   BUCKA and BUCKB.
 
 Optional properties:
+- enable-gpios: platform gpio for control of BUCKA/BUCKB.
 - Any optional property defined in regulator.txt
 
 Example 1) DA9211
@@ -27,6 +28,7 @@ Example 1) DA9211
                                regulator-max-microvolt = <1570000>;
                                regulator-min-microamp  = <2000000>;
                                regulator-max-microamp  = <5000000>;
+                               enable-gpios = <&gpio 27 0>;
                        };
                        BUCKB {
                                regulator-name = "VBUCKB";
@@ -34,11 +36,12 @@ Example 1) DA9211
                                regulator-max-microvolt = <1570000>;
                                regulator-min-microamp  = <2000000>;
                                regulator-max-microamp  = <5000000>;
+                               enable-gpios = <&gpio 17 0>;
                        };
                };
        };
 
-Example 2) DA92113
+Example 2) DA9213
        pmic: da9213@68 {
                compatible = "dlg,da9213";
                reg = <0x68>;
@@ -51,6 +54,7 @@ Example 2) DA92113
                                regulator-max-microvolt = <1570000>;
                                regulator-min-microamp  = <3000000>;
                                regulator-max-microamp  = <6000000>;
+                               enable-gpios = <&gpio 27 0>;
                        };
                        BUCKB {
                                regulator-name = "VBUCKB";
@@ -58,6 +62,7 @@ Example 2) DA92113
                                regulator-max-microvolt = <1570000>;
                                regulator-min-microamp  = <3000000>;
                                regulator-max-microamp  = <6000000>;
+                               enable-gpios = <&gpio 17 0>;
                        };
                };
        };
index a626fc1bbf0d0bd1797191e91e31959e09c338a3..d6e7c9ec9413c0ddb0d6c66ee25812cce1683702 100644 (file)
@@ -2,7 +2,7 @@ Intersil ISL9305/ISL9305H voltage regulator
 
 Required properties:
 
-- compatible: "isl,isl9305" or "isl,isl9305h"
+- compatible: "isil,isl9305" or "isil,isl9305h"
 - reg: I2C slave address, usually 0x68.
 - regulators: A node that houses a sub-node for each regulator within the
   device. Each sub-node is identified using the node's name, with valid
@@ -19,7 +19,7 @@ Optional properties:
 Example
 
        pmic: isl9305@68 {
-               compatible = "isl,isl9305";
+               compatible = "isil,isl9305";
                reg = <0x68>;
 
                VINDCD1-supply = <&system_power>;
diff --git a/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt b/Documentation/devicetree/bindings/regulator/mt6397-regulator.txt
new file mode 100644 (file)
index 0000000..a42b1d6
--- /dev/null
@@ -0,0 +1,217 @@
+Mediatek MT6397 Regulator Driver
+
+Required properties:
+- compatible: "mediatek,mt6397-regulator"
+- mt6397regulator: List of regulators provided by this controller. It is named
+  according to its regulator type, buck_<name> and ldo_<name>.
+  The definition for each of these nodes is defined using the standard binding
+  for regulators at Documentation/devicetree/bindings/regulator/regulator.txt.
+
+The valid names for regulators are:
+BUCK:
+  buck_vpca15, buck_vpca7, buck_vsramca15, buck_vsramca7, buck_vcore, buck_vgpu,
+  buck_vdrm, buck_vio18
+LDO:
+  ldo_vtcxo, ldo_va28, ldo_vcama, ldo_vio28, ldo_vusb, ldo_vmc, ldo_vmch,
+  ldo_vemc3v3, ldo_vgp1, ldo_vgp2, ldo_vgp3, ldo_vgp4, ldo_vgp5, ldo_vgp6,
+  ldo_vibr
+
+Example:
+       pmic {
+               compatible = "mediatek,mt6397";
+
+               mt6397regulator: mt6397regulator {
+                       compatible = "mediatek,mt6397-regulator";
+
+                       mt6397_vpca15_reg: buck_vpca15 {
+                               regulator-compatible = "buck_vpca15";
+                               regulator-name = "vpca15";
+                               regulator-min-microvolt = < 850000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <200>;
+                       };
+
+                       mt6397_vpca7_reg: buck_vpca7 {
+                               regulator-compatible = "buck_vpca7";
+                               regulator-name = "vpca7";
+                               regulator-min-microvolt = < 850000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <115>;
+                       };
+
+                       mt6397_vsramca15_reg: buck_vsramca15 {
+                               regulator-compatible = "buck_vsramca15";
+                               regulator-name = "vsramca15";
+                               regulator-min-microvolt = < 850000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <115>;
+
+                       };
+
+                       mt6397_vsramca7_reg: buck_vsramca7 {
+                               regulator-compatible = "buck_vsramca7";
+                               regulator-name = "vsramca7";
+                               regulator-min-microvolt = < 850000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <115>;
+
+                       };
+
+                       mt6397_vcore_reg: buck_vcore {
+                               regulator-compatible = "buck_vcore";
+                               regulator-name = "vcore";
+                               regulator-min-microvolt = < 850000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <115>;
+                       };
+
+                       mt6397_vgpu_reg: buck_vgpu {
+                               regulator-compatible = "buck_vgpu";
+                               regulator-name = "vgpu";
+                               regulator-min-microvolt = < 700000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <115>;
+                       };
+
+                       mt6397_vdrm_reg: buck_vdrm {
+                               regulator-compatible = "buck_vdrm";
+                               regulator-name = "vdrm";
+                               regulator-min-microvolt = < 800000>;
+                               regulator-max-microvolt = <1400000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <500>;
+                       };
+
+                       mt6397_vio18_reg: buck_vio18 {
+                               regulator-compatible = "buck_vio18";
+                               regulator-name = "vio18";
+                               regulator-min-microvolt = <1500000>;
+                               regulator-max-microvolt = <2120000>;
+                               regulator-ramp-delay = <12500>;
+                               regulator-enable-ramp-delay = <500>;
+                       };
+
+                       mt6397_vtcxo_reg: ldo_vtcxo {
+                               regulator-compatible = "ldo_vtcxo";
+                               regulator-name = "vtcxo";
+                               regulator-min-microvolt = <2800000>;
+                               regulator-max-microvolt = <2800000>;
+                               regulator-enable-ramp-delay = <90>;
+                       };
+
+                       mt6397_va28_reg: ldo_va28 {
+                               regulator-compatible = "ldo_va28";
+                               regulator-name = "va28";
+                               /* fixed output 2.8 V */
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vcama_reg: ldo_vcama {
+                               regulator-compatible = "ldo_vcama";
+                               regulator-name = "vcama";
+                               regulator-min-microvolt = <1500000>;
+                               regulator-max-microvolt = <2800000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vio28_reg: ldo_vio28 {
+                               regulator-compatible = "ldo_vio28";
+                               regulator-name = "vio28";
+                               /* fixed output 2.8 V */
+                               regulator-enable-ramp-delay = <240>;
+                       };
+
+                       mt6397_usb_reg: ldo_vusb {
+                               regulator-compatible = "ldo_vusb";
+                               regulator-name = "vusb";
+                               /* fixed output 3.3 V */
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vmc_reg: ldo_vmc {
+                               regulator-compatible = "ldo_vmc";
+                               regulator-name = "vmc";
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vmch_reg: ldo_vmch {
+                               regulator-compatible = "ldo_vmch";
+                               regulator-name = "vmch";
+                               regulator-min-microvolt = <3000000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+                               regulator-compatible = "ldo_vemc3v3";
+                               regulator-name = "vemc_3v3";
+                               regulator-min-microvolt = <3000000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vgp1_reg: ldo_vgp1 {
+                               regulator-compatible = "ldo_vgp1";
+                               regulator-name = "vcamd";
+                               regulator-min-microvolt = <1220000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <240>;
+                       };
+
+                       mt6397_vgp2_reg: ldo_vgp2 {
+                               regulator-compatible = "ldo_vgp2";
+                               regulator-name = "vcamio";
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vgp3_reg: ldo_vgp3 {
+                               regulator-compatible = "ldo_vgp3";
+                               regulator-name = "vcamaf";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vgp4_reg: ldo_vgp4 {
+                               regulator-compatible = "ldo_vgp4";
+                               regulator-name = "vgp4";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vgp5_reg: ldo_vgp5 {
+                               regulator-compatible = "ldo_vgp5";
+                               regulator-name = "vgp5";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3000000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vgp6_reg: ldo_vgp6 {
+                               regulator-compatible = "ldo_vgp6";
+                               regulator-name = "vgp6";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+
+                       mt6397_vibr_reg: ldo_vibr {
+                               regulator-compatible = "ldo_vibr";
+                               regulator-name = "vibr";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-enable-ramp-delay = <218>;
+                       };
+               };
+       };
index 34ef5d16d0f1697c51b6759b8a572f33d6b85894..9b40db88f637bd9ed7f06c5eb809e7b16ddd660d 100644 (file)
@@ -1,7 +1,7 @@
 PFUZE100 family of regulators
 
 Required properties:
-- compatible: "fsl,pfuze100" or "fsl,pfuze200"
+- compatible: "fsl,pfuze100", "fsl,pfuze200", "fsl,pfuze3000"
 - reg: I2C slave address
 
 Required child node:
@@ -14,6 +14,8 @@ Required child node:
   sw1ab,sw1c,sw2,sw3a,sw3b,sw4,swbst,vsnvs,vrefddr,vgen1~vgen6
   --PFUZE200
   sw1ab,sw2,sw3a,sw3b,swbst,vsnvs,vrefddr,vgen1~vgen6
+  --PFUZE3000
+  sw1a,sw1b,sw2,sw3,swbst,vsnvs,vrefddr,vldo1,vldo2,vccsd,v33,vldo3,vldo4
 
 Each regulator is defined using the standard binding for regulators.
 
@@ -205,3 +207,93 @@ Example 2: PFUZE200
                        };
                };
        };
+
+Example 3: PFUZE3000
+
+       pmic: pfuze3000@08 {
+               compatible = "fsl,pfuze3000";
+               reg = <0x08>;
+
+               regulators {
+                       sw1a_reg: sw1a {
+                               regulator-min-microvolt = <700000>;
+                               regulator-max-microvolt = <1475000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                               regulator-ramp-delay = <6250>;
+                       };
+                       /* use sw1c_reg to align with pfuze100/pfuze200 */
+                       sw1c_reg: sw1b {
+                               regulator-min-microvolt = <700000>;
+                               regulator-max-microvolt = <1475000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                               regulator-ramp-delay = <6250>;
+                       };
+
+                       sw2_reg: sw2 {
+                               regulator-min-microvolt = <2500000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       sw3a_reg: sw3 {
+                               regulator-min-microvolt = <900000>;
+                               regulator-max-microvolt = <1650000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       swbst_reg: swbst {
+                               regulator-min-microvolt = <5000000>;
+                               regulator-max-microvolt = <5150000>;
+                       };
+
+                       snvs_reg: vsnvs {
+                               regulator-min-microvolt = <1000000>;
+                               regulator-max-microvolt = <3000000>;
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       vref_reg: vrefddr {
+                               regulator-boot-on;
+                               regulator-always-on;
+                       };
+
+                       vgen1_reg: vldo1 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       vgen2_reg: vldo2 {
+                               regulator-min-microvolt = <800000>;
+                               regulator-max-microvolt = <1550000>;
+                       };
+
+                       vgen3_reg: vccsd {
+                               regulator-min-microvolt = <2850000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       vgen4_reg: v33 {
+                               regulator-min-microvolt = <2850000>;
+                               regulator-max-microvolt = <3300000>;
+                       };
+
+                       vgen5_reg: vldo3 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       vgen6_reg: vldo4 {
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+               };
+       };
index d11c3721e7cd144995a717b6f29d804a018a0299..4c388bb2f0a224b5247bb03b9c52172b4a48de8c 100644 (file)
@@ -30,6 +30,22 @@ Optional properties:
                         specifiers, one for transmission, and one for
                         reception.
 - dma-names            : Must contain a list of two DMA names, "tx" and "rx".
+- renesas,dtdl         : delay sync signal (setup) in transmit mode.
+                        Must contain one of the following values:
+                        0   (no bit delay)
+                        50  (0.5-clock-cycle delay)
+                        100 (1-clock-cycle delay)
+                        150 (1.5-clock-cycle delay)
+                        200 (2-clock-cycle delay)
+
+- renesas,syncdl       : delay sync signal (hold) in transmit mode.
+                        Must contain one of the following values:
+                        0   (no bit delay)
+                        50  (0.5-clock-cycle delay)
+                        100 (1-clock-cycle delay)
+                        150 (1.5-clock-cycle delay)
+                        200 (2-clock-cycle delay)
+                        300 (3-clock-cycle delay)
 
 Optional properties, deprecated for soctype-specific bindings:
 - renesas,tx-fifo-size : Overrides the default tx fifo size given in words
diff --git a/Documentation/devicetree/bindings/spi/spi-sirf.txt b/Documentation/devicetree/bindings/spi/spi-sirf.txt
new file mode 100644 (file)
index 0000000..4c7adb8
--- /dev/null
@@ -0,0 +1,41 @@
+* CSR SiRFprimaII Serial Peripheral Interface
+
+Required properties:
+- compatible : Should be "sirf,prima2-spi"
+- reg : Offset and length of the register set for the device
+- interrupts : Should contain SPI interrupt
+- resets: phandle to the reset controller asserting this device in
+          reset
+  See ../reset/reset.txt for details.
+- dmas : Must contain an entry for each entry in clock-names.
+  See ../dma/dma.txt for details.
+- dma-names : Must include the following entries:
+  - rx
+  - tx
+- clocks : Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+
+- #address-cells: Number of cells required to define a chip select
+                  address on the SPI bus. Should be set to 1.
+- #size-cells:    Should be zero.
+
+Optional properties:
+- spi-max-frequency: Specifies maximum SPI clock frequency,
+                     Units - Hz. Definition as per
+                     Documentation/devicetree/bindings/spi/spi-bus.txt
+- cs-gpios:     should specify GPIOs used for chipselects.
+
+Example:
+
+spi0: spi@b00d0000 {
+       compatible = "sirf,prima2-spi";
+       reg = <0xb00d0000 0x10000>;
+       interrupts = <15>;
+       dmas = <&dmac1 9>,
+               <&dmac1 4>;
+       dma-names = "rx", "tx";
+       #address-cells = <1>;
+       #size-cells = <0>;
+       clocks = <&clks 19>;
+       resets = <&rstc 26>;
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-st-ssc.txt b/Documentation/devicetree/bindings/spi/spi-st-ssc.txt
new file mode 100644 (file)
index 0000000..fe54959
--- /dev/null
@@ -0,0 +1,40 @@
+STMicroelectronics SSC (SPI) Controller
+---------------------------------------
+
+Required properties:
+- compatible   : "st,comms-ssc4-spi"
+- reg          : Offset and length of the device's register set
+- interrupts   : The interrupt specifier
+- clock-names  : Must contain "ssc"
+- clocks       : Must contain an entry for each name in clock-names
+                   See ../clk/*
+- pinctrl-names        : Uses "default", can use "sleep" if provided
+                   See ../pinctrl/pinctrl-binding.txt
+
+Optional properties:
+- cs-gpios     : List of GPIO chip selects
+                   See ../spi/spi-bus.txt
+
+Child nodes represent devices on the SPI bus
+  See ../spi/spi-bus.txt
+
+Example:
+       spi@9840000 {
+               compatible      = "st,comms-ssc4-spi";
+               reg             = <0x9840000 0x110>;
+               interrupts      = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
+               clocks          = <&clk_s_c0_flexgen CLK_EXT2F_A9>;
+               clock-names     = "ssc";
+               pinctrl-0       = <&pinctrl_spi0_default>;
+               pinctrl-names   = "default";
+               cs-gpios        = <&pio17 5 0>;
+               #address-cells  = <1>;
+               #size-cells     = <0>;
+
+               st95hf@0{
+                       compatible              = "st,st95hf";
+                       reg                     = <0>;
+                       spi-max-frequency       = <1000000>;
+                       interrupts              = <2 IRQ_TYPE_EDGE_FALLING>;
+               };
+       };
index 31b16610c4169bf42e8af3dd2478deec1d2c96e5..77b36f59d16b452bbf12bba4e3db83ec3ea84a9f 100644 (file)
@@ -98,7 +98,7 @@ rt_mutex_start_proxy_lock() and rt_mutex_finish_proxy_lock(), which
 allow the requeue code to acquire an uncontended rt_mutex on behalf
 of the waiter and to enqueue the waiter on a contended rt_mutex.
 Two new system calls provide the kernel<->user interface to
-requeue_pi: FUTEX_WAIT_REQUEUE_PI and FUTEX_REQUEUE_CMP_PI.
+requeue_pi: FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI.
 
 FUTEX_WAIT_REQUEUE_PI is called by the waiter (pthread_cond_wait()
 and pthread_cond_timedwait()) to block on the initial futex and wait
@@ -107,7 +107,7 @@ result of a high-speed collision between futex_wait() and
 futex_lock_pi(), with some extra logic to check for the additional
 wake-up scenarios.
 
-FUTEX_REQUEUE_CMP_PI is called by the waker
+FUTEX_CMP_REQUEUE_PI is called by the waker
 (pthread_cond_broadcast() and pthread_cond_signal()) to requeue and
 possibly wake the waiting tasks. Internally, this system call is
 still handled by futex_requeue (by passing requeue_pi=1).  Before
@@ -120,12 +120,12 @@ task as a waiter on the underlying rt_mutex.  It is possible that
 the lock can be acquired at this stage as well, if so, the next
 waiter is woken to finish the acquisition of the lock.
 
-FUTEX_REQUEUE_PI accepts nr_wake and nr_requeue as arguments, but
+FUTEX_CMP_REQUEUE_PI accepts nr_wake and nr_requeue as arguments, but
 their sum is all that really matters.  futex_requeue() will wake or
 requeue up to nr_wake + nr_requeue tasks.  It will wake only as many
 tasks as it can acquire the lock for, which in the majority of cases
 should be 0 as good programming practice dictates that the caller of
 either pthread_cond_broadcast() or pthread_cond_signal() acquire the
-mutex prior to making the call. FUTEX_REQUEUE_PI requires that
+mutex prior to making the call. FUTEX_CMP_REQUEUE_PI requires that
 nr_wake=1.  nr_requeue should be INT_MAX for broadcast and 0 for
 signal.
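
As a rough userspace sketch of the waker side just described (illustrative
names; the only interface facts relied on are the futex(2) calling convention,
where nr_requeue travels in the timeout argument slot, and the nr_wake=1 rule
stated above):

    /* Sketch: wake one waiter and requeue the rest from a condvar
     * futex onto a PI futex. nr_requeue is INT_MAX for a broadcast
     * and 0 for a signal, per the text above. */
    #define _GNU_SOURCE
    #include <limits.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/futex.h>

    static long futex_cmp_requeue_pi(uint32_t *cond, uint32_t expected,
                                     uint32_t *pi_mutex, int nr_requeue)
    {
        /* val = nr_wake (must be 1); val2 = nr_requeue, passed in
         * the slot normally used for the timeout pointer */
        return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE_PI,
                       1, (unsigned long)nr_requeue, pi_mutex, expected);
    }

    int main(void)
    {
        uint32_t cond = 0, pi_mutex = 0;

        /* broadcast with no waiters: returns 0 tasks woken/requeued */
        return futex_cmp_requeue_pi(&cond, 0, &pi_mutex, INT_MAX) < 0;
    }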
index 4223c2d3b508be75e5764e67fe979b23aec90cc7..cfd31d94c8727251179a0c2afcc4f9b2a4580156 100644 (file)
@@ -26,6 +26,12 @@ Supported chips:
     Datasheet: Publicly available at the Texas Instruments website
                http://www.ti.com/
 
+  * Texas Instruments INA231
+    Prefix: 'ina231'
+    Addresses: I2C 0x40 - 0x4f
+    Datasheet: Publicly available at the Texas Instruments website
+               http://www.ti.com/
+
 Author: Lothar Felten <l-felten@ti.com>
 
 Description
@@ -41,9 +47,18 @@ interface. The INA220 monitors both shunt drop and supply voltage.
 The INA226 is a current shunt and power monitor with an I2C interface.
 The INA226 monitors both a shunt voltage drop and bus supply voltage.
 
-The INA230 is a high or low side current shunt and power monitor with an I2C
-interface. The INA230 monitors both a shunt voltage drop and bus supply voltage.
+INA230 and INA231 are high or low side current shunt and power monitors
+with an I2C interface. The chips monitor both a shunt voltage drop and
+bus supply voltage.
 
-The shunt value in micro-ohms can be set via platform data or device tree.
-Please refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings
+The shunt value in micro-ohms can be set via platform data or device tree at
+compile-time or via the shunt_resistor attribute in sysfs at run-time. Please
+refer to the Documentation/devicetree/bindings/i2c/ina2xx.txt for bindings
 if the device tree is used.
+
+Additionally ina226 supports update_interval attribute as described in
+Documentation/hwmon/sysfs-interface. Internally the interval is the sum of
+bus and shunt voltage conversion times multiplied by the averaging rate. We
+don't touch the conversion times and only modify the number of averages. The
+lower limit of the update_interval is 2 ms, the upper limit is 2253 ms.
+The actual programmed interval may vary from the desired value.
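
A worked example of the arithmetic described above, assuming the INA226
power-on default conversion time of 1.1 ms per channel (an assumption from the
datasheet; the text above only fixes the formula and the 2 ms / 2253 ms
limits):

    /* update_interval ~= (bus + shunt conversion time) * averaging
     * rate. With an assumed 1.1 ms per conversion this reproduces the
     * documented limits: 1 avg -> 2.2 ms, 1024 avg -> ~2253 ms. */
    #include <stdio.h>

    int main(void)
    {
        const double conv_ms = 1.1;            /* assumed per channel */
        const int avg[] = { 1, 4, 64, 1024 };
        int i;

        for (i = 0; i < 4; i++)
            printf("avg=%4d -> ~%.1f ms\n", avg[i],
                   2 * conv_ms * avg[i]);
        return 0;
    }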
index 5dbc99c04f6e3d5469c7fa39f228a86900de89ab..5001280e9d824d360cfb5589eb40751968b841d3 100644 (file)
@@ -34,7 +34,7 @@ The validator tracks lock-class usage history into 4n + 1 separate state bits:
 - 'ever held with STATE enabled'
 - 'ever held as readlock with STATE enabled'
 
-Where STATE can be either one of (kernel/lockdep_states.h)
+Where STATE can be either one of (kernel/locking/lockdep_states.h)
  - hardirq
  - softirq
  - reclaim_fs
index 70a09f8a0383b2cdc25ab35f6aef43e8ac2905b4..ca2387ef27ab0c38cdb4b5e74ffbcde9509d61d8 100644 (file)
@@ -269,6 +269,50 @@ And there are a number of things that _must_ or _must_not_ be assumed:
        STORE *(A + 4) = Y; STORE *A = X;
        STORE {*A, *(A + 4) } = {X, Y};
 
+And there are anti-guarantees:
+
+ (*) These guarantees do not apply to bitfields, because compilers often
+     generate code to modify these using non-atomic read-modify-write
+     sequences.  Do not attempt to use bitfields to synchronize parallel
+     algorithms.
+
+ (*) Even in cases where bitfields are protected by locks, all fields
+     in a given bitfield must be protected by one lock.  If two fields
+     in a given bitfield are protected by different locks, the compiler's
+     non-atomic read-modify-write sequences can cause an update to one
+     field to corrupt the value of an adjacent field.
+
+ (*) These guarantees apply only to properly aligned and sized scalar
+     variables.  "Properly sized" currently means variables that are
+     the same size as "char", "short", "int" and "long".  "Properly
+     aligned" means the natural alignment, thus no constraints for
+     "char", two-byte alignment for "short", four-byte alignment for
+     "int", and either four-byte or eight-byte alignment for "long",
+     on 32-bit and 64-bit systems, respectively.  Note that these
+     guarantees were introduced into the C11 standard, so beware when
+     using older pre-C11 compilers (for example, gcc 4.6).  The portion
+     of the standard containing this guarantee is Section 3.14, which
+     defines "memory location" as follows:
+
+       memory location
+               either an object of scalar type, or a maximal sequence
+               of adjacent bit-fields all having nonzero width
+
+               NOTE 1: Two threads of execution can update and access
+               separate memory locations without interfering with
+               each other.
+
+               NOTE 2: A bit-field and an adjacent non-bit-field member
+               are in separate memory locations. The same applies
+               to two bit-fields, if one is declared inside a nested
+               structure declaration and the other is not, or if the two
+               are separated by a zero-length bit-field declaration,
+               or if they are separated by a non-bit-field member
+               declaration. It is not safe to concurrently update two
+               bit-fields in the same structure if all members declared
+               between them are also bit-fields, no matter what the
+               sizes of those intervening bit-fields happen to be.
+
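
A minimal sketch of the second anti-guarantee (lock_a and lock_b are
assumed to be ordinary spinlocks):

    struct shared {
            int a : 4;      /* updated only under lock_a */
            int b : 4;      /* updated only under lock_b */
    };

    /*
     * a and b share one memory location, so the compiler may turn
     * either assignment into a read-modify-write of the whole word:
     *
     *      tmp = *(int *)s;  <modify bits>;  *(int *)s = tmp;
     *
     * If CPU 0 updates s->a under lock_a while CPU 1 updates s->b
     * under lock_b, one CPU's read-modify-write can overwrite the
     * other's result.  One lock must cover the whole bitfield.
     */
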
 
 =========================
 WHAT ARE MEMORY BARRIERS?
@@ -750,7 +794,7 @@ In summary:
       However, they do -not- guarantee any other sort of ordering:
       Not prior loads against later loads, nor prior stores against
       later anything.  If you need these other forms of ordering,
-      use smb_rmb(), smp_wmb(), or, in the case of prior stores and
+      use smp_rmb(), smp_wmb(), or, in the case of prior stores and
       later loads, smp_mb().
 
   (*) If both legs of the "if" statement begin with identical stores
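
For reference, the smp_wmb()/smp_rmb() pairing referred to above, as a
sketch with assumed global variables:

    int data, flag;

    void producer(void)                     /* CPU 0 */
    {
            WRITE_ONCE(data, 42);
            smp_wmb();      /* order the data store before the flag store */
            WRITE_ONCE(flag, 1);
    }

    void consumer(void)                     /* CPU 1 */
    {
            while (!READ_ONCE(flag))
                    cpu_relax();
            smp_rmb();      /* order the flag load before the data load */
            BUG_ON(READ_ONCE(data) != 42);
    }
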
index c6af4bac5aa8f914a83305831e10f285c1699fb2..54f10478e8e30ccda77da9d345c01b9ace781e07 100644 (file)
@@ -199,16 +199,9 @@ frame header.
 TX limitations
 --------------
 
-Kernel processing usually involves validation of the message received by
-user-space, then processing its contents. The kernel must assure that
-userspace is not able to modify the message contents after they have been
-validated. In order to do so, the message is copied from the ring frame
-to an allocated buffer if either of these conditions is false:
-
-- only a single mapping of the ring exists
-- the file descriptor is not shared between processes
-
-This means that for threaded programs, the kernel will fall back to copying.
+As of Jan 2015, the message is always copied from the ring frame to an
+allocated buffer due to unresolved security concerns.
+See commit 4682a0358639b29cf ("netlink: Always copy on mmap TX.").
 
 Example
 -------
index 4a1c5c2dc5a919f5aa56f1b2bf4f847c68b4e357..9132b86176a3899b6ad8bd7f4bd5b630dd6fa031 100644 (file)
@@ -78,9 +78,6 @@ The expensive (paranoid) way is to read back the MSR_GS_BASE value
        xorl %ebx,%ebx
 1:     ret
 
-and the whole paranoid non-paranoid macro complexity is about whether
-to suffer that RDMSR cost.
-
 If we are at an interrupt or user-trap/gate-alike boundary then we can
 use the faster check: the stack will be a reliable indicator of
 whether SWAPGS was already done: if we see that we are a secondary
@@ -93,6 +90,15 @@ which might have triggered right after a normal entry wrote CS to the
 stack but before we executed SWAPGS, then the only safe way to check
 for GS is the slower method: the RDMSR.
 
-So we try only to mark those entry methods 'paranoid' that absolutely
-need the more expensive check for the GS base - and we generate all
-'normal' entry points with the regular (faster) entry macros.
+Therefore, super-atomic entries (except NMI, which is handled separately)
+must use idtentry with paranoid=1 to handle gsbase correctly.  This
+triggers three main behavior changes:
+
+ - Interrupt entry will use the slower gsbase check.
+ - Interrupt entry from user mode will switch off the IST stack.
+ - Interrupt exit to kernel mode will not attempt to reschedule.
+
+We try to only use IST entries and the paranoid entry code for vectors
+that absolutely need the more expensive check for the GS base - and we
+generate all 'normal' entry points with the regular (faster) paranoid=0
+variant.
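
In C-flavoured pseudocode, the choice between the two checks looks roughly
like this (illustrative only, not the real entry_64.S macros):

    void decide_swapgs(struct pt_regs *regs, bool paranoid)
    {
            if (!paranoid) {
                    /* Fast path: CS saved on the stack is reliable here. */
                    if (user_mode(regs))
                            native_swapgs();
            } else {
                    /* Super-atomic entry: only the RDMSR is trustworthy. */
                    unsigned long gsbase;

                    rdmsrl(MSR_GS_BASE, gsbase);
                    if ((long)gsbase >= 0)  /* kernel GS base is negative */
                            native_swapgs();
            }
    }
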
index a01eec5d1d0b2b4898dc09e175f240cbc33a8ea5..e3c8a49d1a2f5b51cee6c128c5a2aecd257409dd 100644 (file)
@@ -40,9 +40,11 @@ An IST is selected by a non-zero value in the IST field of an
 interrupt-gate descriptor.  When an interrupt occurs and the hardware
 loads such a descriptor, the hardware automatically sets the new stack
 pointer based on the IST value, then invokes the interrupt handler.  If
-software wants to allow nested IST interrupts then the handler must
-adjust the IST values on entry to and exit from the interrupt handler.
-(This is occasionally done, e.g. for debug exceptions.)
+the interrupt came from user mode, then the interrupt handler prologue
+will switch back to the per-thread stack.  If software wants to allow
+nested IST interrupts then the handler must adjust the IST values on
+entry to and exit from the interrupt handler.  (This is occasionally
+done, e.g. for debug exceptions.)
 
 Events with different IST codes (i.e. with different stacks) can be
 nested.  For example, a debug interrupt can safely be interrupted by an
index 2ebb056cbe0a3032eb91f7f773b40bd033f27fe7..d66a97dd3a12548e0f79c59154260724da11f282 100644 (file)
@@ -708,6 +708,16 @@ X: drivers/iio/*/adjd*
 F:     drivers/staging/iio/*/ad*
 F:     staging/iio/trigger/iio-trig-bfin-timer.c
 
+ANDROID DRIVERS
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M:     Arve Hjønnevåg <arve@android.com>
+M:     Riley Andrews <riandrews@android.com>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
+L:     devel@driverdev.osuosl.org
+S:     Supported
+F:     drivers/android/
+F:     drivers/staging/android/
+
 AOA (Apple Onboard Audio) ALSA DRIVER
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linuxppc-dev@lists.ozlabs.org
@@ -4943,6 +4953,16 @@ F:       Documentation/input/multi-touch-protocol.txt
 F:     drivers/input/input-mt.c
 K:     \b(ABS|SYN)_MT_
 
+INTEL ASoC BDW/HSW DRIVERS
+M:     Jie Yang <yang.jie@linux.intel.com>
+L:     alsa-devel@alsa-project.org
+S:     Supported
+F:     sound/soc/intel/sst-haswell*
+F:     sound/soc/intel/sst-dsp*
+F:     sound/soc/intel/sst-firmware.c
+F:     sound/soc/intel/broadwell.c
+F:     sound/soc/intel/haswell.c
+
 INTEL C600 SERIES SAS CONTROLLER DRIVER
 M:     Intel SCU Linux support <intel-linux-scu@intel.com>
 M:     Artur Paszkiewicz <artur.paszkiewicz@intel.com>
@@ -9231,7 +9251,6 @@ F:        drivers/net/ethernet/dlink/sundance.c
 
 SUPERH
 L:     linux-sh@vger.kernel.org
-W:     http://www.linux-sh.org
 Q:     http://patchwork.kernel.org/project/linux-sh/list/
 S:     Orphan
 F:     Documentation/sh/
@@ -10166,6 +10185,7 @@ USERSPACE I/O (UIO)
 M:     "Hans J. Koch" <hjk@hansjkoch.de>
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F:     Documentation/DocBook/uio-howto.tmpl
 F:     drivers/uio/
 F:     include/linux/uio*.h
index 95a0e827ecd30a40950643be5f2d516aac17b2f8..b15036b1890cae7031b56c4718edbd5baddfacbb 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
index 98838a05ba6d89f0459742131010f57c38cbed05..9d0ac091a52a7d16cf1f78f402ab48c511924a24 100644 (file)
@@ -156,6 +156,8 @@ retry:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 6f7e3a68803a097461d27ace9fa940ba3211ab6b..563cb27e37f55f3f99badc9b99e64aca6ee397b7 100644 (file)
@@ -161,6 +161,8 @@ good_area:
 
        if (fault & VM_FAULT_OOM)
                goto out_of_memory;
+       else if (fault & VM_FAULT_SIGSEGV)
+               goto bad_area;
        else if (fault & VM_FAULT_SIGBUS)
                goto do_sigbus;
 
index 68be9017593df10f235ca228a6837aaa11f5c818..132c70e2d2f11cfb4e655ddcb05931be5bd03498 100644 (file)
@@ -263,16 +263,37 @@ restart:  adr     r0, LC0
                 * OK... Let's do some funky business here.
                 * If we do have a DTB appended to zImage, and we do have
                 * an ATAG list around, we want the latter to be translated
-                * and folded into the former here.  To be on the safe side,
-                * let's temporarily move  the stack away into the malloc
-                * area.  No GOT fixup has occurred yet, but none of the
-                * code we're about to call uses any global variable.
+                * and folded into the former here. No GOT fixup has occurred
+                * yet, but none of the code we're about to call uses any
+                * global variable.
                */
-               add     sp, sp, #0x10000
+
+               /* Get the initial DTB size */
+               ldr     r5, [r6, #4]
+#ifndef __ARMEB__
+               /* convert to little endian */
+               eor     r1, r5, r5, ror #16
+               bic     r1, r1, #0x00ff0000
+               mov     r5, r5, ror #8
+               eor     r5, r5, r1, lsr #8
+#endif
+               /* 50% DTB growth should be good enough */
+               add     r5, r5, r5, lsr #1
+               /* preserve 64-bit alignment */
+               add     r5, r5, #7
+               bic     r5, r5, #7
+               /* clamp to 32KB min and 1MB max */
+               cmp     r5, #(1 << 15)
+               movlo   r5, #(1 << 15)
+               cmp     r5, #(1 << 20)
+               movhi   r5, #(1 << 20)
+               /* temporarily relocate the stack past the DTB work space */
+               add     sp, sp, r5
+
                stmfd   sp!, {r0-r3, ip, lr}
                mov     r0, r8
                mov     r1, r6
-               sub     r2, sp, r6
+               mov     r2, r5
                bl      atags_to_fdt
 
                /*
@@ -285,11 +306,11 @@ restart:  adr     r0, LC0
                bic     r0, r0, #1
                add     r0, r0, #0x100
                mov     r1, r6
-               sub     r2, sp, r6
+               mov     r2, r5
                bleq    atags_to_fdt
 
                ldmfd   sp!, {r0-r3, ip, lr}
-               sub     sp, sp, #0x10000
+               sub     sp, sp, r5
 #endif
 
                mov     r8, r6                  @ use the appended device tree
@@ -306,7 +327,7 @@ restart:    adr     r0, LC0
                subs    r1, r5, r1
                addhi   r9, r9, r1
 
-               /* Get the dtb's size */
+               /* Get the current DTB size */
                ldr     r5, [r6, #4]
 #ifndef __ARMEB__
                /* convert r5 (dtb size) to little endian */
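
In C, the workspace sizing above amounts to the following sketch
(little-endian path only; __ARMEB__ builds skip the byte swap):

    #include <stdint.h>

    static uint32_t dtb_workspace(uint32_t be_totalsize)
    {
            /* the DTB header stores totalsize big-endian */
            uint32_t size = __builtin_bswap32(be_totalsize);

            size += size >> 1;              /* 50% growth should be enough */
            size = (size + 7) & ~7u;        /* preserve 64-bit alignment */

            if (size < (1u << 15))          /* clamp to 32KB minimum */
                    size = 1u << 15;
            if (size > (1u << 20))          /* and to 1MB maximum */
                    size = 1u << 20;
            return size;
    }
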
index b8168f1f8139baf3a1420ba17061ad18dfa41151..24ff27049ce015bf6c1203b73d97bfaa12ff0e37 100644 (file)
        };
 
        i2s1: i2s@13960000 {
-               compatible = "samsung,s5pv210-i2s";
+               compatible = "samsung,s3c6410-i2s";
                reg = <0x13960000 0x100>;
                clocks = <&clock CLK_I2S1>;
                clock-names = "iis";
        };
 
        i2s2: i2s@13970000 {
-               compatible = "samsung,s5pv210-i2s";
+               compatible = "samsung,s3c6410-i2s";
                reg = <0x13970000 0x100>;
                clocks = <&clock CLK_I2S2>;
                clock-names = "iis";
index 7b4099fcf81788714def505ff009e2b7a4948db2..d5c4669224b1734178f1a04efd42e9d5ec9cda27 100644 (file)
 
        aliases {
                ethernet0 = &emac;
-               serial0 = &uart0;
-               serial1 = &uart1;
-               serial2 = &uart2;
-               serial3 = &uart3;
-               serial4 = &uart4;
-               serial5 = &uart5;
-               serial6 = &uart6;
-               serial7 = &uart7;
        };
 
        chosen {
                                 <&ahb_gates 44>;
                        status = "disabled";
                };
+
+               framebuffer@1 {
+                       compatible = "allwinner,simple-framebuffer", "simple-framebuffer";
+                       allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
+                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
+                                <&ahb_gates 44>, <&ahb_gates 46>;
+                       status = "disabled";
+               };
        };
 
        cpus {
                        reg-names = "phy_ctrl", "pmu1", "pmu2";
                        clocks = <&usb_clk 8>;
                        clock-names = "usb_phy";
-                       resets = <&usb_clk 1>, <&usb_clk 2>;
-                       reset-names = "usb1_reset", "usb2_reset";
+                       resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>;
+                       reset-names = "usb0_reset", "usb1_reset", "usb2_reset";
                        status = "disabled";
                };
 
index fe3c559ca6a8e6d24412abe6d2a81c2a6b97bdda..bfa742817690d823d2044d3e6397f2237abb8cd9 100644 (file)
        model = "Olimex A10s-Olinuxino Micro";
        compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s";
 
+       aliases {
+               serial0 = &uart0;
+               serial1 = &uart2;
+               serial2 = &uart3;
+       };
+
        soc@01c00000 {
                emac: ethernet@01c0b000 {
                        pinctrl-names = "default";
index 1b76667f3182694ffb7d055cb8f8e7230809a0bf..2e7d8263799d77fe864a4776285844deb6a21fa3 100644 (file)
 
        aliases {
                ethernet0 = &emac;
-               serial0 = &uart0;
-               serial1 = &uart1;
-               serial2 = &uart2;
-               serial3 = &uart3;
        };
 
        chosen {
                        reg-names = "phy_ctrl", "pmu1";
                        clocks = <&usb_clk 8>;
                        clock-names = "usb_phy";
-                       resets = <&usb_clk 1>;
-                       reset-names = "usb1_reset";
+                       resets = <&usb_clk 0>, <&usb_clk 1>;
+                       reset-names = "usb0_reset", "usb1_reset";
                        status = "disabled";
                };
 
index eeed1f236ee8c400465cbe87824516462dc6ec63..c7be3abd9fcc31a9ab3280b0a7b4a01ba73880b4 100644 (file)
        model = "HSG H702";
        compatible = "hsg,h702", "allwinner,sun5i-a13";
 
+       aliases {
+               serial0 = &uart1;
+       };
+
        soc@01c00000 {
                mmc0: mmc@01c0f000 {
                        pinctrl-names = "default";
index 916ee8bb826f7186380ecaf967d55488f84db6aa..3decefb3c37ac438c27574e7aed940fa2a684ea8 100644 (file)
        model = "Olimex A13-Olinuxino Micro";
        compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13";
 
+       aliases {
+               serial0 = &uart1;
+       };
+
        soc@01c00000 {
                mmc0: mmc@01c0f000 {
                        pinctrl-names = "default";
index e31d291d14cbcd22add60622a9a2e83b8206ea11..b421f7fa197b475f7e0e3e23637c1acf6a7624ec 100644 (file)
        model = "Olimex A13-Olinuxino";
        compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13";
 
+       aliases {
+               serial0 = &uart1;
+       };
+
        soc@01c00000 {
                mmc0: mmc@01c0f000 {
                        pinctrl-names = "default";
index c35217ea1f6473653b4d67ce953f3c761fa043a0..c556688f8b8ba400a7bef986690e40a633f836e2 100644 (file)
 / {
        interrupt-parent = <&intc>;
 
-       aliases {
-               serial0 = &uart1;
-               serial1 = &uart3;
-       };
-
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
                        reg-names = "phy_ctrl", "pmu1";
                        clocks = <&usb_clk 8>;
                        clock-names = "usb_phy";
-                       resets = <&usb_clk 1>;
-                       reset-names = "usb1_reset";
+                       resets = <&usb_clk 0>, <&usb_clk 1>;
+                       reset-names = "usb0_reset", "usb1_reset";
                        status = "disabled";
                };
 
index f47156b6572bbaf09872686b70f09268200c3cc9..1e7e7bcf83071f005b2fd57c5fc8112432104f35 100644 (file)
        interrupt-parent = <&gic>;
 
        aliases {
-               serial0 = &uart0;
-               serial1 = &uart1;
-               serial2 = &uart2;
-               serial3 = &uart3;
-               serial4 = &uart4;
-               serial5 = &uart5;
                ethernet0 = &gmac;
        };
 
index 1cf1214cc068f2e197335cfbfc10d1bf9054636d..bd7b15add6972d26fbcffdfa010fe3435141c0b5 100644 (file)
        model = "LeMaker Banana Pi";
        compatible = "lemaker,bananapi", "allwinner,sun7i-a20";
 
+       aliases {
+               serial0 = &uart0;
+               serial1 = &uart3;
+               serial2 = &uart7;
+       };
+
        soc@01c00000 {
                spi0: spi@01c05000 {
                        pinctrl-names = "default";
index 0e4bfa3b2b8540b361b07f9cf1c943c4cba8a9c6..0bcefcbbb756e0b7d7e8ad7450413e0c01c2a273 100644 (file)
        model = "Merrii A20 Hummingbird";
        compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20";
 
+       aliases {
+               serial0 = &uart0;
+               serial1 = &uart2;
+               serial2 = &uart3;
+               serial3 = &uart4;
+               serial4 = &uart5;
+       };
+
        soc@01c00000 {
                mmc0: mmc@01c0f000 {
                        pinctrl-names = "default";
index 9d669cdf031d1aa1ea78c2d8a7c23b713dff3f98..66cc7770719867d8331b30b296611a1921bfac9b 100644 (file)
@@ -20,6 +20,9 @@
        compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20";
 
        aliases {
+               serial0 = &uart0;
+               serial1 = &uart6;
+               serial2 = &uart7;
                spi0 = &spi1;
                spi1 = &spi2;
        };
index e21ce5992d565c348ae3b798ad232cd8f6c16c6b..89749ce34a844ef8777649a712232631534467d1 100644 (file)
 
        aliases {
                ethernet0 = &gmac;
-               serial0 = &uart0;
-               serial1 = &uart1;
-               serial2 = &uart2;
-               serial3 = &uart3;
-               serial4 = &uart4;
-               serial5 = &uart5;
-               serial6 = &uart6;
-               serial7 = &uart7;
        };
 
        chosen {
index 7f2117ce6985b1de46cfd2fbfbc42fb76e19a1d7..32ad80804dbbc3d31adb703b373c62e9cf597beb 100644 (file)
        model = "Ippo Q8H Dual Core Tablet (v5)";
        compatible = "ippo,q8h-v5", "allwinner,sun8i-a23";
 
+       aliases {
+               serial0 = &r_uart;
+       };
+
        chosen {
                bootargs = "earlyprintk console=ttyS0,115200";
        };
index 0746cd1024d7a73b32bcbf6002954a4859d77ee5..86584fcf5e323bc43489c79be74e43ef8931d3a2 100644 (file)
 / {
        interrupt-parent = <&gic>;
 
-       aliases {
-               serial0 = &uart0;
-               serial1 = &uart1;
-               serial2 = &uart2;
-               serial3 = &uart3;
-               serial4 = &uart4;
-               serial5 = &r_uart;
-       };
-
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
index 506948f582eee37a11fdb17ea5a7f5eb7277b390..11ec71072e815ac5b4e54a3db3cace2ee81b78b8 100644 (file)
        model = "Merrii A80 Optimus Board";
        compatible = "merrii,a80-optimus", "allwinner,sun9i-a80";
 
+       aliases {
+               serial0 = &uart0;
+               serial1 = &uart4;
+       };
+
        chosen {
                bootargs = "earlyprintk console=ttyS0,115200";
        };
index 494714f67b57821816f1b484659765aca58b8eb0..9ef4438206a9986ed82b733ebeaf961f474b742a 100644 (file)
 / {
        interrupt-parent = <&gic>;
 
-       aliases {
-               serial0 = &uart0;
-               serial1 = &uart1;
-               serial2 = &uart2;
-               serial3 = &uart3;
-               serial4 = &uart4;
-               serial5 = &uart5;
-               serial6 = &r_uart;
-       };
-
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
index 66ce17655bb9e29a73f58ebd9a4716735fcf9739..7b0152321b20baa3178d6f9135f12cb62a27987f 100644 (file)
@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
        vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+       vcpu->arch.hcr = hcr;
+}
+
 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 {
        return 1;
index 254e0650e48bbc1d07a81d091c8e33570f983851..04b4ea0b550a111811876369bce25358f4d1965e 100644 (file)
@@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
         * Anything that is not used directly from assembly code goes
         * here.
         */
-       /* dcache set/way operation pending */
-       int last_pcpu;
-       cpumask_t require_dcache_flush;
 
        /* Don't run the guest on this vcpu */
        bool pause;
index 63e0ecc0490180e8b8b5e548b6b6a17b95b725e1..1bca8f8af4424154d69289bb32793a42ff223d30 100644 (file)
@@ -44,6 +44,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 
@@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
        return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-                                            unsigned long size,
-                                            bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+                                              unsigned long size,
+                                              bool ipa_uncached)
 {
-       if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-               kvm_flush_dcache_to_poc((void *)hva, size);
-       
        /*
         * If we are going to insert an instruction page and the icache is
         * either VIPT or PIPT, there is a potential problem where the host
@@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
         *
         * VIVT caches are tagged using both the ASID and the VMID and don't
         * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+        *
+        * We need to do this through a kernel mapping (using the
+        * user-space mapping has proved to be the wrong
+        * solution). For that, we need to kmap one page at a time,
+        * and iterate over the range.
         */
-       if (icache_is_pipt()) {
-               __cpuc_coherent_user_range(hva, hva + size);
-       } else if (!icache_is_vivt_asid_tagged()) {
+
+       bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+       VM_BUG_ON(size & ~PAGE_MASK);
+
+       if (!need_flush && !icache_is_pipt())
+               goto vipt_cache;
+
+       while (size) {
+               void *va = kmap_atomic_pfn(pfn);
+
+               if (need_flush)
+                       kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+               if (icache_is_pipt())
+                       __cpuc_coherent_user_range((unsigned long)va,
+                                                  (unsigned long)va + PAGE_SIZE);
+
+               size -= PAGE_SIZE;
+               pfn++;
+
+               kunmap_atomic(va);
+       }
+
+vipt_cache:
+       if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
                /* any kind of VIPT cache */
                __flush_icache_all();
        }
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+       void *va = kmap_atomic(pte_page(pte));
+
+       kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+       kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+       unsigned long size = PMD_SIZE;
+       pfn_t pfn = pmd_pfn(pmd);
+
+       while (size) {
+               void *va = kmap_atomic_pfn(pfn);
+
+               kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+               pfn++;
+               size -= PAGE_SIZE;
+
+               kunmap_atomic(va);
+       }
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
 #define kvm_virt_to_phys(x)            virt_to_idmap((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif /* !__ASSEMBLY__ */
 
index 2260f1855820fa2d2961025b2683c2e30982a8e0..8944f4991c3cfd4e76585cdc11b78e3c3b2257d2 100644 (file)
 
 __invalid_entry:
        v7m_exception_entry
+#ifdef CONFIG_PRINTK
        adr     r0, strerr
        mrs     r1, ipsr
        mov     r2, lr
        bl      printk
+#endif
        mov     r0, sp
        bl      show_regs
 1:     b       1b
index 466bd299b1a8aad54949364d976d9c5430c2375e..3afee5f40f4f1d7ff6d10b6a95b1b6434415c63e 100644 (file)
@@ -23,6 +23,7 @@ config KVM
        select HAVE_KVM_CPU_RELAX_INTERCEPT
        select KVM_MMIO
        select KVM_ARM_HOST
+       select SRCU
        depends on ARM_VIRT_EXT && ARM_LPAE
        ---help---
          Support hosting virtualized guest machines. You will also
index 2d6d91001062f975981dd9beed2b43815f73c238..0b0d58a905c43ba05afc61ca06341bfab348b528 100644 (file)
@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        vcpu->cpu = cpu;
        vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
-       /*
-        * Check whether this vcpu requires the cache to be flushed on
-        * this physical CPU. This is a consequence of doing dcache
-        * operations by set/way on this vcpu. We do it here to be in
-        * a non-preemptible section.
-        */
-       if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
-               flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
-
        kvm_arm_set_running_vcpu(vcpu);
 }
 
@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
                vcpu->mode = OUTSIDE_GUEST_MODE;
-               vcpu->arch.last_pcpu = smp_processor_id();
                kvm_guest_exit();
                trace_kvm_exit(*vcpu_pc(vcpu));
                /*
index 7928dbdf210239a71f4f35e8ef289e9c2e8f0375..f3d88dc388bc560778d49dacfded70aebde44a89 100644 (file)
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
        return true;
 }
 
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
 {
-       unsigned long val;
-       int cpu;
-
        if (!p->is_write)
                return read_from_write_only(vcpu, p);
 
-       cpu = get_cpu();
-
-       cpumask_setall(&vcpu->arch.require_dcache_flush);
-       cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-       /* If we were already preempted, take the long way around */
-       if (cpu != vcpu->arch.last_pcpu) {
-               flush_cache_all();
-               goto done;
-       }
-
-       val = *vcpu_reg(vcpu, p->Rt1);
-
-       switch (p->CRm) {
-       case 6:                 /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-       case 14:                /* DCCISW */
-               asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
-               break;
-
-       case 10:                /* DCCSW */
-               asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
-               break;
-       }
-
-done:
-       put_cpu();
-
+       kvm_set_way_flush(vcpu);
        return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set.  If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
+ *
+ * Used by the cpu-specific code.
  */
-static bool access_vm_reg(struct kvm_vcpu *vcpu,
-                         const struct coproc_params *p,
-                         const struct coproc_reg *r)
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+                  const struct coproc_params *p,
+                  const struct coproc_reg *r)
 {
+       bool was_enabled = vcpu_has_cache_enabled(vcpu);
+
        BUG_ON(!p->is_write);
 
        vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
        if (p->is_64bit)
                vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
 
-       return true;
-}
-
-/*
- * SCTLR accessor. Only called as long as HCR_TVM is set.  If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_sctlr(struct kvm_vcpu *vcpu,
-                 const struct coproc_params *p,
-                 const struct coproc_reg *r)
-{
-       access_vm_reg(vcpu, p, r);
-
-       if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
-               vcpu->arch.hcr &= ~HCR_TVM;
-               stage2_flush_vm(vcpu->kvm);
-       }
-
+       kvm_toggle_cache(vcpu, was_enabled);
        return true;
 }
 
index 1a44bbe39643f519ec986d43dcd3e416881d13a9..88d24a3a977812b512e8e0c03144fec796727f8b 100644 (file)
@@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 #define is64           .is_64 = true
 #define is32           .is_64 = false
 
-bool access_sctlr(struct kvm_vcpu *vcpu,
-                 const struct coproc_params *p,
-                 const struct coproc_reg *r);
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+                  const struct coproc_params *p,
+                  const struct coproc_reg *r);
 
 #endif /* __ARM_KVM_COPROC_LOCAL_H__ */
index e6f4ae48bda968f8cac7caf6c94ffd1413631436..a7136757d3731534fd169e06938c74b85803cdfc 100644 (file)
@@ -34,7 +34,7 @@
 static const struct coproc_reg a15_regs[] = {
        /* SCTLR: swapped by interrupt.S. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-                       access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+                       access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
 };
 
 static struct kvm_coproc_target_table a15_target_table = {
index 17fc7cd479d3e75d322c207ffa35d42f8785d7ee..b19e46d1b2c08187cc70d4e86742ea4a470ec83f 100644 (file)
@@ -37,7 +37,7 @@
 static const struct coproc_reg a7_regs[] = {
        /* SCTLR: swapped by interrupt.S. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-                       access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
+                       access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
 };
 
 static struct kvm_coproc_target_table a7_target_table = {
index 1dc9778a00af358431bbed021ba2d5244642debb..136662547ca6298f0fd5b6f2c73c7aea557ff472 100644 (file)
@@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
+/*
+ * D-Cache management functions. They take the page table entries by
+ * value, as they are flushing the cache using the kernel mapping (or
+ * kmap on 32bit).
+ */
+static void kvm_flush_dcache_pte(pte_t pte)
+{
+       __kvm_flush_dcache_pte(pte);
+}
+
+static void kvm_flush_dcache_pmd(pmd_t pmd)
+{
+       __kvm_flush_dcache_pmd(pmd);
+}
+
+static void kvm_flush_dcache_pud(pud_t pud)
+{
+       __kvm_flush_dcache_pud(pud);
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
 {
@@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
        put_page(virt_to_page(pmd));
 }
 
+/*
+ * Unmapping vs dcache management:
+ *
+ * If a guest maps certain memory pages as uncached, all writes will
+ * bypass the data cache and go directly to RAM.  However, the CPUs
+ * can still speculate reads (not writes) and fill cache lines with
+ * data.
+ *
+ * Those cache lines will be *clean* cache lines though, so a
+ * clean+invalidate operation is equivalent to an invalidate
+ * operation, because no cache lines are marked dirty.
+ *
+ * Those clean cache lines could be filled prior to an uncached write
+ * by the guest, and the cache coherent IO subsystem would therefore
+ * end up writing old data to disk.
+ *
+ * This is why right after unmapping a page/section and invalidating
+ * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
+ * the IO subsystem will never hit in the cache.
+ */
 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
                       phys_addr_t addr, phys_addr_t end)
 {
@@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
        start_pte = pte = pte_offset_kernel(pmd, addr);
        do {
                if (!pte_none(*pte)) {
+                       pte_t old_pte = *pte;
+
                        kvm_set_pte(pte, __pte(0));
-                       put_page(virt_to_page(pte));
                        kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+                       /* No need to invalidate the cache for device mappings */
+                       if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+                               kvm_flush_dcache_pte(old_pte);
+
+                       put_page(virt_to_page(pte));
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
 
@@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
                next = kvm_pmd_addr_end(addr, end);
                if (!pmd_none(*pmd)) {
                        if (kvm_pmd_huge(*pmd)) {
+                               pmd_t old_pmd = *pmd;
+
                                pmd_clear(pmd);
                                kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+                               kvm_flush_dcache_pmd(old_pmd);
+
                                put_page(virt_to_page(pmd));
                        } else {
                                unmap_ptes(kvm, pmd, addr, next);
@@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
                next = kvm_pud_addr_end(addr, end);
                if (!pud_none(*pud)) {
                        if (pud_huge(*pud)) {
+                               pud_t old_pud = *pud;
+
                                pud_clear(pud);
                                kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+                               kvm_flush_dcache_pud(old_pud);
+
                                put_page(virt_to_page(pud));
                        } else {
                                unmap_pmds(kvm, pud, addr, next);
@@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
        pte = pte_offset_kernel(pmd, addr);
        do {
-               if (!pte_none(*pte)) {
-                       hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-                       kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
-               }
+               if (!pte_none(*pte) &&
+                   (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+                       kvm_flush_dcache_pte(*pte);
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
@@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
        do {
                next = kvm_pmd_addr_end(addr, end);
                if (!pmd_none(*pmd)) {
-                       if (kvm_pmd_huge(*pmd)) {
-                               hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-                               kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
-                       } else {
+                       if (kvm_pmd_huge(*pmd))
+                               kvm_flush_dcache_pmd(*pmd);
+                       else
                                stage2_flush_ptes(kvm, pmd, addr, next);
-                       }
                }
        } while (pmd++, addr = next, addr != end);
 }
@@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
        do {
                next = kvm_pud_addr_end(addr, end);
                if (!pud_none(*pud)) {
-                       if (pud_huge(*pud)) {
-                               hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-                               kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
-                       } else {
+                       if (pud_huge(*pud))
+                               kvm_flush_dcache_pud(*pud);
+                       else
                                stage2_flush_pmds(kvm, pud, addr, next);
-                       }
                }
        } while (pud++, addr = next, addr != end);
 }
@@ -278,7 +330,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
  * Go through the stage 2 page tables and invalidate any cache lines
  * backing memory already mapped to the VM.
  */
-void stage2_flush_vm(struct kvm *kvm)
+static void stage2_flush_vm(struct kvm *kvm)
 {
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
@@ -905,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
        return !pfn_valid(pfn);
 }
 
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+                                     unsigned long size, bool uncached)
+{
+       __coherent_cache_guest_page(vcpu, pfn, size, uncached);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_memory_slot *memslot, unsigned long hva,
                          unsigned long fault_status)
@@ -994,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        kvm_set_s2pmd_writable(&new_pmd);
                        kvm_set_pfn_dirty(pfn);
                }
-               coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
-                                         fault_ipa_uncached);
+               coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
                ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
        } else {
                pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1003,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        kvm_set_s2pte_writable(&new_pte);
                        kvm_set_pfn_dirty(pfn);
                }
-               coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
-                                         fault_ipa_uncached);
+               coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
                ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
                        pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
        }
@@ -1411,3 +1467,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
        unmap_stage2_range(kvm, gpa, size);
        spin_unlock(&kvm->mmu_lock);
 }
+
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ *
+ * Main problems:
+ * - S/W ops are local to a CPU (not broadcast)
+ * - We have line migration behind our back (speculation)
+ * - System caches don't support S/W at all (damn!)
+ *
+ * In the face of the above, the best we can do is to try and convert
+ * S/W ops to VA ops. Because the guest is not allowed to infer the
+ * S/W to PA mapping, it can only use S/W to nuke the whole cache,
+ * which is a rather good thing for us.
+ *
+ * Also, it is only used when turning caches on/off ("The expected
+ * usage of the cache maintenance instructions that operate by set/way
+ * is associated with the cache maintenance instructions associated
+ * with the powerdown and powerup of caches, if this is required by
+ * the implementation.").
+ *
+ * We use the following policy:
+ *
+ * - If we trap a S/W operation, we enable VM trapping to detect
+ *   caches being turned on/off, and do a full clean.
+ *
+ * - We flush the caches on both caches being turned on and off.
+ *
+ * - Once the caches are enabled, we stop trapping VM ops.
+ */
+void kvm_set_way_flush(struct kvm_vcpu *vcpu)
+{
+       unsigned long hcr = vcpu_get_hcr(vcpu);
+
+       /*
+        * If this is the first time we do a S/W operation
+        * (i.e. HCR_TVM not set) flush the whole memory, and set the
+        * VM trapping.
+        *
+        * Otherwise, rely on the VM trapping to wait for the MMU +
+        * Caches to be turned off. At that point, we'll be able to
+        * clean the caches again.
+        */
+       if (!(hcr & HCR_TVM)) {
+               trace_kvm_set_way_flush(*vcpu_pc(vcpu),
+                                       vcpu_has_cache_enabled(vcpu));
+               stage2_flush_vm(vcpu->kvm);
+               vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+       }
+}
+
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
+{
+       bool now_enabled = vcpu_has_cache_enabled(vcpu);
+
+       /*
+        * If switching the MMU+caches on, need to invalidate the caches.
+        * If switching it off, need to clean the caches.
+        * Clean + invalidate does the trick always.
+        */
+       if (now_enabled != was_enabled)
+               stage2_flush_vm(vcpu->kvm);
+
+       /* Caches are now on, stop trapping VM ops (until a S/W op) */
+       if (now_enabled)
+               vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+
+       trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
+}
index b1d640f78623971337ad08072efdcc981c124c0f..b6a6e71022010bf09ae9b52aad981e745c67ef74 100644 (file)
@@ -223,6 +223,45 @@ TRACE_EVENT(kvm_hvc,
                  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
+TRACE_EVENT(kvm_set_way_flush,
+           TP_PROTO(unsigned long vcpu_pc, bool cache),
+           TP_ARGS(vcpu_pc, cache),
+
+           TP_STRUCT__entry(
+                   __field(    unsigned long,  vcpu_pc         )
+                   __field(    bool,           cache           )
+           ),
+
+           TP_fast_assign(
+                   __entry->vcpu_pc            = vcpu_pc;
+                   __entry->cache              = cache;
+           ),
+
+           TP_printk("S/W flush at 0x%016lx (cache %s)",
+                     __entry->vcpu_pc, __entry->cache ? "on" : "off")
+);
+
+TRACE_EVENT(kvm_toggle_cache,
+           TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
+           TP_ARGS(vcpu_pc, was, now),
+
+           TP_STRUCT__entry(
+                   __field(    unsigned long,  vcpu_pc         )
+                   __field(    bool,           was             )
+                   __field(    bool,           now             )
+           ),
+
+           TP_fast_assign(
+                   __entry->vcpu_pc            = vcpu_pc;
+                   __entry->was                = was;
+                   __entry->now                = now;
+           ),
+
+           TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
+                     __entry->vcpu_pc, __entry->was ? "on" : "off",
+                     __entry->now ? "on" : "off")
+);
+
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
index caa21e9b8cd9819de5ff0c0e505a8a937940dfae..ccef8806bb58771b16a87dc80edc32e585597d29 100644 (file)
@@ -189,6 +189,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
        coherency_cpu_base = of_iomap(np, 0);
        arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
 
+       /*
+        * We should switch the PL310 to I/O coherency mode only if
+        * I/O coherency is actually enabled.
+        */
+       if (!coherency_available())
+               return;
+
        /*
         * Add the PL310 property "arm,io-coherent". This makes sure the
         * outer sync operation is not used, which allows to
index 66f67816a844623977a4595ef23642ed381b3549..444f22d370f0ef7a4e35957abfc3d458e3d7e49e 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/kernel.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
@@ -273,6 +275,22 @@ static void __init ape6evm_add_standard_devices(void)
                                      sizeof(ape6evm_leds_pdata));
 }
 
+static void __init ape6evm_legacy_init_time(void)
+{
+       /* Do not invoke DT-based timers via clocksource_of_init() */
+}
+
+static void __init ape6evm_legacy_init_irq(void)
+{
+       void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
+       void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
+
+       gic_init(0, 29, gic_dist_base, gic_cpu_base);
+
+       /* Do not invoke DT-based interrupt code via irqchip_init() */
+}
+
+
 static const char *ape6evm_boards_compat_dt[] __initdata = {
        "renesas,ape6evm",
        NULL,
@@ -280,7 +298,9 @@ static const char *ape6evm_boards_compat_dt[] __initdata = {
 
 DT_MACHINE_START(APE6EVM_DT, "ape6evm")
        .init_early     = shmobile_init_delay,
+       .init_irq       = ape6evm_legacy_init_irq,
        .init_machine   = ape6evm_add_standard_devices,
        .init_late      = shmobile_init_late,
        .dt_compat      = ape6evm_boards_compat_dt,
+       .init_time      = ape6evm_legacy_init_time,
 MACHINE_END
index f8197eb6e5669ada1b8ddd57e7bb09e42bedfe62..65b128dd4072b8070fb1bb65edd97b7abb4c3d5e 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/kernel.h>
 #include <linux/leds.h>
 #include <linux/mfd/tmio.h>
@@ -811,6 +813,16 @@ static void __init lager_init(void)
                                          lager_ksz8041_fixup);
 }
 
+static void __init lager_legacy_init_irq(void)
+{
+       void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
+       void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
+
+       gic_init(0, 29, gic_dist_base, gic_cpu_base);
+
+       /* Do not invoke DT-based interrupt code via irqchip_init() */
+}
+
 static const char * const lager_boards_compat_dt[] __initconst = {
        "renesas,lager",
        NULL,
@@ -819,6 +831,7 @@ static const char * const lager_boards_compat_dt[] __initconst = {
 DT_MACHINE_START(LAGER_DT, "lager")
        .smp            = smp_ops(r8a7790_smp_ops),
        .init_early     = shmobile_init_delay,
+       .init_irq       = lager_legacy_init_irq,
        .init_time      = rcar_gen2_timer_init,
        .init_machine   = lager_init,
        .init_late      = shmobile_init_late,
index 3dd6edd9bd1d3dfe5b6bb7787e4d672c1a6f63be..cc9470dfb1cee51eb50d0597603c6a195d67af5f 100644 (file)
@@ -133,7 +133,9 @@ void __init rcar_gen2_timer_init(void)
 #ifdef CONFIG_COMMON_CLK
        rcar_gen2_clocks_init(mode);
 #endif
+#ifdef CONFIG_ARCH_SHMOBILE_MULTI
        clocksource_of_init();
+#endif
 }
 
 struct memory_reserve_config {
index f1d027aa7a81ac3361401380553771bdb37d47d2..0edf2a6d2bbef7f78fcb8f5c0396445dde21618c 100644 (file)
@@ -70,6 +70,18 @@ void __init shmobile_init_delay(void)
        if (!max_freq)
                return;
 
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+       /* Non-multiplatform r8a73a4 SoC cannot use arch timer due
+        * to GIC being initialized from C and arch timer via DT */
+       if (of_machine_is_compatible("renesas,r8a73a4"))
+               has_arch_timer = false;
+
+       /* Non-multiplatform r8a7790 SoC cannot use arch timer due
+        * to GIC being initialized from C and arch timer via DT */
+       if (of_machine_is_compatible("renesas,r8a7790"))
+               has_arch_timer = false;
+#endif
+
        if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
                if (is_a7_a8_a9)
                        shmobile_setup_delay_hz(max_freq, 1, 3);
index 03823e784f63e7acf91fdda4d5f58927ccf527d9..c43c714555661337048b72a5a21a6b5357659567 100644 (file)
@@ -1012,6 +1012,7 @@ config ARCH_SUPPORTS_BIG_ENDIAN
 
 config ARM_KERNMEM_PERMS
        bool "Restrict kernel memory permissions"
+       depends on MMU
        help
          If this is set, kernel memory other than kernel text (and rodata)
          will be made non-executable. The tradeoff is that each region is
index 91892569710f5ab79127218e808e84ef14ef33eb..845769e413323120b6d7b4afed7640746413bc98 100644 (file)
@@ -144,21 +144,17 @@ static void flush_context(unsigned int cpu)
        /* Update the list of reserved ASIDs and the ASID bitmap. */
        bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
        for_each_possible_cpu(i) {
-               if (i == cpu) {
-                       asid = 0;
-               } else {
-                       asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
-                       /*
-                        * If this CPU has already been through a
-                        * rollover, but hasn't run another task in
-                        * the meantime, we must preserve its reserved
-                        * ASID, as this is the only trace we have of
-                        * the process it is still running.
-                        */
-                       if (asid == 0)
-                               asid = per_cpu(reserved_asids, i);
-                       __set_bit(asid & ~ASID_MASK, asid_map);
-               }
+               asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
+               /*
+                * If this CPU has already been through a
+                * rollover, but hasn't run another task in
+                * the meantime, we must preserve its reserved
+                * ASID, as this is the only trace we have of
+                * the process it is still running.
+                */
+               if (asid == 0)
+                       asid = per_cpu(reserved_asids, i);
+               __set_bit(asid & ~ASID_MASK, asid_map);
                per_cpu(reserved_asids, i) = asid;
        }
 
index 7864797609b3849628455782c79949f094f6997e..903dba064a034c7e5d9fff950d3fa334301130d9 100644 (file)
@@ -1940,13 +1940,32 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
 
+static int __arm_iommu_attach_device(struct device *dev,
+                                    struct dma_iommu_mapping *mapping)
+{
+       int err;
+
+       err = iommu_attach_device(mapping->domain, dev);
+       if (err)
+               return err;
+
+       kref_get(&mapping->kref);
+       dev->archdata.mapping = mapping;
+
+       pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+       return 0;
+}
+
 /**
  * arm_iommu_attach_device
  * @dev: valid struct device pointer
  * @mapping: io address space mapping structure (returned from
  *     arm_iommu_create_mapping)
  *
- * Attaches specified io address space mapping to the provided device,
+ * Attaches specified io address space mapping to the provided device.
+ * This replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version.
+ *
  * More than one client might be attached to the same io address space
  * mapping.
  */
@@ -1955,25 +1974,16 @@ int arm_iommu_attach_device(struct device *dev,
 {
        int err;
 
-       err = iommu_attach_device(mapping->domain, dev);
+       err = __arm_iommu_attach_device(dev, mapping);
        if (err)
                return err;
 
-       kref_get(&mapping->kref);
-       dev->archdata.mapping = mapping;
-
-       pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+       set_dma_ops(dev, &iommu_ops);
        return 0;
 }
 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 
-/**
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- */
-void arm_iommu_detach_device(struct device *dev)
+static void __arm_iommu_detach_device(struct device *dev)
 {
        struct dma_iommu_mapping *mapping;
 
@@ -1989,6 +1999,19 @@ void arm_iommu_detach_device(struct device *dev)
 
        pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+       __arm_iommu_detach_device(dev);
+       set_dma_ops(dev, NULL);
+}
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2011,7 +2034,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
                return false;
        }
 
-       if (arm_iommu_attach_device(dev, mapping)) {
+       if (__arm_iommu_attach_device(dev, mapping)) {
                pr_warn("Failed to attached device %s to IOMMU_mapping\n",
                                dev_name(dev));
                arm_iommu_release_mapping(mapping);
@@ -2025,7 +2048,10 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 {
        struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 
-       arm_iommu_detach_device(dev);
+       if (!mapping)
+               return;
+
+       __arm_iommu_detach_device(dev);
        arm_iommu_release_mapping(mapping);
 }
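
A minimal driver-side sketch of the client API documented above (the bus
type, IOVA base and size are illustrative):

    #include <linux/platform_device.h>
    #include <linux/sizes.h>
    #include <linux/err.h>
    #include <asm/dma-iommu.h>

    static int example_attach(struct device *dev)
    {
            struct dma_iommu_mapping *mapping;
            int err;

            mapping = arm_iommu_create_mapping(&platform_bus_type,
                                               0x80000000, SZ_1G);
            if (IS_ERR(mapping))
                    return PTR_ERR(mapping);

            /* installs the IOMMU-aware dma_map_ops on success */
            err = arm_iommu_attach_device(dev, mapping);
            if (err)
                    arm_iommu_release_mapping(mapping);
            return err;
    }
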
 
index 865a7e28ea2d166efc0f27911970fd480bce4c3d..3cb4c856b10da40a73c88138d97a2586b21eb6ff 100644 (file)
@@ -45,6 +45,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
                vcpu->arch.hcr_el2 &= ~HCR_RW;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.hcr_el2;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+       vcpu->arch.hcr_el2 = hcr;
+}
+
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
index 0b7dfdb931dff6f9610df181015ce7ecc1a7f16a..acd101a9014d374d2c235f5936a2b6a5c1d7c28f 100644 (file)
@@ -116,9 +116,6 @@ struct kvm_vcpu_arch {
         * Anything that is not used directly from assembly code goes
         * here.
         */
-       /* dcache set/way operation pending */
-       int last_pcpu;
-       cpumask_t require_dcache_flush;
 
        /* Don't run the guest */
        bool pause;
index 14a74f136272b94852d86901ae75c4f329ee2ee3..adcf49547301b1acc598e72722752b36240220be 100644 (file)
@@ -243,24 +243,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
        return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-                                            unsigned long size,
-                                            bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+                                              unsigned long size,
+                                              bool ipa_uncached)
 {
+       void *va = page_address(pfn_to_page(pfn));
+
        if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-               kvm_flush_dcache_to_poc((void *)hva, size);
+               kvm_flush_dcache_to_poc(va, size);
 
        if (!icache_is_aliasing()) {            /* PIPT */
-               flush_icache_range(hva, hva + size);
+               flush_icache_range((unsigned long)va,
+                                  (unsigned long)va + size);
        } else if (!icache_is_aivivt()) {       /* non ASID-tagged VIVT */
                /* any kind of VIPT cache */
                __flush_icache_all();
        }
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+       struct page *page = pte_page(pte);
+       kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+       struct page *page = pmd_page(pmd);
+       kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+       struct page *page = pud_page(pud);
+       kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+}
+
 #define kvm_virt_to_phys(x)            __virt_to_phys((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
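
The reworked __coherent_cache_guest_page() takes a pfn and flushes through the kernel linear alias returned by page_address(), rather than trusting a user-space hva, and the new __kvm_flush_dcache_{pte,pmd,pud} helpers size each flush to the page-table level they cover. A toy sketch of that per-level sizing, with illustrative 4K-granule constants and a print standing in for kvm_flush_dcache_to_poc():

#include <stdio.h>

#define PAGE_SIZE (1UL << 12)
#define PMD_SIZE  (1UL << 21)           /* illustrative 4K-granule block sizes */
#define PUD_SIZE  (1UL << 30)

/* Stand-in for kvm_flush_dcache_to_poc(): just report the range. */
static void flush_to_poc(void *va, unsigned long size)
{
        printf("flush %p + %#lx\n", va, size);
}

static void flush_pte_level(void *va) { flush_to_poc(va, PAGE_SIZE); }
static void flush_pmd_level(void *va) { flush_to_poc(va, PMD_SIZE); }
static void flush_pud_level(void *va) { flush_to_poc(va, PUD_SIZE); }

int main(void)
{
        char page[16];

        flush_pte_level(page);
        flush_pmd_level(page);
        flush_pud_level(page);
        return 0;
}
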
index 8ba85e9ea388d1778c54eabdd5ca34afad114faf..b334084d3675e33761ec618b9f776650c02f2251 100644 (file)
@@ -26,6 +26,7 @@ config KVM
        select KVM_ARM_HOST
        select KVM_ARM_VGIC
        select KVM_ARM_TIMER
+       select SRCU
        ---help---
          Support hosting virtualized guest machines.
 
index 3d7c2df89946cc1d1606a4b3401115f10e44ab71..f31e8bb2bc5bd0c7aec76e654fce52f5723e527b 100644 (file)
@@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr)
        return ccsidr;
 }
 
-static void do_dc_cisw(u32 val)
-{
-       asm volatile("dc cisw, %x0" : : "r" (val));
-       dsb(ish);
-}
-
-static void do_dc_csw(u32 val)
-{
-       asm volatile("dc csw, %x0" : : "r" (val));
-       dsb(ish);
-}
-
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
 {
-       unsigned long val;
-       int cpu;
-
        if (!p->is_write)
                return read_from_write_only(vcpu, p);
 
-       cpu = get_cpu();
-
-       cpumask_setall(&vcpu->arch.require_dcache_flush);
-       cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-       /* If we were already preempted, take the long way around */
-       if (cpu != vcpu->arch.last_pcpu) {
-               flush_cache_all();
-               goto done;
-       }
-
-       val = *vcpu_reg(vcpu, p->Rt);
-
-       switch (p->CRm) {
-       case 6:                 /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-       case 14:                /* DCCISW */
-               do_dc_cisw(val);
-               break;
-
-       case 10:                /* DCCSW */
-               do_dc_csw(val);
-               break;
-       }
-
-done:
-       put_cpu();
-
+       kvm_set_way_flush(vcpu);
        return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
  */
 static bool access_vm_reg(struct kvm_vcpu *vcpu,
                          const struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
 {
        unsigned long val;
+       bool was_enabled = vcpu_has_cache_enabled(vcpu);
 
        BUG_ON(!p->is_write);
 
@@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
                vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
        }
 
-       return true;
-}
-
-/*
- * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set.  If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- */
-static bool access_sctlr(struct kvm_vcpu *vcpu,
-                        const struct sys_reg_params *p,
-                        const struct sys_reg_desc *r)
-{
-       access_vm_reg(vcpu, p, r);
-
-       if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
-               vcpu->arch.hcr_el2 &= ~HCR_TVM;
-               stage2_flush_vm(vcpu->kvm);
-       }
-
+       kvm_toggle_cache(vcpu, was_enabled);
        return true;
 }
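
With access_sctlr() folded away, access_vm_reg() itself samples the cache-enable state before performing the write and lets kvm_toggle_cache() act on the transition. The sample-before-write shape in isolation, with a plain boolean standing in for the SCTLR M and C bits:

#include <stdbool.h>
#include <stdio.h>

static bool cache_enabled;              /* stands in for the SCTLR M+C bits */

/* Stand-in for kvm_toggle_cache(): acts only on an off->on transition. */
static void toggle_cache(bool was_enabled)
{
        if (!was_enabled && cache_enabled)
                printf("caches came on: stop trapping, clean up stage-2\n");
}

static void vm_reg_write(bool new_state)
{
        bool was_enabled = cache_enabled;       /* sample before the write */

        cache_enabled = new_state;
        toggle_cache(was_enabled);
}

int main(void)
{
        vm_reg_write(false);
        vm_reg_write(true);             /* triggers the transition message */
        return 0;
}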
 
@@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
          NULL, reset_mpidr, MPIDR_EL1 },
        /* SCTLR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
-         access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+         access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
        /* CPACR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
          NULL, reset_val, CPACR_EL1, 0 },
@@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
  * register).
  */
 static const struct sys_reg_desc cp15_regs[] = {
-       { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+       { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
index 0eca93327195077ec16bdfd99efd7294c6ab2de6..d223a8b57c1eaad282289e75089654153ab598d6 100644 (file)
@@ -142,6 +142,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
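
This alpha hunk is the first of many identical ones below: each architecture's fault handler gains a VM_FAULT_SIGSEGV case, tested before VM_FAULT_SIGBUS, that routes the fault to the segfault path instead of falling through to BUG(). The dispatch in isolation, with illustrative flag values rather than the kernel's:

#include <stdio.h>

#define VM_FAULT_OOM     0x1            /* illustrative values only */
#define VM_FAULT_SIGBUS  0x2
#define VM_FAULT_SIGSEGV 0x4

static const char *fault_disposition(unsigned int fault)
{
        if (fault & VM_FAULT_OOM)
                return "out_of_memory";
        if (fault & VM_FAULT_SIGSEGV)   /* new case, tested before SIGBUS */
                return "bad_area";
        if (fault & VM_FAULT_SIGBUS)
                return "do_sigbus";
        return "BUG";
}

int main(void)
{
        printf("%s\n", fault_disposition(VM_FAULT_SIGSEGV));
        return 0;
}
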
index 1790f22e71a21a859b2b7b1942cbbc503c2d557e..2686a7aa8ec82c50f29592840185b519522c53a7 100644 (file)
@@ -176,6 +176,8 @@ retry:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 9a66372fc7c76019ca874a9c3780c2fc8392266c..ec4917ddf67872aa46b60c6b067b0a67ec5417a4 100644 (file)
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 7225dad87094d81e89459e5a61909fa5b2d10ca0..ba5ba7accd0d6bb4dbab34f7fc307c4306347f4a 100644 (file)
@@ -172,6 +172,8 @@ retry:
                 */
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
+               } else if (fault & VM_FAULT_SIGSEGV) {
+                       goto bad_area;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
index e9c6a8014bd647eec50a66afb5bc75b076b35e4d..e3d4d4890104cc27e2eb9de2f22cb6f53f939c90 100644 (file)
@@ -200,6 +200,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 2bd7487440c455802dac6470ec05ac138148bfcb..b2f04aee46ecc2f7a5fb1db26d8e4279f6b6ea2e 100644 (file)
@@ -145,6 +145,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto map_err;
                else if (fault & VM_FAULT_SIGBUS)
                        goto bus_err;
                BUG();
index 332680e5ebf23c7909b796c415c2273efd77ba3c..2de5dc695a87fa96d41a83e127166a7126d10df0 100644 (file)
@@ -141,6 +141,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index fa4cf52aa7a6d386711690005a314ece7d67fc53..d46a5ebb7570e07869ea03b9b995374aa3bff82e 100644 (file)
@@ -224,6 +224,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 3289969ee423a9edbe7fb12359f56cea2bd2be2a..843713c05b79fe69f8bbc057a17a5e200dfc54da 100644 (file)
@@ -2656,27 +2656,21 @@ config TRAD_SIGNALS
        bool
 
 config MIPS32_COMPAT
-       bool "Kernel support for Linux/MIPS 32-bit binary compatibility"
-       depends on 64BIT
-       help
-         Select this option if you want Linux/MIPS 32-bit binary
-         compatibility. Since all software available for Linux/MIPS is
-         currently 32-bit you should say Y here.
+       bool
 
 config COMPAT
        bool
-       depends on MIPS32_COMPAT
-       select ARCH_WANT_OLD_COMPAT_IPC
-       default y
 
 config SYSVIPC_COMPAT
        bool
-       depends on COMPAT && SYSVIPC
-       default y
 
 config MIPS32_O32
        bool "Kernel support for o32 binaries"
-       depends on MIPS32_COMPAT
+       depends on 64BIT
+       select ARCH_WANT_OLD_COMPAT_IPC
+       select COMPAT
+       select MIPS32_COMPAT
+       select SYSVIPC_COMPAT if SYSVIPC
        help
          Select this option if you want to run o32 binaries.  These are pure
          32-bit binaries as used by the 32-bit Linux/MIPS port.  Most of
@@ -2686,7 +2680,10 @@ config MIPS32_O32
 
 config MIPS32_N32
        bool "Kernel support for n32 binaries"
-       depends on MIPS32_COMPAT
+       depends on 64BIT
+       select COMPAT
+       select MIPS32_COMPAT
+       select SYSVIPC_COMPAT if SYSVIPC
        help
          Select this option if you want to run n32 binaries.  These are
          64-bit binaries using 32-bit quantities for addressing and certain
index 8585078ae50e90d0cf3bc87217b68cf55512dd23..2a4c52e27f416e146e5c268edad9fd867e79c5fe 100644 (file)
@@ -49,7 +49,8 @@
 /*
  * Some extra ELF definitions
  */
-#define PT_MIPS_REGINFO 0x70000000     /* Register usage information */
+#define PT_MIPS_REGINFO        0x70000000      /* Register usage information */
+#define PT_MIPS_ABIFLAGS       0x70000003      /* Records ABI related flags  */
 
 /* -------------------------------------------------------------------- */
 
@@ -349,39 +350,46 @@ int main(int argc, char *argv[])
 
        for (i = 0; i < ex.e_phnum; i++) {
                /* Section types we can ignore... */
-               if (ph[i].p_type == PT_NULL || ph[i].p_type == PT_NOTE ||
-                   ph[i].p_type == PT_PHDR
-                   || ph[i].p_type == PT_MIPS_REGINFO)
+               switch (ph[i].p_type) {
+               case PT_NULL:
+               case PT_NOTE:
+               case PT_PHDR:
+               case PT_MIPS_REGINFO:
+               case PT_MIPS_ABIFLAGS:
                        continue;
-               /* Section types we can't handle... */
-               else if (ph[i].p_type != PT_LOAD) {
-                       fprintf(stderr,
-                               "Program header %d type %d can't be converted.\n",
-                               ex.e_phnum, ph[i].p_type);
-                       exit(1);
-               }
-               /* Writable (data) segment? */
-               if (ph[i].p_flags & PF_W) {
-                       struct sect ndata, nbss;
 
-                       ndata.vaddr = ph[i].p_vaddr;
-                       ndata.len = ph[i].p_filesz;
-                       nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz;
-                       nbss.len = ph[i].p_memsz - ph[i].p_filesz;
+               case PT_LOAD:
+                       /* Writable (data) segment? */
+                       if (ph[i].p_flags & PF_W) {
+                               struct sect ndata, nbss;
+
+                               ndata.vaddr = ph[i].p_vaddr;
+                               ndata.len = ph[i].p_filesz;
+                               nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz;
+                               nbss.len = ph[i].p_memsz - ph[i].p_filesz;
 
-                       combine(&data, &ndata, 0);
-                       combine(&bss, &nbss, 1);
-               } else {
-                       struct sect ntxt;
+                               combine(&data, &ndata, 0);
+                               combine(&bss, &nbss, 1);
+                       } else {
+                               struct sect ntxt;
 
-                       ntxt.vaddr = ph[i].p_vaddr;
-                       ntxt.len = ph[i].p_filesz;
+                               ntxt.vaddr = ph[i].p_vaddr;
+                               ntxt.len = ph[i].p_filesz;
 
-                       combine(&text, &ntxt, 0);
+                               combine(&text, &ntxt, 0);
+                       }
+                       /* Remember the lowest segment start address. */
+                       if (ph[i].p_vaddr < cur_vma)
+                               cur_vma = ph[i].p_vaddr;
+                       break;
+
+               default:
+                       /* Section types we can't handle... */
+                       fprintf(stderr,
+                               "Program header %d type %d can't be converted.\n",
+                               ex.e_phnum, ph[i].p_type);
+                       exit(1);
                }
-               /* Remember the lowest segment start address. */
-               if (ph[i].p_vaddr < cur_vma)
-                       cur_vma = ph[i].p_vaddr;
        }
 
        /* Sections must be in order to be converted... */
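
The switch conversion above leans on a C subtlety: a continue inside a switch that is itself inside a for loop resumes the loop, so all of the ignorable p_type values can share a single continue while PT_LOAD breaks out of the switch and falls into the common post-processing. In miniature:

#include <stdio.h>

int main(void)
{
        int types[] = { 0 /* PT_NULL */, 1 /* PT_LOAD */, 4 /* PT_NOTE */ };
        int i;

        for (i = 0; i < 3; i++) {
                switch (types[i]) {
                case 0:
                case 4:
                        continue;       /* continues the for loop, not the switch */
                case 1:
                        printf("handling PT_LOAD\n");
                        break;
                default:
                        printf("cannot convert type %d\n", types[i]);
                        return 1;
                }
                printf("post-switch work for entry %d\n", i);
        }
        return 0;
}
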
index ecd903dd1c456788db99adf1be0c6536a2a41bf4..8b1eeffa12edf0fbc2446ba81ab19750e5d49891 100644 (file)
@@ -240,9 +240,7 @@ static int octeon_cpu_disable(void)
 
        set_cpu_online(cpu, false);
        cpu_clear(cpu, cpu_callin_map);
-       local_irq_disable();
        octeon_fixup_irqs();
-       local_irq_enable();
 
        flush_cache_all();
        local_flush_tlb_all();
index f57b96dcf7df57894e4fcc6d582a2f8036670d33..61a4460d67d32b2c7d8d266f40c316c70aef5e7c 100644 (file)
@@ -132,7 +132,6 @@ CONFIG_IP_NF_MATCH_ECN=m
 CONFIG_IP_NF_MATCH_TTL=m
 CONFIG_IP_NF_FILTER=m
 CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_ULOG=m
 CONFIG_IP_NF_MANGLE=m
 CONFIG_IP_NF_TARGET_CLUSTERIP=m
 CONFIG_IP_NF_TARGET_ECN=m
@@ -175,7 +174,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
 CONFIG_BRIDGE_EBT_REDIRECT=m
 CONFIG_BRIDGE_EBT_SNAT=m
 CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
 CONFIG_BRIDGE_EBT_NFLOG=m
 CONFIG_IP_SCTP=m
 CONFIG_BRIDGE=m
@@ -220,8 +218,6 @@ CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_CLS_IND=y
 CONFIG_CFG80211=m
 CONFIG_MAC80211=m
-CONFIG_MAC80211_RC_PID=y
-CONFIG_MAC80211_RC_DEFAULT_PID=y
 CONFIG_MAC80211_MESH=y
 CONFIG_RFKILL=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -248,19 +244,13 @@ CONFIG_ATA_OVER_ETH=m
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECD=y
 CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_PIIX=y
-CONFIG_BLK_DEV_IT8213=m
-CONFIG_BLK_DEV_TC86C001=m
 CONFIG_RAID_ATTRS=m
-CONFIG_SCSI=m
-CONFIG_BLK_DEV_SD=m
+CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
 CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=m
 CONFIG_BLK_DEV_SR_VENDOR=y
 CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_SCAN_ASYNC=y
@@ -273,6 +263,8 @@ CONFIG_SCSI_AACRAID=m
 CONFIG_SCSI_AIC7XXX=m
 CONFIG_AIC7XXX_RESET_DELAY_MS=15000
 # CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_ATA=y
+CONFIG_ATA_PIIX=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
 CONFIG_MD_LINEAR=m
@@ -340,6 +332,7 @@ CONFIG_UIO=m
 CONFIG_UIO_CIF=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
 CONFIG_REISERFS_FS=m
 CONFIG_REISERFS_PROC_INFO=y
 CONFIG_REISERFS_FS_XATTR=y
@@ -441,4 +434,3 @@ CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=m
index 994d219396761317d8fe626ec01d671c595d2b00..affebb78f5d6573dbf97f62630ad1e6a35026602 100644 (file)
@@ -64,7 +64,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
                        return SIGFPE;
 
                /* set FRE */
-               write_c0_config5(read_c0_config5() | MIPS_CONF5_FRE);
+               set_c0_config5(MIPS_CONF5_FRE);
                goto fr_common;
 
        case FPU_64BIT:
@@ -74,8 +74,10 @@ static inline int __enable_fpu(enum fpu_mode mode)
 #endif
                /* fall through */
        case FPU_32BIT:
-               /* clear FRE */
-               write_c0_config5(read_c0_config5() & ~MIPS_CONF5_FRE);
+               if (cpu_has_fre) {
+                       /* clear FRE */
+                       clear_c0_config5(MIPS_CONF5_FRE);
+               }
 fr_common:
                /* set CU1 & change FR appropriately */
                fr = (int)mode & FPU_FR_MASK;
@@ -182,25 +184,32 @@ static inline int init_fpu(void)
        int ret = 0;
 
        if (cpu_has_fpu) {
+               unsigned int config5;
+
                ret = __own_fpu();
-               if (!ret) {
-                       unsigned int config5 = read_c0_config5();
-
-                       /*
-                        * Ensure FRE is clear whilst running _init_fpu, since
-                        * single precision FP instructions are used. If FRE
-                        * was set then we'll just end up initialising all 32
-                        * 64b registers.
-                        */
-                       write_c0_config5(config5 & ~MIPS_CONF5_FRE);
-                       enable_fpu_hazard();
+               if (ret)
+                       return ret;
 
+               if (!cpu_has_fre) {
                        _init_fpu();
 
-                       /* Restore FRE */
-                       write_c0_config5(config5);
-                       enable_fpu_hazard();
+                       return 0;
                }
+
+               /*
+                * Ensure FRE is clear whilst running _init_fpu, since
+                * single precision FP instructions are used. If FRE
+                * was set then we'll just end up initialising all 32
+                * 64b registers.
+                */
+               config5 = clear_c0_config5(MIPS_CONF5_FRE);
+               enable_fpu_hazard();
+
+               _init_fpu();
+
+               /* Restore FRE */
+               write_c0_config5(config5);
+               enable_fpu_hazard();
        } else
                fpu_emulator_init_fpu();
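
init_fpu() is restructured around early returns: the error path and the no-FRE path each return immediately, leaving only the FRE case to do the clear-then-restore dance around _init_fpu(). The restore works because clear_c0_config5() hands back the register's previous value, a shape sketched below with a plain variable standing in for CP0 Config5:

#include <stdio.h>

#define CONF5_FRE 0x100                 /* illustrative mask */

static unsigned int cfg5 = CONF5_FRE;   /* stands in for CP0 Config5 */

/* Clear bits and return the old value, like clear_c0_config5(). */
static unsigned int clear_cfg5(unsigned int mask)
{
        unsigned int old = cfg5;

        cfg5 = old & ~mask;
        return old;
}

int main(void)
{
        unsigned int saved = clear_cfg5(CONF5_FRE);

        printf("during init: %#x\n", cfg5);     /* FRE clear while initialising */
        cfg5 = saved;                           /* restore, as write_c0_config5() */
        printf("after init:  %#x\n", cfg5);
        return 0;
}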
 
index f8d37d1df5de53cd091b696e1c5372878e38d000..9fac64a2635307846c8ddcc2aaf775390de56387 100644 (file)
@@ -119,7 +119,7 @@ union key_u {
 #define SGI_ARCS_REV   10                      /* rev .10, 3/04/92 */
 #endif
 
-typedef struct component {
+typedef struct {
        CONFIGCLASS     Class;
        CONFIGTYPE      Type;
        IDENTIFIERFLAG  Flags;
@@ -140,7 +140,7 @@ struct cfgdata {
 };
 
 /* System ID */
-typedef struct systemid {
+typedef struct {
        CHAR VendorId[8];
        CHAR ProductId[8];
 } SYSTEMID;
@@ -166,7 +166,7 @@ typedef enum memorytype {
 #endif /* _NT_PROM */
 } MEMORYTYPE;
 
-typedef struct memorydescriptor {
+typedef struct {
        MEMORYTYPE      Type;
        LONG            BasePage;
        LONG            PageCount;
index b95a827d763ee22427a5f87050598eaf8e1d9da6..59c0901bdd847c6c51450eb92f67839f5380d44a 100644 (file)
@@ -89,9 +89,9 @@ static inline bool mips_cm_has_l2sync(void)
 
 /* Macros to ease the creation of register access functions */
 #define BUILD_CM_R_(name, off)                                 \
-static inline u32 *addr_gcr_##name(void)                       \
+static inline u32 __iomem *addr_gcr_##name(void)               \
 {                                                              \
-       return (u32 *)(mips_cm_base + (off));                   \
+       return (u32 __iomem *)(mips_cm_base + (off));           \
 }                                                              \
                                                                \
 static inline u32 read_gcr_##name(void)                                \
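
The __iomem markings change nothing at run time; they exist for sparse, which treats differently tagged pointers as distinct address spaces and warns on any mixing, which is exactly the bug class the hunk above closes off. A sketch of the usual shape of such an annotation (under a non-sparse build it expands to nothing):

#ifdef __CHECKER__
#define __iomem __attribute__((noderef, address_space(2)))
#else
#define __iomem                         /* no effect on normal compiles */
#endif

typedef unsigned int u32;

/* With the tag in place, sparse flags passing a plain u32 * here. */
static void mmio_write(u32 __iomem *reg, u32 val)
{
        (void)reg;
        (void)val;                      /* body elided; real code would use writel() */
}
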
index 5e4aef304b0217c239dc015dda4b9c8c348c7d89..5b720d8c2745b2e8b891f38c5256db82a1232bc5 100644 (file)
@@ -1386,12 +1386,27 @@ do {                                                                    \
        __res;                                                          \
 })
 
+#define _write_32bit_cp1_register(dest, val, gas_hardfloat)            \
+do {                                                                   \
+       __asm__ __volatile__(                                           \
+       "       .set    push                                    \n"     \
+       "       .set    reorder                                 \n"     \
+       "       "STR(gas_hardfloat)"                            \n"     \
+       "       ctc1    %0,"STR(dest)"                          \n"     \
+       "       .set    pop                                     \n"     \
+       : : "r" (val));                                                 \
+} while (0)
+
 #ifdef GAS_HAS_SET_HARDFLOAT
 #define read_32bit_cp1_register(source)                                        \
        _read_32bit_cp1_register(source, .set hardfloat)
+#define write_32bit_cp1_register(dest, val)                            \
+       _write_32bit_cp1_register(dest, val, .set hardfloat)
 #else
 #define read_32bit_cp1_register(source)                                        \
        _read_32bit_cp1_register(source, )
+#define write_32bit_cp1_register(dest, val)                            \
+       _write_32bit_cp1_register(dest, val, )
 #endif
 
 #ifdef HAVE_AS_DSP
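
The new _write_32bit_cp1_register() is the ctc1 mirror of the existing read macro, with the same GAS_HAS_SET_HARDFLOAT split so the assembler accepts the coprocessor access even in otherwise soft-float builds. Its first user shows up in the traps.c hunk later in this diff, restoring the FP control/status register (CP1_STATUS names control register $31, the FCSR):

        write_32bit_cp1_register(CP1_STATUS, current->thread.fpu.fcr31);
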
index bb7963753730d817117f209e6bac10f4943a0b56..6499d93ae68d7096d63416a349d9afcdcc0cb3ae 100644 (file)
 static inline long syscall_get_nr(struct task_struct *task,
                                  struct pt_regs *regs)
 {
-       /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
-       if ((config_enabled(CONFIG_32BIT) ||
-           test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
-           (regs->regs[2] == __NR_syscall))
-               return regs->regs[4];
-       else
-               return regs->regs[2];
+       return current_thread_info()->syscall;
 }
 
 static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
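
syscall_get_nr() stops reverse-engineering the number from register state and instead reads a value stashed in thread_info; the field and the code that records it at trace entry both appear in later hunks of this diff. The scheme in miniature, with a file-scope variable standing in for current_thread_info():

#include <stdio.h>

struct thread_info { long syscall; };

static struct thread_info ti;           /* stands in for current_thread_info() */

static void syscall_trace_enter(long syscall)
{
        ti.syscall = syscall;           /* recorded once, at entry */
}

static long syscall_get_nr(void)
{
        return ti.syscall;              /* no ABI-specific guessing needed */
}

int main(void)
{
        syscall_trace_enter(4355);      /* e.g. the o32 sys_bpf slot in this diff */
        printf("nr = %ld\n", syscall_get_nr());
        return 0;
}
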
index 99eea59604e984b61907a5a6a82feeca9660a609..e4440f92b366f7a1d90e4af10dd2477acdf0223a 100644 (file)
@@ -36,6 +36,7 @@ struct thread_info {
                                                 */
        struct restart_block    restart_block;
        struct pt_regs          *regs;
+       long                    syscall;        /* syscall number */
 };
 
 /*
index d001bb1ad177e7b6e2df2fbb7e42e894784b2e91..c03088f9f514e7c21f7ae0e185f8be0456af372b 100644 (file)
 #define __NR_getrandom                 (__NR_Linux + 353)
 #define __NR_memfd_create              (__NR_Linux + 354)
 #define __NR_bpf                       (__NR_Linux + 355)
+#define __NR_execveat                  (__NR_Linux + 356)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            355
+#define __NR_Linux_syscalls            356
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                355
+#define __NR_O32_Linux_syscalls                356
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_getrandom                 (__NR_Linux + 313)
 #define __NR_memfd_create              (__NR_Linux + 314)
 #define __NR_bpf                       (__NR_Linux + 315)
+#define __NR_execveat                  (__NR_Linux + 316)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            315
+#define __NR_Linux_syscalls            316
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         315
+#define __NR_64_Linux_syscalls         316
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_getrandom                 (__NR_Linux + 317)
 #define __NR_memfd_create              (__NR_Linux + 318)
 #define __NR_bpf                       (__NR_Linux + 319)
+#define __NR_execveat                  (__NR_Linux + 320)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            319
+#define __NR_Linux_syscalls            320
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                319
+#define __NR_N32_Linux_syscalls                320
 
 #endif /* _UAPI_ASM_UNISTD_H */
index 2531da1d3add4d0af7dcc3a3d1cfb82ae4c61216..97206b3deb9777b963bf5f47cbb0b0de6d1e5bb4 100644 (file)
@@ -30,6 +30,9 @@
 #include <asm/irq_cpu.h>
 
 #include <asm/mach-jz4740/base.h>
+#include <asm/mach-jz4740/irq.h>
+
+#include "irq.h"
 
 static void __iomem *jz_intc_base;
 
index c92b15df6893f555549bf3cc51bf6c39f78e1875..a5b5b56485c1618c34af3daea2b67e795a0f8c2b 100644 (file)
@@ -19,8 +19,8 @@ enum {
 int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
                     bool is_interp, struct arch_elf_state *state)
 {
-       struct elfhdr *ehdr = _ehdr;
-       struct elf_phdr *phdr = _phdr;
+       struct elf32_hdr *ehdr = _ehdr;
+       struct elf32_phdr *phdr = _phdr;
        struct mips_elf_abiflags_v0 abiflags;
        int ret;
 
@@ -48,7 +48,7 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
        return 0;
 }
 
-static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi)
+static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi)
 {
        /* If the ABI requirement is provided, simply return that */
        if (in_abi != -1)
@@ -65,7 +65,7 @@ static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi)
 int arch_check_elf(void *_ehdr, bool has_interpreter,
                   struct arch_elf_state *state)
 {
-       struct elfhdr *ehdr = _ehdr;
+       struct elf32_hdr *ehdr = _ehdr;
        unsigned fp_abi, interp_fp_abi, abi0, abi1;
 
        /* Ignore non-O32 binaries */
index 590c2c980fd38b6b3d3b3740d3987cadbfc78b76..6eb7a3f515fc82d97cb4cbbc1f1160dfb5d345d8 100644 (file)
@@ -57,6 +57,8 @@ static struct irq_chip mips_cpu_irq_controller = {
        .irq_mask_ack   = mask_mips_irq,
        .irq_unmask     = unmask_mips_irq,
        .irq_eoi        = unmask_mips_irq,
+       .irq_disable    = mask_mips_irq,
+       .irq_enable     = unmask_mips_irq,
 };
 
 /*
@@ -93,6 +95,8 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
        .irq_mask_ack   = mips_mt_cpu_irq_ack,
        .irq_unmask     = unmask_mips_irq,
        .irq_eoi        = unmask_mips_irq,
+       .irq_disable    = mask_mips_irq,
+       .irq_enable     = unmask_mips_irq,
 };
 
 asmlinkage void __weak plat_irq_dispatch(void)
index eb76434828e8b403e536ca6d496eeb00baf5a368..85bff5d513e5b42ae483e414c14a4a844793b9a1 100644 (file)
@@ -82,6 +82,30 @@ void flush_thread(void)
 {
 }
 
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+       /*
+        * Save any process state which is live in hardware registers to the
+        * parent context prior to duplication. This prevents the new child
+        * state becoming stale if the parent is preempted before copy_thread()
+        * gets a chance to save the parent's live hardware registers to the
+        * child context.
+        */
+       preempt_disable();
+
+       if (is_msa_enabled())
+               save_msa(current);
+       else if (is_fpu_owner())
+               _save_fp(current);
+
+       save_dsp(current);
+
+       preempt_enable();
+
+       *dst = *src;
+       return 0;
+}
+
 int copy_thread(unsigned long clone_flags, unsigned long usp,
        unsigned long arg, struct task_struct *p)
 {
@@ -92,18 +116,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 
        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
 
-       preempt_disable();
-
-       if (is_msa_enabled())
-               save_msa(p);
-       else if (is_fpu_owner())
-               save_fp(p);
-
-       if (cpu_has_dsp)
-               save_dsp(p);
-
-       preempt_enable();
-
        /* set up new TSS. */
        childregs = (struct pt_regs *) childksp - 1;
        /*  Put the stack after the struct pt_regs.  */
index 9d1487d832932a0b3dcb4d2c694e773e753a8767..51045281259403c55fcefac09d510f874a3047bb 100644 (file)
@@ -770,6 +770,8 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
        long ret = 0;
        user_exit();
 
+       current_thread_info()->syscall = syscall;
+
        if (secure_computing() == -1)
                return -1;
 
index 00cad1005a16d1fc1925166ec5746e5ac9890154..6e8de80bb4468c82378d3c4af8ef3d08cefd5cf1 100644 (file)
@@ -181,6 +181,7 @@ illegal_syscall:
        sll     t1, t0, 2
        beqz    v0, einval
        lw      t2, sys_call_table(t1)          # syscall routine
+       sw      a0, PT_R2(sp)                   # call routine directly on restart
 
        /* Some syscalls like execve get their arguments from struct pt_regs
           and claim zero arguments in the syscall table. Thus we have to
@@ -580,3 +581,4 @@ EXPORT(sys_call_table)
        PTR     sys_getrandom
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 4355 */
+       PTR     sys_execveat
index 5251565e344b48f1931f500f52494eadd4e51a04..ad4d44635c7601162ca0dd8f1b626df28eeeafb2 100644 (file)
@@ -435,4 +435,5 @@ EXPORT(sys_call_table)
        PTR     sys_getrandom
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 5315 */
+       PTR     sys_execveat
        .size   sys_call_table,.-sys_call_table
index 77e74398b828770fa8814ed831c2efd6cb412b40..446cc654da56c5f5fcaad749242dd98d593776e1 100644 (file)
@@ -428,4 +428,5 @@ EXPORT(sysn32_call_table)
        PTR     sys_getrandom
        PTR     sys_memfd_create
        PTR     sys_bpf
+       PTR     compat_sys_execveat             /* 6320 */
        .size   sysn32_call_table,.-sysn32_call_table
index 6f8db9f728e8d7d3f22c0518434ebb844e544953..d07b210fbeff3667f49737dbec9d9d30d3f119ed 100644 (file)
@@ -186,6 +186,7 @@ LEAF(sys32_syscall)
        dsll    t1, t0, 3
        beqz    v0, einval
        ld      t2, sys32_call_table(t1)                # syscall routine
+       sd      a0, PT_R2(sp)           # call routine directly on restart
 
        move    a0, a1                  # shift argument registers
        move    a1, a2
@@ -565,4 +566,5 @@ EXPORT(sys32_call_table)
        PTR     sys_getrandom
        PTR     sys_memfd_create
        PTR     sys_bpf                         /* 4355 */
+       PTR     compat_sys_execveat
        .size   sys32_call_table,.-sys32_call_table
index 1e0a93c5a3e7d2f6a4970dc8694538e3e1b3dd3a..e36a859af66677034a8a4203714a306ec2c0ea51 100644 (file)
@@ -44,8 +44,8 @@ static void cmp_init_secondary(void)
        struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
 
        /* Assume GIC is present */
-       change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
-                                STATUSF_IP7);
+       change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
+                                STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
 
        /* Enable per-cpu interrupts: platform specific */
 
index ad86951b73bdcd8dca4f81e760a77f064ffadd4a..17ea705f6c405081d89a1b5dba30916c1832d73c 100644 (file)
@@ -161,7 +161,8 @@ static void vsmp_init_secondary(void)
 #ifdef CONFIG_MIPS_GIC
        /* This is Malta specific: IPI,performance and timer interrupts */
        if (gic_present)
-               change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
+               change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
+                                        STATUSF_IP4 | STATUSF_IP5 |
                                         STATUSF_IP6 | STATUSF_IP7);
        else
 #endif
index c94c4e92e17d4bb964a5ca8e7ae77523c4fa44a4..1c0d8c50b7e120482e7891c82653e49a5b63a953 100644 (file)
@@ -123,10 +123,10 @@ asmlinkage void start_secondary(void)
        unsigned int cpu;
 
        cpu_probe();
-       cpu_report();
        per_cpu_trap_init(false);
        mips_clockevent_init();
        mp_ops->init_secondary();
+       cpu_report();
 
        /*
         * XXX parity protection should be folded in here when it's converted
index ad3d2031c327737f64c53a43ab6264a20ca5a354..c3b41e24c05a47337509b9579d5b1302ba6f6e80 100644 (file)
@@ -1231,7 +1231,8 @@ static int enable_restore_fp_context(int msa)
 
                /* Restore the scalar FP control & status register */
                if (!was_fpu_owner)
-                       asm volatile("ctc1 %0, $31" : : "r"(current->thread.fpu.fcr31));
+                       write_32bit_cp1_register(CP1_STATUS,
+                                                current->thread.fpu.fcr31);
        }
 
 out:
index 30e334e823bd60822285efa9da808a991e6e19bb..2ae12825529f8fe37e6e6b13c79eee39d4c4753c 100644 (file)
@@ -20,6 +20,7 @@ config KVM
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select KVM_MMIO
+       select SRCU
        ---help---
          Support for hosting Guest kernels.
          Currently supported on MIPS32 processors.
index becc42bb18495adf98389bd039bc111c1893cedd..70ab5d664332694e92305331f13ed15a35ab1956 100644 (file)
@@ -158,6 +158,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index e90b2e899291620ee8d1b89f8674bf93b5450274..30639a6e9b8ca3ad3677afb0cbd553b9c9472c18 100644 (file)
@@ -489,6 +489,8 @@ static void r4k_tlb_configure(void)
 #ifdef CONFIG_64BIT
                pg |= PG_ELPA;
 #endif
+               if (cpu_has_rixiex)
+                       pg |= PG_IEC;
                write_c0_pagegrain(pg);
        }
 
index faed90240dedd1f3f88ba34105a268efa1dd6fd2..6d6df839948f6640244e6b472287f68f569c2f63 100644 (file)
@@ -159,13 +159,6 @@ extern void flush_icache_range(unsigned long start, unsigned long end);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        memcpy(dst, src, len)
 
-/*
- * Internal debugging function
- */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-extern void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_CACHEFLUSH_H */
index 3516cbdf1ee93acb82ebef6428f79df9af104514..0c2cc5d39c8e37ce1cfe5be191902bc435c41090 100644 (file)
@@ -262,6 +262,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 15a0bb5fc06d970a6c3dcd92ce31173a8914670b..d194c0427b26ee8de18ca5bf1f301df681a6ab70 100644 (file)
@@ -135,6 +135,8 @@ survive:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
@@ -157,9 +159,11 @@ bad_area:
 bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
-               pr_alert("%s: unhandled page fault (%d) at 0x%08lx, "
-                       "cause %ld\n", current->comm, SIGSEGV, address, cause);
-               show_regs(regs);
+               if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
+                       pr_info("%s: unhandled page fault (%d) at 0x%08lx, "
+                               "cause %ld\n", current->comm, SIGSEGV, address, cause);
+                       show_regs(regs);
+               }
                _exception(SIGSEGV, regs, code, address);
                return;
        }
index 0703acf7d3276811919fd3d398ada99b1b9c6d50..230ac20ae7944f71636e5083fdaf3f034eb10af2 100644 (file)
@@ -171,6 +171,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 3ca9c1131cfe0d80b9b12fb5c0e599a3363942c0..e5120e653240c4fa52d4895c7d1d206d3d12e68c 100644 (file)
@@ -256,6 +256,8 @@ good_area:
                 */
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto bad_area;
                BUG();
index 5b9312220e849e40dad8d7516b303b619431d08b..30b35fff2deaba9e01da24b3d019df4db1c3e66a 100644 (file)
@@ -60,13 +60,6 @@ extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        memcpy(dst, src, len)
 
-
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-/* internal debugging function */
-void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_POWERPC_CACHEFLUSH_H */
index f5769f19ae256906dd750dd6fcd4ad5448e55bbd..11850f310fb41fb50c639e558da1921a167af7ac 100644 (file)
@@ -21,6 +21,7 @@ config KVM
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        select HAVE_KVM_EVENTFD
+       select SRCU
 
 config KVM_BOOK3S_HANDLER
        bool
index 5a236f082c78386a47b9b415f98f619e8e615688..1b5305d4bdabe95c4f4430b89c0fb56512bb6fdf 100644 (file)
@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
                if (*flt & VM_FAULT_OOM) {
                        ret = -ENOMEM;
                        goto out_unlock;
-               } else if (*flt & VM_FAULT_SIGBUS) {
+               } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
                        ret = -EFAULT;
                        goto out_unlock;
                }
index eb79907f34fac2df170be8fcb3a14a9cbf400b1e..6154b0a2b06331f0c29efe56b210baa6f90d43c7 100644 (file)
@@ -437,6 +437,8 @@ good_area:
         */
        fault = handle_mm_fault(mm, vma, address, flags);
        if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+               if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                rc = mm_fault_error(regs, address, fault);
                if (rc >= MM_FAULT_RETURN)
                        goto bail;
index 3e20383d09219f8bbbe26ddd731e362870e9676a..58fae7d098cf0993f7484b3f2df7191fe1afe14d 100644 (file)
@@ -4,10 +4,6 @@
 /* Caches aren't brain-dead on the s390. */
 #include <asm-generic/cacheflush.h>
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
index 646db9c467d136d211650b0d6cdaea63ff85d2a1..5fce52cf0e57dc4831d0737ddd3c3a2cfbc386d7 100644 (file)
@@ -28,6 +28,7 @@ config KVM
        select HAVE_KVM_IRQCHIP
        select HAVE_KVM_IRQFD
        select HAVE_KVM_IRQ_ROUTING
+       select SRCU
        ---help---
          Support hosting paravirtualized guest machines using the SIE
          virtualization capability on the mainframe. This should work
index 811937bb90be69a18f57621d1be7e6fbfc12d423..9065d5aa3932dd7f6637069e493f2ad4a3ad72f3 100644 (file)
@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
                                do_no_context(regs);
                        else
                                pagefault_out_of_memory();
+               } else if (fault & VM_FAULT_SIGSEGV) {
+                       /* Kernel mode? Handle exceptions or die */
+                       if (!user_mode(regs))
+                               do_no_context(regs);
+                       else
+                               do_sigsegv(regs, SEGV_MAPERR);
                } else if (fault & VM_FAULT_SIGBUS) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
index 52238983527d605914853fd5415ea39617944ffe..6860beb2a280d0a4a65a67c89ad2201b33513068 100644 (file)
@@ -114,6 +114,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 541dc610150888e706977c7944c42ab1d61d7437..a58fec9b55e016df85cdfb7c214cc385e300479c 100644 (file)
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
        } else {
                if (fault & VM_FAULT_SIGBUS)
                        do_sigbus(regs, error_code, address);
+               else if (fault & VM_FAULT_SIGSEGV)
+                       bad_area(regs, error_code, address);
                else
                        BUG();
        }
index 38965379e350ffe7144087e83a3b4afe20228962..68513c41e10def4a09cb1d842fcc64cb2c34a122 100644 (file)
@@ -74,11 +74,6 @@ void flush_ptrace_access(struct vm_area_struct *, struct page *,
 #define flush_cache_vmap(start, end)           do { } while (0)
 #define flush_cache_vunmap(start, end)         do { } while (0)
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-/* internal debugging function */
-void kernel_map_pages(struct page *page, int numpages, int enable);
-#endif
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _SPARC64_CACHEFLUSH_H */
index 908e8c17c902bef419877cd1bedcc896b9627636..70d817154fe8bfd04aeaa71f45f15667f4962c23 100644 (file)
@@ -249,6 +249,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 18fcd71670959291f8ef4933e37d5bc394e98f51..4798232494294a7ece0bef232216dd4a26408d88 100644 (file)
@@ -446,6 +446,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 2298cb1daff74e411ac0a616252e84988bfdd06f..1e968f7550dc0363c0ddff9d68626cd59afb119c 100644 (file)
@@ -21,6 +21,7 @@ config KVM
        depends on HAVE_KVM && MODULES
        select PREEMPT_NOTIFIERS
        select ANON_INODES
+       select SRCU
        ---help---
          Support hosting paravirtualized guest machines.
 
index 565e25a98334201ee031d09381ea570a2fcbda03..0f61a73534e6d7c41ccf56ee71244f926908f6d0 100644 (file)
@@ -442,6 +442,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 5678c3571e7cb4d1572d0b16a91b0650f76095c7..209617302df89e02994b7c1e45df4340826bad05 100644 (file)
@@ -80,6 +80,8 @@ good_area:
                if (unlikely(fault & VM_FAULT_ERROR)) {
                        if (fault & VM_FAULT_OOM) {
                                goto out_of_memory;
+                       } else if (fault & VM_FAULT_SIGSEGV) {
+                               goto out;
                        } else if (fault & VM_FAULT_SIGBUS) {
                                err = -EACCES;
                                goto out;
index 0dc9d0144a27957d2bd2cdadf3b141a3195ccab0..5e28e2be3a41d6234ed18de61cff9d6767050b01 100644 (file)
@@ -138,6 +138,7 @@ config X86
        select HAVE_ACPI_APEI_NMI if ACPI
        select ACPI_LEGACY_TABLES_LOOKUP if ACPI
        select X86_FEATURE_NAMES if PROC_FS
+       select SRCU
 
 config INSTRUCTION_DECODER
        def_bool y
@@ -855,6 +856,10 @@ config SCHED_MC
 
 source "kernel/Kconfig.preempt"
 
+config UP_LATE_INIT
+       def_bool y
+       depends on !SMP && X86_LOCAL_APIC
+
 config X86_UP_APIC
        bool "Local APIC support on uniprocessors"
        depends on X86_32 && !SMP && !X86_32_NON_STANDARD
index 25e13403193cc4d0231b8ce97b49ef4fa6f4bb75..020f137df7a24dfbcc810283136655fb64708af6 100644 (file)
@@ -1,6 +1,5 @@
-#ifndef BOOT_ISDIGIT_H
-
-#define BOOT_ISDIGIT_H
+#ifndef BOOT_CTYPE_H
+#define BOOT_CTYPE_H
 
 static inline int isdigit(int ch)
 {
index 5df2869c874baced33de00a78a7b693f3237ea0f..45a07684bbabf3b617dcbd5b53ac5710bcb10e57 100644 (file)
@@ -2,8 +2,6 @@
 
 #define DEFAULT_SERIAL_PORT 0x3f8 /* ttyS0 */
 
-#define XMTRDY          0x20
-
 #define DLAB           0x80
 
 #define TXR             0       /*  Transmit register (WRITE) */
@@ -74,8 +72,8 @@ static void parse_earlyprintk(void)
                        static const int bases[] = { 0x3f8, 0x2f8 };
                        int idx = 0;
 
-                       if (!strncmp(arg + pos, "ttyS", 4))
-                               pos += 4;
+                       /* += strlen("ttyS"); */
+                       pos += 4;
 
                        if (arg[pos++] == '1')
                                idx = 1;
index 82e8a1d446583efaa5a3c0426887d3a47abc5ddd..156ebcab4ada6d54cf8992fcd95398af390f41cc 100644 (file)
@@ -179,8 +179,8 @@ sysenter_dispatch:
 sysexit_from_sys_call:
        andl    $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
        /* clear IF, that popfq doesn't enable interrupts early */
-       andl  $~0x200,EFLAGS-R11(%rsp) 
-       movl    RIP-R11(%rsp),%edx              /* User %eip */
+       andl    $~0x200,EFLAGS-ARGOFFSET(%rsp)
+       movl    RIP-ARGOFFSET(%rsp),%edx                /* User %eip */
        CFI_REGISTER rip,rdx
        RESTORE_ARGS 0,24,0,0,0,0
        xorq    %r8,%r8
index 465b309af25425dce160848ab8c32df14058ef35..92003f3c8a427b9138796ceef1b76bc237860da3 100644 (file)
@@ -106,7 +106,14 @@ extern u32 native_safe_apic_wait_icr_idle(void);
 extern void native_apic_icr_write(u32 low, u32 id);
 extern u64 native_apic_icr_read(void);
 
-extern int x2apic_mode;
+static inline bool apic_is_x2apic_enabled(void)
+{
+       u64 msr;
+
+       if (rdmsrl_safe(MSR_IA32_APICBASE, &msr))
+               return false;
+       return msr & X2APIC_ENABLE;
+}
 
 #ifdef CONFIG_X86_X2APIC
 /*
@@ -169,48 +176,23 @@ static inline u64 native_x2apic_icr_read(void)
        return val;
 }
 
+extern int x2apic_mode;
 extern int x2apic_phys;
-extern int x2apic_preenabled;
-extern void check_x2apic(void);
-extern void enable_x2apic(void);
+extern void __init check_x2apic(void);
+extern void x2apic_setup(void);
 static inline int x2apic_enabled(void)
 {
-       u64 msr;
-
-       if (!cpu_has_x2apic)
-               return 0;
-
-       rdmsrl(MSR_IA32_APICBASE, msr);
-       if (msr & X2APIC_ENABLE)
-               return 1;
-       return 0;
+       return cpu_has_x2apic && apic_is_x2apic_enabled();
 }
 
 #define x2apic_supported()     (cpu_has_x2apic)
-static inline void x2apic_force_phys(void)
-{
-       x2apic_phys = 1;
-}
 #else
-static inline void disable_x2apic(void)
-{
-}
-static inline void check_x2apic(void)
-{
-}
-static inline void enable_x2apic(void)
-{
-}
-static inline int x2apic_enabled(void)
-{
-       return 0;
-}
-static inline void x2apic_force_phys(void)
-{
-}
+static inline void check_x2apic(void) { }
+static inline void x2apic_setup(void) { }
+static inline int x2apic_enabled(void) { return 0; }
 
-#define        x2apic_preenabled 0
-#define        x2apic_supported()      0
+#define x2apic_mode            (0)
+#define        x2apic_supported()      (0)
 #endif
 
 extern void enable_IR_x2apic(void);
@@ -219,7 +201,6 @@ extern int get_physical_broadcast(void);
 
 extern int lapic_get_maxlvt(void);
 extern void clear_local_APIC(void);
-extern void connect_bsp_APIC(void);
 extern void disconnect_bsp_APIC(int virt_wire_setup);
 extern void disable_local_APIC(void);
 extern void lapic_shutdown(void);
@@ -227,8 +208,6 @@ extern int verify_local_APIC(void);
 extern void sync_Arb_IDs(void);
 extern void init_bsp_APIC(void);
 extern void setup_local_APIC(void);
-extern void end_local_APIC_setup(void);
-extern void bsp_end_local_APIC_setup(void);
 extern void init_apic_mappings(void);
 void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
@@ -236,6 +215,9 @@ extern void setup_secondary_APIC_clock(void);
 extern int APIC_init_uniprocessor(void);
 extern int apic_force_enable(unsigned long addr);
 
+extern int apic_bsp_setup(bool upmode);
+extern void apic_ap_setup(void);
+
 /*
  * On 32bit this is mach-xxx local
  */
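
The open-coded rdmsrl() check becomes apic_is_x2apic_enabled(), which probes IA32_APICBASE with rdmsrl_safe() so that a faulting read on a CPU without the MSR simply reports the feature as disabled. The same pattern, with a fake MSR read standing in for rdmsrl_safe():

#include <stdio.h>

#define X2APIC_ENABLE (1ULL << 10)      /* EXTD bit of IA32_APICBASE */

/* Stand-in for rdmsrl_safe(): nonzero return means the read faulted. */
static int msr_read_safe(unsigned int reg, unsigned long long *val)
{
        (void)reg;
        *val = X2APIC_ENABLE;           /* pretend the bit is set */
        return 0;
}

static int is_x2apic_enabled(void)
{
        unsigned long long msr;

        if (msr_read_safe(0x1b /* IA32_APICBASE */, &msr))
                return 0;               /* faulting read means not enabled */
        return !!(msr & X2APIC_ENABLE);
}

int main(void)
{
        printf("x2apic: %d\n", is_x2apic_enabled());
        return 0;
}
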
index 76659b67fd11f6da9f9de43aacc74b3edd20697e..1f1297b46f833ecd7843bf09a9592e0e5e61ec96 100644 (file)
@@ -83,7 +83,6 @@ For 32-bit we have the following conventions - kernel is built with
 #define SS             160
 
 #define ARGOFFSET      R11
-#define SWFRAME                ORIG_RAX
 
        .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1, rax_enosys=0
        subq  $9*8+\addskip, %rsp
index aede2c347bde307d9b74aa4ff4887b2b0b05eac6..90a54851aedc98b29c65856986ade222818381b8 100644 (file)
 #define X86_FEATURE_TOPOEXT    ( 6*32+22) /* topology extensions CPUID leafs */
 #define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
 #define X86_FEATURE_PERFCTR_NB  ( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_BPEXT      (6*32+26) /* data breakpoint extension */
 #define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
 
 /*
@@ -388,6 +389,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 #define cpu_has_cx16           boot_cpu_has(X86_FEATURE_CX16)
 #define cpu_has_eager_fpu      boot_cpu_has(X86_FEATURE_EAGER_FPU)
 #define cpu_has_topoext                boot_cpu_has(X86_FEATURE_TOPOEXT)
+#define cpu_has_bpext          boot_cpu_has(X86_FEATURE_BPEXT)
 
 #if __GNUC__ >= 4
 extern void warn_pre_alternatives(void);
index 61fd18b83b6c6af777f2cdd2a63d943089de55e1..12cb66f6d3a5204c32808c7a22ff379b5919a34d 100644 (file)
@@ -114,5 +114,10 @@ static inline void debug_stack_usage_inc(void) { }
 static inline void debug_stack_usage_dec(void) { }
 #endif /* X86_64 */
 
+#ifdef CONFIG_CPU_SUP_AMD
+extern void set_dr_addr_mask(unsigned long mask, int dr);
+#else
+static inline void set_dr_addr_mask(unsigned long mask, int dr) { }
+#endif
 
 #endif /* _ASM_X86_DEBUGREG_H */
index ef1c4d2d41eceff8cee01b4fdc9c18987c2dbe20..6c98be864a75afcbce9530a8ea538a54816bdae8 100644 (file)
@@ -12,6 +12,7 @@
  */
 struct arch_hw_breakpoint {
        unsigned long   address;
+       unsigned long   mask;
        u8              len;
        u8              type;
 };
index bf006cce94181ce72346b51c6de22dc24ebb908f..2f91685fe1cdb51d937eb20d29c46952d54f298f 100644 (file)
@@ -279,6 +279,11 @@ static inline void disable_ioapic_support(void) { }
 #define native_ioapic_set_affinity     NULL
 #define native_setup_ioapic_entry      NULL
 #define native_eoi_ioapic_pin          NULL
+
+static inline void setup_IO_APIC(void) { }
+static inline void enable_IO_APIC(void) { }
+static inline void setup_ioapic_dest(void) { }
+
 #endif
 
 #endif /* _ASM_X86_IO_APIC_H */
index b7747c4c2cf2ff0e73e2e0896fe550825b827eba..6224d316c405c444553877845385e2d7b151c161 100644 (file)
@@ -33,8 +33,6 @@ struct irq_cfg;
 
 #ifdef CONFIG_IRQ_REMAP
 
-extern void setup_irq_remapping_ops(void);
-extern int irq_remapping_supported(void);
 extern void set_irq_remapping_broken(void);
 extern int irq_remapping_prepare(void);
 extern int irq_remapping_enable(void);
@@ -60,8 +58,6 @@ void irq_remap_modify_chip_defaults(struct irq_chip *chip);
 
 #else  /* CONFIG_IRQ_REMAP */
 
-static inline void setup_irq_remapping_ops(void) { }
-static inline int irq_remapping_supported(void) { return 0; }
 static inline void set_irq_remapping_broken(void) { }
 static inline int irq_remapping_prepare(void) { return -ENODEV; }
 static inline int irq_remapping_enable(void) { return -ENODEV; }
index 51b26e895933cddc06e90904e11471ee4c3f998b..9b3de99dc0044a8b6ccdba0e7523f5b1e8425c03 100644 (file)
@@ -190,7 +190,6 @@ enum mcp_flags {
 void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
 
 int mce_notify_irq(void);
-void mce_notify_process(void);
 
 DECLARE_PER_CPU(struct mce, injectm);
 
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
deleted file mode 100644 (file)
index 0da7409..0000000
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
- * which needs to alter them. */
-
-static inline void smpboot_clear_io_apic_irqs(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-       io_apic_irqs = 0;
-#endif
-}
-
-static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&rtc_lock, flags);
-       CMOS_WRITE(0xa, 0xf);
-       spin_unlock_irqrestore(&rtc_lock, flags);
-       local_flush_tlb();
-       pr_debug("1.\n");
-       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
-                                                       start_eip >> 4;
-       pr_debug("2.\n");
-       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
-                                                       start_eip & 0xf;
-       pr_debug("3.\n");
-}
-
-static inline void smpboot_restore_warm_reset_vector(void)
-{
-       unsigned long flags;
-
-       /*
-        * Install writable page 0 entry to set BIOS data area.
-        */
-       local_flush_tlb();
-
-       /*
-        * Paranoid:  Set warm reset code and vector here back
-        * to default values.
-        */
-       spin_lock_irqsave(&rtc_lock, flags);
-       CMOS_WRITE(0, 0xf);
-       spin_unlock_irqrestore(&rtc_lock, flags);
-
-       *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
-}
-
-static inline void __init smpboot_setup_io_apic(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-       /*
-        * Here we can be sure that there is an IO-APIC in the system. Let's
-        * go and set it up:
-        */
-       if (!skip_ioapic_setup && nr_ioapics)
-               setup_IO_APIC();
-       else {
-               nr_ioapics = 0;
-       }
-#endif
-}
-
-static inline void smpboot_clear_io_apic(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-       nr_ioapics = 0;
-#endif
-}
index 547e344a6dc60d7db27d43c74d44c783326291bb..e82e95abc92bd514e9991af3cd97a7028c289271 100644 (file)
@@ -75,7 +75,6 @@ struct thread_info {
 #define TIF_SYSCALL_EMU                6       /* syscall emulation active */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_SECCOMP            8       /* secure computing */
-#define TIF_MCE_NOTIFY         10      /* notify userspace of an MCE */
 #define TIF_USER_RETURN_NOTIFY 11      /* notify kernel of userspace return */
 #define TIF_UPROBE             12      /* breakpointed or singlestepping */
 #define TIF_NOTSC              16      /* TSC is not accessible in userland */
@@ -100,7 +99,6 @@ struct thread_info {
 #define _TIF_SYSCALL_EMU       (1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
-#define _TIF_MCE_NOTIFY                (1 << TIF_MCE_NOTIFY)
 #define _TIF_USER_RETURN_NOTIFY        (1 << TIF_USER_RETURN_NOTIFY)
 #define _TIF_UPROBE            (1 << TIF_UPROBE)
 #define _TIF_NOTSC             (1 << TIF_NOTSC)
@@ -140,7 +138,7 @@ struct thread_info {
 
 /* Only used for 64 bit */
 #define _TIF_DO_NOTIFY_MASK                                            \
-       (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME |       \
+       (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME |                         \
         _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
 
 /* flags to check in __switch_to() */
@@ -170,6 +168,17 @@ static inline struct thread_info *current_thread_info(void)
        return ti;
 }
 
+static inline unsigned long current_stack_pointer(void)
+{
+       unsigned long sp;
+#ifdef CONFIG_X86_64
+       asm("mov %%rsp,%0" : "=g" (sp));
+#else
+       asm("mov %%esp,%0" : "=g" (sp));
+#endif
+       return sp;
+}
+
 #else /* !__ASSEMBLY__ */
 
 /* how to get the thread information struct from ASM */
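The current_stack_pointer() helper added above replaces the open-coded macro
that the irq_32.c hunk further down removes, so 32-bit and 64-bit code share
one definition. A minimal user-space sketch of the same inline-asm idiom
(illustration only, not kernel code; builds with gcc on x86):

	#include <stdio.h>

	static inline unsigned long current_stack_pointer(void)
	{
		unsigned long sp;
	#ifdef __x86_64__
		asm("mov %%rsp,%0" : "=g" (sp));	/* 64-bit: read RSP */
	#else
		asm("mov %%esp,%0" : "=g" (sp));	/* 32-bit: read ESP */
	#endif
		return sp;
	}

	int main(void)
	{
		printf("approximate stack pointer: %#lx\n",
		       current_stack_pointer());
		return 0;
	}
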
index 707adc6549d82335a20bdf18d18b697fa1fe9eab..4e49d7dff78e5f30ffb6353c37277eaf6ab264aa 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_TRAPS_H
 #define _ASM_X86_TRAPS_H
 
+#include <linux/context_tracking_state.h>
 #include <linux/kprobes.h>
 
 #include <asm/debugreg.h>
@@ -110,6 +111,11 @@ asmlinkage void smp_thermal_interrupt(void);
 asmlinkage void mce_threshold_interrupt(void);
 #endif
 
+extern enum ctx_state ist_enter(struct pt_regs *regs);
+extern void ist_exit(struct pt_regs *regs, enum ctx_state prev_state);
+extern void ist_begin_non_atomic(struct pt_regs *regs);
+extern void ist_end_non_atomic(void);
+
 /* Interrupts/Exceptions */
 enum {
        X86_TRAP_DE = 0,        /*  0, Divide-by-zero */
index c8aa65d56027eca717502898daa563b59ba7ca21..d979e5abae5510400ee0a8d0257a06a4cd9302ba 100644 (file)
 /* Fam 16h MSRs */
 #define MSR_F16H_L2I_PERF_CTL          0xc0010230
 #define MSR_F16H_L2I_PERF_CTR          0xc0010231
+#define MSR_F16H_DR1_ADDR_MASK         0xc0011019
+#define MSR_F16H_DR2_ADDR_MASK         0xc001101a
+#define MSR_F16H_DR3_ADDR_MASK         0xc001101b
+#define MSR_F16H_DR0_ADDR_MASK         0xc0011027
 
 /* Fam 15h MSRs */
 #define MSR_F15H_PERF_CTL              0xc0010200
index b9e30daa0881b3213bb9b6be8bf2bb1803cfba7b..a18fff361c7f47113359de453625feeb036a2e64 100644 (file)
@@ -653,6 +653,7 @@ static int acpi_register_gsi_pic(struct device *dev, u32 gsi,
        return gsi;
 }
 
+#ifdef CONFIG_X86_LOCAL_APIC
 static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
                                    int trigger, int polarity)
 {
@@ -675,6 +676,7 @@ static void acpi_unregister_gsi_ioapic(u32 gsi)
        mutex_unlock(&acpi_ioapic_lock);
 #endif
 }
+#endif
 
 int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
                           int trigger, int polarity) = acpi_register_gsi_pic;
index 29b5b18afa27dca80d384fade47906299a581e8f..b665d241efaddc6e34880897db45b8de669ed2e0 100644 (file)
@@ -134,9 +134,6 @@ static inline void imcr_apic_to_pic(void)
  */
 static int force_enable_local_apic __initdata;
 
-/* Control whether x2APIC mode is enabled or not */
-static bool nox2apic __initdata;
-
 /*
  * APIC command line parameters
  */
@@ -161,33 +158,6 @@ static __init int setup_apicpmtimer(char *s)
 __setup("apicpmtimer", setup_apicpmtimer);
 #endif
 
-int x2apic_mode;
-#ifdef CONFIG_X86_X2APIC
-/* x2apic enabled before OS handover */
-int x2apic_preenabled;
-static int x2apic_disabled;
-static int __init setup_nox2apic(char *str)
-{
-       if (x2apic_enabled()) {
-               int apicid = native_apic_msr_read(APIC_ID);
-
-               if (apicid >= 255) {
-                       pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
-                                  apicid);
-                       return 0;
-               }
-
-               pr_warning("x2apic already enabled. will disable it\n");
-       } else
-               setup_clear_cpu_cap(X86_FEATURE_X2APIC);
-
-       nox2apic = true;
-
-       return 0;
-}
-early_param("nox2apic", setup_nox2apic);
-#endif
-
 unsigned long mp_lapic_addr;
 int disable_apic;
 /* Disable local APIC timer from the kernel commandline or via dmi quirk */
@@ -1475,7 +1445,7 @@ void setup_local_APIC(void)
 #endif
 }
 
-void end_local_APIC_setup(void)
+static void end_local_APIC_setup(void)
 {
        lapic_setup_esr();
 
@@ -1492,116 +1462,184 @@ void end_local_APIC_setup(void)
        apic_pm_activate();
 }
 
-void __init bsp_end_local_APIC_setup(void)
+/*
+ * APIC setup function for application processors. Called from smpboot.c
+ */
+void apic_ap_setup(void)
 {
+       setup_local_APIC();
        end_local_APIC_setup();
-
-       /*
-        * Now that local APIC setup is completed for BP, configure the fault
-        * handling for interrupt remapping.
-        */
-       irq_remap_enable_fault_handling();
-
 }
 
 #ifdef CONFIG_X86_X2APIC
-/*
- * Need to disable xapic and x2apic at the same time and then enable xapic mode
- */
-static inline void __disable_x2apic(u64 msr)
-{
-       wrmsrl(MSR_IA32_APICBASE,
-              msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
-       wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
-}
+int x2apic_mode;
 
-static __init void disable_x2apic(void)
+enum {
+       X2APIC_OFF,
+       X2APIC_ON,
+       X2APIC_DISABLED,
+};
+static int x2apic_state;
+
+static inline void __x2apic_disable(void)
 {
        u64 msr;
 
-       if (!cpu_has_x2apic)
+       if (!cpu_has_apic)
                return;
 
        rdmsrl(MSR_IA32_APICBASE, msr);
-       if (msr & X2APIC_ENABLE) {
-               u32 x2apic_id = read_apic_id();
-
-               if (x2apic_id >= 255)
-                       panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
+       if (!(msr & X2APIC_ENABLE))
+               return;
+       /* Disable xapic and x2apic first and then reenable xapic mode */
+       wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
+       wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
+       printk_once(KERN_INFO "x2apic disabled\n");
+}
 
-               pr_info("Disabling x2apic\n");
-               __disable_x2apic(msr);
+static inline void __x2apic_enable(void)
+{
+       u64 msr;
 
-               if (nox2apic) {
-                       clear_cpu_cap(&cpu_data(0), X86_FEATURE_X2APIC);
-                       setup_clear_cpu_cap(X86_FEATURE_X2APIC);
-               }
+       rdmsrl(MSR_IA32_APICBASE, msr);
+       if (msr & X2APIC_ENABLE)
+               return;
+       wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
+       printk_once(KERN_INFO "x2apic enabled\n");
+}
 
-               x2apic_disabled = 1;
-               x2apic_mode = 0;
+static int __init setup_nox2apic(char *str)
+{
+       if (x2apic_enabled()) {
+               int apicid = native_apic_msr_read(APIC_ID);
 
-               register_lapic_address(mp_lapic_addr);
+               if (apicid >= 255) {
+                       pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
+                                  apicid);
+                       return 0;
+               }
+               pr_warning("x2apic already enabled.\n");
+               __x2apic_disable();
        }
+       setup_clear_cpu_cap(X86_FEATURE_X2APIC);
+       x2apic_state = X2APIC_DISABLED;
+       x2apic_mode = 0;
+       return 0;
 }
+early_param("nox2apic", setup_nox2apic);
 
-void check_x2apic(void)
+/* Called from cpu_init() to enable x2apic on (secondary) cpus */
+void x2apic_setup(void)
 {
-       if (x2apic_enabled()) {
-               pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
-               x2apic_preenabled = x2apic_mode = 1;
+       /*
+        * If x2apic is not in ON state, disable it if already enabled
+        * from BIOS.
+        */
+       if (x2apic_state != X2APIC_ON) {
+               __x2apic_disable();
+               return;
        }
+       __x2apic_enable();
 }
 
-void enable_x2apic(void)
+static __init void x2apic_disable(void)
 {
-       u64 msr;
+       u32 x2apic_id;
 
-       rdmsrl(MSR_IA32_APICBASE, msr);
-       if (x2apic_disabled) {
-               __disable_x2apic(msr);
+       if (x2apic_state != X2APIC_ON)
+               goto out;
+
+       x2apic_id = read_apic_id();
+       if (x2apic_id >= 255)
+               panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
+
+       __x2apic_disable();
+       register_lapic_address(mp_lapic_addr);
+out:
+       x2apic_state = X2APIC_DISABLED;
+       x2apic_mode = 0;
+}
+
+static __init void x2apic_enable(void)
+{
+       if (x2apic_state != X2APIC_OFF)
                return;
-       }
 
-       if (!x2apic_mode)
+       x2apic_mode = 1;
+       x2apic_state = X2APIC_ON;
+       __x2apic_enable();
+}
+
+static __init void try_to_enable_x2apic(int remap_mode)
+{
+       if (x2apic_state == X2APIC_DISABLED)
                return;
 
-       if (!(msr & X2APIC_ENABLE)) {
-               printk_once(KERN_INFO "Enabling x2apic\n");
-               wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
+       if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
+               /* IR is required if there is APIC ID > 255 even when running
+                * under KVM
+                */
+               if (max_physical_apicid > 255 ||
+                   (IS_ENABLED(CONFIG_HYPERVISOR_GUEST) &&
+                    !hypervisor_x2apic_available())) {
+                       pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
+                       x2apic_disable();
+                       return;
+               }
+
+               /*
+                * without IR all CPUs can be addressed by IOAPIC/MSI
+                * only in physical mode
+                */
+               x2apic_phys = 1;
        }
+       x2apic_enable();
 }
-#endif /* CONFIG_X86_X2APIC */
 
-int __init enable_IR(void)
+void __init check_x2apic(void)
 {
-#ifdef CONFIG_IRQ_REMAP
-       if (!irq_remapping_supported()) {
-               pr_debug("intr-remapping not supported\n");
-               return -1;
+       if (x2apic_enabled()) {
+               pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
+               x2apic_mode = 1;
+               x2apic_state = X2APIC_ON;
+       } else if (!cpu_has_x2apic) {
+               x2apic_state = X2APIC_DISABLED;
        }
+}
+#else /* CONFIG_X86_X2APIC */
+static int __init validate_x2apic(void)
+{
+       if (!apic_is_x2apic_enabled())
+               return 0;
+       /*
+        * Checkme: Can we simply turn off x2apic here instead of panic?
+        */
+       panic("BIOS has enabled x2apic but kernel doesn't support x2apic, please disable x2apic in BIOS.\n");
+}
+early_initcall(validate_x2apic);
 
-       if (!x2apic_preenabled && skip_ioapic_setup) {
-               pr_info("Skipped enabling intr-remap because of skipping "
-                       "io-apic setup\n");
+static inline void try_to_enable_x2apic(int remap_mode) { }
+static inline void __x2apic_enable(void) { }
+#endif /* !CONFIG_X86_X2APIC */
+
+static int __init try_to_enable_IR(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+       if (!x2apic_enabled() && skip_ioapic_setup) {
+               pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
                return -1;
        }
-
-       return irq_remapping_enable();
 #endif
-       return -1;
+       return irq_remapping_enable();
 }
 
 void __init enable_IR_x2apic(void)
 {
        unsigned long flags;
-       int ret, x2apic_enabled = 0;
-       int hardware_init_ret;
-
-       /* Make sure irq_remap_ops are initialized */
-       setup_irq_remapping_ops();
+       int ret, ir_stat;
 
-       hardware_init_ret = irq_remapping_prepare();
-       if (hardware_init_ret && !x2apic_supported())
+       ir_stat = irq_remapping_prepare();
+       if (ir_stat < 0 && !x2apic_supported())
                return;
 
        ret = save_ioapic_entries();
@@ -1614,49 +1652,13 @@ void __init enable_IR_x2apic(void)
        legacy_pic->mask_all();
        mask_ioapic_entries();
 
-       if (x2apic_preenabled && nox2apic)
-               disable_x2apic();
-
-       if (hardware_init_ret)
-               ret = -1;
-       else
-               ret = enable_IR();
-
-       if (!x2apic_supported())
-               goto skip_x2apic;
+       /* If irq_remapping_prepare() succeeded, try to enable it */
+       if (ir_stat >= 0)
+               ir_stat = try_to_enable_IR();
+       /* ir_stat contains the remap mode or an error code */
+       try_to_enable_x2apic(ir_stat);
 
-       if (ret < 0) {
-               /* IR is required if there is APIC ID > 255 even when running
-                * under KVM
-                */
-               if (max_physical_apicid > 255 ||
-                   !hypervisor_x2apic_available()) {
-                       if (x2apic_preenabled)
-                               disable_x2apic();
-                       goto skip_x2apic;
-               }
-               /*
-                * without IR all CPUs can be addressed by IOAPIC/MSI
-                * only in physical mode
-                */
-               x2apic_force_phys();
-       }
-
-       if (ret == IRQ_REMAP_XAPIC_MODE) {
-               pr_info("x2apic not enabled, IRQ remapping is in xapic mode\n");
-               goto skip_x2apic;
-       }
-
-       x2apic_enabled = 1;
-
-       if (x2apic_supported() && !x2apic_mode) {
-               x2apic_mode = 1;
-               enable_x2apic();
-               pr_info("Enabled x2apic\n");
-       }
-
-skip_x2apic:
-       if (ret < 0) /* IR enabling failed */
+       if (ir_stat < 0)
                restore_ioapic_entries();
        legacy_pic->restore_mask();
        local_irq_restore(flags);
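This hunk retires the tangle of x2apic_preenabled, x2apic_disabled and
nox2apic flags in favor of the explicit three-state machine introduced above
(X2APIC_OFF, X2APIC_ON, X2APIC_DISABLED) with a single try_to_enable_x2apic()
decision point. A toy user-space model of that decision, simplified to drop
the hypervisor check and the x2apic_phys fallback (illustration only):

	#include <stdio.h>

	enum { X2APIC_OFF, X2APIC_ON, X2APIC_DISABLED };

	#define IRQ_REMAP_X2APIC_MODE 1	/* stand-in value for this sketch */

	static int x2apic_state = X2APIC_OFF;

	/* remap_mode < 0 means IR setup failed; otherwise it is the remap mode */
	static void try_to_enable_x2apic(int remap_mode, int max_apicid)
	{
		if (x2apic_state == X2APIC_DISABLED)	/* e.g. nox2apic */
			return;

		if (remap_mode != IRQ_REMAP_X2APIC_MODE && max_apicid > 255) {
			/* no x2apic-capable IR: cannot address APIC IDs > 255 */
			x2apic_state = X2APIC_DISABLED;
			return;
		}

		x2apic_state = X2APIC_ON;	/* physical mode if IR is absent */
	}

	int main(void)
	{
		try_to_enable_x2apic(-1, 128);	/* IR failed, small system */
		printf("state: %d (1 == X2APIC_ON)\n", x2apic_state);
		return 0;
	}
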
@@ -1847,82 +1849,8 @@ void __init register_lapic_address(unsigned long address)
        }
 }
 
-/*
- * This initializes the IO-APIC and APIC hardware if this is
- * a UP kernel.
- */
 int apic_version[MAX_LOCAL_APIC];
 
-int __init APIC_init_uniprocessor(void)
-{
-       if (disable_apic) {
-               pr_info("Apic disabled\n");
-               return -1;
-       }
-#ifdef CONFIG_X86_64
-       if (!cpu_has_apic) {
-               disable_apic = 1;
-               pr_info("Apic disabled by BIOS\n");
-               return -1;
-       }
-#else
-       if (!smp_found_config && !cpu_has_apic)
-               return -1;
-
-       /*
-        * Complain if the BIOS pretends there is one.
-        */
-       if (!cpu_has_apic &&
-           APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
-               pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
-                       boot_cpu_physical_apicid);
-               return -1;
-       }
-#endif
-
-       default_setup_apic_routing();
-
-       verify_local_APIC();
-       connect_bsp_APIC();
-
-#ifdef CONFIG_X86_64
-       apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
-#else
-       /*
-        * Hack: In case of kdump, after a crash, kernel might be booting
-        * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
-        * might be zero if read from MP tables. Get it from LAPIC.
-        */
-# ifdef CONFIG_CRASH_DUMP
-       boot_cpu_physical_apicid = read_apic_id();
-# endif
-#endif
-       physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
-       setup_local_APIC();
-
-#ifdef CONFIG_X86_IO_APIC
-       /*
-        * Now enable IO-APICs, actually call clear_IO_APIC
-        * We need clear_IO_APIC before enabling error vector
-        */
-       if (!skip_ioapic_setup && nr_ioapics)
-               enable_IO_APIC();
-#endif
-
-       bsp_end_local_APIC_setup();
-
-#ifdef CONFIG_X86_IO_APIC
-       if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
-               setup_IO_APIC();
-       else {
-               nr_ioapics = 0;
-       }
-#endif
-
-       x86_init.timers.setup_percpu_clockev();
-       return 0;
-}
-
 /*
  * Local APIC interrupts
  */
@@ -2027,7 +1955,7 @@ __visible void smp_trace_error_interrupt(struct pt_regs *regs)
 /**
  * connect_bsp_APIC - attach the APIC to the interrupt system
  */
-void __init connect_bsp_APIC(void)
+static void __init connect_bsp_APIC(void)
 {
 #ifdef CONFIG_X86_32
        if (pic_mode) {
@@ -2274,6 +2202,100 @@ void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
        }
 }
 
+static void __init apic_bsp_up_setup(void)
+{
+#ifdef CONFIG_X86_64
+       apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
+#else
+       /*
+        * Hack: In case of kdump, after a crash, kernel might be booting
+        * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
+        * might be zero if read from MP tables. Get it from LAPIC.
+        */
+# ifdef CONFIG_CRASH_DUMP
+       boot_cpu_physical_apicid = read_apic_id();
+# endif
+#endif
+       physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
+}
+
+/**
+ * apic_bsp_setup - Setup function for local apic and io-apic
+ * @upmode:            Force UP mode (for APIC_init_uniprocessor)
+ *
+ * Returns:
+ * apic_id of BSP APIC
+ */
+int __init apic_bsp_setup(bool upmode)
+{
+       int id;
+
+       connect_bsp_APIC();
+       if (upmode)
+               apic_bsp_up_setup();
+       setup_local_APIC();
+
+       if (x2apic_mode)
+               id = apic_read(APIC_LDR);
+       else
+               id = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
+
+       enable_IO_APIC();
+       end_local_APIC_setup();
+       irq_remap_enable_fault_handling();
+       setup_IO_APIC();
+       /* Setup local timer */
+       x86_init.timers.setup_percpu_clockev();
+       return id;
+}
+
+/*
+ * This initializes the IO-APIC and APIC hardware if this is
+ * a UP kernel.
+ */
+int __init APIC_init_uniprocessor(void)
+{
+       if (disable_apic) {
+               pr_info("Apic disabled\n");
+               return -1;
+       }
+#ifdef CONFIG_X86_64
+       if (!cpu_has_apic) {
+               disable_apic = 1;
+               pr_info("Apic disabled by BIOS\n");
+               return -1;
+       }
+#else
+       if (!smp_found_config && !cpu_has_apic)
+               return -1;
+
+       /*
+        * Complain if the BIOS pretends there is one.
+        */
+       if (!cpu_has_apic &&
+           APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
+               pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
+                       boot_cpu_physical_apicid);
+               return -1;
+       }
+#endif
+
+       if (!smp_found_config)
+               disable_ioapic_support();
+
+       default_setup_apic_routing();
+       verify_local_APIC();
+       apic_bsp_setup(true);
+       return 0;
+}
+
+#ifdef CONFIG_UP_LATE_INIT
+void __init up_late_init(void)
+{
+       APIC_init_uniprocessor();
+}
+#endif
+
 /*
  * Power management
  */
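apic_bsp_setup() returns the BSP's logical APIC ID so the UP path above and
native_smp_prepare_cpus() in the smpboot.c hunk below can share one setup
routine. A small model of the ID selection, assuming GET_APIC_LOGICAL_ID()
extracts bits 31-24 of the LDR as in the xAPIC register layout (illustration
only):

	#include <stdio.h>
	#include <stdint.h>

	/* xAPIC keeps the logical ID in LDR bits 31-24; x2APIC uses the whole LDR */
	static uint32_t bsp_logical_id(uint32_t ldr, int x2apic_mode)
	{
		return x2apic_mode ? ldr : (ldr >> 24) & 0xff;
	}

	int main(void)
	{
		printf("xAPIC  LDR 0x01000000 -> %u\n", bsp_logical_id(0x01000000, 0));
		printf("x2APIC LDR 0x00010001 -> %u\n", bsp_logical_id(0x00010001, 1));
		return 0;
	}
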
@@ -2359,9 +2381,9 @@ static void lapic_resume(void)
        mask_ioapic_entries();
        legacy_pic->mask_all();
 
-       if (x2apic_mode)
-               enable_x2apic();
-       else {
+       if (x2apic_mode) {
+               __x2apic_enable();
+       } else {
                /*
                 * Make sure the APICBASE points to the right address
                 *
index 3f5f60406ab17659e294e725fbe18655c94048ac..f4dc2462a1ac410803cd94ff4944ebf23c636fa9 100644 (file)
@@ -1507,7 +1507,10 @@ void __init enable_IO_APIC(void)
        int i8259_apic, i8259_pin;
        int apic, pin;
 
-       if (!nr_legacy_irqs())
+       if (skip_ioapic_setup)
+               nr_ioapics = 0;
+
+       if (!nr_legacy_irqs() || !nr_ioapics)
                return;
 
        for_each_ioapic_pin(apic, pin) {
@@ -2295,7 +2298,7 @@ static inline void __init check_timer(void)
        }
        local_irq_disable();
        apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
-       if (x2apic_preenabled)
+       if (apic_is_x2apic_enabled())
                apic_printk(APIC_QUIET, KERN_INFO
                            "Perhaps problem with the pre-enabled x2apic mode\n"
                            "Try booting with x2apic and interrupt-remapping disabled in the bios.\n");
@@ -2373,9 +2376,9 @@ void __init setup_IO_APIC(void)
 {
        int ioapic;
 
-       /*
-        * calling enable_IO_APIC() is moved to setup_local_APIC for BP
-        */
+       if (skip_ioapic_setup || !nr_ioapics)
+               return;
+
        io_apic_irqs = nr_legacy_irqs() ? ~PIC_IRQS : ~0UL;
 
        apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
index 15c5df92f74ec84a1861ac521fb8f4a9d2895dea..a220239cea65ca99b3c52ebbfaed6ce973f0dec1 100644 (file)
@@ -869,3 +869,22 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 
        return false;
 }
+
+void set_dr_addr_mask(unsigned long mask, int dr)
+{
+       if (!cpu_has_bpext)
+               return;
+
+       switch (dr) {
+       case 0:
+               wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
+               break;
+       case 1:
+       case 2:
+       case 3:
+               wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
+               break;
+       default:
+               break;
+       }
+}
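set_dr_addr_mask() exploits the MSR layout added in the msr-index.h hunk
above: the DR1-DR3 mask MSRs are contiguous (0xc0011019-0xc001101b) while
DR0's sits apart at 0xc0011027, hence the single "base - 1 + dr" computation.
A runnable check of that arithmetic:

	#include <stdio.h>

	#define MSR_F16H_DR1_ADDR_MASK 0xc0011019u
	#define MSR_F16H_DR0_ADDR_MASK 0xc0011027u

	static unsigned int dr_addr_mask_msr(int dr)
	{
		/* DR1..DR3 are consecutive; DR0 has its own MSR */
		return dr == 0 ? MSR_F16H_DR0_ADDR_MASK
			       : MSR_F16H_DR1_ADDR_MASK - 1 + dr;
	}

	int main(void)
	{
		for (int dr = 0; dr < 4; dr++)
			printf("DR%d mask MSR: %#x\n", dr, dr_addr_mask_msr(dr));
		return 0;
	}
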
index c6049650c093f79f849c4779d7800601311eb4d3..b15bffcaba6d41fdcb1fff7d813aa3fd09f839ac 100644 (file)
@@ -491,17 +491,18 @@ u16 __read_mostly tlb_lld_2m[NR_INFO];
 u16 __read_mostly tlb_lld_4m[NR_INFO];
 u16 __read_mostly tlb_lld_1g[NR_INFO];
 
-void cpu_detect_tlb(struct cpuinfo_x86 *c)
+static void cpu_detect_tlb(struct cpuinfo_x86 *c)
 {
        if (this_cpu->c_detect_tlb)
                this_cpu->c_detect_tlb(c);
 
-       printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n"
-               "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
+       pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
                tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
-               tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES],
-               tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES],
-               tlb_lld_1g[ENTRIES]);
+               tlb_lli_4m[ENTRIES]);
+
+       pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
+               tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
+               tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
 }
 
 void detect_ht(struct cpuinfo_x86 *c)
@@ -1332,7 +1333,7 @@ void cpu_init(void)
        barrier();
 
        x86_configure_nx();
-       enable_x2apic();
+       x2apic_setup();
 
        /*
         * set up and load the per-CPU TSS
index 9cc6b6f25f424d18426adbab3e31e251e78c13af..94d7dcb1214530dfa86b4fef2f2bcbf007d32fe4 100644 (file)
@@ -487,10 +487,8 @@ static void init_intel(struct cpuinfo_x86 *c)
 
                rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
                if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
-                       printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
-                               " Set to 'normal', was 'performance'\n"
-                               "ENERGY_PERF_BIAS: View and update with"
-                               " x86_energy_perf_policy(8)\n");
+                       pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
+                       pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
                        epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
                        wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
                }
index d2c611699cd9d2d49bfd1cee5b79c7fedf87ef71..d23179900755a33e2c337f5211b8a867053846d3 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/export.h>
 
 #include <asm/processor.h>
+#include <asm/traps.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 
@@ -1002,51 +1003,6 @@ static void mce_clear_state(unsigned long *toclear)
        }
 }
 
-/*
- * Need to save faulting physical address associated with a process
- * in the machine check handler some place where we can grab it back
- * later in mce_notify_process()
- */
-#define        MCE_INFO_MAX    16
-
-struct mce_info {
-       atomic_t                inuse;
-       struct task_struct      *t;
-       __u64                   paddr;
-       int                     restartable;
-} mce_info[MCE_INFO_MAX];
-
-static void mce_save_info(__u64 addr, int c)
-{
-       struct mce_info *mi;
-
-       for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++) {
-               if (atomic_cmpxchg(&mi->inuse, 0, 1) == 0) {
-                       mi->t = current;
-                       mi->paddr = addr;
-                       mi->restartable = c;
-                       return;
-               }
-       }
-
-       mce_panic("Too many concurrent recoverable errors", NULL, NULL);
-}
-
-static struct mce_info *mce_find_info(void)
-{
-       struct mce_info *mi;
-
-       for (mi = mce_info; mi < &mce_info[MCE_INFO_MAX]; mi++)
-               if (atomic_read(&mi->inuse) && mi->t == current)
-                       return mi;
-       return NULL;
-}
-
-static void mce_clear_info(struct mce_info *mi)
-{
-       atomic_set(&mi->inuse, 0);
-}
-
 /*
  * The actual machine check handler. This only handles real
  * exceptions when something got corrupted coming in through int 18.
@@ -1063,6 +1019,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 {
        struct mca_config *cfg = &mca_cfg;
        struct mce m, *final;
+       enum ctx_state prev_state;
        int i;
        int worst = 0;
        int severity;
@@ -1084,6 +1041,10 @@ void do_machine_check(struct pt_regs *regs, long error_code)
        DECLARE_BITMAP(toclear, MAX_NR_BANKS);
        DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
        char *msg = "Unknown";
+       u64 recover_paddr = ~0ull;
+       int flags = MF_ACTION_REQUIRED;
+
+       prev_state = ist_enter(regs);
 
        this_cpu_inc(mce_exception_count);
 
@@ -1203,9 +1164,9 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                if (no_way_out)
                        mce_panic("Fatal machine check on current CPU", &m, msg);
                if (worst == MCE_AR_SEVERITY) {
-                       /* schedule action before return to userland */
-                       mce_save_info(m.addr, m.mcgstatus & MCG_STATUS_RIPV);
-                       set_thread_flag(TIF_MCE_NOTIFY);
+                       recover_paddr = m.addr;
+                       if (!(m.mcgstatus & MCG_STATUS_RIPV))
+                               flags |= MF_MUST_KILL;
                } else if (kill_it) {
                        force_sig(SIGBUS, current);
                }
@@ -1216,6 +1177,27 @@ void do_machine_check(struct pt_regs *regs, long error_code)
        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
 out:
        sync_core();
+
+       if (recover_paddr == ~0ull)
+               goto done;
+
+       pr_err("Uncorrected hardware memory error in user-access at %llx",
+                recover_paddr);
+       /*
+        * We must call memory_failure() here even if the current process is
+        * doomed. We still need to mark the page as poisoned and alert any
+        * other users of the page.
+        */
+       ist_begin_non_atomic(regs);
+       local_irq_enable();
+       if (memory_failure(recover_paddr >> PAGE_SHIFT, MCE_VECTOR, flags) < 0) {
+               pr_err("Memory error not recovered");
+               force_sig(SIGBUS, current);
+       }
+       local_irq_disable();
+       ist_end_non_atomic();
+done:
+       ist_exit(regs, prev_state);
 }
 EXPORT_SYMBOL_GPL(do_machine_check);
 
@@ -1232,42 +1214,6 @@ int memory_failure(unsigned long pfn, int vector, int flags)
 }
 #endif
 
-/*
- * Called in process context that interrupted by MCE and marked with
- * TIF_MCE_NOTIFY, just before returning to erroneous userland.
- * This code is allowed to sleep.
- * Attempt possible recovery such as calling the high level VM handler to
- * process any corrupted pages, and kill/signal current process if required.
- * Action required errors are handled here.
- */
-void mce_notify_process(void)
-{
-       unsigned long pfn;
-       struct mce_info *mi = mce_find_info();
-       int flags = MF_ACTION_REQUIRED;
-
-       if (!mi)
-               mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
-       pfn = mi->paddr >> PAGE_SHIFT;
-
-       clear_thread_flag(TIF_MCE_NOTIFY);
-
-       pr_err("Uncorrected hardware memory error in user-access at %llx",
-                mi->paddr);
-       /*
-        * We must call memory_failure() here even if the current process is
-        * doomed. We still need to mark the page as poisoned and alert any
-        * other users of the page.
-        */
-       if (!mi->restartable)
-               flags |= MF_MUST_KILL;
-       if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
-               pr_err("Memory error not recovered");
-               force_sig(SIGBUS, current);
-       }
-       mce_clear_info(mi);
-}
-
 /*
  * Action optional processing happens here (picking up
  * from the list of faulting pages that do_machine_check()
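With TIF_MCE_NOTIFY gone, do_machine_check() carries the faulting address
through the handler in recover_paddr, using ~0ull as a "nothing to recover"
sentinel, and calls memory_failure() itself from the ist_begin_non_atomic()
window instead of deferring to the removed mce_notify_process(). A toy
user-space model of the sentinel pattern (illustration only):

	#include <stdio.h>
	#include <stdint.h>

	#define NO_RECOVERY (~0ull)

	static void handle(int fault, uint64_t addr)
	{
		uint64_t recover_paddr = NO_RECOVERY;

		if (fault)
			recover_paddr = addr;	/* remember work for later */

		/* ... the rest of the handler runs unconditionally ... */

		if (recover_paddr == NO_RECOVERY)
			return;
		printf("recovering page at %#llx\n",
		       (unsigned long long)recover_paddr);
	}

	int main(void)
	{
		handle(0, 0);
		handle(1, 0x1234000);
		return 0;
	}
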
index a3042989398c1cdb7d33da49bf7dea1887e00aa5..ec2663a708e40d2e3fa03b36fa7587c860a1871b 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/smp.h>
 
 #include <asm/processor.h>
+#include <asm/traps.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 
@@ -17,8 +18,11 @@ int mce_p5_enabled __read_mostly;
 /* Machine check handler for Pentium class Intel CPUs: */
 static void pentium_machine_check(struct pt_regs *regs, long error_code)
 {
+       enum ctx_state prev_state;
        u32 loaddr, hi, lotype;
 
+       prev_state = ist_enter(regs);
+
        rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
        rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
 
@@ -33,6 +37,8 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code)
        }
 
        add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
+       ist_exit(regs, prev_state);
 }
 
 /* Set up machine check reporting for processors with Intel style MCE: */
index 7dc5564d0cdf57c0e7ca8c181f87f3ebb6f6ceb2..bd5d46a32210a15deb8895c37e67e5e0dea9d7c5 100644 (file)
@@ -7,14 +7,19 @@
 #include <linux/types.h>
 
 #include <asm/processor.h>
+#include <asm/traps.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 
 /* Machine check handler for WinChip C6: */
 static void winchip_machine_check(struct pt_regs *regs, long error_code)
 {
+       enum ctx_state prev_state = ist_enter(regs);
+
        printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
        add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
+       ist_exit(regs, prev_state);
 }
 
 /* Set up machine check reporting on the Winchip C6 series */
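The legacy machine-check handlers (p5.c above, winchip.c here) now bracket
their bodies with the ist_enter()/ist_exit() pair declared in the traps.h
hunk, so RCU is watching while the handler runs. The resulting shape, as a
kernel-context sketch (hypothetical handler name, not standalone-buildable):

	static void example_machine_check(struct pt_regs *regs, long error_code)
	{
		enum ctx_state prev_state = ist_enter(regs);

		/* handler body: read status MSRs, taint, maybe panic */

		ist_exit(regs, prev_state);
	}
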
index 15c29096136ba5e071c213d1412004740aa1f459..36a83617eb21cc19245794a89c986eba45179d3a 100644 (file)
@@ -552,7 +552,7 @@ static int __init microcode_init(void)
        int error;
 
        if (paravirt_enabled() || dis_ucode_ldr)
-               return 0;
+               return -EINVAL;
 
        if (c->x86_vendor == X86_VENDOR_INTEL)
                microcode_ops = init_intel_microcode();
index 944bf019b74f425e06cc465358d25b85741a5b47..498b6d967138b1fff29659e81813c77e70ff1f58 100644 (file)
@@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void)
                break;
 
        case 55: /* 22nm Atom "Silvermont"                */
+       case 76: /* 14nm Atom "Airmont"                   */
        case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
                memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
                        sizeof(hw_cache_event_ids));
index 6e434f8e5fc8a34ea8f098280c9c0da3227b834e..c4bb8b8e5017403b25847a97ccce42c96bba3837 100644 (file)
@@ -142,7 +142,7 @@ static inline u64 rapl_scale(u64 v)
         * or use ldexp(count, -32).
         * Watts = Joules/Time delta
         */
-       return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
+       return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
 }
 
 static u64 rapl_event_update(struct perf_event *event)
index 10b8d3eaaf15d760468a6ad88105ab7e06cd540b..c635b8b49e931e7926efc3dc96475a8c577958e0 100644 (file)
@@ -840,7 +840,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
        box->phys_id = phys_id;
        box->pci_dev = pdev;
        box->pmu = pmu;
-       uncore_box_init(box);
        pci_set_drvdata(pdev, box);
 
        raw_spin_lock(&uncore_box_lock);
@@ -1004,10 +1003,8 @@ static int uncore_cpu_starting(int cpu)
                        pmu = &type->pmus[j];
                        box = *per_cpu_ptr(pmu->box, cpu);
                        /* called by uncore_cpu_init? */
-                       if (box && box->phys_id >= 0) {
-                               uncore_box_init(box);
+                       if (box && box->phys_id >= 0)
                                continue;
-                       }
 
                        for_each_online_cpu(k) {
                                exist = *per_cpu_ptr(pmu->box, k);
@@ -1023,10 +1020,8 @@ static int uncore_cpu_starting(int cpu)
                                }
                        }
 
-                       if (box) {
+                       if (box)
                                box->phys_id = phys_id;
-                               uncore_box_init(box);
-                       }
                }
        }
        return 0;
index 863d9b02563e596cd6bc04005546a383179175bf..6c8c1e7e69d85d3ad217eada0f0e55573c3daaf0 100644 (file)
@@ -257,6 +257,14 @@ static inline int uncore_num_counters(struct intel_uncore_box *box)
        return box->pmu->type->num_counters;
 }
 
+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+       if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+               if (box->pmu->type->ops->init_box)
+                       box->pmu->type->ops->init_box(box);
+       }
+}
+
 static inline void uncore_disable_box(struct intel_uncore_box *box)
 {
        if (box->pmu->type->ops->disable_box)
@@ -265,6 +273,8 @@ static inline void uncore_disable_box(struct intel_uncore_box *box)
 
 static inline void uncore_enable_box(struct intel_uncore_box *box)
 {
+       uncore_box_init(box);
+
        if (box->pmu->type->ops->enable_box)
                box->pmu->type->ops->enable_box(box);
 }
@@ -287,14 +297,6 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box,
        return box->pmu->type->ops->read_counter(box, event);
 }
 
-static inline void uncore_box_init(struct intel_uncore_box *box)
-{
-       if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
-               if (box->pmu->type->ops->init_box)
-                       box->pmu->type->ops->init_box(box);
-       }
-}
-
 static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
 {
        return (box->phys_id < 0);
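Moving uncore_box_init() into uncore_enable_box() turns box initialization
into a lazy, first-use operation guarded by test_and_set_bit(), so the PCI
probe and CPU-starting paths no longer call it directly. A user-space model
of the once-only guard using C11 atomics (illustration, not the kernel
primitive):

	#include <stdatomic.h>
	#include <stdio.h>

	struct box {
		atomic_flag initiated;
	};

	static void box_init(struct box *b)
	{
		/* first caller wins; later callers see the flag already set */
		if (!atomic_flag_test_and_set(&b->initiated))
			printf("initializing box once\n");
	}

	static void box_enable(struct box *b)
	{
		box_init(b);	/* lazy init on first enable */
		printf("enabling box\n");
	}

	int main(void)
	{
		struct box b = { ATOMIC_FLAG_INIT };

		box_enable(&b);
		box_enable(&b);	/* init runs only the first time */
		return 0;
	}
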
index 9ebaf63ba18212559728664d4c69385e7321d4f2..db13655c3a2aff4a4475a9adf6ce1cb5b3639220 100644 (file)
@@ -143,7 +143,8 @@ ENDPROC(native_usergs_sysret64)
        movq \tmp,RSP+\offset(%rsp)
        movq $__USER_DS,SS+\offset(%rsp)
        movq $__USER_CS,CS+\offset(%rsp)
-       movq $-1,RCX+\offset(%rsp)
+       movq RIP+\offset(%rsp),\tmp  /* get rip */
+       movq \tmp,RCX+\offset(%rsp)  /* copy it to rcx as sysret would do */
        movq R11+\offset(%rsp),\tmp  /* get eflags */
        movq \tmp,EFLAGS+\offset(%rsp)
        .endm
@@ -155,27 +156,6 @@ ENDPROC(native_usergs_sysret64)
        movq \tmp,R11+\offset(%rsp)
        .endm
 
-       .macro FAKE_STACK_FRAME child_rip
-       /* push in order ss, rsp, eflags, cs, rip */
-       xorl %eax, %eax
-       pushq_cfi $__KERNEL_DS /* ss */
-       /*CFI_REL_OFFSET        ss,0*/
-       pushq_cfi %rax /* rsp */
-       CFI_REL_OFFSET  rsp,0
-       pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
-       /*CFI_REL_OFFSET        rflags,0*/
-       pushq_cfi $__KERNEL_CS /* cs */
-       /*CFI_REL_OFFSET        cs,0*/
-       pushq_cfi \child_rip /* rip */
-       CFI_REL_OFFSET  rip,0
-       pushq_cfi %rax /* orig rax */
-       .endm
-
-       .macro UNFAKE_STACK_FRAME
-       addq $8*6, %rsp
-       CFI_ADJUST_CFA_OFFSET   -(6*8)
-       .endm
-
 /*
  * initial frame state for interrupts (and exceptions without error code)
  */
@@ -238,51 +218,6 @@ ENDPROC(native_usergs_sysret64)
        CFI_REL_OFFSET r15, R15+\offset
        .endm
 
-/* save partial stack frame */
-       .macro SAVE_ARGS_IRQ
-       cld
-       /* start from rbp in pt_regs and jump over */
-       movq_cfi rdi, (RDI-RBP)
-       movq_cfi rsi, (RSI-RBP)
-       movq_cfi rdx, (RDX-RBP)
-       movq_cfi rcx, (RCX-RBP)
-       movq_cfi rax, (RAX-RBP)
-       movq_cfi  r8,  (R8-RBP)
-       movq_cfi  r9,  (R9-RBP)
-       movq_cfi r10, (R10-RBP)
-       movq_cfi r11, (R11-RBP)
-
-       /* Save rbp so that we can unwind from get_irq_regs() */
-       movq_cfi rbp, 0
-
-       /* Save previous stack value */
-       movq %rsp, %rsi
-
-       leaq -RBP(%rsp),%rdi    /* arg1 for handler */
-       testl $3, CS-RBP(%rsi)
-       je 1f
-       SWAPGS
-       /*
-        * irq_count is used to check if a CPU is already on an interrupt stack
-        * or not. While this is essentially redundant with preempt_count it is
-        * a little cheaper to use a separate counter in the PDA (short of
-        * moving irq_enter into assembly, which would be too much work)
-        */
-1:     incl PER_CPU_VAR(irq_count)
-       cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
-       CFI_DEF_CFA_REGISTER    rsi
-
-       /* Store previous stack value */
-       pushq %rsi
-       CFI_ESCAPE      0x0f /* DW_CFA_def_cfa_expression */, 6, \
-                       0x77 /* DW_OP_breg7 */, 0, \
-                       0x06 /* DW_OP_deref */, \
-                       0x08 /* DW_OP_const1u */, SS+8-RBP, \
-                       0x22 /* DW_OP_plus */
-       /* We entered an interrupt context - irqs are off: */
-       TRACE_IRQS_OFF
-       .endm
-
 ENTRY(save_paranoid)
        XCPT_FRAME 1 RDI+8
        cld
@@ -426,15 +361,12 @@ system_call_fastpath:
  * Has incomplete stack frame and undefined top of stack.
  */
 ret_from_sys_call:
-       movl $_TIF_ALLWORK_MASK,%edi
-       /* edi: flagmask */
-sysret_check:
+       testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+       jnz int_ret_from_sys_call_fixup /* Go to the slow path */
+
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
-       movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
-       andl %edi,%edx
-       jnz  sysret_careful
        CFI_REMEMBER_STATE
        /*
         * sysretq will re-enable interrupts:
@@ -448,49 +380,10 @@ sysret_check:
        USERGS_SYSRET64
 
        CFI_RESTORE_STATE
-       /* Handle reschedules */
-       /* edx: work, edi: workmask */
-sysret_careful:
-       bt $TIF_NEED_RESCHED,%edx
-       jnc sysret_signal
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_NONE)
-       pushq_cfi %rdi
-       SCHEDULE_USER
-       popq_cfi %rdi
-       jmp sysret_check
 
-       /* Handle a signal */
-sysret_signal:
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_NONE)
-#ifdef CONFIG_AUDITSYSCALL
-       bt $TIF_SYSCALL_AUDIT,%edx
-       jc sysret_audit
-#endif
-       /*
-        * We have a signal, or exit tracing or single-step.
-        * These all wind up with the iret return path anyway,
-        * so just join that path right now.
-        */
+int_ret_from_sys_call_fixup:
        FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
-       jmp int_check_syscall_exit_work
-
-#ifdef CONFIG_AUDITSYSCALL
-       /*
-        * Return fast path for syscall audit.  Call __audit_syscall_exit()
-        * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
-        * masked off.
-        */
-sysret_audit:
-       movq RAX-ARGOFFSET(%rsp),%rsi   /* second arg, syscall return value */
-       cmpq $-MAX_ERRNO,%rsi   /* is it < -MAX_ERRNO? */
-       setbe %al               /* 1 if so, 0 if not */
-       movzbl %al,%edi         /* zero-extend that into %edi */
-       call __audit_syscall_exit
-       movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
-       jmp sysret_check
-#endif /* CONFIG_AUDITSYSCALL */
+       jmp int_ret_from_sys_call
 
        /* Do syscall tracing */
 tracesys:
@@ -626,19 +519,6 @@ END(\label)
        FORK_LIKE  vfork
        FIXED_FRAME stub_iopl, sys_iopl
 
-ENTRY(ptregscall_common)
-       DEFAULT_FRAME 1 8       /* offset 8: return address */
-       RESTORE_TOP_OF_STACK %r11, 8
-       movq_cfi_restore R15+8, r15
-       movq_cfi_restore R14+8, r14
-       movq_cfi_restore R13+8, r13
-       movq_cfi_restore R12+8, r12
-       movq_cfi_restore RBP+8, rbp
-       movq_cfi_restore RBX+8, rbx
-       ret $REST_SKIP          /* pop extended registers */
-       CFI_ENDPROC
-END(ptregscall_common)
-
 ENTRY(stub_execve)
        CFI_STARTPROC
        addq $8, %rsp
@@ -779,7 +659,48 @@ END(interrupt)
        /* reserve pt_regs for scratch regs and rbp */
        subq $ORIG_RAX-RBP, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
-       SAVE_ARGS_IRQ
+       cld
+       /* start from rbp in pt_regs and jump over */
+       movq_cfi rdi, (RDI-RBP)
+       movq_cfi rsi, (RSI-RBP)
+       movq_cfi rdx, (RDX-RBP)
+       movq_cfi rcx, (RCX-RBP)
+       movq_cfi rax, (RAX-RBP)
+       movq_cfi  r8,  (R8-RBP)
+       movq_cfi  r9,  (R9-RBP)
+       movq_cfi r10, (R10-RBP)
+       movq_cfi r11, (R11-RBP)
+
+       /* Save rbp so that we can unwind from get_irq_regs() */
+       movq_cfi rbp, 0
+
+       /* Save previous stack value */
+       movq %rsp, %rsi
+
+       leaq -RBP(%rsp),%rdi    /* arg1 for handler */
+       testl $3, CS-RBP(%rsi)
+       je 1f
+       SWAPGS
+       /*
+        * irq_count is used to check if a CPU is already on an interrupt stack
+        * or not. While this is essentially redundant with preempt_count it is
+        * a little cheaper to use a separate counter in the PDA (short of
+        * moving irq_enter into assembly, which would be too much work)
+        */
+1:     incl PER_CPU_VAR(irq_count)
+       cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
+       CFI_DEF_CFA_REGISTER    rsi
+
+       /* Store previous stack value */
+       pushq %rsi
+       CFI_ESCAPE      0x0f /* DW_CFA_def_cfa_expression */, 6, \
+                       0x77 /* DW_OP_breg7 */, 0, \
+                       0x06 /* DW_OP_deref */, \
+                       0x08 /* DW_OP_const1u */, SS+8-RBP, \
+                       0x22 /* DW_OP_plus */
+       /* We entered an interrupt context - irqs are off: */
+       TRACE_IRQS_OFF
+
        call \func
        .endm
 
@@ -831,6 +752,60 @@ retint_swapgs:             /* return to user-space */
         */
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_IRETQ
+
+       /*
+        * Try to use SYSRET instead of IRET if we're returning to
+        * a completely clean 64-bit userspace context.
+        */
+       movq (RCX-R11)(%rsp), %rcx
+       cmpq %rcx,(RIP-R11)(%rsp)               /* RCX == RIP */
+       jne opportunistic_sysret_failed
+
+       /*
+        * On Intel CPUs, sysret with non-canonical RCX/RIP will #GP
+        * in kernel space.  This essentially lets the user take over
+        * the kernel, since userspace controls RSP.  It's not worth
+        * testing for canonicalness exactly -- this check detects any
+        * of the 17 high bits set, which is true for non-canonical
+        * or kernel addresses.  (This will pessimize vsyscall=native.
+        * Big deal.)
+        *
+        * If virtual addresses ever become wider, this will need
+        * to be updated to remain correct on both old and new CPUs.
+        */
+       .ifne __VIRTUAL_MASK_SHIFT - 47
+       .error "virtual address width changed -- sysret checks need update"
+       .endif
+       shr $__VIRTUAL_MASK_SHIFT, %rcx
+       jnz opportunistic_sysret_failed
+
+       cmpq $__USER_CS,(CS-R11)(%rsp)          /* CS must match SYSRET */
+       jne opportunistic_sysret_failed
+
+       movq (R11-ARGOFFSET)(%rsp), %r11
+       cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp)      /* R11 == RFLAGS */
+       jne opportunistic_sysret_failed
+
+       testq $X86_EFLAGS_RF,%r11               /* sysret can't restore RF */
+       jnz opportunistic_sysret_failed
+
+       /* nothing to check for RSP */
+
+       cmpq $__USER_DS,(SS-ARGOFFSET)(%rsp)    /* SS must match SYSRET */
+       jne opportunistic_sysret_failed
+
+       /*
+        * We win!  This label is here just for ease of understanding
+        * perf profiles.  Nothing jumps here.
+        */
+irq_return_via_sysret:
+       CFI_REMEMBER_STATE
+       RESTORE_ARGS 1,8,1
+       movq (RSP-RIP)(%rsp),%rsp
+       USERGS_SYSRET64
+       CFI_RESTORE_STATE
+
+opportunistic_sysret_failed:
        SWAPGS
        jmp restore_args
 
@@ -1048,6 +1023,11 @@ ENTRY(\sym)
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
 
        .if \paranoid
+       .if \paranoid == 1
+       CFI_REMEMBER_STATE
+       testl $3, CS(%rsp)              /* If coming from userspace, switch */
+       jnz 1f                          /* stacks. */
+       .endif
        call save_paranoid
        .else
        call error_entry
@@ -1088,6 +1068,36 @@ ENTRY(\sym)
        jmp error_exit                  /* %ebx: no swapgs flag */
        .endif
 
+       .if \paranoid == 1
+       CFI_RESTORE_STATE
+       /*
+        * Paranoid entry from userspace.  Switch stacks and treat it
+        * as a normal entry.  This means that paranoid handlers
+        * run in real process context if user_mode(regs).
+        */
+1:
+       call error_entry
+
+       DEFAULT_FRAME 0
+
+       movq %rsp,%rdi                  /* pt_regs pointer */
+       call sync_regs
+       movq %rax,%rsp                  /* switch stack */
+
+       movq %rsp,%rdi                  /* pt_regs pointer */
+
+       .if \has_error_code
+       movq ORIG_RAX(%rsp),%rsi        /* get error code */
+       movq $-1,ORIG_RAX(%rsp)         /* no syscall to restart */
+       .else
+       xorl %esi,%esi                  /* no error code */
+       .endif
+
+       call \do_sym
+
+       jmp error_exit                  /* %ebx: no swapgs flag */
+       .endif
+
        CFI_ENDPROC
 END(\sym)
 .endm
@@ -1108,7 +1118,7 @@ idtentry overflow do_overflow has_error_code=0
 idtentry bounds do_bounds has_error_code=0
 idtentry invalid_op do_invalid_op has_error_code=0
 idtentry device_not_available do_device_not_available has_error_code=0
-idtentry double_fault do_double_fault has_error_code=1 paranoid=1
+idtentry double_fault do_double_fault has_error_code=1 paranoid=2
 idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0
 idtentry invalid_TSS do_invalid_TSS has_error_code=1
 idtentry segment_not_present do_segment_not_present has_error_code=1
@@ -1289,16 +1299,14 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(
 #endif
 
        /*
-        * "Paranoid" exit path from exception stack.
-        * Paranoid because this is used by NMIs and cannot take
-        * any kernel state for granted.
-        * We don't do kernel preemption checks here, because only
-        * NMI should be common and it does not enable IRQs and
-        * cannot get reschedule ticks.
+        * "Paranoid" exit path from exception stack.  This is invoked
+        * only on return from non-NMI IST interrupts that came
+        * from kernel space.
         *
-        * "trace" is 0 for the NMI handler only, because irq-tracing
-        * is fundamentally NMI-unsafe. (we cannot change the soft and
-        * hard flags at once, atomically)
+        * We may be returning to very strange contexts (e.g. very early
+        * in syscall entry), so checking for preemption here would
+        * be complicated.  Fortunately, there's no good reason
+        * to try to handle preemption here.
         */
 
        /* ebx: no swapgs flag */
@@ -1308,43 +1316,14 @@ ENTRY(paranoid_exit)
        TRACE_IRQS_OFF_DEBUG
        testl %ebx,%ebx                         /* swapgs needed? */
        jnz paranoid_restore
-       testl $3,CS(%rsp)
-       jnz   paranoid_userspace
-paranoid_swapgs:
        TRACE_IRQS_IRETQ 0
        SWAPGS_UNSAFE_STACK
        RESTORE_ALL 8
-       jmp irq_return
+       INTERRUPT_RETURN
 paranoid_restore:
        TRACE_IRQS_IRETQ_DEBUG 0
        RESTORE_ALL 8
-       jmp irq_return
-paranoid_userspace:
-       GET_THREAD_INFO(%rcx)
-       movl TI_flags(%rcx),%ebx
-       andl $_TIF_WORK_MASK,%ebx
-       jz paranoid_swapgs
-       movq %rsp,%rdi                  /* &pt_regs */
-       call sync_regs
-       movq %rax,%rsp                  /* switch stack for scheduling */
-       testl $_TIF_NEED_RESCHED,%ebx
-       jnz paranoid_schedule
-       movl %ebx,%edx                  /* arg3: thread flags */
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_NONE)
-       xorl %esi,%esi                  /* arg2: oldset */
-       movq %rsp,%rdi                  /* arg1: &pt_regs */
-       call do_notify_resume
-       DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
-       jmp paranoid_userspace
-paranoid_schedule:
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_ANY)
-       SCHEDULE_USER
-       DISABLE_INTERRUPTS(CLBR_ANY)
-       TRACE_IRQS_OFF
-       jmp paranoid_userspace
+       INTERRUPT_RETURN
        CFI_ENDPROC
 END(paranoid_exit)
 
index 3d5fb509bdebc300af21de5079b7ed61c16a54e4..7114ba220fd45d64cb24d86f9a3dd5c3bc65bceb 100644 (file)
@@ -126,6 +126,8 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
        *dr7 |= encode_dr7(i, info->len, info->type);
 
        set_debugreg(*dr7, 7);
+       if (info->mask)
+               set_dr_addr_mask(info->mask, i);
 
        return 0;
 }
@@ -161,29 +163,8 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
        *dr7 &= ~__encode_dr7(i, info->len, info->type);
 
        set_debugreg(*dr7, 7);
-}
-
-static int get_hbp_len(u8 hbp_len)
-{
-       unsigned int len_in_bytes = 0;
-
-       switch (hbp_len) {
-       case X86_BREAKPOINT_LEN_1:
-               len_in_bytes = 1;
-               break;
-       case X86_BREAKPOINT_LEN_2:
-               len_in_bytes = 2;
-               break;
-       case X86_BREAKPOINT_LEN_4:
-               len_in_bytes = 4;
-               break;
-#ifdef CONFIG_X86_64
-       case X86_BREAKPOINT_LEN_8:
-               len_in_bytes = 8;
-               break;
-#endif
-       }
-       return len_in_bytes;
+       if (info->mask)
+               set_dr_addr_mask(0, i);
 }
 
 /*
@@ -196,7 +177,7 @@ int arch_check_bp_in_kernelspace(struct perf_event *bp)
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
 
        va = info->address;
-       len = get_hbp_len(info->len);
+       len = bp->attr.bp_len;
 
        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
 }
@@ -277,6 +258,8 @@ static int arch_build_bp_info(struct perf_event *bp)
        }
 
        /* Len */
+       info->mask = 0;
+
        switch (bp->attr.bp_len) {
        case HW_BREAKPOINT_LEN_1:
                info->len = X86_BREAKPOINT_LEN_1;
@@ -293,11 +276,17 @@ static int arch_build_bp_info(struct perf_event *bp)
                break;
 #endif
        default:
-               return -EINVAL;
+               if (!is_power_of_2(bp->attr.bp_len))
+                       return -EINVAL;
+               if (!cpu_has_bpext)
+                       return -EOPNOTSUPP;
+               info->mask = bp->attr.bp_len - 1;
+               info->len = X86_BREAKPOINT_LEN_1;
        }
 
        return 0;
 }
+
 /*
  * Validate the arch-specific HW Breakpoint register settings
  */
@@ -312,11 +301,11 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        if (ret)
                return ret;
 
-       ret = -EINVAL;
-
        switch (info->len) {
        case X86_BREAKPOINT_LEN_1:
                align = 0;
+               if (info->mask)
+                       align = info->mask;
                break;
        case X86_BREAKPOINT_LEN_2:
                align = 1;
@@ -330,7 +319,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
                break;
 #endif
        default:
-               return ret;
+               WARN_ON_ONCE(1);
        }
 
        /*
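For AMD's range breakpoints, any power-of-two bp_len beyond the classic
1/2/4/8 now becomes a DR address mask of len-1 paired with a 1-byte
breakpoint (the real code additionally gates this on cpu_has_bpext and
returns -EOPNOTSUPP without it). A runnable model of just the mask
derivation:

	#include <stdbool.h>
	#include <stdio.h>

	static bool is_power_of_2(unsigned long n)
	{
		return n && !(n & (n - 1));
	}

	/* model of the new default: branch in arch_build_bp_info() */
	static int build_mask(unsigned long bp_len, unsigned long *mask)
	{
		if (!is_power_of_2(bp_len))
			return -1;		/* -EINVAL in the kernel */
		*mask = bp_len - 1;		/* e.g. len 4096 -> mask 0xfff */
		return 0;
	}

	int main(void)
	{
		unsigned long mask;

		if (build_mask(4096, &mask) == 0)
			printf("len 4096 -> mask %#lx\n", mask);
		return 0;
	}
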
index 63ce838e5a5423ad3f425368f1c5adffb8c8356e..28d28f5eb8f49c2a9b8f84997e1dd0dc8841ece0 100644 (file)
@@ -69,16 +69,9 @@ static void call_on_stack(void *func, void *stack)
                     : "memory", "cc", "edx", "ecx", "eax");
 }
 
-/* how to get the current stack pointer from C */
-#define current_stack_pointer ({               \
-       unsigned long sp;                       \
-       asm("mov %%esp,%0" : "=g" (sp));        \
-       sp;                                     \
-})
-
 static inline void *current_stack(void)
 {
-       return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
+       return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
 }
 
 static inline int
@@ -103,7 +96,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 
        /* Save the next esp at the bottom of the stack */
        prev_esp = (u32 *)irqstk;
-       *prev_esp = current_stack_pointer;
+       *prev_esp = current_stack_pointer();
 
        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);
@@ -156,7 +149,7 @@ void do_softirq_own_stack(void)
 
        /* Push the previous esp onto the stack */
        prev_esp = (u32 *)irqstk;
-       *prev_esp = current_stack_pointer;
+       *prev_esp = current_stack_pointer();
 
        call_on_stack(__do_softirq, isp);
 }
index ca9622a25e95a6fa27e668df031dff95a2d8f253..fe3dbfe0c4a5ee9b1fd15aae1f75e6c38952a19d 100644 (file)
@@ -170,7 +170,7 @@ static struct platform_device rtc_device = {
 static __init int add_rtc_cmos(void)
 {
 #ifdef CONFIG_PNP
-       static const char * const  const ids[] __initconst =
+       static const char * const ids[] __initconst =
            { "PNP0b00", "PNP0b01", "PNP0b02", };
            { "PNP0b00", "PNP0b01", "PNP0b02", };
        struct pnp_dev *dev;
        struct pnp_id *id;
index ed37a768d0fc03dae75cda0b3402dd73202b3c96..2a33c8f68319436a1e92f8bf654f6fcaa852764b 100644 (file)
@@ -740,12 +740,6 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 {
        user_exit();
 
-#ifdef CONFIG_X86_MCE
-       /* notify userspace of pending MCEs */
-       if (thread_info_flags & _TIF_MCE_NOTIFY)
-               mce_notify_process();
-#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
-
        if (thread_info_flags & _TIF_UPROBE)
                uprobe_notify_resume(regs);
 
index 6d7022c683e31555967f20edfc18b490576bc10b..febc6aabc72e049443f68c167622d50cd8344f16 100644 (file)
@@ -73,7 +73,6 @@
 #include <asm/setup.h>
 #include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
-#include <asm/smpboot_hooks.h>
 #include <asm/i8259.h>
 #include <asm/realmode.h>
 #include <asm/misc.h>
@@ -104,6 +103,43 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 atomic_t init_deasserted;
 
+static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&rtc_lock, flags);
+       CMOS_WRITE(0xa, 0xf);
+       spin_unlock_irqrestore(&rtc_lock, flags);
+       local_flush_tlb();
+       pr_debug("1.\n");
+       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
+                                                       start_eip >> 4;
+       pr_debug("2.\n");
+       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
+                                                       start_eip & 0xf;
+       pr_debug("3.\n");
+}
+
+static inline void smpboot_restore_warm_reset_vector(void)
+{
+       unsigned long flags;
+
+       /*
+        * Install writable page 0 entry to set BIOS data area.
+        */
+       local_flush_tlb();
+
+       /*
+        * Paranoid:  Set warm reset code and vector here back
+        * to default values.
+        */
+       spin_lock_irqsave(&rtc_lock, flags);
+       CMOS_WRITE(0, 0xf);
+       spin_unlock_irqrestore(&rtc_lock, flags);
+
+       *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
+}
+
 /*
  * Report back to the Boot Processor during boot time or to the caller processor
  * during CPU online.
@@ -136,8 +172,7 @@ static void smp_callin(void)
         * CPU, first the APIC. (this is probably redundant on most
         * boards)
         */
-       setup_local_APIC();
-       end_local_APIC_setup();
+       apic_ap_setup();
 
        /*
         * Need to setup vector mappings before we enable interrupts.
@@ -955,9 +990,12 @@ void arch_disable_smp_support(void)
  */
 static __init void disable_smp(void)
 {
+       pr_info("SMP disabled\n");
+
+       disable_ioapic_support();
+
        init_cpu_present(cpumask_of(0));
        init_cpu_possible(cpumask_of(0));
-       smpboot_clear_io_apic_irqs();
 
        if (smp_found_config)
                physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
@@ -967,6 +1005,13 @@ static __init void disable_smp(void)
        cpumask_set_cpu(0, cpu_core_mask(0));
 }
 
+enum {
+       SMP_OK,
+       SMP_NO_CONFIG,
+       SMP_NO_APIC,
+       SMP_FORCE_UP,
+};
+
 /*
  * Various sanity checks.
  */
@@ -1014,10 +1059,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
        if (!smp_found_config && !acpi_lapic) {
                preempt_enable();
                pr_notice("SMP motherboard not detected\n");
-               disable_smp();
-               if (APIC_init_uniprocessor())
-                       pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
-               return -1;
+               return SMP_NO_CONFIG;
        }
 
        /*
@@ -1041,9 +1083,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
                                boot_cpu_physical_apicid);
                        pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n");
                }
-               smpboot_clear_io_apic();
-               disable_ioapic_support();
-               return -1;
+               return SMP_NO_APIC;
        }
 
        verify_local_APIC();
@@ -1053,15 +1093,10 @@ static int __init smp_sanity_check(unsigned max_cpus)
         */
        if (!max_cpus) {
                pr_info("SMP mode deactivated\n");
-               smpboot_clear_io_apic();
-
-               connect_bsp_APIC();
-               setup_local_APIC();
-               bsp_end_local_APIC_setup();
-               return -1;
+               return SMP_FORCE_UP;
        }
 
-       return 0;
+       return SMP_OK;
 }
 
 static void __init smp_cpu_index_default(void)
@@ -1101,10 +1136,21 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        }
        set_cpu_sibling_map(0);
 
-       if (smp_sanity_check(max_cpus) < 0) {
-               pr_info("SMP disabled\n");
+       switch (smp_sanity_check(max_cpus)) {
+       case SMP_NO_CONFIG:
                disable_smp();
+               if (APIC_init_uniprocessor())
+                       pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
                return;
+       case SMP_NO_APIC:
+               disable_smp();
+               return;
+       case SMP_FORCE_UP:
+               disable_smp();
+               apic_bsp_setup(false);
+               return;
+       case SMP_OK:
+               break;
        }
 
        default_setup_apic_routing();
@@ -1115,33 +1161,10 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
                /* Or can we switch back to PIC here? */
        }
 
-       connect_bsp_APIC();
-
-       /*
-        * Switch from PIC to APIC mode.
-        */
-       setup_local_APIC();
-
-       if (x2apic_mode)
-               cpu0_logical_apicid = apic_read(APIC_LDR);
-       else
-               cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
-
-       /*
-        * Enable IO APIC before setting up error vector
-        */
-       if (!skip_ioapic_setup && nr_ioapics)
-               enable_IO_APIC();
-
-       bsp_end_local_APIC_setup();
-       smpboot_setup_io_apic();
-       /*
-        * Set up local APIC timer on boot CPU.
-        */
+       cpu0_logical_apicid = apic_bsp_setup(false);
 
        pr_info("CPU%d: ", 0);
        print_cpu_info(&cpu_data(0));
-       x86_init.timers.setup_percpu_clockev();
 
        if (is_uv_system())
                uv_system_init();
@@ -1177,9 +1200,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 
        nmi_selftest();
        impress_friends();
-#ifdef CONFIG_X86_IO_APIC
        setup_ioapic_dest();
-#endif
        mtrr_aps_init();
 }
 
index 88900e288021f23a2f22aebf739e25070f456971..c74f2f5652da7ad99f8d53e844c4646088d428e7 100644 (file)
@@ -108,6 +108,88 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
        preempt_count_dec();
 }
 
+enum ctx_state ist_enter(struct pt_regs *regs)
+{
+       enum ctx_state prev_state;
+
+       if (user_mode_vm(regs)) {
+               /* Other than that, we're just an exception. */
+               prev_state = exception_enter();
+       } else {
+               /*
+                * We might have interrupted pretty much anything.  In
+                * fact, if we're a machine check, we can even interrupt
+                * NMI processing.  We don't want in_nmi() to return true,
+                * but we need to notify RCU.
+                */
+               rcu_nmi_enter();
+               prev_state = IN_KERNEL;  /* the value is irrelevant. */
+       }
+
+       /*
+        * We are atomic because we're on the IST stack (or we're on x86_32,
+        * in which case we still shouldn't schedule).
+        *
+        * This must be after exception_enter(), because exception_enter()
+        * won't do anything if in_interrupt() returns true.
+        */
+       preempt_count_add(HARDIRQ_OFFSET);
+
+       /* This code is a bit fragile.  Test it. */
+       rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work");
+
+       return prev_state;
+}
+
+void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
+{
+       /* Must be before exception_exit. */
+       preempt_count_sub(HARDIRQ_OFFSET);
+
+       if (user_mode_vm(regs))
+               return exception_exit(prev_state);
+       else
+               rcu_nmi_exit();
+}
+
+/**
+ * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
+ * @regs:      regs passed to the IST exception handler
+ *
+ * IST exception handlers normally cannot schedule.  As a special
+ * exception, if the exception interrupted userspace code (i.e.
+ * user_mode_vm(regs) would return true) and the exception was not
+ * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
+ * begins a non-atomic section within an ist_enter()/ist_exit() region.
+ * Callers are responsible for enabling interrupts themselves inside
+ * the non-atomic section, and callers must call ist_end_non_atomic()
+ * before ist_exit().
+ */
+void ist_begin_non_atomic(struct pt_regs *regs)
+{
+       BUG_ON(!user_mode_vm(regs));
+
+       /*
+        * Sanity check: we need to be on the normal thread stack.  This
+        * will catch asm bugs and any attempt to use ist_begin_non_atomic()
+        * from double_fault.
+        */
+       BUG_ON(((current_stack_pointer() ^ this_cpu_read_stable(kernel_stack))
+               & ~(THREAD_SIZE - 1)) != 0);
+
+       preempt_count_sub(HARDIRQ_OFFSET);
+}
+
+/**
+ * ist_end_non_atomic() - end a non-atomic section in an IST exception
+ *
+ * Ends a non-atomic section started with ist_begin_non_atomic().
+ */
+void ist_end_non_atomic(void)
+{
+       preempt_count_add(HARDIRQ_OFFSET);
+}
+
 static nokprobe_inline int
 do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
                  struct pt_regs *regs, long error_code)
@@ -251,6 +333,8 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
         * end up promoting it to a doublefault.  In that case, modify
         * the stack to make it look like we just entered the #GP
         * handler from user space, similar to bad_iret.
+        *
+        * No need for ist_enter here because we don't use RCU.
         */
        if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
                regs->cs == __KERNEL_CS &&
@@ -263,12 +347,12 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
                normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
                regs->ip = (unsigned long)general_protection;
                regs->sp = (unsigned long)&normal_regs->orig_ax;
+
                return;
        }
 #endif
 
-       exception_enter();
-       /* Return not checked because double check cannot be ignored */
+       ist_enter(regs);  /* Discard prev_state because we won't return. */
        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 
        tsk->thread.error_code = error_code;
@@ -434,7 +518,7 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
        if (poke_int3_handler(regs))
                return;
 
-       prev_state = exception_enter();
+       prev_state = ist_enter(regs);
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                                SIGTRAP) == NOTIFY_STOP)
@@ -460,33 +544,20 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
        preempt_conditional_cli(regs);
        debug_stack_usage_dec();
 exit:
-       exception_exit(prev_state);
+       ist_exit(regs, prev_state);
 }
 NOKPROBE_SYMBOL(do_int3);
 
 #ifdef CONFIG_X86_64
 /*
- * Help handler running on IST stack to switch back to user stack
- * for scheduling or signal handling. The actual stack switch is done in
- * entry.S
+ * Help handler running on IST stack to switch off the IST stack if the
+ * interrupted code was in user mode. The actual stack switch is done in
+ * entry_64.S
  */
 asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
-       struct pt_regs *regs = eregs;
-       /* Did already sync */
-       if (eregs == (struct pt_regs *)eregs->sp)
-               ;
-       /* Exception from user space */
-       else if (user_mode(eregs))
-               regs = task_pt_regs(current);
-       /*
-        * Exception from kernel and interrupts are enabled. Move to
-        * kernel process stack.
-        */
-       else if (eregs->flags & X86_EFLAGS_IF)
-               regs = (struct pt_regs *)(eregs->sp -= sizeof(struct pt_regs));
-       if (eregs != regs)
-               *regs = *eregs;
+       struct pt_regs *regs = task_pt_regs(current);
+       *regs = *eregs;
        return regs;
 }
 NOKPROBE_SYMBOL(sync_regs);
@@ -554,7 +625,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        unsigned long dr6;
        int si_code;
 
-       prev_state = exception_enter();
+       prev_state = ist_enter(regs);
 
        get_debugreg(dr6, 6);
 
@@ -629,7 +700,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        debug_stack_usage_dec();
 
 exit:
-       exception_exit(prev_state);
+       ist_exit(regs, prev_state);
 }
 NOKPROBE_SYMBOL(do_debug);
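
With the helpers above, an IST exception handler brackets its body with ist_enter()/ist_exit() and may open a schedulable window only via ist_begin_non_atomic()/ist_end_non_atomic(), and only when it interrupted userspace. A sketch of the intended calling pattern (the handler itself is hypothetical):

dotraplinkage void do_example_ist(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state = ist_enter(regs);

	/* ... atomic part of the handler ... */

	if (user_mode_vm(regs)) {
		/* Schedulable work is only safe from userspace. */
		ist_begin_non_atomic(regs);
		local_irq_enable();
		/* ... work that may sleep, e.g. sending a signal ... */
		local_irq_disable();
		ist_end_non_atomic();
	}

	ist_exit(regs, prev_state);
}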
 
index f9d16ff56c6b18df942da02b08aa6fc20fbc6068..7dc7ba577ecded7fcc2b8f77b480200f68b6433a 100644 (file)
@@ -40,6 +40,7 @@ config KVM
        select HAVE_KVM_MSI
        select HAVE_KVM_CPU_RELAX_INTERCEPT
        select KVM_VFIO
+       select SRCU
        ---help---
          Support hosting fully virtualized guest machines using hardware
          virtualization extensions.  You will need a fairly recent
index 4f0c0b954686cbf5e980f761b5b9bd4f7bc2df9a..d52dcf0776ea930df81ded94ed22af0b9d11e48b 100644 (file)
@@ -192,6 +192,9 @@ static void recalculate_apic_map(struct kvm *kvm)
                u16 cid, lid;
                u32 ldr, aid;
 
+               if (!kvm_apic_present(vcpu))
+                       continue;
+
                aid = kvm_apic_id(apic);
                ldr = kvm_apic_get_reg(apic, APIC_LDR);
                cid = apic_cluster_id(new, ldr);
index 38dcec403b46ae5b33c5967e6740faa8378e1380..e3ff27a5b6348ffb2dcff6f592abafe48b6b6396 100644 (file)
@@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
                if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
                             VM_FAULT_HWPOISON_LARGE))
                        do_sigbus(regs, error_code, address, fault);
+               else if (fault & VM_FAULT_SIGSEGV)
+                       bad_area_nosemaphore(regs, error_code, address);
                else
                        BUG();
        }
index 7b20bccf3648dfb0fcb534f0290c023e12a44f9a..2fb384724ebb52d1cf0ba6131b418e81609ca55c 100644 (file)
@@ -448,6 +448,22 @@ static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "ftServer"),
                },
        },
+       {
+               .callback = set_scan_all,
+               .ident = "Stratus/NEC ftServer",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R32"),
+               },
+       },
+       {
+               .callback = set_scan_all,
+               .ident = "Stratus/NEC ftServer",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Express5800/R31"),
+               },
+       },
        {}
 };
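
Entries like the two added above are walked by dmi_check_system(), which invokes an entry's callback once all of its DMI_MATCH fields match the firmware-provided strings. A hedged sketch of that flow; the callback body is an assumption about set_scan_all, not quoted from the tree:

/* Run once during early PCI setup: */
dmi_check_system(pciprobe_dmi_table);

/* Likely shape of the callback named in the table: */
static int set_scan_all(const struct dmi_system_id *d)
{
	printk(KERN_INFO "PCI: %s detected, enabling pci=pcie_scan_all\n",
	       d->ident);
	pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
	return 0;
}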
 
index 44b9271580b5b0532bddf121af554cc0ec951779..852aa4c92da027cb07fb64c77c855aaf0877a1da 100644 (file)
@@ -293,7 +293,6 @@ static void mrst_power_off_unused_dev(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x080C, mrst_power_off_unused_dev);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0812, mrst_power_off_unused_dev);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0815, mrst_power_off_unused_dev);
 
 /*
index 5a4affe025e81e39df53f40b750c2234baf52844..09297c8e1fcd3901496d9001e7eb0f7097c622f7 100644 (file)
@@ -205,4 +205,4 @@ $(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
 PHONY += vdso_install $(vdso_img_insttargets)
 vdso_install: $(vdso_img_insttargets) FORCE
 
-clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80*
+clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80* vdso64*
index b57c4f91f487efdc6b2f3f44fe43cfb12e9a7e0c..9e3571a6535c3b1bbc8535195ee40405fe9c42c0 100644 (file)
@@ -117,6 +117,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 6774a0e698675927be5c78dc34b0087d873b5ebe..1630a20d5dcfa550ebe9c8815927d51b70bd9d56 100644 (file)
 
 static void blk_mq_sysfs_release(struct kobject *kobj)
 {
-       struct request_queue *q;
-
-       q = container_of(kobj, struct request_queue, mq_kobj);
-       free_percpu(q->queue_ctx);
-}
-
-static void blk_mq_ctx_release(struct kobject *kobj)
-{
-       struct blk_mq_ctx *ctx;
-
-       ctx = container_of(kobj, struct blk_mq_ctx, kobj);
-       kobject_put(&ctx->queue->mq_kobj);
-}
-
-static void blk_mq_hctx_release(struct kobject *kobj)
-{
-       struct blk_mq_hw_ctx *hctx;
-
-       hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
-       kfree(hctx);
 }
 
 struct blk_mq_ctx_sysfs_entry {
@@ -338,13 +318,13 @@ static struct kobj_type blk_mq_ktype = {
 static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
-       .release        = blk_mq_ctx_release,
+       .release        = blk_mq_sysfs_release,
 };
 
 static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
-       .release        = blk_mq_hctx_release,
+       .release        = blk_mq_sysfs_release,
 };
 
 static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -375,7 +355,6 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
                return ret;
 
        hctx_for_each_ctx(hctx, ctx, i) {
-               kobject_get(&q->mq_kobj);
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
index 9ee3b87c44984d336dbd4c82572fd3a4c3d35e90..2390c5541e71fb09c3353004224d76b4c81bddc8 100644 (file)
@@ -1867,6 +1867,27 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
        mutex_unlock(&set->tag_list_lock);
 }
 
+/*
+ * This is the actual release handler for mq, but we do it from the
+ * request queue's release handler to avoid use-after-free, and the
+ * headache exists because q->mq_kobj shouldn't have been introduced,
+ * but we can't group the ctx/hctx kobjects without it.
+ */
+void blk_mq_release(struct request_queue *q)
+{
+       struct blk_mq_hw_ctx *hctx;
+       unsigned int i;
+
+       /* hctx kobj stays in hctx */
+       queue_for_each_hw_ctx(q, hctx, i)
+               kfree(hctx);
+
+       kfree(q->queue_hw_ctx);
+
+       /* ctx kobj stays in queue_ctx */
+       free_percpu(q->queue_ctx);
+}
+
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
        struct blk_mq_hw_ctx **hctxs;
@@ -2000,10 +2021,8 @@ void blk_mq_free_queue(struct request_queue *q)
 
        percpu_ref_exit(&q->mq_usage_counter);
 
-       kfree(q->queue_hw_ctx);
        kfree(q->mq_map);
 
-       q->queue_hw_ctx = NULL;
        q->mq_map = NULL;
 
        mutex_lock(&all_q_mutex);
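
blk_mq_release() now concentrates the frees that the per-kobject release handlers used to do, and runs from the request queue's own release path. For contrast, the usual kobject release shape frees the containing object from the kobject callback itself; a minimal sketch with a hypothetical struct:

struct example {
	int payload;
	struct kobject kobj;
};

/* Invoked by kobject_put() when the last reference drops. */
static void example_release(struct kobject *kobj)
{
	struct example *e = container_of(kobj, struct example, kobj);

	kfree(e);
}

static struct kobj_type example_ktype = {
	.release = example_release,
};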
index 4f4f943c22c3d1e907ef2c18224b8635f0057e82..6a48c4c0d8a2a6efb881ea29b772df3bba9d5540 100644 (file)
@@ -62,6 +62,8 @@ extern void blk_mq_sysfs_unregister(struct request_queue *q);
 
 extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
 
+void blk_mq_release(struct request_queue *q);
+
 /*
  * Basic implementation of sparser bitmap, allowing the user to spread
  * the bits over more cachelines.
index 935ea2aa0730289a6de653aa28a53a4132ef5368..faaf36ade7ebdc2fdd363f174978bfb5683a4f9a 100644 (file)
@@ -517,6 +517,8 @@ static void blk_release_queue(struct kobject *kobj)
 
        if (!q->mq_ops)
                blk_free_flush_queue(q->fq);
+       else
+               blk_mq_release(q);
 
        blk_trace_shutdown(q);
 
index 694d5a70d6ce16d195301aa634c5ace2b1a81ec2..c70d6e45dc1029a8be0422bd21ca979b97e0365e 100644 (file)
@@ -134,8 +134,6 @@ source "drivers/staging/Kconfig"
 
 source "drivers/platform/Kconfig"
 
-source "drivers/soc/Kconfig"
-
 source "drivers/clk/Kconfig"
 
 source "drivers/hwspinlock/Kconfig"
index 4f3febf8a58954b2ca8ced8c057106529ab1f30b..e75737fd7eefbc80de3dc8731fcbec94ab321f89 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * ACPI support for Intel Lynxpoint LPSS.
  *
- * Copyright (C) 2013, 2014, Intel Corporation
+ * Copyright (C) 2013, Intel Corporation
  * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
  *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *
@@ -60,8 +60,6 @@ ACPI_MODULE_NAME("acpi_lpss");
 #define LPSS_CLK_DIVIDER               BIT(2)
 #define LPSS_LTR                       BIT(3)
 #define LPSS_SAVE_CTX                  BIT(4)
-#define LPSS_DEV_PROXY                 BIT(5)
-#define LPSS_PROXY_REQ                 BIT(6)
 
 struct lpss_private_data;
 
@@ -72,10 +70,8 @@ struct lpss_device_desc {
        void (*setup)(struct lpss_private_data *pdata);
 };
 
-static struct device *proxy_device;
-
 static struct lpss_device_desc lpss_dma_desc = {
-       .flags = LPSS_CLK | LPSS_PROXY_REQ,
+       .flags = LPSS_CLK,
 };
 
 struct lpss_private_data {
@@ -150,24 +146,22 @@ static struct lpss_device_desc byt_pwm_dev_desc = {
 };
 
 static struct lpss_device_desc byt_uart_dev_desc = {
-       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
-                LPSS_DEV_PROXY,
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
        .prv_offset = 0x800,
        .setup = lpss_uart_setup,
 };
 
 static struct lpss_device_desc byt_spi_dev_desc = {
-       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX |
-                LPSS_DEV_PROXY,
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
        .prv_offset = 0x400,
 };
 
 static struct lpss_device_desc byt_sdio_dev_desc = {
-       .flags = LPSS_CLK | LPSS_DEV_PROXY,
+       .flags = LPSS_CLK,
 };
 
 static struct lpss_device_desc byt_i2c_dev_desc = {
-       .flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_DEV_PROXY,
+       .flags = LPSS_CLK | LPSS_SAVE_CTX,
        .prv_offset = 0x800,
        .setup = byt_i2c_setup,
 };
@@ -374,8 +368,6 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
        adev->driver_data = pdata;
        pdev = acpi_create_platform_device(adev);
        if (!IS_ERR_OR_NULL(pdev)) {
-               if (!proxy_device && dev_desc->flags & LPSS_DEV_PROXY)
-                       proxy_device = &pdev->dev;
                return 1;
        }
 
@@ -600,14 +592,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
                acpi_lpss_save_ctx(dev, pdata);
 
-       ret = acpi_dev_runtime_suspend(dev);
-       if (ret)
-               return ret;
-
-       if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device)
-               return pm_runtime_put_sync_suspend(proxy_device);
-
-       return 0;
+       return acpi_dev_runtime_suspend(dev);
 }
 
 static int acpi_lpss_runtime_resume(struct device *dev)
@@ -615,12 +600,6 @@ static int acpi_lpss_runtime_resume(struct device *dev)
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;
 
-       if (pdata->dev_desc->flags & LPSS_PROXY_REQ && proxy_device) {
-               ret = pm_runtime_get_sync(proxy_device);
-               if (ret)
-                       return ret;
-       }
-
        ret = acpi_dev_runtime_resume(dev);
        if (ret)
                return ret;
index 0da5865df5b1b4f092fef5652814e7cc96b8993e..beb8b27d4621a6d9f839065c1296fa8ab67f3032 100644 (file)
@@ -51,9 +51,11 @@ struct regmap_async {
 struct regmap {
        union {
                struct mutex mutex;
-               spinlock_t spinlock;
+               struct {
+                       spinlock_t spinlock;
+                       unsigned long spinlock_flags;
+               };
        };
-       unsigned long spinlock_flags;
        regmap_lock lock;
        regmap_unlock unlock;
        void *lock_arg; /* This is passed to lock/unlock functions */
@@ -233,6 +235,10 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
 void regmap_async_complete_cb(struct regmap_async *async, int ret);
 
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+                                        const struct regmap_bus *bus,
+                                        const struct regmap_config *config);
+
 extern struct regcache_ops regcache_rbtree_ops;
 extern struct regcache_ops regcache_lzo_ops;
 extern struct regcache_ops regcache_flat_ops;
index e4c45d2299c167c65d9542f395f6f8be50b240d6..8d304e2a943d3c62776267534a13483375d3a200 100644 (file)
@@ -74,8 +74,8 @@ static int regmap_ac97_reg_write(void *context, unsigned int reg,
 }
 
 static const struct regmap_bus ac97_regmap_bus = {
-               .reg_write = regmap_ac97_reg_write,
-               .reg_read = regmap_ac97_reg_read,
+       .reg_write = regmap_ac97_reg_write,
+       .reg_read = regmap_ac97_reg_read,
 };
 
 /**
index 053150a7f9f27ca5dd70e1e341e95b3a4d7d338f..4b76e33110a2d1adb14e661e290f2fddccfe6b42 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 
+#include "internal.h"
 
 static int regmap_smbus_byte_reg_read(void *context, unsigned int reg,
                                      unsigned int *val)
@@ -87,6 +88,42 @@ static struct regmap_bus regmap_smbus_word = {
        .reg_read = regmap_smbus_word_reg_read,
 };
 
+static int regmap_smbus_word_read_swapped(void *context, unsigned int reg,
+                                         unsigned int *val)
+{
+       struct device *dev = context;
+       struct i2c_client *i2c = to_i2c_client(dev);
+       int ret;
+
+       if (reg > 0xff)
+               return -EINVAL;
+
+       ret = i2c_smbus_read_word_swapped(i2c, reg);
+       if (ret < 0)
+               return ret;
+
+       *val = ret;
+
+       return 0;
+}
+
+static int regmap_smbus_word_write_swapped(void *context, unsigned int reg,
+                                          unsigned int val)
+{
+       struct device *dev = context;
+       struct i2c_client *i2c = to_i2c_client(dev);
+
+       if (val > 0xffff || reg > 0xff)
+               return -EINVAL;
+
+       return i2c_smbus_write_word_swapped(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_smbus_word_swapped = {
+       .reg_write = regmap_smbus_word_write_swapped,
+       .reg_read = regmap_smbus_word_read_swapped,
+};
+
 static int regmap_i2c_write(void *context, const void *data, size_t count)
 {
        struct device *dev = context;
@@ -180,7 +217,14 @@ static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
        else if (config->val_bits == 16 && config->reg_bits == 8 &&
                 i2c_check_functionality(i2c->adapter,
                                         I2C_FUNC_SMBUS_WORD_DATA))
-               return &regmap_smbus_word;
+               switch (regmap_get_val_endian(&i2c->dev, NULL, config)) {
+               case REGMAP_ENDIAN_LITTLE:
+                       return &regmap_smbus_word;
+               case REGMAP_ENDIAN_BIG:
+                       return &regmap_smbus_word_swapped;
+               default:                /* everything else is not supported */
+                       break;
+               }
        else if (config->val_bits == 8 && config->reg_bits == 8 &&
                 i2c_check_functionality(i2c->adapter,
                                         I2C_FUNC_SMBUS_BYTE_DATA))
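
With the swapped word bus above, an I2C/SMBus driver for a big-endian 16-bit device selects it simply by declaring the value endianness in its regmap config. A hedged usage sketch (names hypothetical, probe signature abridged):

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 16,
	/* Steers regmap_get_i2c_bus() to regmap_smbus_word_swapped. */
	.val_format_endian = REGMAP_ENDIAN_BIG,
};

static int example_probe(struct i2c_client *i2c)
{
	struct regmap *map = devm_regmap_init_i2c(i2c, &example_regmap_config);

	return PTR_ERR_OR_ZERO(map);
}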
index d2f8a818d20068af51a2d30d74d37e7ebea77db2..f99b098ddabfbd23dae3ab3ee7733b9ff24e28ef 100644 (file)
@@ -473,9 +473,9 @@ static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
        return REGMAP_ENDIAN_BIG;
 }
 
-static enum regmap_endian regmap_get_val_endian(struct device *dev,
-                                       const struct regmap_bus *bus,
-                                       const struct regmap_config *config)
+enum regmap_endian regmap_get_val_endian(struct device *dev,
+                                        const struct regmap_bus *bus,
+                                        const struct regmap_config *config)
 {
        struct device_node *np;
        enum regmap_endian endian;
@@ -513,6 +513,7 @@ static enum regmap_endian regmap_get_val_endian(struct device *dev,
        /* Use this if no other value was found */
        return REGMAP_ENDIAN_BIG;
 }
+EXPORT_SYMBOL_GPL(regmap_get_val_endian);
 
 /**
  * regmap_init(): Initialise register map
index 3ec85dfce12496dd64a8ad2c37984ad61f489e25..8a86b62466f7ce72b54853b283e03fd495df8083 100644 (file)
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
  * If an image has a non-zero parent overlap, get a reference to its
  * parent.
  *
- * We must get the reference before checking for the overlap to
- * coordinate properly with zeroing the parent overlap in
- * rbd_dev_v2_parent_info() when an image gets flattened.  We
- * drop it again if there is no overlap.
- *
  * Returns true if the rbd device has a parent with a non-zero
  * overlap and a reference for it was successfully taken, or
  * false otherwise.
  */
 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
 {
-       int counter;
+       int counter = 0;
 
        if (!rbd_dev->parent_spec)
                return false;
 
-       counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
-       if (counter > 0 && rbd_dev->parent_overlap)
-               return true;
-
-       /* Image was flattened, but parent is not yet torn down */
+       down_read(&rbd_dev->header_rwsem);
+       if (rbd_dev->parent_overlap)
+               counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+       up_read(&rbd_dev->header_rwsem);
 
        if (counter < 0)
                rbd_warn(rbd_dev, "parent reference overflow");
 
-       return false;
+       return counter > 0;
 }
 
 /*
@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
                 */
                if (rbd_dev->parent_overlap) {
                        rbd_dev->parent_overlap = 0;
-                       smp_mb();
                        rbd_dev_parent_put(rbd_dev);
                        pr_info("%s: clone image has been flattened\n",
                                rbd_dev->disk->disk_name);
@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
         * treat it specially.
         */
        rbd_dev->parent_overlap = overlap;
-       smp_mb();
        if (!overlap) {
 
                /* A null parent_spec indicates it's the initial probe */
@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
 {
        struct rbd_image_header *header;
 
-       /* Drop parent reference unless it's already been done (or none) */
-
-       if (rbd_dev->parent_overlap)
-               rbd_dev_parent_put(rbd_dev);
+       rbd_dev_parent_put(rbd_dev);
 
        /* Free dynamic fields from the header, then zero it out */
 
index 04645c09fe5e5eee7f6699c451e9e4f6f8368958..9cd6968e2f924bf7eb5c545c4298445651d5665d 100644 (file)
@@ -569,19 +569,19 @@ static void fast_mix(struct fast_pool *f)
        __u32 c = f->pool[2],   d = f->pool[3];
 
        a += b;                 c += d;
-       b = rol32(a, 6);        d = rol32(c, 27);
+       b = rol32(b, 6);        d = rol32(d, 27);
        d ^= a;                 b ^= c;
 
        a += b;                 c += d;
-       b = rol32(a, 16);       d = rol32(c, 14);
+       b = rol32(b, 16);       d = rol32(d, 14);
        d ^= a;                 b ^= c;
 
        a += b;                 c += d;
-       b = rol32(a, 6);        d = rol32(c, 27);
+       b = rol32(b, 6);        d = rol32(d, 27);
        d ^= a;                 b ^= c;
 
        a += b;                 c += d;
-       b = rol32(a, 16);       d = rol32(c, 14);
+       b = rol32(b, 16);       d = rol32(d, 14);
        d ^= a;                 b ^= c;
 
        f->pool[0] = a;  f->pool[1] = b;
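
The fix rotates b and d instead of overwriting them with rotations of a and c, so all four pool words retain state between rounds. For reference, rol32() is a plain 32-bit left rotation; a sketch (the in-tree helper may omit the shift mask):

static inline u32 rol32(u32 word, unsigned int shift)
{
	/* The mask keeps shift == 0 well-defined. */
	return (word << shift) | (word >> ((32 - shift) & 31));
}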
index 3f44f292d066f03c2bd3029f4631d3a5183a70de..91f86131bb7aa62b0c4632e2defa1f8b2f4e6abc 100644 (file)
@@ -13,6 +13,7 @@ config COMMON_CLK
        bool
        select HAVE_CLK_PREPARE
        select CLKDEV_LOOKUP
+       select SRCU
        ---help---
          The common clock framework is a single definition of struct
          clk, useful across many platforms, as well as an
index 29b2ef5a68b9318b3791c37e8e1b94c12b6553d6..a171fef2c2b66d0732e01f349f9fa13b01f8af2e 100644 (file)
@@ -2,6 +2,7 @@ menu "CPU Frequency scaling"
 
 config CPU_FREQ
        bool "CPU Frequency scaling"
+       select SRCU
        help
          CPU Frequency scaling allows you to change the clock speed of 
          CPUs on the fly. This is a nice method to save power, because 
index faf4e70c42e0467f072cde73d6cc972ff27509ae..3891f6781298c39aee61e68010bf6f42396def1f 100644 (file)
@@ -1,5 +1,6 @@
 menuconfig PM_DEVFREQ
        bool "Generic Dynamic Voltage and Frequency Scaling (DVFS) support"
+       select SRCU
        help
          A device may have a list of frequencies and voltages available.
          devfreq, a generic DVFS framework can be registered for a device
index da9c316059bc876ba459832ca7ce01513c389897..eea5d7e578c994bd28b04271837d06fe3fee3d69 100644 (file)
@@ -801,9 +801,11 @@ static int mcp230xx_probe(struct i2c_client *client,
                client->irq = irq_of_parse_and_map(client->dev.of_node, 0);
        } else {
                pdata = dev_get_platdata(&client->dev);
-               if (!pdata || !gpio_is_valid(pdata->base)) {
-                       dev_dbg(&client->dev, "invalid platform data\n");
-                       return -EINVAL;
+               if (!pdata) {
+                       pdata = devm_kzalloc(&client->dev,
+                                       sizeof(struct mcp23s08_platform_data),
+                                       GFP_KERNEL);
+                       pdata->base = -1;
                }
        }
 
@@ -924,10 +926,11 @@ static int mcp23s08_probe(struct spi_device *spi)
        } else {
                type = spi_get_device_id(spi)->driver_data;
                pdata = dev_get_platdata(&spi->dev);
-               if (!pdata || !gpio_is_valid(pdata->base)) {
-                       dev_dbg(&spi->dev,
-                                       "invalid or missing platform data\n");
-                       return -EINVAL;
+               if (!pdata) {
+                       pdata = devm_kzalloc(&spi->dev,
+                                       sizeof(struct mcp23s08_platform_data),
+                                       GFP_KERNEL);
+                       pdata->base = -1;
                }
 
                for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) {
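
Both fallbacks above dereference the devm_kzalloc() result without checking it. A defensive variant of the same fallback might read (a sketch, not the committed code):

pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
	return -ENOMEM;
/* No base requested: let gpiolib assign one dynamically. */
pdata->base = -1;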
index 30646cfe0efa91e2378fa0e5871dec14cd7ece7d..f476ae2eb0b3c8610e54377cf7e3010079e916bf 100644 (file)
@@ -88,6 +88,8 @@ struct gpio_bank {
 #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
 #define LINE_USED(line, offset) (line & (BIT(offset)))
 
+static void omap_gpio_unmask_irq(struct irq_data *d);
+
 static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
 {
        return bank->chip.base + gpio_irq;
@@ -477,6 +479,16 @@ static int omap_gpio_is_input(struct gpio_bank *bank, int mask)
        return readl_relaxed(reg) & mask;
 }
 
+static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned gpio,
+                              unsigned offset)
+{
+       if (!LINE_USED(bank->mod_usage, offset)) {
+               omap_enable_gpio_module(bank, offset);
+               omap_set_gpio_direction(bank, offset, 1);
+       }
+       bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
+}
+
 static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
 {
        struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -506,15 +518,11 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
        spin_lock_irqsave(&bank->lock, flags);
        offset = GPIO_INDEX(bank, gpio);
        retval = omap_set_gpio_triggering(bank, offset, type);
-       if (!LINE_USED(bank->mod_usage, offset)) {
-               omap_enable_gpio_module(bank, offset);
-               omap_set_gpio_direction(bank, offset, 1);
-       } else if (!omap_gpio_is_input(bank, BIT(offset))) {
+       omap_gpio_init_irq(bank, gpio, offset);
+       if (!omap_gpio_is_input(bank, BIT(offset))) {
                spin_unlock_irqrestore(&bank->lock, flags);
                return -EINVAL;
        }
-
-       bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
        spin_unlock_irqrestore(&bank->lock, flags);
 
        if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -792,6 +800,24 @@ exit:
        pm_runtime_put(bank->dev);
 }
 
+static unsigned int omap_gpio_irq_startup(struct irq_data *d)
+{
+       struct gpio_bank *bank = omap_irq_data_get_bank(d);
+       unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
+       unsigned long flags;
+       unsigned offset = GPIO_INDEX(bank, gpio);
+
+       if (!BANK_USED(bank))
+               pm_runtime_get_sync(bank->dev);
+
+       spin_lock_irqsave(&bank->lock, flags);
+       omap_gpio_init_irq(bank, gpio, offset);
+       spin_unlock_irqrestore(&bank->lock, flags);
+       omap_gpio_unmask_irq(d);
+
+       return 0;
+}
+
 static void omap_gpio_irq_shutdown(struct irq_data *d)
 {
        struct gpio_bank *bank = omap_irq_data_get_bank(d);
@@ -1181,6 +1207,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
        if (!irqc)
                return -ENOMEM;
 
+       irqc->irq_startup = omap_gpio_irq_startup,
        irqc->irq_shutdown = omap_gpio_irq_shutdown,
        irqc->irq_ack = omap_gpio_ack_irq,
        irqc->irq_mask = omap_gpio_mask_irq,
index f62aa115d79ab4f9fe7e249dbb146dfdde44153e..7722ed53bd651faae15692621d099551ef9bf308 100644 (file)
@@ -648,6 +648,7 @@ int gpiod_export_link(struct device *dev, const char *name,
                if (tdev != NULL) {
                        status = sysfs_create_link(&dev->kobj, &tdev->kobj,
                                                name);
+                       put_device(tdev);
                } else {
                        status = -ENODEV;
                }
@@ -695,7 +696,7 @@ int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
        }
 
        status = sysfs_set_active_low(desc, dev, value);
-
+       put_device(dev);
 unlock:
        mutex_unlock(&sysfs_lock);
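
Both hunks balance a device reference taken by an earlier lookup. The shape of the first fix, assuming the link target comes from a class_find_device()-style lookup (the match callback is hypothetical):

struct device *tdev = class_find_device(&gpio_class, NULL, target,
					match_export);
if (tdev != NULL) {
	status = sysfs_create_link(&dev->kobj, &tdev->kobj, name);
	put_device(tdev);	/* drop the reference the lookup took */
} else {
	status = -ENODEV;
}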
 
index 0d8694f015c1a58a829da1fef863c99a3fba47f5..0fd592799d58dc6fdbd6c37bb5e826ed0f89cf9a 100644 (file)
@@ -822,7 +822,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
         * Unconditionally decrement this counter, regardless of the queue's
         * type.
         */
-       dqm->total_queue_count++;
+       dqm->total_queue_count--;
        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);
        mutex_unlock(&dqm->lock);
index a8be6df8534753fbaed2103a57a3f054838bc29a..1c385c23dd0b8e2ad4155972d53bc9fe8195d03a 100644 (file)
@@ -95,10 +95,10 @@ static int __init kfd_module_init(void)
        }
 
        /* Verify module parameters */
-       if ((max_num_of_queues_per_device < 0) ||
+       if ((max_num_of_queues_per_device < 1) ||
                (max_num_of_queues_per_device >
                        KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
-               pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
+               pr_err("kfd: max_num_of_queues_per_device must be between 1 and KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
                return -1;
        }
 
index f37cf5efe642ca23b42fc45de8ca0c69617d9937..2fda1927bff794e7ff626169277d28aff8d1acba 100644 (file)
@@ -315,7 +315,11 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
        BUG_ON(!pqm);
 
        pqn = get_queue_by_qid(pqm, qid);
-       BUG_ON(!pqn);
+       if (!pqn) {
+               pr_debug("amdkfd: No queue %d exists for update operation\n",
+                               qid);
+               return -EFAULT;
+       }
 
        pqn->q->properties.queue_address = p->queue_address;
        pqn->q->properties.queue_size = p->queue_size;
index c2a1cba1e984546d63f033a4cf4b07ff3279bb16..b9140032962d943e658a9bc18af8be8418bc479f 100644 (file)
 #include "cirrus_drv.h"
 
 int cirrus_modeset = -1;
+int cirrus_bpp = 24;
 
 MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
 module_param_named(modeset, cirrus_modeset, int, 0400);
+MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)");
+module_param_named(bpp, cirrus_bpp, int, 0400);
 
 /*
  * This is the generic driver code. This binds the driver to the drm core,
index 693a4565c4ffb2a0629d48ec206ba3bb4accf830..705061537a27694c3834374a22bb297510a041be 100644 (file)
@@ -262,4 +262,7 @@ static inline void cirrus_bo_unreserve(struct cirrus_bo *bo)
 
 int cirrus_bo_push_sysram(struct cirrus_bo *bo);
 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr);
+
+extern int cirrus_bpp;
+
 #endif                         /* __CIRRUS_DRV_H__ */
index 4c2d68e9102d6304b8fd5cc655266300706f32da..e4b976658087100304cd76f66330c16b9dcb7271 100644 (file)
@@ -320,6 +320,8 @@ bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
        const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
        const int max_size = cdev->mc.vram_size;
 
+       if (bpp > cirrus_bpp)
+               return false;
        if (bpp > 32)
                return false;
 
index 99d4a74ffeaffd2582ca78353ae2156a90c796f5..61385f2298bf752eb87b1645582d62d47719f3be 100644 (file)
@@ -501,8 +501,13 @@ static int cirrus_vga_get_modes(struct drm_connector *connector)
        int count;
 
        /* Just add a static list of modes */
-       count = drm_add_modes_noedid(connector, 1280, 1024);
-       drm_set_preferred_mode(connector, 1024, 768);
+       if (cirrus_bpp <= 24) {
+               count = drm_add_modes_noedid(connector, 1280, 1024);
+               drm_set_preferred_mode(connector, 1024, 768);
+       } else {
+               count = drm_add_modes_noedid(connector, 800, 600);
+               drm_set_preferred_mode(connector, 800, 600);
+       }
        return count;
 }
 
index cf775a4449c1a7b7f6e13bc5a050ea6117ff779c..dc386ebe5193891e6780c7eba47951f3f3fd3e42 100644 (file)
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
 }
 EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
 
+static void remove_from_modeset(struct drm_mode_set *set,
+               struct drm_connector *connector)
+{
+       int i, j;
+
+       for (i = 0; i < set->num_connectors; i++) {
+               if (set->connectors[i] == connector)
+                       break;
+       }
+
+       if (i == set->num_connectors)
+               return;
+
+       for (j = i + 1; j < set->num_connectors; j++) {
+               set->connectors[j - 1] = set->connectors[j];
+       }
+       set->num_connectors--;
+
+       /* because i915 is pissy about this..
+        * TODO: maybe we need to make sure we set it back to !=NULL somewhere?
+        */
+       if (set->num_connectors == 0)
+               set->fb = NULL;
+}
+
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
                                       struct drm_connector *connector)
 {
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
        }
        fb_helper->connector_count--;
        kfree(fb_helper_connector);
+
+       /* also cleanup dangling references to the connector: */
+       for (i = 0; i < fb_helper->crtc_count; i++)
+               remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
+
        return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
index 574057cd1d0986b6bc96819f67c72ba3f5280891..7643300828c3aef79d32260af5f2c8e4c7ff5b2c 100644 (file)
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-                               WARN_ON(!IS_HASWELL(dev));
-                               WARN_ON(IS_HSW_ULT(dev));
-                       } else if (IS_BROADWELL(dev)) {
-                               dev_priv->pch_type = PCH_LPT;
-                               dev_priv->pch_id =
-                                       INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
-                               DRM_DEBUG_KMS("This is Broadwell, assuming "
-                                             "LynxPoint LP PCH\n");
+                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+                               WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
                        } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-                               WARN_ON(!IS_HASWELL(dev));
-                               WARN_ON(!IS_HSW_ULT(dev));
+                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+                               WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
                        } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
index e9f891c432f837b8693df84158b0b2cdf715972e..9d7a7155bf02a6f9fb44d504e69e635fefcc9c2e 100644 (file)
@@ -2159,8 +2159,7 @@ struct drm_i915_cmd_table {
 #define IS_HSW_EARLY_SDV(dev)  (IS_HASWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev)                (IS_BROADWELL(dev) && \
-                                ((INTEL_DEVID(dev) & 0xf) == 0x2  || \
-                                (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+                                ((INTEL_DEVID(dev) & 0xf) == 0x6 ||    \
                                 (INTEL_DEVID(dev) & 0xf) == 0xe))
 #define IS_BDW_GT3(dev)                (IS_BROADWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
index 76354d3ba925795e0377e197e48a42cbe538b93f..5f614828d365555f70005aff470117ab15b3ae12 100644 (file)
@@ -3148,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
                u32 size = i915_gem_obj_ggtt_size(obj);
                uint64_t val;
 
+               /* Adjust fence size to match tiled area */
+               if (obj->tiling_mode != I915_TILING_NONE) {
+                       uint32_t row_size = obj->stride *
+                               (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+                       size = (size / row_size) * row_size;
+               }
+
                val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
                                 0xfffff000) << 32;
                val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
@@ -4884,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev)
        for (i = 0; i < NUM_L3_SLICES(dev); i++)
                i915_gem_l3_remap(&dev_priv->ring[RCS], i);
 
-       /*
-        * XXX: Contexts should only be initialized once. Doing a switch to the
-        * default context switch however is something we'd like to do after
-        * reset or thaw (the latter may not actually be necessary for HW, but
-        * goes with our code better). Context switching requires rings (for
-        * the do_switch), but before enabling PPGTT. So don't move this.
-        */
-       ret = i915_gem_context_enable(dev_priv);
+       ret = i915_ppgtt_init_hw(dev);
        if (ret && ret != -EIO) {
-               DRM_ERROR("Context enable failed %d\n", ret);
+               DRM_ERROR("PPGTT enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);
-
-               return ret;
        }
 
-       ret = i915_ppgtt_init_hw(dev);
+       ret = i915_gem_context_enable(dev_priv);
        if (ret && ret != -EIO) {
-               DRM_ERROR("PPGTT enable failed %d\n", ret);
+               DRM_ERROR("Context enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);
+
+               return ret;
        }
 
        return ret;
index 4d63839bd9b4c53be99842c38188331f8e7817c7..dfb783a8f2c36e05bc08abfe21a1272c2903a69f 100644 (file)
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
 
        WARN_ON(panel->backlight.max == 0);
 
-       if (panel->backlight.level == 0) {
+       if (panel->backlight.level <= panel->backlight.min) {
                panel->backlight.level = panel->backlight.max;
                if (panel->backlight.device)
                        panel->backlight.device->props.brightness =
index 9e7f23dd14bd5992d73b72ddec32d084ee906aed..87d5fb21cb61cc8709e4f685fa6c3a24ecbb9e9e 100644 (file)
@@ -34,7 +34,8 @@
 
 static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
                                    uint64_t saddr, uint64_t daddr,
-                                   int flag, int n)
+                                   int flag, int n,
+                                   struct reservation_object *resv)
 {
        unsigned long start_jiffies;
        unsigned long end_jiffies;
@@ -47,12 +48,12 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
                case RADEON_BENCHMARK_COPY_DMA:
                        fence = radeon_copy_dma(rdev, saddr, daddr,
                                                size / RADEON_GPU_PAGE_SIZE,
-                                               NULL);
+                                               resv);
                        break;
                case RADEON_BENCHMARK_COPY_BLIT:
                        fence = radeon_copy_blit(rdev, saddr, daddr,
                                                 size / RADEON_GPU_PAGE_SIZE,
-                                                NULL);
+                                                resv);
                        break;
                default:
                        DRM_ERROR("Unknown copy method\n");
@@ -120,7 +121,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 
        if (rdev->asic->copy.dma) {
                time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
-                                               RADEON_BENCHMARK_COPY_DMA, n);
+                                               RADEON_BENCHMARK_COPY_DMA, n,
+                                               dobj->tbo.resv);
                if (time < 0)
                        goto out_cleanup;
                if (time > 0)
@@ -130,7 +132,8 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
 
        if (rdev->asic->copy.blit) {
                time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
-                                               RADEON_BENCHMARK_COPY_BLIT, n);
+                                               RADEON_BENCHMARK_COPY_BLIT, n,
+                                               dobj->tbo.resv);
                if (time < 0)
                        goto out_cleanup;
                if (time > 0)
index 102116902a070f728c434a8ee67215ede0cffb70..913fafa597ad210180c03e03618002a702cda441 100644 (file)
@@ -960,6 +960,9 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll,
        if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV &&
            pll->flags & RADEON_PLL_USE_REF_DIV)
                ref_div_max = pll->reference_div;
+       else if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP)
+               /* fix for problems on RS880 */
+               ref_div_max = min(pll->max_ref_div, 7u);
        else
                ref_div_max = pll->max_ref_div;
 
index d0b4f7d1140d6a391a9f32c7ec466f099b99b5fe..ac3c1310b953182acb0db6db41add071fd88e737 100644 (file)
@@ -146,7 +146,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
        struct radeon_bo_va *bo_va;
        int r;
 
-       if (rdev->family < CHIP_CAYMAN) {
+       if ((rdev->family < CHIP_CAYMAN) ||
+           (!rdev->accel_working)) {
                return 0;
        }
 
@@ -176,7 +177,8 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
        struct radeon_bo_va *bo_va;
        int r;
 
-       if (rdev->family < CHIP_CAYMAN) {
+       if ((rdev->family < CHIP_CAYMAN) ||
+           (!rdev->accel_working)) {
                return;
        }
 
index 3cf9c1fa64756fb6b4430d5e351f21aee874ad1d..686411e4e4f6a3620289be34106ae5d38c9f6b93 100644 (file)
@@ -605,14 +605,14 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return -ENOMEM;
                }
 
-               vm = &fpriv->vm;
-               r = radeon_vm_init(rdev, vm);
-               if (r) {
-                       kfree(fpriv);
-                       return r;
-               }
-
                if (rdev->accel_working) {
+                       vm = &fpriv->vm;
+                       r = radeon_vm_init(rdev, vm);
+                       if (r) {
+                               kfree(fpriv);
+                               return r;
+                       }
+
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (r) {
                                radeon_vm_fini(rdev, vm);
@@ -668,9 +668,9 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
                                        radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
                                radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        }
+                       radeon_vm_fini(rdev, vm);
                }
 
-               radeon_vm_fini(rdev, vm);
                kfree(fpriv);
                file_priv->driver_priv = NULL;
        }
index 07b506b410080f482b4d41948c3bd627ae4bb39e..791818165c761f7fdc1807b8f0b7e22de0aed24f 100644 (file)
@@ -119,11 +119,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                if (ring == R600_RING_TYPE_DMA_INDEX)
                        fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
                                                size / RADEON_GPU_PAGE_SIZE,
-                                               NULL);
+                                               vram_obj->tbo.resv);
                else
                        fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
                                                 size / RADEON_GPU_PAGE_SIZE,
-                                                NULL);
+                                                vram_obj->tbo.resv);
                if (IS_ERR(fence)) {
                        DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
                        r = PTR_ERR(fence);
@@ -170,11 +170,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
                if (ring == R600_RING_TYPE_DMA_INDEX)
                        fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
                                                size / RADEON_GPU_PAGE_SIZE,
-                                               NULL);
+                                               vram_obj->tbo.resv);
                else
                        fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
                                                 size / RADEON_GPU_PAGE_SIZE,
-                                                NULL);
+                                                vram_obj->tbo.resv);
                if (IS_ERR(fence)) {
                        DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
                        r = PTR_ERR(fence);
index 06d2246d07f19a086cddced0d1c9ffd32a2f86fd..2a5a4a9e772d6668ee844b94c61219a0c3100340 100644 (file)
@@ -743,9 +743,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
         */
 
        /* NI is optimized for 256KB fragments, SI and newer for 64KB */
-       uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
+       uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
+                              (rdev->family == CHIP_ARUBA)) ?
                        R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
-       uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
+       uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
+                              (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;
 
        uint64_t frag_start = ALIGN(pe_start, frag_align);
        uint64_t frag_end = pe_end & ~(frag_align - 1);
index a7de26d1ac801383e2ecc57acad47f082c016ce4..d931cbbed24069a072385725f5c1fd454e04acdb 100644 (file)
@@ -1389,6 +1389,7 @@ config SENSORS_ADS1015
 config SENSORS_ADS7828
        tristate "Texas Instruments ADS7828 and compatibles"
        depends on I2C
+       select REGMAP_I2C
        help
          If you say yes here you get support for Texas Instruments ADS7828 and
          ADS7830 8-channel A/D converters. ADS7828 resolution is 12-bit, while
@@ -1430,8 +1431,8 @@ config SENSORS_INA2XX
        tristate "Texas Instruments INA219 and compatibles"
        depends on I2C
        help
-         If you say yes here you get support for INA219, INA220, INA226, and
-         INA230 power monitor chips.
+         If you say yes here you get support for INA219, INA220, INA226,
+         INA230, and INA231 power monitor chips.
 
          The INA2xx driver is configured for the default configuration of
          the part as described in the datasheet.
index 13875968c844f03e00791b2d1340e62a547a9905..6cb89c0ebab6df03f7e8b38fc81cecd3136e57de 100644 (file)
@@ -221,7 +221,7 @@ static ssize_t show_min(struct device *dev,
        struct abx500_temp *data = dev_get_drvdata(dev);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 
-       return sprintf(buf, "%ld\n", data->min[attr->index]);
+       return sprintf(buf, "%lu\n", data->min[attr->index]);
 }
 
 static ssize_t show_max(struct device *dev,
@@ -230,7 +230,7 @@ static ssize_t show_max(struct device *dev,
        struct abx500_temp *data = dev_get_drvdata(dev);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 
-       return sprintf(buf, "%ld\n", data->max[attr->index]);
+       return sprintf(buf, "%lu\n", data->max[attr->index]);
 }
 
 static ssize_t show_max_hyst(struct device *dev,
@@ -239,7 +239,7 @@ static ssize_t show_max_hyst(struct device *dev,
        struct abx500_temp *data = dev_get_drvdata(dev);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 
-       return sprintf(buf, "%ld\n", data->max_hyst[attr->index]);
+       return sprintf(buf, "%lu\n", data->max_hyst[attr->index]);
 }
 
 static ssize_t show_min_alarm(struct device *dev,
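
The three sprintf() changes above only swap the format specifier: the min/max/max_hyst fields are evidently unsigned long (that is what the %lu fix implies), and printing an unsigned long with %ld shows large values as negative. A tiny user-space illustration (the %ld call is formally undefined and appears only to demonstrate the mismatch):

#include <stdio.h>

int main(void)
{
	unsigned long v = (unsigned long)-1;	/* largest unsigned long */

	printf("%ld\n", v);	/* wrong specifier: typically prints -1 */
	printf("%lu\n", v);	/* matches the type */
	return 0;
}
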
index f4f9b219bf1619392e203dfb95f897c742e17357..11955467fc0f48a53f4cf776802cd3494820ccca 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
+#include <linux/bitops.h>
 
 /*
  * AD7314 temperature masks
@@ -67,7 +68,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
        switch (spi_get_device_id(chip->spi_dev)->driver_data) {
        case ad7314:
                data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT;
-               data = (data << 6) >> 6;
+               data = sign_extend32(data, 9);
 
                return sprintf(buf, "%d\n", 250 * data);
        case adt7301:
@@ -78,7 +79,7 @@ static ssize_t ad7314_show_temperature(struct device *dev,
                 * register.  1lsb - 31.25 milli degrees centigrade
                 */
                data = ret & ADT7301_TEMP_MASK;
-               data = (data << 2) >> 2;
+               data = sign_extend32(data, 13);
 
                return sprintf(buf, "%d\n",
                               DIV_ROUND_CLOSEST(data * 3125, 100));
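
sign_extend32(value, index) treats bit 'index' as the sign bit and propagates it upward, which is why the 10-bit ad7314 sample uses index 9 and the 14-bit adt7301 sample uses index 13. Here is a user-space sketch with the helper reimplemented locally (in the kernel it comes from linux/bitops.h, hence the new include); the open-coded double shift is easy to get wrong because of integer promotion, which is much of the appeal of the helper:

#include <stdint.h>
#include <stdio.h>

/* Same semantics as the kernel helper: 'index' is the sign bit. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t raw = 0x3f3;	/* 10-bit two's-complement sample, -13 */

	/* Open-coded variant: only correct with the explicit 16-bit cast;
	 * without it, integer promotion keeps the result positive. */
	int old_style = (int16_t)(raw << 6) >> 6;

	printf("old=%d new=%d\n", old_style, (int)sign_extend32(raw, 9));
	return 0;
}
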
index 0625e50d7a6e524b49cbc8eb9374264dbaf4ff15..ad2b47e403452a230c9f3e57454517a25e63289f 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/err.h>
 #include <linux/regulator/consumer.h>
 #include <linux/mutex.h>
+#include <linux/bitops.h>
 
 /* Addresses to scan
  * The chip also supports addresses 0x35..0x37. Don't scan those addresses
@@ -189,7 +190,7 @@ static ssize_t adc128_show_temp(struct device *dev,
        if (IS_ERR(data))
                return PTR_ERR(data);
 
-       temp = (data->temp[index] << 7) >> 7;   /* sign extend */
+       temp = sign_extend32(data->temp[index], 8);
        return sprintf(buf, "%d\n", temp * 500);/* 0.5 degrees C resolution */
 }
 
index a622d40eec1788ca73e138e41a7518aa98049cd5..bce4e9ff21bff76606484f0a04ad1bf52b1ffee2 100644 (file)
 #include <linux/hwmon-sysfs.h>
 #include <linux/i2c.h>
 #include <linux/init.h>
-#include <linux/jiffies.h>
 #include <linux/module.h>
-#include <linux/mutex.h>
 #include <linux/platform_data/ads7828.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
 
 /* The ADS7828 registers */
-#define ADS7828_NCH            8       /* 8 channels supported */
 #define ADS7828_CMD_SD_SE      0x80    /* Single ended inputs */
 #define ADS7828_CMD_PD1                0x04    /* Internal vref OFF && A/D ON */
 #define ADS7828_CMD_PD3                0x0C    /* Internal vref ON && A/D ON */
@@ -50,17 +48,9 @@ enum ads7828_chips { ads7828, ads7830 };
 
 /* Client specific data */
 struct ads7828_data {
-       struct i2c_client *client;
-       struct mutex update_lock;       /* Mutex protecting updates */
-       unsigned long last_updated;     /* Last updated time (in jiffies) */
-       u16 adc_input[ADS7828_NCH];     /* ADS7828_NCH samples */
-       bool valid;                     /* Validity flag */
-       bool diff_input;                /* Differential input */
-       bool ext_vref;                  /* External voltage reference */
-       unsigned int vref_mv;           /* voltage reference value */
+       struct regmap *regmap;
        u8 cmd_byte;                    /* Command byte without channel bits */
        unsigned int lsb_resol;         /* Resolution of the ADC sample LSB */
-       s32 (*read_channel)(const struct i2c_client *client, u8 command);
 };
 
 /* Command byte C2,C1,C0 - see datasheet */
@@ -69,42 +59,22 @@ static inline u8 ads7828_cmd_byte(u8 cmd, int ch)
        return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4);
 }
 
-/* Update data for the device (all 8 channels) */
-static struct ads7828_data *ads7828_update_device(struct device *dev)
-{
-       struct ads7828_data *data = dev_get_drvdata(dev);
-       struct i2c_client *client = data->client;
-
-       mutex_lock(&data->update_lock);
-
-       if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
-                       || !data->valid) {
-               unsigned int ch;
-               dev_dbg(&client->dev, "Starting ads7828 update\n");
-
-               for (ch = 0; ch < ADS7828_NCH; ch++) {
-                       u8 cmd = ads7828_cmd_byte(data->cmd_byte, ch);
-                       data->adc_input[ch] = data->read_channel(client, cmd);
-               }
-               data->last_updated = jiffies;
-               data->valid = true;
-       }
-
-       mutex_unlock(&data->update_lock);
-
-       return data;
-}
-
 /* sysfs callback function */
 static ssize_t ads7828_show_in(struct device *dev, struct device_attribute *da,
                               char *buf)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct ads7828_data *data = ads7828_update_device(dev);
-       unsigned int value = DIV_ROUND_CLOSEST(data->adc_input[attr->index] *
-                                              data->lsb_resol, 1000);
+       struct ads7828_data *data = dev_get_drvdata(dev);
+       u8 cmd = ads7828_cmd_byte(data->cmd_byte, attr->index);
+       unsigned int regval;
+       int err;
 
-       return sprintf(buf, "%d\n", value);
+       err = regmap_read(data->regmap, cmd, &regval);
+       if (err < 0)
+               return err;
+
+       return sprintf(buf, "%d\n",
+                      DIV_ROUND_CLOSEST(regval * data->lsb_resol, 1000));
 }
 
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ads7828_show_in, NULL, 0);
@@ -130,6 +100,16 @@ static struct attribute *ads7828_attrs[] = {
 
 ATTRIBUTE_GROUPS(ads7828);
 
+static const struct regmap_config ads7828_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 16,
+};
+
+static const struct regmap_config ads7830_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+};
+
 static int ads7828_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
@@ -137,42 +117,40 @@ static int ads7828_probe(struct i2c_client *client,
        struct ads7828_platform_data *pdata = dev_get_platdata(dev);
        struct ads7828_data *data;
        struct device *hwmon_dev;
+       unsigned int vref_mv = ADS7828_INT_VREF_MV;
+       bool diff_input = false;
+       bool ext_vref = false;
 
        data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
        if (pdata) {
-               data->diff_input = pdata->diff_input;
-               data->ext_vref = pdata->ext_vref;
-               if (data->ext_vref)
-                       data->vref_mv = pdata->vref_mv;
+               diff_input = pdata->diff_input;
+               ext_vref = pdata->ext_vref;
+               if (ext_vref && pdata->vref_mv)
+                       vref_mv = pdata->vref_mv;
        }
 
-       /* Bound Vref with min/max values if it was provided */
-       if (data->vref_mv)
-               data->vref_mv = clamp_val(data->vref_mv,
-                                         ADS7828_EXT_VREF_MV_MIN,
-                                         ADS7828_EXT_VREF_MV_MAX);
-       else
-               data->vref_mv = ADS7828_INT_VREF_MV;
+       /* Bound Vref with min/max values */
+       vref_mv = clamp_val(vref_mv, ADS7828_EXT_VREF_MV_MIN,
+                           ADS7828_EXT_VREF_MV_MAX);
 
        /* ADS7828 uses 12-bit samples, while ADS7830 is 8-bit */
        if (id->driver_data == ads7828) {
-               data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 4096);
-               data->read_channel = i2c_smbus_read_word_swapped;
+               data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 4096);
+               data->regmap = devm_regmap_init_i2c(client,
+                                                   &ads7828_regmap_config);
        } else {
-               data->lsb_resol = DIV_ROUND_CLOSEST(data->vref_mv * 1000, 256);
-               data->read_channel = i2c_smbus_read_byte_data;
+               data->lsb_resol = DIV_ROUND_CLOSEST(vref_mv * 1000, 256);
+               data->regmap = devm_regmap_init_i2c(client,
+                                                   &ads7830_regmap_config);
        }
 
-       data->cmd_byte = data->ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
-       if (!data->diff_input)
+       data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3;
+       if (!diff_input)
                data->cmd_byte |= ADS7828_CMD_SD_SE;
 
-       data->client = client;
-       mutex_init(&data->update_lock);
-
        hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
                                                           data,
                                                           ads7828_groups);
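
The channel swizzle in ads7828_cmd_byte() survives the regmap conversion untouched: bits C2..C0 land in bits 6:4 of the command byte, and the logical channel is split into (ch >> 1) and (ch & 1) because the chip selects even-numbered single-ended inputs first (the ordering the swizzle implies). The resulting byte doubles as the regmap register address above. A standalone sketch of the mapping, reusing the defines from this hunk; the base command assumes single-ended inputs with the internal reference on:

#include <stdint.h>
#include <stdio.h>

#define ADS7828_CMD_SD_SE	0x80	/* single-ended inputs */
#define ADS7828_CMD_PD3		0x0C	/* internal vref ON && A/D ON */

/* Same swizzle as the driver: C2..C0 into bits 6:4 of the command byte. */
static uint8_t ads7828_cmd_byte(uint8_t cmd, int ch)
{
	return cmd | (((ch >> 1) | (ch & 0x01) << 2) << 4);
}

int main(void)
{
	uint8_t base = ADS7828_CMD_SD_SE | ADS7828_CMD_PD3;
	int ch;

	for (ch = 0; ch < 8; ch++)
		printf("in%d_input -> command/register 0x%02x\n",
		       ch, ads7828_cmd_byte(base, ch));
	return 0;
}
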
index e01feba909c3688ea152b75d6c23b645948632ec..d1542b7d4bc3c3840a19d2480aafce78720cc03a 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/hwmon-sysfs.h>
 #include <linux/jiffies.h>
 #include <linux/of.h>
+#include <linux/delay.h>
 
 #include <linux/platform_data/ina2xx.h>
 
@@ -51,7 +52,6 @@
 #define INA226_ALERT_LIMIT             0x07
 #define INA226_DIE_ID                  0xFF
 
-
 /* register count */
 #define INA219_REGISTERS               6
 #define INA226_REGISTERS               8
 
 /* worst case is 68.10 ms (~14.6Hz, ina219) */
 #define INA2XX_CONVERSION_RATE         15
+#define INA2XX_MAX_DELAY               69 /* worst case delay in ms */
+
+#define INA2XX_RSHUNT_DEFAULT          10000
+
+/* bit mask for reading the averaging setting in the configuration register */
+#define INA226_AVG_RD_MASK             0x0E00
+
+#define INA226_READ_AVG(reg)           (((reg) & INA226_AVG_RD_MASK) >> 9)
+#define INA226_SHIFT_AVG(val)          ((val) << 9)
+
+/* common attrs, ina226 attrs and NULL */
+#define INA2XX_MAX_ATTRIBUTE_GROUPS    3
+
+/*
+ * Both bus voltage and shunt voltage conversion times for ina226 are set
+ * to 0b0100 on POR, which translates to 2200 microseconds in total.
+ */
+#define INA226_TOTAL_CONV_TIME_DEFAULT 2200
 
 enum ina2xx_ids { ina219, ina226 };
 
@@ -81,11 +99,16 @@ struct ina2xx_data {
        struct i2c_client *client;
        const struct ina2xx_config *config;
 
+       long rshunt;
+       u16 curr_config;
+
        struct mutex update_lock;
        bool valid;
        unsigned long last_updated;
+       int update_interval; /* in jiffies */
 
        int kind;
+       const struct attribute_group *groups[INA2XX_MAX_ATTRIBUTE_GROUPS];
        u16 regs[INA2XX_MAX_REGISTERS];
 };
 
@@ -110,34 +133,156 @@ static const struct ina2xx_config ina2xx_config[] = {
        },
 };
 
-static struct ina2xx_data *ina2xx_update_device(struct device *dev)
+/*
+ * Available averaging rates for ina226. The indices correspond with
+ * the bit values expected by the chip (according to the ina226 datasheet,
+ * table 3 AVG bit settings, found at
+ * http://www.ti.com/lit/ds/symlink/ina226.pdf).
+ */
+static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };
+
+static int ina226_avg_bits(int avg)
+{
+       int i;
+
+       /* Get the closest average from the tab. */
+       for (i = 0; i < ARRAY_SIZE(ina226_avg_tab) - 1; i++) {
+               if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2)
+                       break;
+       }
+
+       return i; /* Return 0b0111 for values greater than 1024. */
+}
+
+static int ina226_reg_to_interval(u16 config)
+{
+       int avg = ina226_avg_tab[INA226_READ_AVG(config)];
+
+       /*
+        * Multiply the total conversion time by the number of averages.
+        * Return the result in milliseconds.
+        */
+       return DIV_ROUND_CLOSEST(avg * INA226_TOTAL_CONV_TIME_DEFAULT, 1000);
+}
+
+static u16 ina226_interval_to_reg(int interval, u16 config)
+{
+       int avg, avg_bits;
+
+       avg = DIV_ROUND_CLOSEST(interval * 1000,
+                               INA226_TOTAL_CONV_TIME_DEFAULT);
+       avg_bits = ina226_avg_bits(avg);
+
+       return (config & ~INA226_AVG_RD_MASK) | INA226_SHIFT_AVG(avg_bits);
+}
+
+static void ina226_set_update_interval(struct ina2xx_data *data)
+{
+       int ms;
+
+       ms = ina226_reg_to_interval(data->curr_config);
+       data->update_interval = msecs_to_jiffies(ms);
+}
+
+static int ina2xx_calibrate(struct ina2xx_data *data)
+{
+       u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
+                                   data->rshunt);
+
+       return i2c_smbus_write_word_swapped(data->client,
+                                           INA2XX_CALIBRATION, val);
+}
+
+/*
+ * Initialize the configuration and calibration registers.
+ */
+static int ina2xx_init(struct ina2xx_data *data)
 {
-       struct ina2xx_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
-       struct ina2xx_data *ret = data;
+       int ret;
 
-       mutex_lock(&data->update_lock);
+       /* device configuration */
+       ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
+                                          data->curr_config);
+       if (ret < 0)
+               return ret;
 
-       if (time_after(jiffies, data->last_updated +
-                      HZ / INA2XX_CONVERSION_RATE) || !data->valid) {
+       /*
+        * Set current LSB to 1mA, shunt is in uOhms
+        * (equation 13 in datasheet).
+        */
+       return ina2xx_calibrate(data);
+}
 
-               int i;
+static int ina2xx_do_update(struct device *dev)
+{
+       struct ina2xx_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       int i, rv, retry;
 
-               dev_dbg(&client->dev, "Starting ina2xx update\n");
+       dev_dbg(&client->dev, "Starting ina2xx update\n");
 
+       for (retry = 5; retry; retry--) {
                /* Read all registers */
                for (i = 0; i < data->config->registers; i++) {
-                       int rv = i2c_smbus_read_word_swapped(client, i);
-                       if (rv < 0) {
-                               ret = ERR_PTR(rv);
-                               goto abort;
-                       }
+                       rv = i2c_smbus_read_word_swapped(client, i);
+                       if (rv < 0)
+                               return rv;
                        data->regs[i] = rv;
                }
+
+               /*
+                * If the current value in the calibration register is 0, the
+                * power and current registers will also remain at 0. In case
+                * the chip has been reset, let's check the calibration
+                * register and reinitialize if needed.
+                */
+               if (data->regs[INA2XX_CALIBRATION] == 0) {
+                       dev_warn(dev, "chip not calibrated, reinitializing\n");
+
+                       rv = ina2xx_init(data);
+                       if (rv < 0)
+                               return rv;
+
+                       /*
+                        * Let's make sure the power and current registers
+                        * have been updated before trying again.
+                        */
+                       msleep(INA2XX_MAX_DELAY);
+                       continue;
+               }
+
                data->last_updated = jiffies;
                data->valid = 1;
+
+               return 0;
        }
-abort:
+
+       /*
+        * If we're here, all the write operations succeeded, yet the chip
+        * still returns 0 in the calibration register. Nothing more we
+        * can do here.
+        */
+       dev_err(dev, "unable to reinitialize the chip\n");
+       return -ENODEV;
+}
+
+static struct ina2xx_data *ina2xx_update_device(struct device *dev)
+{
+       struct ina2xx_data *data = dev_get_drvdata(dev);
+       struct ina2xx_data *ret = data;
+       unsigned long after;
+       int rv;
+
+       mutex_lock(&data->update_lock);
+
+       after = data->last_updated + data->update_interval;
+       if (time_after(jiffies, after) || !data->valid) {
+               rv = ina2xx_do_update(dev);
+               if (rv < 0)
+                       ret = ERR_PTR(rv);
+       }
+
        mutex_unlock(&data->update_lock);
        return ret;
 }
@@ -164,6 +309,10 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg)
                /* signed register, LSB=1mA (selected), in mA */
                val = (s16)data->regs[reg];
                break;
+       case INA2XX_CALIBRATION:
+               val = DIV_ROUND_CLOSEST(data->config->calibration_factor,
+                                       data->regs[reg]);
+               break;
        default:
                /* programmer goofed */
                WARN_ON_ONCE(1);
@@ -187,6 +336,85 @@ static ssize_t ina2xx_show_value(struct device *dev,
                        ina2xx_get_value(data, attr->index));
 }
 
+static ssize_t ina2xx_set_shunt(struct device *dev,
+                               struct device_attribute *da,
+                               const char *buf, size_t count)
+{
+       struct ina2xx_data *data = ina2xx_update_device(dev);
+       unsigned long val;
+       int status;
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       status = kstrtoul(buf, 10, &val);
+       if (status < 0)
+               return status;
+
+       if (val == 0 ||
+           /* Values greater than the calibration factor make no sense. */
+           val > data->config->calibration_factor)
+               return -EINVAL;
+
+       mutex_lock(&data->update_lock);
+       data->rshunt = val;
+       status = ina2xx_calibrate(data);
+       mutex_unlock(&data->update_lock);
+       if (status < 0)
+               return status;
+
+       return count;
+}
+
+static ssize_t ina226_set_interval(struct device *dev,
+                                  struct device_attribute *da,
+                                  const char *buf, size_t count)
+{
+       struct ina2xx_data *data = dev_get_drvdata(dev);
+       unsigned long val;
+       int status;
+
+       status = kstrtoul(buf, 10, &val);
+       if (status < 0)
+               return status;
+
+       if (val > INT_MAX || val == 0)
+               return -EINVAL;
+
+       mutex_lock(&data->update_lock);
+       data->curr_config = ina226_interval_to_reg(val,
+                                                  data->regs[INA2XX_CONFIG]);
+       status = i2c_smbus_write_word_swapped(data->client,
+                                             INA2XX_CONFIG,
+                                             data->curr_config);
+
+       ina226_set_update_interval(data);
+       /* Make sure the next access re-reads all registers. */
+       data->valid = 0;
+       mutex_unlock(&data->update_lock);
+       if (status < 0)
+               return status;
+
+       return count;
+}
+
+static ssize_t ina226_show_interval(struct device *dev,
+                                   struct device_attribute *da, char *buf)
+{
+       struct ina2xx_data *data = ina2xx_update_device(dev);
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       /*
+        * We don't use data->update_interval here as we want to display
+        * the actual interval used by the chip and jiffies_to_msecs()
+        * doesn't seem to be accurate enough.
+        */
+       return snprintf(buf, PAGE_SIZE, "%d\n",
+                       ina226_reg_to_interval(data->regs[INA2XX_CONFIG]));
+}
+
 /* shunt voltage */
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ina2xx_show_value, NULL,
                          INA2XX_SHUNT_VOLTAGE);
@@ -203,15 +431,37 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ina2xx_show_value, NULL,
 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
                          INA2XX_POWER);
 
+/* shunt resistance */
+static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
+                         ina2xx_show_value, ina2xx_set_shunt,
+                         INA2XX_CALIBRATION);
+
+/* update interval (ina226 only) */
+static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
+                         ina226_show_interval, ina226_set_interval, 0);
+
 /* pointers to created device attributes */
 static struct attribute *ina2xx_attrs[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
        &sensor_dev_attr_in1_input.dev_attr.attr,
        &sensor_dev_attr_curr1_input.dev_attr.attr,
        &sensor_dev_attr_power1_input.dev_attr.attr,
+       &sensor_dev_attr_shunt_resistor.dev_attr.attr,
        NULL,
 };
-ATTRIBUTE_GROUPS(ina2xx);
+
+static const struct attribute_group ina2xx_group = {
+       .attrs = ina2xx_attrs,
+};
+
+static struct attribute *ina226_attrs[] = {
+       &sensor_dev_attr_update_interval.dev_attr.attr,
+       NULL,
+};
+
+static const struct attribute_group ina226_group = {
+       .attrs = ina226_attrs,
+};
 
 static int ina2xx_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
@@ -221,9 +471,8 @@ static int ina2xx_probe(struct i2c_client *client,
        struct device *dev = &client->dev;
        struct ina2xx_data *data;
        struct device *hwmon_dev;
-       long shunt = 10000; /* default shunt value 10mOhms */
        u32 val;
-       int ret;
+       int ret, group = 0;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
                return -ENODEV;
@@ -234,50 +483,52 @@ static int ina2xx_probe(struct i2c_client *client,
 
        if (dev_get_platdata(dev)) {
                pdata = dev_get_platdata(dev);
-               shunt = pdata->shunt_uohms;
+               data->rshunt = pdata->shunt_uohms;
        } else if (!of_property_read_u32(dev->of_node,
                                         "shunt-resistor", &val)) {
-               shunt = val;
+               data->rshunt = val;
+       } else {
+               data->rshunt = INA2XX_RSHUNT_DEFAULT;
        }
 
-       if (shunt <= 0)
-               return -ENODEV;
-
        /* set the device type */
        data->kind = id->driver_data;
        data->config = &ina2xx_config[data->kind];
-
-       /* device configuration */
-       ret = i2c_smbus_write_word_swapped(client, INA2XX_CONFIG,
-                                          data->config->config_default);
-       if (ret < 0) {
-               dev_err(dev,
-                       "error writing to the config register: %d", ret);
-               return -ENODEV;
-       }
+       data->curr_config = data->config->config_default;
+       data->client = client;
 
        /*
-        * Set current LSB to 1mA, shunt is in uOhms
-        * (equation 13 in datasheet).
+        * Ina226 has a variable update_interval. For ina219 we
+        * use a constant value.
         */
-       ret = i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION,
-                               data->config->calibration_factor / shunt);
+       if (data->kind == ina226)
+               ina226_set_update_interval(data);
+       else
+               data->update_interval = HZ / INA2XX_CONVERSION_RATE;
+
+       if (data->rshunt <= 0 ||
+           data->rshunt > data->config->calibration_factor)
+               return -ENODEV;
+
+       ret = ina2xx_init(data);
        if (ret < 0) {
-               dev_err(dev,
-                       "error writing to the calibration register: %d", ret);
+               dev_err(dev, "error configuring the device: %d\n", ret);
                return -ENODEV;
        }
 
-       data->client = client;
        mutex_init(&data->update_lock);
 
+       data->groups[group++] = &ina2xx_group;
+       if (data->kind == ina226)
+               data->groups[group++] = &ina226_group;
+
        hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
-                                                          data, ina2xx_groups);
+                                                          data, data->groups);
        if (IS_ERR(hwmon_dev))
                return PTR_ERR(hwmon_dev);
 
        dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
-                id->name, shunt);
+                id->name, data->rshunt);
 
        return 0;
 }
@@ -287,6 +538,7 @@ static const struct i2c_device_id ina2xx_id[] = {
        { "ina220", ina219 },
        { "ina226", ina226 },
        { "ina230", ina226 },
+       { "ina231", ina226 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, ina2xx_id);
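
Worked numbers for the new update_interval plumbing above: asking for 1000 ms with the POR conversion time of 2200 us requests an averaging count of round(1000 * 1000 / 2200) = 455, which ina226_avg_bits() snaps to the nearest table entry, 512, and converting back gives the 1126 ms the chip will actually use (and what update_interval then reads back). A user-space sketch of that round trip, with DIV_ROUND_CLOSEST simplified to nonnegative operands:

#include <stdio.h>

#define INA226_TOTAL_CONV_TIME_DEFAULT	2200	/* us, POR default */
#define DIV_ROUND_CLOSEST(x, d)		(((x) + (d) / 2) / (d))	/* x, d >= 0 */
#define ARRAY_SIZE(a)			(sizeof(a) / sizeof((a)[0]))

static const int ina226_avg_tab[] = { 1, 4, 16, 64, 128, 256, 512, 1024 };

/* Index of the table entry closest to the requested averaging count. */
static int ina226_avg_bits(int avg)
{
	int i;

	for (i = 0; i < (int)ARRAY_SIZE(ina226_avg_tab) - 1; i++)
		if (avg <= (ina226_avg_tab[i] + ina226_avg_tab[i + 1]) / 2)
			break;
	return i;
}

int main(void)
{
	int interval = 1000;	/* requested interval in ms */
	int avg = DIV_ROUND_CLOSEST(interval * 1000,
				    INA226_TOTAL_CONV_TIME_DEFAULT);
	int bits = ina226_avg_bits(avg);	/* 455 -> entry 512 (0b110) */
	int actual = DIV_ROUND_CLOSEST(ina226_avg_tab[bits] *
				       INA226_TOTAL_CONV_TIME_DEFAULT, 1000);

	printf("asked %d ms, chip uses %d ms (avg %d)\n",
	       interval, actual, ina226_avg_tab[bits]);	/* 1126 ms */
	return 0;
}
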
index 388f8bcd898e879c84dd2497f4efee307456bfb0..996bdfd5cf25f93679857407951253d1f1e5b5ca 100644 (file)
@@ -201,7 +201,7 @@ struct jc42_data {
 #define JC42_TEMP_MIN          0
 #define JC42_TEMP_MAX          125000
 
-static u16 jc42_temp_to_reg(int temp, bool extended)
+static u16 jc42_temp_to_reg(long temp, bool extended)
 {
        int ntemp = clamp_val(temp,
                              extended ? JC42_TEMP_MIN_EXTENDED :
@@ -213,11 +213,7 @@ static u16 jc42_temp_to_reg(int temp, bool extended)
 
 static int jc42_temp_from_reg(s16 reg)
 {
-       reg &= 0x1fff;
-
-       /* sign extend register */
-       if (reg & 0x1000)
-               reg |= 0xf000;
+       reg = sign_extend32(reg, 12);
 
        /* convert from 0.0625 to 0.001 resolution */
        return reg * 125 / 2;
@@ -308,15 +304,18 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
                                  const char *buf, size_t count)
 {
        struct jc42_data *data = dev_get_drvdata(dev);
-       unsigned long val;
+       long val;
        int diff, hyst;
        int err;
        int ret = count;
 
-       if (kstrtoul(buf, 10, &val) < 0)
+       if (kstrtol(buf, 10, &val) < 0)
                return -EINVAL;
 
+       val = clamp_val(val, (data->extended ? JC42_TEMP_MIN_EXTENDED :
+                             JC42_TEMP_MIN) - 6000, JC42_TEMP_MAX);
        diff = jc42_temp_from_reg(data->temp[t_crit]) - val;
+
        hyst = 0;
        if (diff > 0) {
                if (diff < 2250)
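
For reference, the register format behind jc42_temp_from_reg(): a 13-bit two's-complement field at 0.0625 degC per LSB, so millidegrees fall out as reg * 125 / 2 after sign extension from bit 12. A self-contained check of the conversion, with sign_extend32 reimplemented as in the earlier sketch:

#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

/* 13-bit two's-complement register, 0.0625 degC/LSB -> millidegrees */
static int jc42_temp_from_reg(int16_t reg)
{
	return sign_extend32(reg, 12) * 125 / 2;
}

int main(void)
{
	printf("%d\n", jc42_temp_from_reg(0x0190));	/*  25000, i.e.  25 C */
	printf("%d\n", jc42_temp_from_reg(0x1f00));	/* -16000, i.e. -16 C */
	return 0;
}
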
index ec5678289e4a5828e6d2248ac29e092240b3fc6f..55765790907b3768eb1c4b23e2e3bd77d4eaf294 100644 (file)
@@ -779,7 +779,7 @@ static bool nct7802_regmap_is_volatile(struct device *dev, unsigned int reg)
        return reg != REG_BANK && reg <= 0x20;
 }
 
-static struct regmap_config nct7802_regmap_config = {
+static const struct regmap_config nct7802_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .cache_type = REGCACHE_RBTREE,
index ba9f478f64ee68e7092d9fa546663cd02a7cf2cb..9da2735f14243ed30c1365124195d6dcc9e0baf1 100644 (file)
@@ -253,7 +253,7 @@ static int tmp102_remove(struct i2c_client *client)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int tmp102_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
@@ -279,17 +279,10 @@ static int tmp102_resume(struct device *dev)
        config &= ~TMP102_CONF_SD;
        return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
 }
-
-static const struct dev_pm_ops tmp102_dev_pm_ops = {
-       .suspend        = tmp102_suspend,
-       .resume         = tmp102_resume,
-};
-
-#define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops)
-#else
-#define        TMP102_DEV_PM_OPS NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
+static SIMPLE_DEV_PM_OPS(tmp102_dev_pm_ops, tmp102_suspend, tmp102_resume);
+
 static const struct i2c_device_id tmp102_id[] = {
        { "tmp102", 0 },
        { }
@@ -298,7 +291,7 @@ MODULE_DEVICE_TABLE(i2c, tmp102_id);
 
 static struct i2c_driver tmp102_driver = {
        .driver.name    = DRIVER_NAME,
-       .driver.pm      = TMP102_DEV_PM_OPS,
+       .driver.pm      = &tmp102_dev_pm_ops,
        .probe          = tmp102_probe,
        .remove         = tmp102_remove,
        .id_table       = tmp102_id,
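
SIMPLE_DEV_PM_OPS(name, suspend, resume) defines a const struct dev_pm_ops whose sleep callbacks are wired up through SET_SYSTEM_SLEEP_PM_OPS(), which expands to nothing without CONFIG_PM_SLEEP; the callbacks are then never referenced, so the #else/NULL fallback removed above becomes unnecessary and &tmp102_dev_pm_ops can be taken unconditionally. A driver-shaped sketch of the resulting pattern, with hypothetical foo_* names:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(struct device *dev)
{
	/* put the device into its low-power state */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* bring the device back up */
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

/*
 * Always defined; references foo_suspend/foo_resume only when
 * CONFIG_PM_SLEEP is set, so no NULL fallback is needed.
 */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

The driver then points .driver.pm at &foo_pm_ops unconditionally, exactly as the hunk above does for tmp102.
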
index 31e8308ba8990bffbaa6217aafa2bc0e72b62939..ab838d9e28b6389dc6d97dc633ea6259d2126ca3 100644 (file)
@@ -881,6 +881,7 @@ config I2C_XLR
 config I2C_RCAR
        tristate "Renesas R-Car I2C Controller"
        depends on ARCH_SHMOBILE || COMPILE_TEST
+       select I2C_SLAVE
        help
          If you say yes to this option, support will be included for the
          R-Car I2C controller.
index bff20a589621a031b37d1d1a661e4d5ba8e291b7..958c8db4ec30740e2d9aae00a7835256700d3424 100644 (file)
@@ -785,14 +785,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
        int ret;
 
        pm_runtime_get_sync(&adap->dev);
-       clk_prepare_enable(i2c->clk);
+       ret = clk_enable(i2c->clk);
+       if (ret)
+               return ret;
 
        for (retry = 0; retry < adap->retries; retry++) {
 
                ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
 
                if (ret != -EAGAIN) {
-                       clk_disable_unprepare(i2c->clk);
+                       clk_disable(i2c->clk);
                        pm_runtime_put(&adap->dev);
                        return ret;
                }
@@ -802,7 +804,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
                udelay(100);
        }
 
-       clk_disable_unprepare(i2c->clk);
+       clk_disable(i2c->clk);
        pm_runtime_put(&adap->dev);
        return -EREMOTEIO;
 }
@@ -1197,7 +1199,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 
        clk_prepare_enable(i2c->clk);
        ret = s3c24xx_i2c_init(i2c);
-       clk_disable_unprepare(i2c->clk);
+       clk_disable(i2c->clk);
        if (ret != 0) {
                dev_err(&pdev->dev, "I2C controller init failed\n");
                return ret;
@@ -1210,6 +1212,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
                i2c->irq = ret = platform_get_irq(pdev, 0);
                if (ret <= 0) {
                        dev_err(&pdev->dev, "cannot find IRQ\n");
+                       clk_unprepare(i2c->clk);
                        return ret;
                }
 
@@ -1218,6 +1221,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
 
                if (ret != 0) {
                        dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
+                       clk_unprepare(i2c->clk);
                        return ret;
                }
        }
@@ -1225,6 +1229,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
        ret = s3c24xx_i2c_register_cpufreq(i2c);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
+               clk_unprepare(i2c->clk);
                return ret;
        }
 
@@ -1241,6 +1246,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to add bus to i2c core\n");
                s3c24xx_i2c_deregister_cpufreq(i2c);
+               clk_unprepare(i2c->clk);
                return ret;
        }
 
@@ -1262,6 +1268,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
 {
        struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
 
+       clk_unprepare(i2c->clk);
+
        pm_runtime_disable(&i2c->adap.dev);
        pm_runtime_disable(&pdev->dev);
 
@@ -1293,13 +1301,16 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+       int ret;
 
        if (!IS_ERR(i2c->sysreg))
                regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg);
 
-       clk_prepare_enable(i2c->clk);
+       ret = clk_enable(i2c->clk);
+       if (ret)
+               return ret;
        s3c24xx_i2c_init(i2c);
-       clk_disable_unprepare(i2c->clk);
+       clk_disable(i2c->clk);
        i2c->suspended = 0;
 
        return 0;
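
The conversion above exploits the prepare/enable split in the common clock API: clk_prepare()/clk_unprepare() may sleep and so must stay out of the transfer path, while clk_enable()/clk_disable() are cheap and callable from atomic context. probe() leaves the clock prepared but disabled (clk_prepare_enable() followed by clk_disable()), each transfer then only toggles enable, and every probe error path and remove() now has to drop the prepare count, which is what the added clk_unprepare() calls do. A sketch of the pattern, with hypothetical foo_* names:

#include <linux/clk.h>

struct foo_dev {
	struct clk *clk;
};

/* probe: prepare once (may sleep), leave the clock disabled */
static int foo_probe_clk(struct foo_dev *foo)
{
	return clk_prepare(foo->clk);
}

/* hot path: enable/disable only, safe even in atomic context */
static int foo_xfer(struct foo_dev *foo)
{
	int ret = clk_enable(foo->clk);

	if (ret)
		return ret;
	/* ... perform the transfer ... */
	clk_disable(foo->clk);
	return 0;
}

/* remove and probe error paths: drop the prepare count again */
static void foo_teardown_clk(struct foo_dev *foo)
{
	clk_unprepare(foo->clk);
}
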
index 440d5dbc8b5f0c90ca3dd4341cb4a4e4858f791e..007818b3e1745bd1cb0e41a7581572aa70b2f4ea 100644 (file)
@@ -139,6 +139,7 @@ struct sh_mobile_i2c_data {
        int pos;
        int sr;
        bool send_stop;
+       bool stop_after_dma;
 
        struct resource *res;
        struct dma_chan *dma_tx;
@@ -407,7 +408,7 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd)
 
        if (pd->pos == pd->msg->len) {
                /* Send stop if we haven't yet (DMA case) */
-               if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY))
+               if (pd->send_stop && pd->stop_after_dma)
                        i2c_op(pd, OP_TX_STOP, 0);
                return 1;
        }
@@ -449,6 +450,13 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd)
                real_pos = pd->pos - 2;
 
                if (pd->pos == pd->msg->len) {
+                       if (pd->stop_after_dma) {
+                               /* Simulate PIO end condition after DMA transfer */
+                               i2c_op(pd, OP_RX_STOP, 0);
+                               pd->pos++;
+                               break;
+                       }
+
                        if (real_pos < 0) {
                                i2c_op(pd, OP_RX_STOP, 0);
                                break;
@@ -536,6 +544,7 @@ static void sh_mobile_i2c_dma_callback(void *data)
 
        sh_mobile_i2c_dma_unmap(pd);
        pd->pos = pd->msg->len;
+       pd->stop_after_dma = true;
 
        iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
 }
@@ -726,6 +735,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
                bool do_start = pd->send_stop || !i;
                msg = &msgs[i];
                pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
+               pd->stop_after_dma = false;
 
                err = start_ch(pd, msg, do_start);
                if (err)
index 39d25a8cb1ad355e099b8958afa2b5e8c2c16f75..e9eae57a2b50f77e3d25c4d9fcfa003728464740 100644 (file)
@@ -2972,6 +2972,7 @@ trace:
 }
 EXPORT_SYMBOL(i2c_smbus_xfer);
 
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
 int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
 {
        int ret;
@@ -3019,6 +3020,7 @@ int i2c_slave_unregister(struct i2c_client *client)
        return ret;
 }
 EXPORT_SYMBOL_GPL(i2c_slave_unregister);
+#endif
 
 MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
 MODULE_DESCRIPTION("I2C-Bus main module");
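
IS_ENABLED(CONFIG_I2C_SLAVE) evaluates true when the option is built-in or modular, so the guard above compiles the slave registration helpers out entirely on kernels without slave support instead of exporting dead symbols. One common shape of the idiom, with a hypothetical option and stub (the declaration/stub split usually lives in a header):

#include <linux/errno.h>
#include <linux/kconfig.h>

struct foo;

/* IS_ENABLED(option) is 1 when the option is 'y' or 'm', 0 otherwise. */
#if IS_ENABLED(CONFIG_FOO_FEATURE)
int foo_feature_register(struct foo *foo);
#else
static inline int foo_feature_register(struct foo *foo)
{
	return -ENOSYS;	/* feature compiled out */
}
#endif
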
index 6631400b5f02028f804343099793c780ae4c0ee5..cf9b09db092f4e9969666565ba1562220d6b2a46 100644 (file)
@@ -74,7 +74,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
        struct eeprom_data *eeprom;
        unsigned long flags;
 
-       if (off + count >= attr->size)
+       if (off + count > attr->size)
                return -EFBIG;
 
        eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -92,7 +92,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
        struct eeprom_data *eeprom;
        unsigned long flags;
 
-       if (off + count >= attr->size)
+       if (off + count > attr->size)
                return -EFBIG;
 
        eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
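
The two bounds checks above were off by one: with off + count >= attr->size, an access ending exactly at the end of the EEPROM (for a 256-byte attribute, off 255 and count 1, or off 0 and count 256) was rejected with -EFBIG even though it fits. A tiny user-space check of the boundary:

#include <stddef.h>
#include <stdio.h>

static int check_old(size_t off, size_t count, size_t size)
{
	return off + count >= size ? -1 : 0;	/* rejects a fitting access */
}

static int check_new(size_t off, size_t count, size_t size)
{
	return off + count > size ? -1 : 0;	/* allows access up to the end */
}

int main(void)
{
	/* last byte of a 256-byte attribute: off + count == size */
	printf("old=%d new=%d\n",
	       check_old(255, 1, 256), check_new(255, 1, 256));
	return 0;
}
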
index b716b08156446e186c9ae608f3f4e6343c6f200f..643c08a025a52d015431b8a27be1ddcacbd36845 100644 (file)
@@ -258,6 +258,5 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
 
 IB_UVERBS_DECLARE_EX_CMD(create_flow);
 IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
-IB_UVERBS_DECLARE_EX_CMD(query_device);
 
 #endif /* UVERBS_H */
index 532d8eba8b0203ab65a2a8ed1f096389253b1dca..b7943ff16ed3f2edece8ec4cc3c7931594bd943a 100644 (file)
@@ -400,52 +400,6 @@ err:
        return ret;
 }
 
-static void copy_query_dev_fields(struct ib_uverbs_file *file,
-                                 struct ib_uverbs_query_device_resp *resp,
-                                 struct ib_device_attr *attr)
-{
-       resp->fw_ver            = attr->fw_ver;
-       resp->node_guid         = file->device->ib_dev->node_guid;
-       resp->sys_image_guid    = attr->sys_image_guid;
-       resp->max_mr_size       = attr->max_mr_size;
-       resp->page_size_cap     = attr->page_size_cap;
-       resp->vendor_id         = attr->vendor_id;
-       resp->vendor_part_id    = attr->vendor_part_id;
-       resp->hw_ver            = attr->hw_ver;
-       resp->max_qp            = attr->max_qp;
-       resp->max_qp_wr         = attr->max_qp_wr;
-       resp->device_cap_flags  = attr->device_cap_flags;
-       resp->max_sge           = attr->max_sge;
-       resp->max_sge_rd        = attr->max_sge_rd;
-       resp->max_cq            = attr->max_cq;
-       resp->max_cqe           = attr->max_cqe;
-       resp->max_mr            = attr->max_mr;
-       resp->max_pd            = attr->max_pd;
-       resp->max_qp_rd_atom    = attr->max_qp_rd_atom;
-       resp->max_ee_rd_atom    = attr->max_ee_rd_atom;
-       resp->max_res_rd_atom   = attr->max_res_rd_atom;
-       resp->max_qp_init_rd_atom       = attr->max_qp_init_rd_atom;
-       resp->max_ee_init_rd_atom       = attr->max_ee_init_rd_atom;
-       resp->atomic_cap                = attr->atomic_cap;
-       resp->max_ee                    = attr->max_ee;
-       resp->max_rdd                   = attr->max_rdd;
-       resp->max_mw                    = attr->max_mw;
-       resp->max_raw_ipv6_qp           = attr->max_raw_ipv6_qp;
-       resp->max_raw_ethy_qp           = attr->max_raw_ethy_qp;
-       resp->max_mcast_grp             = attr->max_mcast_grp;
-       resp->max_mcast_qp_attach       = attr->max_mcast_qp_attach;
-       resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
-       resp->max_ah                    = attr->max_ah;
-       resp->max_fmr                   = attr->max_fmr;
-       resp->max_map_per_fmr           = attr->max_map_per_fmr;
-       resp->max_srq                   = attr->max_srq;
-       resp->max_srq_wr                = attr->max_srq_wr;
-       resp->max_srq_sge               = attr->max_srq_sge;
-       resp->max_pkeys                 = attr->max_pkeys;
-       resp->local_ca_ack_delay        = attr->local_ca_ack_delay;
-       resp->phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
-}
-
 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
@@ -466,7 +420,47 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                return ret;
 
        memset(&resp, 0, sizeof resp);
-       copy_query_dev_fields(file, &resp, &attr);
+
+       resp.fw_ver                    = attr.fw_ver;
+       resp.node_guid                 = file->device->ib_dev->node_guid;
+       resp.sys_image_guid            = attr.sys_image_guid;
+       resp.max_mr_size               = attr.max_mr_size;
+       resp.page_size_cap             = attr.page_size_cap;
+       resp.vendor_id                 = attr.vendor_id;
+       resp.vendor_part_id            = attr.vendor_part_id;
+       resp.hw_ver                    = attr.hw_ver;
+       resp.max_qp                    = attr.max_qp;
+       resp.max_qp_wr                 = attr.max_qp_wr;
+       resp.device_cap_flags          = attr.device_cap_flags;
+       resp.max_sge                   = attr.max_sge;
+       resp.max_sge_rd                = attr.max_sge_rd;
+       resp.max_cq                    = attr.max_cq;
+       resp.max_cqe                   = attr.max_cqe;
+       resp.max_mr                    = attr.max_mr;
+       resp.max_pd                    = attr.max_pd;
+       resp.max_qp_rd_atom            = attr.max_qp_rd_atom;
+       resp.max_ee_rd_atom            = attr.max_ee_rd_atom;
+       resp.max_res_rd_atom           = attr.max_res_rd_atom;
+       resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
+       resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
+       resp.atomic_cap                = attr.atomic_cap;
+       resp.max_ee                    = attr.max_ee;
+       resp.max_rdd                   = attr.max_rdd;
+       resp.max_mw                    = attr.max_mw;
+       resp.max_raw_ipv6_qp           = attr.max_raw_ipv6_qp;
+       resp.max_raw_ethy_qp           = attr.max_raw_ethy_qp;
+       resp.max_mcast_grp             = attr.max_mcast_grp;
+       resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
+       resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
+       resp.max_ah                    = attr.max_ah;
+       resp.max_fmr                   = attr.max_fmr;
+       resp.max_map_per_fmr           = attr.max_map_per_fmr;
+       resp.max_srq                   = attr.max_srq;
+       resp.max_srq_wr                = attr.max_srq_wr;
+       resp.max_srq_sge               = attr.max_srq_sge;
+       resp.max_pkeys                 = attr.max_pkeys;
+       resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
+       resp.phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
 
        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
@@ -3293,52 +3287,3 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 
        return ret ? ret : in_len;
 }
-
-int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
-                             struct ib_udata *ucore,
-                             struct ib_udata *uhw)
-{
-       struct ib_uverbs_ex_query_device_resp resp;
-       struct ib_uverbs_ex_query_device  cmd;
-       struct ib_device_attr attr;
-       struct ib_device *device;
-       int err;
-
-       device = file->device->ib_dev;
-       if (ucore->inlen < sizeof(cmd))
-               return -EINVAL;
-
-       err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
-       if (err)
-               return err;
-
-       if (cmd.reserved)
-               return -EINVAL;
-
-       err = device->query_device(device, &attr);
-       if (err)
-               return err;
-
-       memset(&resp, 0, sizeof(resp));
-       copy_query_dev_fields(file, &resp.base, &attr);
-       resp.comp_mask = 0;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-       if (cmd.comp_mask & IB_USER_VERBS_EX_QUERY_DEVICE_ODP) {
-               resp.odp_caps.general_caps = attr.odp_caps.general_caps;
-               resp.odp_caps.per_transport_caps.rc_odp_caps =
-                       attr.odp_caps.per_transport_caps.rc_odp_caps;
-               resp.odp_caps.per_transport_caps.uc_odp_caps =
-                       attr.odp_caps.per_transport_caps.uc_odp_caps;
-               resp.odp_caps.per_transport_caps.ud_odp_caps =
-                       attr.odp_caps.per_transport_caps.ud_odp_caps;
-               resp.comp_mask |= IB_USER_VERBS_EX_QUERY_DEVICE_ODP;
-       }
-#endif
-
-       err = ib_copy_to_udata(ucore, &resp, sizeof(resp));
-       if (err)
-               return err;
-
-       return 0;
-}
index e6c23b9eab336818fa785bae49f5c78c47221fbb..5db1a8cc388da0c5de517bf69b3d8136b94a1bbf 100644 (file)
@@ -123,7 +123,6 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
                                    struct ib_udata *uhw) = {
        [IB_USER_VERBS_EX_CMD_CREATE_FLOW]      = ib_uverbs_ex_create_flow,
        [IB_USER_VERBS_EX_CMD_DESTROY_FLOW]     = ib_uverbs_ex_destroy_flow,
-       [IB_USER_VERBS_EX_CMD_QUERY_DEVICE]     = ib_uverbs_ex_query_device
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
index 8a87404e9c76e763709e478b17b00485f38e678f..03bf81211a5401c366522c68213ecf27cdf4b326 100644 (file)
@@ -1331,8 +1331,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
                (1ull << IB_USER_VERBS_CMD_OPEN_QP);
-       dev->ib_dev.uverbs_ex_cmd_mask =
-               (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
 
        dev->ib_dev.query_device        = mlx5_ib_query_device;
        dev->ib_dev.query_port          = mlx5_ib_query_port;
index 8ba80a6d3a46d17daf233945e76d8760c107a752..d7562beb542367faf1b93d7ba66e8ef879c73bf4 100644 (file)
@@ -98,15 +98,9 @@ enum {
 
        IPOIB_MCAST_FLAG_FOUND    = 0,  /* used in set_multicast_list */
        IPOIB_MCAST_FLAG_SENDONLY = 1,
-       /*
-        * For IPOIB_MCAST_FLAG_BUSY
-        * When set, in flight join and mcast->mc is unreliable
-        * When clear and mcast->mc IS_ERR_OR_NULL, need to restart or
-        *   haven't started yet
-        * When clear and mcast->mc is valid pointer, join was successful
-        */
-       IPOIB_MCAST_FLAG_BUSY     = 2,
+       IPOIB_MCAST_FLAG_BUSY     = 2,  /* joining or already joined */
        IPOIB_MCAST_FLAG_ATTACHED = 3,
+       IPOIB_MCAST_JOIN_STARTED  = 4,
 
        MAX_SEND_CQE              = 16,
        IPOIB_CM_COPYBREAK        = 256,
@@ -323,7 +317,6 @@ struct ipoib_dev_priv {
        struct list_head multicast_list;
        struct rb_root multicast_tree;
 
-       struct workqueue_struct *wq;
        struct delayed_work mcast_task;
        struct work_struct carrier_on_task;
        struct work_struct flush_light;
@@ -484,10 +477,10 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work);
 void ipoib_pkey_event(struct work_struct *work);
 void ipoib_ib_dev_cleanup(struct net_device *dev);
 
-int ipoib_ib_dev_open(struct net_device *dev);
+int ipoib_ib_dev_open(struct net_device *dev, int flush);
 int ipoib_ib_dev_up(struct net_device *dev);
-int ipoib_ib_dev_down(struct net_device *dev);
-int ipoib_ib_dev_stop(struct net_device *dev);
+int ipoib_ib_dev_down(struct net_device *dev, int flush);
+int ipoib_ib_dev_stop(struct net_device *dev, int flush);
 void ipoib_pkey_dev_check_presence(struct net_device *dev);
 
 int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
@@ -499,7 +492,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb);
 
 void ipoib_mcast_restart_task(struct work_struct *work);
 int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev);
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);
index 56959adb6c7da51ccbb6d20307247b7cb69ad55a..933efcea0d03f11b4da3967b8eedc137da21e08a 100644 (file)
@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
        }
 
        spin_lock_irq(&priv->lock);
-       queue_delayed_work(priv->wq,
+       queue_delayed_work(ipoib_workqueue,
                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        /* Add this entry to passive ids list head, but do not re-add it
         * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                        spin_lock_irqsave(&priv->lock, flags);
                        list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
                        ipoib_cm_start_rx_drain(priv);
-                       queue_work(priv->wq, &priv->cm.rx_reap_task);
+                       queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
                        spin_unlock_irqrestore(&priv->lock, flags);
                } else
                        ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                                spin_lock_irqsave(&priv->lock, flags);
                                list_move(&p->list, &priv->cm.rx_reap_list);
                                spin_unlock_irqrestore(&priv->lock, flags);
-                               queue_work(priv->wq, &priv->cm.rx_reap_task);
+                               queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
                        }
                        return;
                }
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
-                       queue_work(priv->wq, &priv->cm.reap_task);
+                       queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }
 
                clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 
                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
-                       queue_work(priv->wq, &priv->cm.reap_task);
+                       queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }
 
                spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
        tx->dev = dev;
        list_add(&tx->list, &priv->cm.start_list);
        set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
-       queue_work(priv->wq, &priv->cm.start_task);
+       queue_work(ipoib_workqueue, &priv->cm.start_task);
        return tx;
 }
 
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                spin_lock_irqsave(&priv->lock, flags);
                list_move(&tx->list, &priv->cm.reap_list);
-               queue_work(priv->wq, &priv->cm.reap_task);
+               queue_work(ipoib_workqueue, &priv->cm.reap_task);
                ipoib_dbg(priv, "Reap connection for gid %pI6\n",
                          tx->neigh->daddr + 4);
                tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
 
        skb_queue_tail(&priv->cm.skb_queue, skb);
        if (e)
-               queue_work(priv->wq, &priv->cm.skb_task);
+               queue_work(ipoib_workqueue, &priv->cm.skb_task);
 }
 
 static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
        }
 
        if (!list_empty(&priv->cm.passive_ids))
-               queue_delayed_work(priv->wq,
+               queue_delayed_work(ipoib_workqueue,
                                   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        spin_unlock_irq(&priv->lock);
 }
index fe65abb5150c76b2eb941b3b2331930bc5b2b81e..72626c3481749b962fe96b79722d7c8e9c99c585 100644 (file)
@@ -655,7 +655,7 @@ void ipoib_reap_ah(struct work_struct *work)
        __ipoib_reap_ah(dev);
 
        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
-               queue_delayed_work(priv->wq, &priv->ah_reap_task,
+               queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                                   round_jiffies_relative(HZ));
 }
 
@@ -664,7 +664,7 @@ static void ipoib_ib_tx_timer_func(unsigned long ctx)
        drain_tx_cq((struct net_device *)ctx);
 }
 
-int ipoib_ib_dev_open(struct net_device *dev)
+int ipoib_ib_dev_open(struct net_device *dev, int flush)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;
@@ -696,7 +696,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
        }
 
        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
-       queue_delayed_work(priv->wq, &priv->ah_reap_task,
+       queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task,
                           round_jiffies_relative(HZ));
 
        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
@@ -706,7 +706,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
 dev_stop:
        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_enable(&priv->napi);
-       ipoib_ib_dev_stop(dev);
+       ipoib_ib_dev_stop(dev, flush);
        return -1;
 }
 
@@ -738,7 +738,7 @@ int ipoib_ib_dev_up(struct net_device *dev)
        return ipoib_mcast_start_thread(dev);
 }
 
-int ipoib_ib_dev_down(struct net_device *dev)
+int ipoib_ib_dev_down(struct net_device *dev, int flush)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -747,7 +747,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        netif_carrier_off(dev);
 
-       ipoib_mcast_stop_thread(dev);
+       ipoib_mcast_stop_thread(dev, flush);
        ipoib_mcast_dev_flush(dev);
 
        ipoib_flush_paths(dev);
@@ -807,7 +807,7 @@ void ipoib_drain_cq(struct net_device *dev)
        local_bh_enable();
 }
 
-int ipoib_ib_dev_stop(struct net_device *dev)
+int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
@@ -880,7 +880,8 @@ timeout:
        /* Wait for all AHs to be reaped */
        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        cancel_delayed_work(&priv->ah_reap_task);
-       flush_workqueue(priv->wq);
+       if (flush)
+               flush_workqueue(ipoib_workqueue);
 
        begin = jiffies;
 
@@ -917,7 +918,7 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
                    (unsigned long) dev);
 
        if (dev->flags & IFF_UP) {
-               if (ipoib_ib_dev_open(dev)) {
+               if (ipoib_ib_dev_open(dev, 1)) {
                        ipoib_transport_dev_cleanup(dev);
                        return -ENODEV;
                }
@@ -1039,12 +1040,12 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
        }
 
        if (level >= IPOIB_FLUSH_NORMAL)
-               ipoib_ib_dev_down(dev);
+               ipoib_ib_dev_down(dev, 0);
 
        if (level == IPOIB_FLUSH_HEAVY) {
                if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
-                       ipoib_ib_dev_stop(dev);
-               if (ipoib_ib_dev_open(dev) != 0)
+                       ipoib_ib_dev_stop(dev, 0);
+               if (ipoib_ib_dev_open(dev, 0) != 0)
                        return;
                if (netif_queue_stopped(dev))
                        netif_start_queue(dev);
@@ -1096,7 +1097,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
         */
        ipoib_flush_paths(dev);
 
-       ipoib_mcast_stop_thread(dev);
+       ipoib_mcast_stop_thread(dev, 1);
        ipoib_mcast_dev_flush(dev);
 
        ipoib_transport_dev_cleanup(dev);
index 6bad17d4d5880886f88ef48d8424abe4347cdc50..58b5aa3b6f2dded5d2e6d15aff080551aa9eddd9 100644 (file)
@@ -108,7 +108,7 @@ int ipoib_open(struct net_device *dev)
 
        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
 
-       if (ipoib_ib_dev_open(dev)) {
+       if (ipoib_ib_dev_open(dev, 1)) {
                if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
                        return 0;
                goto err_disable;
@@ -139,7 +139,7 @@ int ipoib_open(struct net_device *dev)
        return 0;
 
 err_stop:
-       ipoib_ib_dev_stop(dev);
+       ipoib_ib_dev_stop(dev, 1);
 
 err_disable:
        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
@@ -157,8 +157,8 @@ static int ipoib_stop(struct net_device *dev)
 
        netif_stop_queue(dev);
 
-       ipoib_ib_dev_down(dev);
-       ipoib_ib_dev_stop(dev);
+       ipoib_ib_dev_down(dev, 1);
+       ipoib_ib_dev_stop(dev, 0);
 
        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;
@@ -839,7 +839,7 @@ static void ipoib_set_mcast_list(struct net_device *dev)
                return;
        }
 
-       queue_work(priv->wq, &priv->restart_task);
+       queue_work(ipoib_workqueue, &priv->restart_task);
 }
 
 static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
@@ -954,7 +954,7 @@ static void ipoib_reap_neigh(struct work_struct *work)
        __ipoib_reap_neigh(priv);
 
        if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
-               queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+               queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
                                   arp_tbl.gc_interval);
 }
 
@@ -1133,7 +1133,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
 
        /* start garbage collection */
        clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
-       queue_delayed_work(priv->wq, &priv->neigh_reap_task,
+       queue_delayed_work(ipoib_workqueue, &priv->neigh_reap_task,
                           arp_tbl.gc_interval);
 
        return 0;
@@ -1262,13 +1262,15 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
+       if (ipoib_neigh_hash_init(priv) < 0)
+               goto out;
        /* Allocate RX/TX "rings" to hold queued skbs */
        priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
                                GFP_KERNEL);
        if (!priv->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                       ca->name, ipoib_recvq_size);
-               goto out;
+               goto out_neigh_hash_cleanup;
        }
 
        priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
@@ -1283,24 +1285,16 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
        if (ipoib_ib_dev_init(dev, ca, port))
                goto out_tx_ring_cleanup;
 
-       /*
-        * Must be after ipoib_ib_dev_init so we can allocate a per
-        * device wq there and use it here
-        */
-       if (ipoib_neigh_hash_init(priv) < 0)
-               goto out_dev_uninit;
-
        return 0;
 
-out_dev_uninit:
-       ipoib_ib_dev_cleanup(dev);
-
 out_tx_ring_cleanup:
        vfree(priv->tx_ring);
 
 out_rx_ring_cleanup:
        kfree(priv->rx_ring);
 
+out_neigh_hash_cleanup:
+       ipoib_neigh_hash_uninit(dev);
 out:
        return -ENOMEM;
 }
@@ -1323,12 +1317,6 @@ void ipoib_dev_cleanup(struct net_device *dev)
        }
        unregister_netdevice_many(&head);
 
-       /*
-        * Must be before ipoib_ib_dev_cleanup or we delete an in use
-        * work queue
-        */
-       ipoib_neigh_hash_uninit(dev);
-
        ipoib_ib_dev_cleanup(dev);
 
        kfree(priv->rx_ring);
@@ -1336,6 +1324,8 @@ void ipoib_dev_cleanup(struct net_device *dev)
 
        priv->rx_ring = NULL;
        priv->tx_ring = NULL;
+
+       ipoib_neigh_hash_uninit(dev);
 }
 
 static const struct header_ops ipoib_header_ops = {
@@ -1646,7 +1636,7 @@ register_failed:
        /* Stop GC if started before flush */
        set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
        cancel_delayed_work(&priv->neigh_reap_task);
-       flush_workqueue(priv->wq);
+       flush_workqueue(ipoib_workqueue);
 
 event_failed:
        ipoib_dev_cleanup(priv->dev);
@@ -1717,7 +1707,7 @@ static void ipoib_remove_one(struct ib_device *device)
                /* Stop GC */
                set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
                cancel_delayed_work(&priv->neigh_reap_task);
-               flush_workqueue(priv->wq);
+               flush_workqueue(ipoib_workqueue);
 
                unregister_netdev(priv->dev);
                free_netdev(priv->dev);
@@ -1758,13 +1748,8 @@ static int __init ipoib_init_module(void)
         * unregister_netdev() and linkwatch_event take the rtnl lock,
         * so flush_scheduled_work() can deadlock during device
         * removal.
-        *
-        * In addition, bringing one device up and another down at the
-        * same time can deadlock a single workqueue, so we have this
-        * global fallback workqueue, but we also attempt to open a
-        * per device workqueue each time we bring an interface up
         */
-       ipoib_workqueue = create_singlethread_workqueue("ipoib_flush");
+       ipoib_workqueue = create_singlethread_workqueue("ipoib");
        if (!ipoib_workqueue) {
                ret = -ENOMEM;
                goto err_fs;
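
For context, the single-threaded workqueue pattern this hunk returns to guarantees that queued items are serviced by one thread and never run concurrently with each other. A minimal sketch, with hypothetical example_* names standing in for the IPoIB ones:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* stands in for ipoib_workqueue */

static void example_fn(struct work_struct *work)
{
	/* runs strictly serialized: one thread services the whole queue */
}
static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	flush_workqueue(example_wq);	/* wait for anything still queued */
	destroy_workqueue(example_wq);
}
module_init(example_init);
module_exit(example_exit);
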
index bc50dd0d0e4dad7790725b0414d807d42fe82493..ffb83b5f7e805e411f1506d66a53f8465b90c439 100644 (file)
@@ -190,6 +190,12 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
                spin_unlock_irq(&priv->lock);
                priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
                set_qkey = 1;
+
+               if (!ipoib_cm_admin_enabled(dev)) {
+                       rtnl_lock();
+                       dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
+                       rtnl_unlock();
+               }
        }
 
        if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
@@ -271,27 +277,16 @@ ipoib_mcast_sendonly_join_complete(int status,
        struct ipoib_mcast *mcast = multicast->context;
        struct net_device *dev = mcast->dev;
 
-       /*
-        * We have to take the mutex to force mcast_sendonly_join to
-        * return from ib_sa_multicast_join and set mcast->mc to a
-        * valid value.  Otherwise we were racing with ourselves in
-        * that we might fail here, but get a valid return from
-        * ib_sa_multicast_join after we had cleared mcast->mc here,
-        * resulting in mis-matched joins and leaves and a deadlock
-        */
-       mutex_lock(&mcast_mutex);
-
        /* We trap for port events ourselves. */
        if (status == -ENETRESET)
-               goto out;
+               return 0;
 
        if (!status)
                status = ipoib_mcast_join_finish(mcast, &multicast->rec);
 
        if (status) {
                if (mcast->logcount++ < 20)
-                       ipoib_dbg_mcast(netdev_priv(dev), "sendonly multicast "
-                                       "join failed for %pI6, status %d\n",
+                       ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
                                        mcast->mcmember.mgid.raw, status);
 
                /* Flush out any queued packets */
@@ -301,15 +296,11 @@ ipoib_mcast_sendonly_join_complete(int status,
                        dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
                }
                netif_tx_unlock_bh(dev);
+
+               /* Clear the busy flag so we try again */
+               status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
+                                           &mcast->flags);
        }
-out:
-       clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-       if (status)
-               mcast->mc = NULL;
-       complete(&mcast->done);
-       if (status == -ENETRESET)
-               status = 0;
-       mutex_unlock(&mcast_mutex);
        return status;
 }
 
@@ -327,14 +318,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
        int ret = 0;
 
        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
-               ipoib_dbg_mcast(priv, "device shutting down, no sendonly "
-                               "multicast joins\n");
+               ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
                return -ENODEV;
        }
 
-       if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
-               ipoib_dbg_mcast(priv, "multicast entry busy, skipping "
-                               "sendonly join\n");
+       if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
+               ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
                return -EBUSY;
        }
 
@@ -342,9 +331,6 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
        rec.port_gid = priv->local_gid;
        rec.pkey     = cpu_to_be16(priv->pkey);
 
-       mutex_lock(&mcast_mutex);
-       init_completion(&mcast->done);
-       set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
        mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
                                         priv->port, &rec,
                                         IB_SA_MCMEMBER_REC_MGID        |
@@ -357,14 +343,12 @@ static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
        if (IS_ERR(mcast->mc)) {
                ret = PTR_ERR(mcast->mc);
                clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-               complete(&mcast->done);
-               ipoib_warn(priv, "ib_sa_join_multicast for sendonly join "
-                          "failed (ret = %d)\n", ret);
+               ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
+                          ret);
        } else {
-               ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting "
-                               "sendonly join\n", mcast->mcmember.mgid.raw);
+               ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
+                               mcast->mcmember.mgid.raw);
        }
-       mutex_unlock(&mcast_mutex);
 
        return ret;
 }
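
The join path above drops the mutex in favour of an atomic claim on the BUSY bit: test_and_set_bit() tests and sets in a single step, so two racing joiners cannot both proceed. A sketch of the idiom, using a hypothetical flag word:

#include <linux/bitops.h>
#include <linux/errno.h>

#define EX_FLAG_BUSY	0	/* hypothetical, mirrors IPOIB_MCAST_FLAG_BUSY */

static unsigned long ex_flags;

static int ex_try_join(void)
{
	/* returns the old bit value: nonzero means someone else got here first */
	if (test_and_set_bit(EX_FLAG_BUSY, &ex_flags))
		return -EBUSY;
	/* ... issue the asynchronous join ... */
	return 0;
}

static void ex_join_failed(void)
{
	/* clear the claim so a later attempt can retry */
	test_and_clear_bit(EX_FLAG_BUSY, &ex_flags);
}
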
@@ -375,29 +359,18 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
                                                   carrier_on_task);
        struct ib_port_attr attr;
 
+       /*
+        * Take rtnl_lock to avoid racing with ipoib_stop() and
+        * turning the carrier back on while a device is being
+        * removed.
+        */
        if (ib_query_port(priv->ca, priv->port, &attr) ||
            attr.state != IB_PORT_ACTIVE) {
                ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
                return;
        }
 
-       /*
-        * Take rtnl_lock to avoid racing with ipoib_stop() and
-        * turning the carrier back on while a device is being
-        * removed.  However, ipoib_stop() will attempt to flush
-        * the workqueue while holding the rtnl lock, so loop
-        * on trylock until either we get the lock or we see
-        * FLAG_ADMIN_UP go away as that signals that we are bailing
-        * and can safely ignore the carrier on work.
-        */
-       while (!rtnl_trylock()) {
-               if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
-                       return;
-               else
-                       msleep(20);
-       }
-       if (!ipoib_cm_admin_enabled(priv->dev))
-               dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
+       rtnl_lock();
        netif_carrier_on(priv->dev);
        rtnl_unlock();
 }
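
Because the carrier-on work now runs on the global workqueue, which ipoib_stop() no longer flushes while holding RTNL, a plain rtnl_lock() here is deadlock-free. The shape of that deferred work, sketched with a hypothetical pinned device pointer:

#include <linux/rtnetlink.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

static struct net_device *ex_dev;	/* assumed pinned for the work's lifetime */

static void ex_carrier_on(struct work_struct *work)
{
	rtnl_lock();	/* safe: nothing flushes this workqueue under RTNL */
	netif_carrier_on(ex_dev);
	rtnl_unlock();
}
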
@@ -412,63 +385,60 @@ static int ipoib_mcast_join_complete(int status,
        ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
                        mcast->mcmember.mgid.raw, status);
 
-       /*
-        * We have to take the mutex to force mcast_join to
-        * return from ib_sa_multicast_join and set mcast->mc to a
-        * valid value.  Otherwise we were racing with ourselves in
-        * that we might fail here, but get a valid return from
-        * ib_sa_multicast_join after we had cleared mcast->mc here,
-        * resulting in mis-matched joins and leaves and a deadlock
-        */
-       mutex_lock(&mcast_mutex);
-
        /* We trap for port events ourselves. */
-       if (status == -ENETRESET)
+       if (status == -ENETRESET) {
+               status = 0;
                goto out;
+       }
 
        if (!status)
                status = ipoib_mcast_join_finish(mcast, &multicast->rec);
 
        if (!status) {
                mcast->backoff = 1;
+               mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                       queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+                       queue_delayed_work(ipoib_workqueue,
+                                          &priv->mcast_task, 0);
+               mutex_unlock(&mcast_mutex);
 
                /*
-                * Defer carrier on work to priv->wq to avoid a
+                * Defer carrier on work to ipoib_workqueue to avoid a
                 * deadlock on rtnl_lock here.
                 */
                if (mcast == priv->broadcast)
-                       queue_work(priv->wq, &priv->carrier_on_task);
-       } else {
-               if (mcast->logcount++ < 20) {
-                       if (status == -ETIMEDOUT || status == -EAGAIN) {
-                               ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
-                                               mcast->mcmember.mgid.raw, status);
-                       } else {
-                               ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
-                                          mcast->mcmember.mgid.raw, status);
-                       }
-               }
+                       queue_work(ipoib_workqueue, &priv->carrier_on_task);
 
-               mcast->backoff *= 2;
-               if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
-                       mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+               status = 0;
+               goto out;
        }
-out:
+
+       if (mcast->logcount++ < 20) {
+               if (status == -ETIMEDOUT || status == -EAGAIN) {
+                       ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
+                                       mcast->mcmember.mgid.raw, status);
+               } else {
+                       ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
+                                  mcast->mcmember.mgid.raw, status);
+               }
+       }
+
+       mcast->backoff *= 2;
+       if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
+               mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
+
+       /* Clear the busy flag so we try again */
+       status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+
+       mutex_lock(&mcast_mutex);
        spin_lock_irq(&priv->lock);
-       clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
-       if (status)
-               mcast->mc = NULL;
-       complete(&mcast->done);
-       if (status == -ENETRESET)
-               status = 0;
-       if (status && test_bit(IPOIB_MCAST_RUN, &priv->flags))
-               queue_delayed_work(priv->wq, &priv->mcast_task,
+       if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
+               queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
                                   mcast->backoff * HZ);
        spin_unlock_irq(&priv->lock);
        mutex_unlock(&mcast_mutex);
-
+out:
+       complete(&mcast->done);
        return status;
 }
 
@@ -517,9 +487,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
                rec.hop_limit     = priv->broadcast->mcmember.hop_limit;
        }
 
-       mutex_lock(&mcast_mutex);
-       init_completion(&mcast->done);
        set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+       init_completion(&mcast->done);
+       set_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags);
+
        mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
                                         &rec, comp_mask, GFP_KERNEL,
                                         ipoib_mcast_join_complete, mcast);
@@ -533,11 +504,13 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
                if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
                        mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
 
+               mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                       queue_delayed_work(priv->wq, &priv->mcast_task,
+                       queue_delayed_work(ipoib_workqueue,
+                                          &priv->mcast_task,
                                           mcast->backoff * HZ);
+               mutex_unlock(&mcast_mutex);
        }
-       mutex_unlock(&mcast_mutex);
 }
 
 void ipoib_mcast_join_task(struct work_struct *work)
@@ -574,8 +547,8 @@ void ipoib_mcast_join_task(struct work_struct *work)
                        ipoib_warn(priv, "failed to allocate broadcast group\n");
                        mutex_lock(&mcast_mutex);
                        if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
-                               queue_delayed_work(priv->wq, &priv->mcast_task,
-                                                  HZ);
+                               queue_delayed_work(ipoib_workqueue,
+                                                  &priv->mcast_task, HZ);
                        mutex_unlock(&mcast_mutex);
                        return;
                }
@@ -590,8 +563,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
        }
 
        if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
-               if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
-                   !test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
+               if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
                        ipoib_mcast_join(dev, priv->broadcast, 0);
                return;
        }
@@ -599,33 +571,23 @@ void ipoib_mcast_join_task(struct work_struct *work)
        while (1) {
                struct ipoib_mcast *mcast = NULL;
 
-               /*
-                * Need the mutex so our flags are consistent, need the
-                * priv->lock so we don't race with list removals in either
-                * mcast_dev_flush or mcast_restart_task
-                */
-               mutex_lock(&mcast_mutex);
                spin_lock_irq(&priv->lock);
                list_for_each_entry(mcast, &priv->multicast_list, list) {
-                       if (IS_ERR_OR_NULL(mcast->mc) &&
-                           !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
-                           !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
+                       if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
+                           && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
+                           && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
                                /* Found the next unjoined group */
                                break;
                        }
                }
                spin_unlock_irq(&priv->lock);
-               mutex_unlock(&mcast_mutex);
 
                if (&mcast->list == &priv->multicast_list) {
                        /* All done */
                        break;
                }
 
-               if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
-                       ipoib_mcast_sendonly_join(mcast);
-               else
-                       ipoib_mcast_join(dev, mcast, 1);
+               ipoib_mcast_join(dev, mcast, 1);
                return;
        }
 
@@ -642,13 +604,13 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 
        mutex_lock(&mcast_mutex);
        if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-               queue_delayed_work(priv->wq, &priv->mcast_task, 0);
+               queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
        mutex_unlock(&mcast_mutex);
 
        return 0;
 }
 
-int ipoib_mcast_stop_thread(struct net_device *dev)
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
@@ -659,7 +621,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
        cancel_delayed_work(&priv->mcast_task);
        mutex_unlock(&mcast_mutex);
 
-       flush_workqueue(priv->wq);
+       if (flush)
+               flush_workqueue(ipoib_workqueue);
 
        return 0;
 }
@@ -670,9 +633,6 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
        int ret = 0;
 
        if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-               ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
-
-       if (!IS_ERR_OR_NULL(mcast->mc))
                ib_sa_free_multicast(mcast->mc);
 
        if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
@@ -725,8 +685,6 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
                memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
                __ipoib_mcast_add(dev, mcast);
                list_add_tail(&mcast->list, &priv->multicast_list);
-               if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
-                       queue_delayed_work(priv->wq, &priv->mcast_task, 0);
        }
 
        if (!mcast->ah) {
@@ -740,6 +698,8 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
                if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
                        ipoib_dbg_mcast(priv, "no address vector, "
                                        "but multicast join already started\n");
+               else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
+                       ipoib_mcast_sendonly_join(mcast);
 
                /*
                 * If lookup completes between here and out:, don't
@@ -799,12 +759,9 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       /*
-        * make sure the in-flight joins have finished before we attempt
-        * to leave
-        */
+       /* separate the wait from the leave */
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-               if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
+               if (test_bit(IPOIB_MCAST_JOIN_STARTED, &mcast->flags))
                        wait_for_completion(&mcast->done);
 
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
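
The flush path now waits only for joins that actually started, tracked by IPOIB_MCAST_JOIN_STARTED, using the kernel's standard completion handshake. A sketch of that handshake with hypothetical ex_* names:

#include <linux/completion.h>

static struct completion ex_done;

static void ex_start_join(void)
{
	init_completion(&ex_done);	/* must happen before the async submit */
	/* ... submit the request; its callback ends with complete() ... */
}

static void ex_join_callback(void)
{
	complete(&ex_done);		/* wakes any waiter in ex_flush() */
}

static void ex_flush(void)
{
	wait_for_completion(&ex_done);	/* blocks until the callback has run */
}
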
@@ -837,6 +794,8 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 
        ipoib_dbg_mcast(priv, "restarting multicast task\n");
 
+       ipoib_mcast_stop_thread(dev, 0);
+
        local_irq_save(flags);
        netif_addr_lock(dev);
        spin_lock(&priv->lock);
@@ -921,38 +880,14 @@ void ipoib_mcast_restart_task(struct work_struct *work)
        netif_addr_unlock(dev);
        local_irq_restore(flags);
 
-       /*
-        * make sure the in-flight joins have finished before we attempt
-        * to leave
-        */
-       list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
-               if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
-                       wait_for_completion(&mcast->done);
-
-       /*
-        * We have to cancel outside of the spinlock, but we have to
-        * take the rtnl lock or else we race with the removal of
-        * entries from the remove list in mcast_dev_flush as part
-        * of ipoib_stop().  We detect the drop of the ADMIN_UP flag
-        * to signal that we have hit this particular race, and we
-        * return since we know we don't need to do anything else
-        * anyway.
-        */
-       while (!rtnl_trylock()) {
-               if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
-                       return;
-               else
-                       msleep(20);
-       }
+       /* We have to cancel outside of the spinlock */
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
                ipoib_mcast_leave(mcast->dev, mcast);
                ipoib_mcast_free(mcast);
        }
-       /*
-        * Restart our join task if needed
-        */
-       ipoib_mcast_start_thread(dev);
-       rtnl_unlock();
+
+       if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
+               ipoib_mcast_start_thread(dev);
 }
 
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
index b72a753eb41dc3031608269c56434ed507b96f5f..c56d5d44c53b3f11725b6d6da220ea2c440fe496 100644 (file)
@@ -145,20 +145,10 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
        int ret, size;
        int i;
 
-       /*
-        * the various IPoIB tasks assume they will never race against
-        * themselves, so always use a single thread workqueue
-        */
-       priv->wq = create_singlethread_workqueue("ipoib_wq");
-       if (!priv->wq) {
-               printk(KERN_WARNING "ipoib: failed to allocate device WQ\n");
-               return -ENODEV;
-       }
-
        priv->pd = ib_alloc_pd(priv->ca);
        if (IS_ERR(priv->pd)) {
                printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
-               goto out_free_wq;
+               return -ENODEV;
        }
 
        priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
@@ -252,10 +242,6 @@ out_free_mr:
 
 out_free_pd:
        ib_dealloc_pd(priv->pd);
-
-out_free_wq:
-       destroy_workqueue(priv->wq);
-       priv->wq = NULL;
        return -ENODEV;
 }
 
@@ -284,12 +270,6 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
 
        if (ib_dealloc_pd(priv->pd))
                ipoib_warn(priv, "ib_dealloc_pd failed\n");
-
-       if (priv->wq) {
-               flush_workqueue(priv->wq);
-               destroy_workqueue(priv->wq);
-               priv->wq = NULL;
-       }
 }
 
 void ipoib_event(struct ib_event_handler *handler,
index 77ecf6d322370ea8566ade345da953b13cc6362d..6e22682c8255cfff41e1fc49f6a76198911aafc5 100644 (file)
@@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
  * Asus UX31               0x361f00        20, 15, 0e      clickpad
  * Asus UX32VD             0x361f02        00, 15, 0e      clickpad
  * Avatar AVIU-145A2       0x361f00        ?               clickpad
+ * Fujitsu LIFEBOOK E544   0x470f00        d0, 12, 09      2 hw buttons
+ * Fujitsu LIFEBOOK E554   0x570f01        40, 14, 0c      2 hw buttons
  * Fujitsu H730            0x570f00        c0, 14, 0c      3 hw buttons (**)
  * Gigabyte U2442          0x450f01        58, 17, 0c      2 hw buttons
  * Lenovo L430             0x350f02        b9, 15, 0c      2 hw buttons (*)
@@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
                },
        },
+       {
+               /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+               },
+       },
+       {
+               /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
+               },
+       },
 #endif
        { }
 };
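
Entries like the two added above are consulted with dmi_check_system(), which returns the number of matching table entries. A sketch in the same shape, with a hypothetical table name:

#include <linux/dmi.h>

static const struct dmi_system_id ex_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
		},
	},
	{ }	/* an all-zero entry terminates the table */
};

static bool ex_needs_quirk(void)
{
	return dmi_check_system(ex_quirk_table) > 0;
}
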
index f9472920d986368f7aa83eb7d0621489d774b050..23e26e0768b54af037990dbe4999d0486470f56c 100644 (file)
@@ -135,8 +135,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                1232, 5710, 1156, 4696
        },
        {
-               (const char * const []){"LEN0034", "LEN0036", "LEN0039",
-                                       "LEN2002", "LEN2004", NULL},
+               (const char * const []){"LEN0034", "LEN0036", "LEN0037",
+                                       "LEN0039", "LEN2002", "LEN2004",
+                                       NULL},
                1024, 5112, 2024, 4832
        },
        {
@@ -165,7 +166,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
        "LEN0035", /* X240 */
        "LEN0036", /* T440 */
-       "LEN0037",
+       "LEN0037", /* X1 Carbon 2nd */
        "LEN0038",
        "LEN0039", /* T440s */
        "LEN0041",
index 764857b4e2682fa5d3ecc74f77d4f41a92eb34bb..c11556563ef0633f746edc9bc9e3ebca80879e8b 100644 (file)
@@ -151,6 +151,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
                },
        },
+       {
+               /* Medion Akoya E7225 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+               },
+       },
        {
                /* Blue FB5601 */
                .matches = {
index 98024856df07fc89e744cb1d7b2356a72146de51..59de6364a9109be4baba8f36fd7e331a13261128 100644 (file)
@@ -4284,7 +4284,6 @@ static int alloc_hpet_msi(unsigned int irq, unsigned int id)
 }
 
 struct irq_remap_ops amd_iommu_irq_ops = {
-       .supported              = amd_iommu_supported,
        .prepare                = amd_iommu_prepare,
        .enable                 = amd_iommu_enable,
        .disable                = amd_iommu_disable,
index b0522f15730fbbc0b6dda1aa1a100cf9af6b426d..9a20248e7068fbe54fc38a7f76342777efe567b8 100644 (file)
@@ -2014,9 +2014,6 @@ static bool detect_ivrs(void)
        /* Make sure ACS will be enabled during PCI probe */
        pci_request_acs();
 
-       if (!disable_irq_remap)
-               amd_iommu_irq_remap = true;
-
        return true;
 }
 
@@ -2123,12 +2120,14 @@ static int __init iommu_go_to_state(enum iommu_init_state state)
 #ifdef CONFIG_IRQ_REMAP
 int __init amd_iommu_prepare(void)
 {
-       return iommu_go_to_state(IOMMU_ACPI_FINISHED);
-}
+       int ret;
 
-int __init amd_iommu_supported(void)
-{
-       return amd_iommu_irq_remap ? 1 : 0;
+       amd_iommu_irq_remap = true;
+
+       ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
+       if (ret)
+               return ret;
+       return amd_iommu_irq_remap ? 0 : -ENODEV;
 }
 
 int __init amd_iommu_enable(void)
index 95ed6deae47fe76ac4a7a068b1b6f27e7f8a54c3..861af9d8338a84b8cc19b24a88a9e0684dd49021 100644 (file)
@@ -33,7 +33,6 @@ extern void amd_iommu_init_notifier(void);
 extern void amd_iommu_init_api(void);
 
 /* Needed for interrupt remapping */
-extern int amd_iommu_supported(void);
 extern int amd_iommu_prepare(void);
 extern int amd_iommu_enable(void);
 extern void amd_iommu_disable(void);
index a55b207b9425e30bfdce4c3cab4e26d58ce4eaa6..14de1ab223c864c00a866ebbba6d68d3fc50cb6c 100644 (file)
@@ -32,8 +32,9 @@ struct hpet_scope {
 };
 
 #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
-#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
+#define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8)
 
+static int __read_mostly eim_mode;
 static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
 static struct hpet_scope ir_hpet[MAX_HPET_TBS];
 
@@ -481,11 +482,11 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
        if (iommu->ir_table)
                return 0;
 
-       ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
+       ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
        if (!ir_table)
                return -ENOMEM;
 
-       pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
+       pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
                                 INTR_REMAP_PAGE_ORDER);
 
        if (!pages) {
@@ -566,13 +567,27 @@ static int __init dmar_x2apic_optout(void)
        return dmar->flags & DMAR_X2APIC_OPT_OUT;
 }
 
-static int __init intel_irq_remapping_supported(void)
+static void __init intel_cleanup_irq_remapping(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+
+       for_each_iommu(iommu, drhd) {
+               if (ecap_ir_support(iommu->ecap)) {
+                       iommu_disable_irq_remapping(iommu);
+                       intel_teardown_irq_remapping(iommu);
+               }
+       }
+
+       if (x2apic_supported())
+               pr_warn("Failed to enable irq remapping.  You are vulnerable to irq-injection attacks.\n");
+}
+
+static int __init intel_prepare_irq_remapping(void)
 {
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
 
-       if (disable_irq_remap)
-               return 0;
        if (irq_remap_broken) {
                printk(KERN_WARNING
                        "This system BIOS has enabled interrupt remapping\n"
@@ -581,38 +596,45 @@ static int __init intel_irq_remapping_supported(void)
                        "interrupt remapping is being disabled.  Please\n"
                        "contact your BIOS vendor for an update\n");
                add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
-               disable_irq_remap = 1;
-               return 0;
+               return -ENODEV;
        }
 
+       if (dmar_table_init() < 0)
+               return -ENODEV;
+
        if (!dmar_ir_support())
-               return 0;
+               return -ENODEV;
+
+       if (parse_ioapics_under_ir() != 1) {
+               printk(KERN_INFO "Not enabling interrupt remapping\n");
+               goto error;
+       }
 
+       /* First make sure all IOMMUs support IRQ remapping */
        for_each_iommu(iommu, drhd)
                if (!ecap_ir_support(iommu->ecap))
-                       return 0;
+                       goto error;
 
-       return 1;
+       /* Do the allocations early */
+       for_each_iommu(iommu, drhd)
+               if (intel_setup_irq_remapping(iommu))
+                       goto error;
+
+       return 0;
+
+error:
+       intel_cleanup_irq_remapping();
+       return -ENODEV;
 }
 
 static int __init intel_enable_irq_remapping(void)
 {
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
-       bool x2apic_present;
        int setup = 0;
        int eim = 0;
 
-       x2apic_present = x2apic_supported();
-
-       if (parse_ioapics_under_ir() != 1) {
-               printk(KERN_INFO "Not enable interrupt remapping\n");
-               goto error;
-       }
-
-       if (x2apic_present) {
-               pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
-
+       if (x2apic_supported()) {
                eim = !dmar_x2apic_optout();
                if (!eim)
                        printk(KERN_WARNING
@@ -646,16 +668,15 @@ static int __init intel_enable_irq_remapping(void)
        /*
         * check for the Interrupt-remapping support
         */
-       for_each_iommu(iommu, drhd) {
-               if (!ecap_ir_support(iommu->ecap))
-                       continue;
-
+       for_each_iommu(iommu, drhd)
                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
-                       goto error;
+                       eim = 0;
                }
-       }
+       eim_mode = eim;
+       if (eim)
+               pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
 
        /*
         * Enable queued invalidation for all the DRHD's.
@@ -675,12 +696,6 @@ static int __init intel_enable_irq_remapping(void)
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
-               if (!ecap_ir_support(iommu->ecap))
-                       continue;
-
-               if (intel_setup_irq_remapping(iommu))
-                       goto error;
-
                iommu_set_irq_remapping(iommu, eim);
                setup = 1;
        }
@@ -702,15 +717,7 @@ static int __init intel_enable_irq_remapping(void)
        return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
 
 error:
-       for_each_iommu(iommu, drhd)
-               if (ecap_ir_support(iommu->ecap)) {
-                       iommu_disable_irq_remapping(iommu);
-                       intel_teardown_irq_remapping(iommu);
-               }
-
-       if (x2apic_present)
-               pr_warn("Failed to enable irq remapping.  You are vulnerable to irq-injection attacks.\n");
-
+       intel_cleanup_irq_remapping();
        return -1;
 }
 
@@ -1199,8 +1206,7 @@ static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
 }
 
 struct irq_remap_ops intel_irq_remap_ops = {
-       .supported              = intel_irq_remapping_supported,
-       .prepare                = dmar_table_init,
+       .prepare                = intel_prepare_irq_remapping,
        .enable                 = intel_enable_irq_remapping,
        .disable                = disable_irq_remapping,
        .reenable               = reenable_irq_remapping,
index 89c4846683be521a1b3fd968afcf9a031e69043a..390079ee13507747388f635bf67c1c44dfb6c068 100644 (file)
 #include "irq_remapping.h"
 
 int irq_remapping_enabled;
-
-int disable_irq_remap;
 int irq_remap_broken;
 int disable_sourceid_checking;
 int no_x2apic_optout;
 
+static int disable_irq_remap;
 static struct irq_remap_ops *remap_ops;
 
 static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
@@ -194,45 +193,32 @@ static __init int setup_irqremap(char *str)
 }
 early_param("intremap", setup_irqremap);
 
-void __init setup_irq_remapping_ops(void)
-{
-       remap_ops = &intel_irq_remap_ops;
-
-#ifdef CONFIG_AMD_IOMMU
-       if (amd_iommu_irq_ops.prepare() == 0)
-               remap_ops = &amd_iommu_irq_ops;
-#endif
-}
-
 void set_irq_remapping_broken(void)
 {
        irq_remap_broken = 1;
 }
 
-int irq_remapping_supported(void)
+int __init irq_remapping_prepare(void)
 {
        if (disable_irq_remap)
-               return 0;
-
-       if (!remap_ops || !remap_ops->supported)
-               return 0;
-
-       return remap_ops->supported();
-}
+               return -ENOSYS;
 
-int __init irq_remapping_prepare(void)
-{
-       if (!remap_ops || !remap_ops->prepare)
-               return -ENODEV;
+       if (intel_irq_remap_ops.prepare() == 0)
+               remap_ops = &intel_irq_remap_ops;
+       else if (IS_ENABLED(CONFIG_AMD_IOMMU) &&
+                amd_iommu_irq_ops.prepare() == 0)
+               remap_ops = &amd_iommu_irq_ops;
+       else
+               return -ENOSYS;
 
-       return remap_ops->prepare();
+       return 0;
 }
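
With the ->supported() callback gone, ->prepare() doubles as the probe: the first backend whose prepare() succeeds is installed, which is also what lets the later callers drop their NULL checks on remap_ops. A minimal sketch of that selection pattern, with a hypothetical ex_ops type:

#include <linux/errno.h>

struct ex_ops {
	int (*prepare)(void);
	int (*enable)(void);
};

static struct ex_ops *ex_active;

static int ex_prepare(struct ex_ops *preferred, struct ex_ops *fallback)
{
	if (preferred->prepare() == 0)
		ex_active = preferred;
	else if (fallback->prepare() == 0)
		ex_active = fallback;
	else
		return -ENOSYS;		/* no usable backend */
	return 0;			/* ex_active is now guaranteed non-NULL */
}
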
 
 int __init irq_remapping_enable(void)
 {
        int ret;
 
-       if (!remap_ops || !remap_ops->enable)
+       if (!remap_ops->enable)
                return -ENODEV;
 
        ret = remap_ops->enable();
@@ -245,22 +231,16 @@ int __init irq_remapping_enable(void)
 
 void irq_remapping_disable(void)
 {
-       if (!irq_remapping_enabled ||
-           !remap_ops ||
-           !remap_ops->disable)
-               return;
-
-       remap_ops->disable();
+       if (irq_remapping_enabled && remap_ops->disable)
+               remap_ops->disable();
 }
 
 int irq_remapping_reenable(int mode)
 {
-       if (!irq_remapping_enabled ||
-           !remap_ops ||
-           !remap_ops->reenable)
-               return 0;
+       if (irq_remapping_enabled && remap_ops->reenable)
+               return remap_ops->reenable(mode);
 
-       return remap_ops->reenable(mode);
+       return 0;
 }
 
 int __init irq_remap_enable_fault_handling(void)
@@ -268,7 +248,7 @@ int __init irq_remap_enable_fault_handling(void)
        if (!irq_remapping_enabled)
                return 0;
 
-       if (!remap_ops || !remap_ops->enable_faulting)
+       if (!remap_ops->enable_faulting)
                return -ENODEV;
 
        return remap_ops->enable_faulting();
@@ -279,7 +259,7 @@ int setup_ioapic_remapped_entry(int irq,
                                unsigned int destination, int vector,
                                struct io_apic_irq_attr *attr)
 {
-       if (!remap_ops || !remap_ops->setup_ioapic_entry)
+       if (!remap_ops->setup_ioapic_entry)
                return -ENODEV;
 
        return remap_ops->setup_ioapic_entry(irq, entry, destination,
@@ -289,8 +269,7 @@ int setup_ioapic_remapped_entry(int irq,
 static int set_remapped_irq_affinity(struct irq_data *data,
                                     const struct cpumask *mask, bool force)
 {
-       if (!config_enabled(CONFIG_SMP) || !remap_ops ||
-           !remap_ops->set_affinity)
+       if (!config_enabled(CONFIG_SMP) || !remap_ops->set_affinity)
                return 0;
 
        return remap_ops->set_affinity(data, mask, force);
@@ -300,10 +279,7 @@ void free_remapped_irq(int irq)
 {
        struct irq_cfg *cfg = irq_cfg(irq);
 
-       if (!remap_ops || !remap_ops->free_irq)
-               return;
-
-       if (irq_remapped(cfg))
+       if (irq_remapped(cfg) && remap_ops->free_irq)
                remap_ops->free_irq(irq);
 }
 
@@ -315,13 +291,13 @@ void compose_remapped_msi_msg(struct pci_dev *pdev,
 
        if (!irq_remapped(cfg))
                native_compose_msi_msg(pdev, irq, dest, msg, hpet_id);
-       else if (remap_ops && remap_ops->compose_msi_msg)
+       else if (remap_ops->compose_msi_msg)
                remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
 }
 
 static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
 {
-       if (!remap_ops || !remap_ops->msi_alloc_irq)
+       if (!remap_ops->msi_alloc_irq)
                return -ENODEV;
 
        return remap_ops->msi_alloc_irq(pdev, irq, nvec);
@@ -330,7 +306,7 @@ static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
 static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
                                  int index, int sub_handle)
 {
-       if (!remap_ops || !remap_ops->msi_setup_irq)
+       if (!remap_ops->msi_setup_irq)
                return -ENODEV;
 
        return remap_ops->msi_setup_irq(pdev, irq, index, sub_handle);
@@ -340,7 +316,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
 {
        int ret;
 
-       if (!remap_ops || !remap_ops->alloc_hpet_msi)
+       if (!remap_ops->alloc_hpet_msi)
                return -ENODEV;
 
        ret = remap_ops->alloc_hpet_msi(irq, id);
index fde250f86e6034c5511ce392c255e73be70d2eaa..c448eb48340a555460546965a4f827ef91f8e302 100644 (file)
@@ -31,16 +31,12 @@ struct cpumask;
 struct pci_dev;
 struct msi_msg;
 
-extern int disable_irq_remap;
 extern int irq_remap_broken;
 extern int disable_sourceid_checking;
 extern int no_x2apic_optout;
 extern int irq_remapping_enabled;
 
 struct irq_remap_ops {
-       /* Check whether Interrupt Remapping is supported */
-       int (*supported)(void);
-
        /* Initializes hardware and makes it ready for remapping interrupts */
        int  (*prepare)(void);
 
@@ -89,7 +85,6 @@ extern struct irq_remap_ops amd_iommu_irq_ops;
 #else  /* CONFIG_IRQ_REMAP */
 
 #define irq_remapping_enabled 0
-#define disable_irq_remap     1
 #define irq_remap_broken      0
 
 #endif /* CONFIG_IRQ_REMAP */
index f722a0c466cfee07de8f982a9c223478d1765c25..c48da057dbb1e5b6be38e6b28daced1046454687 100644 (file)
@@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = {
        .attach_dev     = gart_iommu_attach_dev,
        .detach_dev     = gart_iommu_detach_dev,
        .map            = gart_iommu_map,
+       .map_sg         = default_iommu_map_sg,
        .unmap          = gart_iommu_unmap,
        .iova_to_phys   = gart_iommu_iova_to_phys,
        .pgsize_bitmap  = GART_IOMMU_PGSIZES,
@@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
        do_gart_setup(gart, NULL);
 
        gart_handle = gart;
-       bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
+
        return 0;
 }
 
index 2b0468e3df6a6a727ca7fda39de7f920fbb3c40e..56b96c63dc4bbc5479a3f51ef712fcbb70b4f5b6 100644 (file)
@@ -37,6 +37,7 @@ static struct irq_domain *gic_irq_domain;
 static int gic_shared_intrs;
 static int gic_vpes;
 static unsigned int gic_cpu_pin;
+static unsigned int timer_cpu_pin;
 static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
 
 static void __gic_irq_dispatch(void);
@@ -616,6 +617,8 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
                        break;
                case GIC_LOCAL_INT_TIMER:
+                       /* CONFIG_MIPS_CMP workaround (see __gic_init) */
+                       val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
                        break;
                case GIC_LOCAL_INT_PERFCTR:
@@ -713,12 +716,36 @@ static void __init __gic_init(unsigned long gic_base_addr,
        if (cpu_has_veic) {
                /* Always use vector 1 in EIC mode */
                gic_cpu_pin = 0;
+               timer_cpu_pin = gic_cpu_pin;
                set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
                               __gic_irq_dispatch);
        } else {
                gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
                irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
                                        gic_irq_dispatch);
+               /*
+                * With the CMP implementation of SMP (deprecated), other CPUs
+                * are started by the bootloader and put into a timer based
+                * waiting poll loop. We must not re-route those CPU's local
+                * timer interrupts as the wait instruction will never finish,
+                * so just handle whatever CPU interrupt it is routed to by
+                * default.
+                *
+                * This workaround should be removed when CMP support is
+                * dropped.
+                */
+               if (IS_ENABLED(CONFIG_MIPS_CMP) &&
+                   gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
+                       timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
+                                                        GIC_VPE_TIMER_MAP)) &
+                                       GIC_MAP_MSK;
+                       irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
+                                               GIC_CPU_PIN_OFFSET +
+                                               timer_cpu_pin,
+                                               gic_irq_dispatch);
+               } else {
+                       timer_cpu_pin = gic_cpu_pin;
+               }
        }
 
        gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
index 0b380603a578543bcd275d248ade94f3f07b24a8..d7c286656a25721ec58fa16a5f3fdb277a0747a5 100644 (file)
@@ -1474,7 +1474,7 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
                                        add_ai(plci, &parms[5]);
                                        sig_req(plci, REJECT, 0);
                                }
-                               else if (Reject == 1 || Reject > 9)
+                               else if (Reject == 1 || Reject >= 9)
                                {
                                        add_ai(plci, &parms[5]);
                                        sig_req(plci, HANGUP, 0);
index 5bdedf6df153cf25d54e91a2cca5d9e3b6b8b1cf..c355a226a0247c824770457179731bc05d3a0667 100644 (file)
@@ -5,6 +5,7 @@
 menuconfig MD
        bool "Multiple devices driver support (RAID and LVM)"
        depends on BLOCK
+       select SRCU
        help
          Support multiple physical spindles through a single logical device.
          Required for RAID and logical volume management.
index da3604e73e8abafbd8e127dbfc659360d23310e5..1695ee5f3ffc30b883c83c1926745ba22e48a7e1 100644 (file)
@@ -72,6 +72,19 @@ __acquires(bitmap->lock)
        /* this page has not been allocated yet */
 
        spin_unlock_irq(&bitmap->lock);
+       /* It is possible that this is being called inside a
+        * prepare_to_wait/finish_wait loop from raid5.c:make_request().
+        * In general it is not permitted to sleep in that context as it
+        * can cause the loop to spin freely.
+        * That doesn't apply here as we can only reach this point
+        * once with any loop.
+        * When this function completes, either bp[page].map or
+        * bp[page].hijacked will be set.  In either case, this
+        * function will abort before getting to this point again, so
+        * there is no risk of a free-spin, and it is safe to assert
+        * that sleeping here is allowed.
+        */
+       sched_annotate_sleep();
        mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
        spin_lock_irq(&bitmap->lock);
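
The comment being added documents the classic drop-the-lock-to-allocate dance, with GFP_NOIO preventing the allocator from recursing into the I/O path that holds the bitmap. A generic sketch of the idiom, with a hypothetical lock and slot:

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ex_lock);
static void *ex_page;

/* called with ex_lock held; returns with it held */
static int ex_alloc_slot(void)
{
	void *p;

	spin_unlock_irq(&ex_lock);	/* a sleeping allocation is illegal under the lock */
	p = kzalloc(PAGE_SIZE, GFP_NOIO);	/* NOIO: may not recurse into I/O */
	spin_lock_irq(&ex_lock);

	if (!p)
		return -ENOMEM;
	if (ex_page) {			/* lost a race while unlocked: discard ours */
		kfree(p);
		return 0;
	}
	ex_page = p;
	return 0;
}
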
 
index 21b156242e42668b5f8a1b43910374fe2c7a77f6..c1c010498a21b99a9bf730b0cd4277776abc159a 100644 (file)
@@ -683,7 +683,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd) {
                DMERR("could not allocate metadata struct");
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
        atomic_set(&cmd->ref_count, 1);
@@ -745,7 +745,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
                return cmd;
 
        cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
-       if (cmd) {
+       if (!IS_ERR(cmd)) {
                mutex_lock(&table_lock);
                cmd2 = lookup(bdev);
                if (cmd2) {
@@ -780,9 +780,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
 {
        struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
                                                       may_format_device, policy_hint_size);
-       if (cmd && !same_params(cmd, data_block_size)) {
+
+       if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
                dm_cache_metadata_close(cmd);
-               return NULL;
+               return ERR_PTR(-EINVAL);
        }
 
        return cmd;
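
Switching the open path from returning NULL to ERR_PTR() lets callers distinguish -ENOMEM from -EINVAL; IS_ERR() and PTR_ERR() decode the result. A sketch with a hypothetical ex_md type:

#include <linux/err.h>
#include <linux/slab.h>

struct ex_md { int refcount; };

static struct ex_md *ex_open(void)
{
	struct ex_md *md = kzalloc(sizeof(*md), GFP_KERNEL);

	if (!md)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	return md;
}

static int ex_user(void)
{
	struct ex_md *md = ex_open();

	if (IS_ERR(md))
		return PTR_ERR(md);		/* propagate the real reason */
	/* ... use md ... */
	return 0;
}
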
index 493478989dbd4349b23716aa3dbdd92e0d1bc37f..07705ee181e3d2837c47954626276f9dea52cac0 100644 (file)
@@ -3385,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
 
+       if (get_pool_mode(pool) >= PM_READ_ONLY) {
+               DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
+                     dm_device_name(pool->pool_md));
+               return -EINVAL;
+       }
+
        if (!strcasecmp(argv[0], "create_thin"))
                r = process_create_thin_mesg(argc, argv, pool);
 
index c1b0d52bfcb0f7b014bbc48f4823e3b9b028dcce..b98765f6f77fd9f1e11ecdcd8809928e7b821716 100644 (file)
@@ -3195,6 +3195,11 @@ static void handle_stripe_dirtying(struct r5conf *conf,
                                          (unsigned long long)sh->sector,
                                          rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
        }
+
+       if (rcw > disks && rmw > disks &&
+           !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+               set_bit(STRIPE_DELAYED, &sh->state);
+
        /* now if nothing is locked, and if we have enough data,
         * we can start a write request
         */
index d6607ee9c85506bb4fcc5d2077b8dc4e582d4b4b..84673ebcf428846fadf26ae881c39172fd45d612 100644 (file)
@@ -197,6 +197,7 @@ config NETCONSOLE_DYNAMIC
 
 config NETPOLL
        def_bool NETCONSOLE
+       select SRCU
 
 config NET_POLL_CONTROLLER
        def_bool NETPOLL
index 5e40a8b68cbe1964a242868ee978551442a3bfd0..b3b922adc0e4f68ed15ff34537c21e1bd7e5e81f 100644 (file)
@@ -1415,7 +1415,6 @@ static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
 
        cfhsi = netdev_priv(dev);
        cfhsi_netlink_parms(data, cfhsi);
-       dev_net_set(cfhsi->ndev, src_net);
 
        get_ops = symbol_get(cfhsi_get_ops);
        if (!get_ops) {
index 7a5e4aa5415e2652a13d9624a70003a5e2f6ad3d..77f1f6048dddf0bdf94d9b771f252ef70147be78 100644 (file)
@@ -45,7 +45,7 @@ config AMD8111_ETH
 
 config LANCE
        tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
-       depends on ISA && ISA_DMA_API
+       depends on ISA && ISA_DMA_API && !ARM
        ---help---
          If you have a network (Ethernet) card of this type, say Y and read
          the Ethernet-HOWTO, available from
@@ -142,7 +142,7 @@ config PCMCIA_NMCLAN
 
 config NI65
        tristate "NI6510 support"
-       depends on ISA && ISA_DMA_API
+       depends on ISA && ISA_DMA_API && !ARM
        ---help---
          If you have a network (Ethernet) card of this type, say Y and read
          the Ethernet-HOWTO, available from
index 5b22764ba88d2304c3502cd987420e7f955fd793..27245efe9f50098eee594cb844e5e1647978bf3d 100644 (file)
@@ -952,6 +952,8 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
   do {
     /* WARNING: MACE_IR is a READ/CLEAR port! */
     status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
+    if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS)
+      return IRQ_NONE;
 
     pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
 
index 7bb5f07dbeef3e806174047cc883e3fb6ca149c3..e5ffb2ccb67d1d053d47f0f6cc219e86fb6b086b 100644 (file)
@@ -523,6 +523,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
        hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
        hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
        hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+       hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
        hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
        hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  HASHTBLSZ);
@@ -552,13 +553,14 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
                break;
        }
 
-       /* The Queue and Channel counts are zero based so increment them
+       /* The Queue, Channel and TC counts are zero based so increment them
         * to get the actual number
         */
        hw_feat->rx_q_cnt++;
        hw_feat->tx_q_cnt++;
        hw_feat->rx_ch_cnt++;
        hw_feat->tx_ch_cnt++;
+       hw_feat->tc_cnt++;
 
        DBGPR("<--xgbe_get_all_hw_features\n");
 }
index 83a50280bb7098149624f4bfe341822845e2148f..793f3b73eeff61216b2deaed21da60330f9ae898 100644 (file)
@@ -369,6 +369,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
                if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
                        break;
 
+               /* read fpqnum field after dataaddr field */
+               dma_rmb();
                if (is_rx_desc(raw_desc))
                        ret = xgene_enet_rx_frame(ring, raw_desc);
                else
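
dma_rmb() is the lightweight read barrier meant for exactly this case: the CPU must observe the descriptor's dataaddr before it trusts fpqnum, both of which the device writes via DMA. A sketch of the ordering, with a hypothetical descriptor layout:

#include <linux/types.h>
#include <asm/barrier.h>

struct ex_desc {	/* device writes dataaddr first, fpqnum second */
	u64 dataaddr;
	u32 fpqnum;
};

static u32 ex_poll(const volatile struct ex_desc *d)
{
	if (!d->dataaddr)
		return 0;	/* slot not yet filled by the device */
	dma_rmb();		/* order the two reads against each other */
	return d->fpqnum;
}
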
index 7403dff8f14a4cf38090f8b32c57645d91b1770e..905ac5f5d9a60037746998d2f038ee738c04ce4f 100644 (file)
@@ -32,7 +32,8 @@ config CS89x0
          will be called cs89x0.
 
 config CS89x0_PLATFORM
-       bool "CS89x0 platform driver support"
+       bool "CS89x0 platform driver support" if HAS_IOPORT_MAP
+       default !HAS_IOPORT_MAP
        depends on CS89x0
        help
          Say Y to compile the cs89x0 driver as a platform driver. This
index 3e1a9c1a67a95ffdaddbcca9739a9e37dee66eac..fda12fb32ec77a8538a0f1d1370d2e653c91856c 100644 (file)
@@ -1586,7 +1586,7 @@ static int gfar_write_filer_table(struct gfar_private *priv,
                return -EBUSY;
 
        /* Fill regular entries */
-       for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
+       for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
             i++)
                gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
        /* Fill the rest with fall-troughs */
index 63c807c9b21c0f7d68f330bab3be65f2d806f173..edea13b0ee85277a2ca6b66ddba50f928efc6afe 100644 (file)
@@ -1907,7 +1907,8 @@ static void igbvf_watchdog_task(struct work_struct *work)
 
 static int igbvf_tso(struct igbvf_adapter *adapter,
                      struct igbvf_ring *tx_ring,
-                     struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+                    struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
+                    __be16 protocol)
 {
        struct e1000_adv_tx_context_desc *context_desc;
        struct igbvf_buffer *buffer_info;
@@ -1927,7 +1928,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
        l4len = tcp_hdrlen(skb);
        *hdr_len += l4len;
 
-       if (skb->protocol == htons(ETH_P_IP)) {
+       if (protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
@@ -1958,7 +1959,7 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
 
-       if (skb->protocol == htons(ETH_P_IP))
+       if (protocol == htons(ETH_P_IP))
                tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
        tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
 
@@ -1984,7 +1985,8 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
 
 static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                                  struct igbvf_ring *tx_ring,
-                                 struct sk_buff *skb, u32 tx_flags)
+                                struct sk_buff *skb, u32 tx_flags,
+                                __be16 protocol)
 {
        struct e1000_adv_tx_context_desc *context_desc;
        unsigned int i;
@@ -2011,7 +2013,7 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
                tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
 
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       switch (skb->protocol) {
+                       switch (protocol) {
                        case htons(ETH_P_IP):
                                tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
                                if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -2211,6 +2213,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
        u8 hdr_len = 0;
        int count = 0;
        int tso = 0;
+       __be16 protocol = vlan_get_protocol(skb);
 
        if (test_bit(__IGBVF_DOWN, &adapter->state)) {
                dev_kfree_skb_any(skb);
@@ -2239,13 +2242,13 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
                tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
        }
 
-       if (skb->protocol == htons(ETH_P_IP))
+       if (protocol == htons(ETH_P_IP))
                tx_flags |= IGBVF_TX_FLAGS_IPV4;
 
        first = tx_ring->next_to_use;
 
        tso = skb_is_gso(skb) ?
-               igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
+               igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
        if (unlikely(tso < 0)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -2253,7 +2256,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
 
        if (tso)
                tx_flags |= IGBVF_TX_FLAGS_TSO;
-       else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+       else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) &&
                 (skb->ip_summed == CHECKSUM_PARTIAL))
                tx_flags |= IGBVF_TX_FLAGS_CSUM;
 
index 2ed2c7de230444f88c3f06451d7cc8a7167f5f05..67b02bde179e9df1472f5b751b30e11b8f3ba34f 100644 (file)
@@ -7227,11 +7227,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                if (!vhdr)
                        goto out_drop;
 
-               protocol = vhdr->h_vlan_encapsulated_proto;
                tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
                                  IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_SW_VLAN;
        }
+       protocol = vlan_get_protocol(skb);
 
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
            adapter->ptp_clock &&
index 62a0d8e0f17da5ce75d48f9ff39fca5354c4447f..38c7a0be81977e91a901ea0d93b11db7c6652f51 100644 (file)
@@ -3099,7 +3099,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-       if (skb->protocol == htons(ETH_P_IP)) {
+       if (first->protocol == htons(ETH_P_IP)) {
                struct iphdr *iph = ip_hdr(skb);
                iph->tot_len = 0;
                iph->check = 0;
@@ -3156,7 +3156,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                u8 l4_hdr = 0;
-               switch (skb->protocol) {
+               switch (first->protocol) {
                case htons(ETH_P_IP):
                        vlan_macip_lens |= skb_network_header_len(skb);
                        type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
index bdd4eea2247cc1b09c19551a21d1c08f2c65e9d5..210691c89b6cbfdb3b7ac000cb75013e1890545f 100644 (file)
@@ -235,7 +235,8 @@ do {                                                                        \
 extern int mlx4_log_num_mgm_entry_size;
 extern int log_mtts_per_seg;
 
-#define MLX4_MAX_NUM_SLAVES    (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define MLX4_MAX_NUM_SLAVES    (min(MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF, \
+                                    MLX4_MFUNC_MAX))
 #define ALL_SLAVES 0xff
 
 struct mlx4_bitmap {
index 18e5de72e9b4c2c9b95848bf444597251942039e..4e1f58cf19ce4013deb51010fd130cc6f2f0860c 100644 (file)
@@ -967,7 +967,12 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
        tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
                                              budget);
        work_done = qlcnic_process_rcv_ring(sds_ring, budget);
-       if ((work_done < budget) && tx_complete) {
+
+       /* Check if we need a repoll */
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
                        qlcnic_enable_sds_intr(adapter, sds_ring);
@@ -992,6 +997,9 @@ static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
                napi_complete(&tx_ring->napi);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
                        qlcnic_enable_tx_intr(adapter, tx_ring);
+       } else {
+               /* As qlcnic_process_cmd_ring() returned 0, we need a repoll */
+               work_done = budget;
        }
 
        return work_done;
@@ -1950,7 +1958,12 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
 
        tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
        work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
-       if ((work_done < budget) && tx_complete) {
+
+       /* Check if we need a repoll */
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                qlcnic_enable_sds_intr(adapter, sds_ring);
        }
@@ -1973,7 +1986,12 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
 
        tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
        work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
-       if ((work_done < budget) && tx_complete) {
+
+       /* Check if we need a repoll */
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                qlcnic_enable_sds_intr(adapter, sds_ring);
        }
@@ -1995,6 +2013,9 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
                napi_complete(&tx_ring->napi);
                if (test_bit(__QLCNIC_DEV_UP , &adapter->state))
                        qlcnic_enable_tx_intr(adapter, tx_ring);
+       } else {
+               /* need a repoll */
+               work_done = budget;
        }
 
        return work_done;
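
All four qlcnic poll handlers above converge on the same NAPI rule: if the Tx completion ring was not fully drained, report the whole budget so the core schedules another poll instead of re-enabling interrupts too early. Condensed into one sketch, with process_tx(), process_rx() and reenable_irq() as hypothetical driver hooks:

	static bool process_tx(struct napi_struct *napi);
	static int process_rx(struct napi_struct *napi, int budget);
	static void reenable_irq(struct napi_struct *napi);

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		bool tx_complete = process_tx(napi);
		int work_done = process_rx(napi, budget);

		/* Tx not drained: claim the full budget to force a repoll */
		if (!tx_complete)
			work_done = budget;

		if (work_done < budget) {
			napi_complete(napi);
			reenable_irq(napi);
		}

		return work_done;
	}
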
index 6c904a6cad2a177036b42190cffb17a25e194708..ef5aed3b122530a785d971c30adfa1d4c9186f08 100644 (file)
@@ -2351,23 +2351,29 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
 {
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status = 0;
+       bool need_restart = netif_running(ndev);
 
-       status = ql_adapter_down(qdev);
-       if (status) {
-               netif_err(qdev, link, qdev->ndev,
-                         "Failed to bring down the adapter\n");
-               return status;
+       if (need_restart) {
+               status = ql_adapter_down(qdev);
+               if (status) {
+                       netif_err(qdev, link, qdev->ndev,
+                                 "Failed to bring down the adapter\n");
+                       return status;
+               }
        }
 
        /* update the features with the recent change */
        ndev->features = features;
 
-       status = ql_adapter_up(qdev);
-       if (status) {
-               netif_err(qdev, link, qdev->ndev,
-                         "Failed to bring up the adapter\n");
-               return status;
+       if (need_restart) {
+               status = ql_adapter_up(qdev);
+               if (status) {
+                       netif_err(qdev, link, qdev->ndev,
+                                 "Failed to bring up the adapter\n");
+                       return status;
+               }
        }
+
        return status;
 }
 
index d2835bf7b4fbef1744bf2bd6d840acfe47863a39..3699b98d5b2c26c9c50220fa6f69c5d97ec1beb3 100644 (file)
@@ -1119,6 +1119,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
                        skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
                        skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
                }
+               nskb->queue_mapping = skb->queue_mapping;
                dev_kfree_skb(skb);
                skb = nskb;
        }
index 9f49c0129a78a63f9a473012162ab74260f9449a..7cd4eb38abfa1591ed23b16141e9adf68aa204fe 100644 (file)
@@ -716,7 +716,7 @@ int netvsc_send(struct hv_device *device,
        u64 req_id;
        unsigned int section_index = NETVSC_INVALID_INDEX;
        u32 msg_size = 0;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        u16 q_idx = packet->q_idx;
 
 
@@ -743,8 +743,6 @@ int netvsc_send(struct hv_device *device,
                                                           packet);
                        skb = (struct sk_buff *)
                              (unsigned long)packet->send_completion_tid;
-                       if (skb)
-                               dev_kfree_skb_any(skb);
                        packet->page_buf_cnt = 0;
                }
        }
@@ -810,6 +808,13 @@ int netvsc_send(struct hv_device *device,
                           packet, ret);
        }
 
+       if (ret != 0) {
+               if (section_index != NETVSC_INVALID_INDEX)
+                       netvsc_free_send_slot(net_device, section_index);
+       } else if (skb) {
+               dev_kfree_skb_any(skb);
+       }
+
        return ret;
 }
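
The reordering above tightens skb ownership in netvsc_send(): the completion-tid skb is captured early but freed only after the send has succeeded, and a reserved send-buffer slot is handed back on failure, so neither resource leaks nor is freed twice. In outline (the send call is condensed to a hypothetical do_send()):

	struct sk_buff *skb = NULL;
	int ret;

	if (section_index != NETVSC_INVALID_INDEX)
		skb = (struct sk_buff *)
			(unsigned long)packet->send_completion_tid;

	ret = do_send(device, packet);

	if (ret != 0) {
		if (section_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, section_index);
	} else if (skb) {
		dev_kfree_skb_any(skb);	/* data was copied; safe to free */
	}
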
 
index 7df221788cd4dc7ae4d8c3ed2f53789296d00deb..919f4fccc322a9b227dcbddaa854ca14b4e18247 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/fs.h>
 #include <linux/uio.h>
 
-#include <net/ipv6.h>
 #include <net/net_namespace.h>
 #include <net/rtnetlink.h>
 #include <net/sock.h>
@@ -81,7 +80,7 @@ static struct cdev macvtap_cdev;
 static const struct proto_ops macvtap_socket_ops;
 
 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
-                     NETIF_F_TSO6)
+                     NETIF_F_TSO6 | NETIF_F_UFO)
 #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
 #define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
 
@@ -586,11 +585,7 @@ static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q,
                        gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
-                       pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
-                                    current->comm);
                        gso_type = SKB_GSO_UDP;
-                       if (skb->protocol == htons(ETH_P_IPV6))
-                               ipv6_proxy_select_ident(skb);
                        break;
                default:
                        return -EINVAL;
@@ -636,6 +631,8 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (sinfo->gso_type & SKB_GSO_TCPV6)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+               else if (sinfo->gso_type & SKB_GSO_UDP)
+                       vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (sinfo->gso_type & SKB_GSO_TCP_ECN)
@@ -965,6 +962,9 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
                        if (arg & TUN_F_TSO6)
                                feature_mask |= NETIF_F_TSO6;
                }
+
+               if (arg & TUN_F_UFO)
+                       feature_mask |= NETIF_F_UFO;
        }
 
        /* tun/tap driver inverts the usage for TSO offloads, where
@@ -975,7 +975,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
         * When user space turns off TSO, we turn off GSO/LRO so that
         * user-space will not receive TSO frames.
         */
-       if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
+       if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
                features |= RX_OFFLOADS;
        else
                features &= ~RX_OFFLOADS;
@@ -1090,7 +1090,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
        case TUNSETOFFLOAD:
                /* let the user check for future flags */
                if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
-                           TUN_F_TSO_ECN))
+                           TUN_F_TSO_ECN | TUN_F_UFO))
                        return -EINVAL;
 
                rtnl_lock();
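
This macvtap change, together with the tun.c and virtio_net.c hunks below, reinstates UFO as a negotiable offload: the TUN_F_UFO bit requested via TUNSETOFFLOAD maps onto NETIF_F_UFO exactly as the TSO bits do, and VIRTIO_NET_HDR_GSO_UDP is translated to and from SKB_GSO_UDP on the data path. The flag translation, condensed into a hypothetical helper:

	#include <linux/if_tun.h>
	#include <linux/netdevice.h>

	static netdev_features_t tun_arg_to_features(unsigned long arg)
	{
		netdev_features_t features = 0;

		if (arg & TUN_F_CSUM) {
			features |= NETIF_F_HW_CSUM;
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			if (arg & TUN_F_UFO)
				features |= NETIF_F_UFO;
		}

		return features;
	}
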
index 602c625d95d5e26ba0c79f9ae6af2dd92f1f3c93..b5edc7f96a392d0080400ed4285cfb84e86d9e5c 100644 (file)
@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
        /*
         * See if we managed to reduce the size of the packet.
         */
-       if (olen < isize) {
+       if (olen < isize && olen <= osize) {
                state->stats.comp_bytes += olen;
                state->stats.comp_packets++;
        } else {
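
The added olen <= osize test matters because deflate can expand incompressible data: without it, a "compressed" packet larger than the output buffer would be accounted and transmitted even though it overran obuf. Restated with hypothetical send helpers:

	/* Use the compressed copy only if it both shrank the packet and
	 * actually fit in the output buffer. */
	if (olen < isize && olen <= osize)
		send_compressed(obuf, olen);
	else
		send_original(rptr, isize);
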
index 8c8dc16839a79473e976d9de2132cc6df911e418..10f9e4021b5ab9799c2e445fa1f6f9c4799293b3 100644 (file)
@@ -65,7 +65,6 @@
 #include <linux/nsproxy.h>
 #include <linux/virtio_net.h>
 #include <linux/rcupdate.h>
-#include <net/ipv6.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
@@ -187,7 +186,7 @@ struct tun_struct {
        struct net_device       *dev;
        netdev_features_t       set_features;
 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
-                         NETIF_F_TSO6)
+                         NETIF_F_TSO6|NETIF_F_UFO)
 
        int                     vnet_hdr_sz;
        int                     sndbuf;
@@ -1167,8 +1166,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                break;
        }
 
-       skb_reset_network_header(skb);
-
        if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -1179,20 +1176,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
-               {
-                       static bool warned;
-
-                       if (!warned) {
-                               warned = true;
-                               netdev_warn(tun->dev,
-                                           "%s: using disabled UFO feature; please fix this program\n",
-                                           current->comm);
-                       }
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
-                       if (skb->protocol == htons(ETH_P_IPV6))
-                               ipv6_proxy_select_ident(skb);
                        break;
-               }
                default:
                        tun->dev->stats.rx_frame_errors++;
                        kfree_skb(skb);
@@ -1221,6 +1206,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        }
 
+       skb_reset_network_header(skb);
        skb_probe_transport_header(skb, 0);
 
        rxhash = skb_get_hash(skb);
@@ -1298,6 +1284,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                        else if (sinfo->gso_type & SKB_GSO_TCPV6)
                                gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+                       else if (sinfo->gso_type & SKB_GSO_UDP)
+                               gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                        else {
                                pr_err("unexpected GSO type: "
                                       "0x%x, gso_size %d, hdr_len %d\n",
@@ -1746,6 +1734,11 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
                                features |= NETIF_F_TSO6;
                        arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
                }
+
+               if (arg & TUN_F_UFO) {
+                       features |= NETIF_F_UFO;
+                       arg &= ~TUN_F_UFO;
+               }
        }
 
        /* This gives the user a way to test for new features in future by
index 99b69af142742523d873762bcdd9c58c0b2b8b0e..4a1e9c489f1f455388ffee289d65e1d6b36cba42 100644 (file)
@@ -77,7 +77,7 @@ static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
                int ret;
 
                udelay(1);
-               ret = sr_read_reg(dev, EPCR, &tmp);
+               ret = sr_read_reg(dev, SR_EPCR, &tmp);
                if (ret < 0)
                        return ret;
 
@@ -98,15 +98,15 @@ static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg,
 
        mutex_lock(&dev->phy_mutex);
 
-       sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
-       sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
+       sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+       sr_write_reg(dev, SR_EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
 
        ret = wait_phy_eeprom_ready(dev, phy);
        if (ret < 0)
                goto out_unlock;
 
-       sr_write_reg(dev, EPCR, 0x0);
-       ret = sr_read(dev, EPDR, 2, value);
+       sr_write_reg(dev, SR_EPCR, 0x0);
+       ret = sr_read(dev, SR_EPDR, 2, value);
 
        netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
                   phy, reg, *value, ret);
@@ -123,19 +123,19 @@ static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
 
        mutex_lock(&dev->phy_mutex);
 
-       ret = sr_write(dev, EPDR, 2, &value);
+       ret = sr_write(dev, SR_EPDR, 2, &value);
        if (ret < 0)
                goto out_unlock;
 
-       sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
-       sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
+       sr_write_reg(dev, SR_EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+       sr_write_reg(dev, SR_EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
                    (EPCR_WEP | EPCR_ERPRW));
 
        ret = wait_phy_eeprom_ready(dev, phy);
        if (ret < 0)
                goto out_unlock;
 
-       sr_write_reg(dev, EPCR, 0x0);
+       sr_write_reg(dev, SR_EPCR, 0x0);
 
 out_unlock:
        mutex_unlock(&dev->phy_mutex);
@@ -188,7 +188,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
        if (loc == MII_BMSR) {
                u8 value;
 
-               sr_read_reg(dev, NSR, &value);
+               sr_read_reg(dev, SR_NSR, &value);
                if (value & NSR_LINKST)
                        rc = 1;
        }
@@ -228,7 +228,7 @@ static u32 sr9700_get_link(struct net_device *netdev)
        int rc = 0;
 
        /* Get the Link Status directly */
-       sr_read_reg(dev, NSR, &value);
+       sr_read_reg(dev, SR_NSR, &value);
        if (value & NSR_LINKST)
                rc = 1;
 
@@ -281,8 +281,8 @@ static void sr9700_set_multicast(struct net_device *netdev)
                }
        }
 
-       sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes);
-       sr_write_reg_async(dev, RCR, rx_ctl);
+       sr_write_async(dev, SR_MAR, SR_MCAST_SIZE, hashes);
+       sr_write_reg_async(dev, SR_RCR, rx_ctl);
 }
 
 static int sr9700_set_mac_address(struct net_device *netdev, void *p)
@@ -297,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p)
        }
 
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-       sr_write_async(dev, PAR, 6, netdev->dev_addr);
+       sr_write_async(dev, SR_PAR, 6, netdev->dev_addr);
 
        return 0;
 }
@@ -340,7 +340,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
        mii->phy_id_mask = 0x1f;
        mii->reg_num_mask = 0x1f;
 
-       sr_write_reg(dev, NCR, NCR_RST);
+       sr_write_reg(dev, SR_NCR, NCR_RST);
        udelay(20);
 
        /* read MAC
@@ -348,17 +348,17 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
         * EEPROM automatically to PAR. In case there is no EEPROM externally,
         * a default MAC address is stored in PAR so the chip works properly.
         */
-       if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) {
+       if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
                netdev_err(netdev, "Error reading MAC address\n");
                ret = -ENODEV;
                goto out;
        }
 
        /* power up and reset phy */
-       sr_write_reg(dev, PRR, PRR_PHY_RST);
+       sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
        /* at least 10ms, here 20ms for safety */
        mdelay(20);
-       sr_write_reg(dev, PRR, 0);
+       sr_write_reg(dev, SR_PRR, 0);
        /* at least 1ms, here 2ms for reading the right register */
        udelay(2 * 1000);
 
index fd687c575e742efce8d99e2fa28bb74dbba0e7dd..258b030277e753d56b7961a04128dce2d97da70a 100644 (file)
 /* sr9700 spec. register table on Linux platform */
 
 /* Network Control Reg */
-#define        NCR                     0x00
+#define        SR_NCR                  0x00
 #define                NCR_RST                 (1 << 0)
 #define                NCR_LBK                 (3 << 1)
 #define                NCR_FDX                 (1 << 3)
 #define                NCR_WAKEEN              (1 << 6)
 /* Network Status Reg */
-#define        NSR                     0x01
+#define        SR_NSR                  0x01
 #define                NSR_RXRDY               (1 << 0)
 #define                NSR_RXOV                (1 << 1)
 #define                NSR_TX1END              (1 << 2)
@@ -30,7 +30,7 @@
 #define                NSR_LINKST              (1 << 6)
 #define                NSR_SPEED               (1 << 7)
 /* Tx Control Reg */
-#define        TCR                     0x02
+#define        SR_TCR                  0x02
 #define                TCR_CRC_DIS             (1 << 1)
 #define                TCR_PAD_DIS             (1 << 2)
 #define                TCR_LC_CARE             (1 << 3)
@@ -38,7 +38,7 @@
 #define                TCR_EXCECM              (1 << 5)
 #define                TCR_LF_EN               (1 << 6)
 /* Tx Status Reg for Packet Index 1 */
-#define        TSR1            0x03
+#define        SR_TSR1         0x03
 #define                TSR1_EC                 (1 << 2)
 #define                TSR1_COL                (1 << 3)
 #define                TSR1_LC                 (1 << 4)
@@ -46,7 +46,7 @@
 #define                TSR1_LOC                (1 << 6)
 #define                TSR1_TLF                (1 << 7)
 /* Tx Status Reg for Packet Index 2 */
-#define        TSR2            0x04
+#define        SR_TSR2         0x04
 #define                TSR2_EC                 (1 << 2)
 #define                TSR2_COL                (1 << 3)
 #define                TSR2_LC                 (1 << 4)
@@ -54,7 +54,7 @@
 #define                TSR2_LOC                (1 << 6)
 #define                TSR2_TLF                (1 << 7)
 /* Rx Control Reg*/
-#define        RCR                     0x05
+#define        SR_RCR                  0x05
 #define                RCR_RXEN                (1 << 0)
 #define                RCR_PRMSC               (1 << 1)
 #define                RCR_RUNT                (1 << 2)
 #define                RCR_DIS_CRC             (1 << 4)
 #define                RCR_DIS_LONG    (1 << 5)
 /* Rx Status Reg */
-#define        RSR                     0x06
+#define        SR_RSR                  0x06
 #define                RSR_AE                  (1 << 2)
 #define                RSR_MF                  (1 << 6)
 #define                RSR_RF                  (1 << 7)
 /* Rx Overflow Counter Reg */
-#define        ROCR            0x07
+#define        SR_ROCR         0x07
 #define                ROCR_ROC                (0x7F << 0)
 #define                ROCR_RXFU               (1 << 7)
 /* Back Pressure Threshold Reg */
-#define        BPTR            0x08
+#define        SR_BPTR         0x08
 #define                BPTR_JPT                (0x0F << 0)
 #define                BPTR_BPHW               (0x0F << 4)
 /* Flow Control Threshold Reg */
-#define        FCTR            0x09
+#define        SR_FCTR         0x09
 #define                FCTR_LWOT               (0x0F << 0)
 #define                FCTR_HWOT               (0x0F << 4)
 /* rx/tx Flow Control Reg */
-#define        FCR                     0x0A
+#define        SR_FCR                  0x0A
 #define                FCR_FLCE                (1 << 0)
 #define                FCR_BKPA                (1 << 4)
 #define                FCR_TXPEN               (1 << 5)
 #define                FCR_TXPF                (1 << 6)
 #define                FCR_TXP0                (1 << 7)
 /* Eeprom & Phy Control Reg */
-#define        EPCR            0x0B
+#define        SR_EPCR         0x0B
 #define                EPCR_ERRE               (1 << 0)
 #define                EPCR_ERPRW              (1 << 1)
 #define                EPCR_ERPRR              (1 << 2)
 #define                EPCR_EPOS               (1 << 3)
 #define                EPCR_WEP                (1 << 4)
 /* Eeprom & Phy Address Reg */
-#define        EPAR            0x0C
+#define        SR_EPAR         0x0C
 #define                EPAR_EROA               (0x3F << 0)
 #define                EPAR_PHY_ADR_MASK       (0x03 << 6)
 #define                EPAR_PHY_ADR            (0x01 << 6)
 /* Eeprom &    Phy Data Reg */
-#define        EPDR            0x0D    /* 0x0D ~ 0x0E for Data Reg Low & High */
+#define        SR_EPDR         0x0D    /* 0x0D ~ 0x0E for Data Reg Low & High */
 /* Wakeup Control Reg */
-#define        WCR                     0x0F
+#define        SR_WCR                  0x0F
 #define                WCR_MAGICST             (1 << 0)
 #define                WCR_LINKST              (1 << 2)
 #define                WCR_MAGICEN             (1 << 3)
 #define                WCR_LINKEN              (1 << 5)
 /* Physical Address Reg */
-#define        PAR                     0x10    /* 0x10 ~ 0x15 6 bytes for PAR */
+#define        SR_PAR                  0x10    /* 0x10 ~ 0x15 6 bytes for PAR */
 /* Multicast Address Reg */
-#define        MAR                     0x16    /* 0x16 ~ 0x1D 8 bytes for MAR */
+#define        SR_MAR                  0x16    /* 0x16 ~ 0x1D 8 bytes for MAR */
 /* 0x1e unused */
 /* Phy Reset Reg */
-#define        PRR                     0x1F
+#define        SR_PRR                  0x1F
 #define                PRR_PHY_RST             (1 << 0)
 /* Tx sdram Write Pointer Address Low */
-#define        TWPAL           0x20
+#define        SR_TWPAL                0x20
 /* Tx sdram Write Pointer Address High */
-#define        TWPAH           0x21
+#define        SR_TWPAH                0x21
 /* Tx sdram Read Pointer Address Low */
-#define        TRPAL           0x22
+#define        SR_TRPAL                0x22
 /* Tx sdram Read Pointer Address High */
-#define        TRPAH           0x23
+#define        SR_TRPAH                0x23
 /* Rx sdram Write Pointer Address Low */
-#define        RWPAL           0x24
+#define        SR_RWPAL                0x24
 /* Rx sdram Write Pointer Address High */
-#define        RWPAH           0x25
+#define        SR_RWPAH                0x25
 /* Rx sdram Read Pointer Address Low */
-#define        RRPAL           0x26
+#define        SR_RRPAL                0x26
 /* Rx sdram Read Pointer Address High */
-#define        RRPAH           0x27
+#define        SR_RRPAH                0x27
 /* Vendor ID register */
-#define        VID                     0x28    /* 0x28 ~ 0x29 2 bytes for VID */
+#define        SR_VID                  0x28    /* 0x28 ~ 0x29 2 bytes for VID */
 /* Product ID register */
-#define        PID                     0x2A    /* 0x2A ~ 0x2B 2 bytes for PID */
+#define        SR_PID                  0x2A    /* 0x2A ~ 0x2B 2 bytes for PID */
 /* CHIP Revision register */
-#define        CHIPR           0x2C
+#define        SR_CHIPR                0x2C
 /* 0x2D --> 0xEF unused */
 /* USB Device Address */
-#define        USBDA           0xF0
+#define        SR_USBDA                0xF0
 #define                USBDA_USBFA             (0x7F << 0)
 /* RX packet Counter Reg */
-#define        RXC                     0xF1
+#define        SR_RXC                  0xF1
 /* Tx packet Counter & USB Status Reg */
-#define        TXC_USBS        0xF2
+#define        SR_TXC_USBS             0xF2
 #define                TXC_USBS_TXC0           (1 << 0)
 #define                TXC_USBS_TXC1           (1 << 1)
 #define                TXC_USBS_TXC2           (1 << 2)
 #define                TXC_USBS_SUSFLAG        (1 << 6)
 #define                TXC_USBS_RXFAULT        (1 << 7)
 /* USB Control register */
-#define        USBC            0xF4
+#define        SR_USBC                 0xF4
 #define                USBC_EP3NAK             (1 << 4)
 #define                USBC_EP3ACK             (1 << 5)
 
index 5ca97713bfb33b5d5a7770f3ae6a2000a19df423..059fdf1bf5eed9ff91c728c2a471f03791b5877c 100644 (file)
@@ -490,17 +490,8 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
-               {
-                       static bool warned;
-
-                       if (!warned) {
-                               warned = true;
-                               netdev_warn(dev,
-                                           "host using disabled UFO feature; please fix it\n");
-                       }
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
-               }
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
@@ -888,6 +879,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                        hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+               else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+                       hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
                else
                        BUG();
                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
@@ -1748,7 +1741,7 @@ static int virtnet_probe(struct virtio_device *vdev)
                        dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
 
                if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
-                       dev->hw_features |= NETIF_F_TSO
+                       dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
@@ -1758,9 +1751,11 @@ static int virtnet_probe(struct virtio_device *vdev)
                        dev->hw_features |= NETIF_F_TSO6;
                if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
                        dev->hw_features |= NETIF_F_TSO_ECN;
+               if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+                       dev->hw_features |= NETIF_F_UFO;
 
                if (gso)
-                       dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
+                       dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
                /* (!csum && gso) case will be fixed by register_netdev() */
        }
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -1798,7 +1793,8 @@ static int virtnet_probe(struct virtio_device *vdev)
        /* If we can receive ANY GSO packets, we must allocate large ones. */
        if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
-           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
                vi->big_packets = true;
 
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -1994,9 +1990,9 @@ static struct virtio_device_id id_table[] = {
 static unsigned int features[] = {
        VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
        VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
-       VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
+       VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
        VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
-       VIRTIO_NET_F_GUEST_ECN,
+       VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
        VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
        VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
        VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
index 7fbd89fbe107878f5c2be4358bb13420ca6389ec..a8c755dcab1417a27f8939096a23ded65a6af65d 100644 (file)
@@ -2432,10 +2432,10 @@ static void vxlan_sock_work(struct work_struct *work)
        dev_put(vxlan->dev);
 }
 
-static int vxlan_newlink(struct net *net, struct net_device *dev,
+static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                         struct nlattr *tb[], struct nlattr *data[])
 {
-       struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+       struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_rdst *dst = &vxlan->default_dst;
        __u32 vni;
@@ -2445,7 +2445,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
        if (!data[IFLA_VXLAN_ID])
                return -EINVAL;
 
-       vxlan->net = dev_net(dev);
+       vxlan->net = src_net;
 
        vni = nla_get_u32(data[IFLA_VXLAN_ID]);
        dst->remote_vni = vni;
@@ -2481,7 +2481,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
        if (data[IFLA_VXLAN_LINK] &&
            (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
                struct net_device *lowerdev
-                        = __dev_get_by_index(net, dst->remote_ifindex);
+                        = __dev_get_by_index(src_net, dst->remote_ifindex);
 
                if (!lowerdev) {
                        pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2557,7 +2557,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
            nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
                vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
 
-       if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
+       if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
                           vxlan->dst_port)) {
                pr_info("duplicate VNI %u\n", vni);
                return -EEXIST;
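
These vxlan hunks thread the src_net argument of ->newlink() through instead of deriving the namespace from the device: when the link is created with its lower device in another netns, dev_net(dev) is the wrong place to resolve IFLA_VXLAN_LINK or to check for duplicate VNIs. A hedged sketch of the lookup (attribute validation omitted):

	static int example_newlink(struct net *src_net, struct net_device *dev,
				   struct nlattr *tb[], struct nlattr *data[])
	{
		int ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
		/* resolve the lower device in the namespace the netlink
		 * request referred to, not in dev_net(dev) */
		struct net_device *lowerdev =
			__dev_get_by_index(src_net, ifindex);

		return lowerdev ? 0 : -ENODEV;
	}
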
index 94e234975c6114d0e46fde4c71c34625596fe6f4..a2fdd15f285a2367a5b25c3f10e69a1327348ed1 100644 (file)
@@ -25,7 +25,7 @@ if WAN
 # There is no way to detect a comtrol sv11 - force it modular for now.
 config HOSTESS_SV11
        tristate "Comtrol Hostess SV-11 support"
-       depends on ISA && m && ISA_DMA_API && INET && HDLC
+       depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
        help
          Driver for Comtrol Hostess SV-11 network card which
          operates on low speed synchronous serial links at up to
@@ -37,7 +37,7 @@ config HOSTESS_SV11
 # The COSA/SRP driver has not been tested as non-modular yet.
 config COSA
        tristate "COSA/SRP sync serial boards support"
-       depends on ISA && m && ISA_DMA_API && HDLC
+       depends on ISA && m && ISA_DMA_API && HDLC && VIRT_TO_BUS
        ---help---
          Driver for COSA and SRP synchronous serial boards.
 
@@ -87,7 +87,7 @@ config LANMEDIA
 # There is no way to detect a Sealevel board. Force it modular
 config SEALEVEL_4021
        tristate "Sealevel Systems 4021 support"
-       depends on ISA && m && ISA_DMA_API && INET && HDLC
+       depends on ISA && m && ISA_DMA_API && INET && HDLC && VIRT_TO_BUS
        help
          This is a driver for the Sealevel Systems ACB 56 serial I/O adapter.
 
index 9259a732e8a4a6d20740a1ad9170962040b8d1f8..037f74f0fcf68fee152822b94dc85872b88fab92 100644 (file)
@@ -578,6 +578,7 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
                goto err_rx_unbind;
        }
        queue->task = task;
+       get_task_struct(task);
 
        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)queue, "%s-dealloc", queue->name);
@@ -634,6 +635,7 @@ void xenvif_disconnect(struct xenvif *vif)
 
                if (queue->task) {
                        kthread_stop(queue->task);
+                       put_task_struct(queue->task);
                        queue->task = NULL;
                }
 
index 908e65e9b8219783ae4b99e5d06e4701478c830d..c8ce701a7efb35280d5d4cac33a7463e89b243d2 100644 (file)
@@ -2109,8 +2109,7 @@ int xenvif_kthread_guest_rx(void *data)
                 */
                if (unlikely(vif->disabled && queue->id == 0)) {
                        xenvif_carrier_off(vif);
-                       xenvif_rx_queue_purge(queue);
-                       continue;
+                       break;
                }
 
                if (!skb_queue_empty(&queue->rx_queue))
index df781cdf13c1871e265eb933e8baadde09e2dd0a..17ca98657a2866820233d2760f339d26a2278cf7 100644 (file)
@@ -283,6 +283,9 @@ static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
        struct msi_msg msg;
        struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
 
+       if (desc->msi_attrib.is_msix)
+               return -EINVAL;
+
        irq = assign_irq(1, desc, &pos);
        if (irq < 0)
                return irq;
index e52356aa09b87adc778a29bbc3ca40c1ffdf638d..903d5078b5ede8872fc71ec662ecadf230e13950 100644 (file)
@@ -324,18 +324,52 @@ static void quirk_s3_64M(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,     PCI_DEVICE_ID_S3_868,           quirk_s3_64M);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,     PCI_DEVICE_ID_S3_968,           quirk_s3_64M);
 
+static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
+                    const char *name)
+{
+       u32 region;
+       struct pci_bus_region bus_region;
+       struct resource *res = dev->resource + pos;
+
+       pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region);
+
+       if (!region)
+               return;
+
+       res->name = pci_name(dev);
+       res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
+       res->flags |=
+               (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
+       region &= ~(size - 1);
+
+       /* Convert from PCI bus to resource space */
+       bus_region.start = region;
+       bus_region.end = region + size - 1;
+       pcibios_bus_to_resource(dev->bus, res, &bus_region);
+
+       dev_info(&dev->dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
+                name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
+}
+
 /*
  * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
  * ver. 1.33  20070103) don't set the correct ISA PCI region header info.
  * BAR0 should be 8 bytes; instead, it may be set to something like 8k
  * (which conflicts w/ BAR1's memory range).
+ *
+ * CS553x's ISA PCI BARs may also be read-only (ref:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward).
  */
 static void quirk_cs5536_vsa(struct pci_dev *dev)
 {
+       static char *name = "CS5536 ISA bridge";
+
        if (pci_resource_len(dev, 0) != 8) {
-               struct resource *res = &dev->resource[0];
-               res->end = res->start + 8 - 1;
-               dev_info(&dev->dev, "CS5536 ISA bridge bug detected (incorrect header); workaround applied\n");
+               quirk_io(dev, 0,   8, name);    /* SMB */
+               quirk_io(dev, 1, 256, name);    /* GPIO */
+               quirk_io(dev, 2,  64, name);    /* MFGPT */
+               dev_info(&dev->dev, "%s bug detected (incorrect header); workaround applied\n",
+                        name);
        }
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
index dfd021e8268f40e8f8900bdb5e69f79c389accd6..f4cd0b9b2438b3548fafa4746b6c6a6cf8874bb4 100644 (file)
@@ -177,7 +177,7 @@ struct at91_pinctrl {
        struct device           *dev;
        struct pinctrl_dev      *pctl;
 
-       int                     nbanks;
+       int                     nactive_banks;
 
        uint32_t                *mux_mask;
        int                     nmux;
@@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name,
        int mux;
 
        /* check if it's a valid config */
-       if (pin->bank >= info->nbanks) {
+       if (pin->bank >= gpio_banks) {
                dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
-                       name, index, pin->bank, info->nbanks);
+                       name, index, pin->bank, gpio_banks);
                return -EINVAL;
        }
 
+       if (!gpio_chips[pin->bank]) {
+               dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n",
+                       name, index, pin->bank);
+               return -ENXIO;
+       }
+
        if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
                dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
                        name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
@@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info,
 
        for_each_child_of_node(np, child) {
                if (of_device_is_compatible(child, gpio_compat)) {
-                       info->nbanks++;
+                       if (of_device_is_available(child))
+                               info->nactive_banks++;
                } else {
                        info->nfunctions++;
                        info->ngroups += of_get_child_count(child);
@@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,
        }
 
        size /= sizeof(*list);
-       if (!size || size % info->nbanks) {
-               dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks);
+       if (!size || size % gpio_banks) {
+               dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);
                return -EINVAL;
        }
-       info->nmux = size / info->nbanks;
+       info->nmux = size / gpio_banks;
 
        info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
        if (!info->mux_mask) {
@@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
                of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
        at91_pinctrl_child_count(info, np);
 
-       if (info->nbanks < 1) {
+       if (gpio_banks < 1) {
                dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
                return -EINVAL;
        }
@@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
 
        dev_dbg(&pdev->dev, "mux-mask\n");
        tmp = info->mux_mask;
-       for (i = 0; i < info->nbanks; i++) {
+       for (i = 0; i < gpio_banks; i++) {
                for (j = 0; j < info->nmux; j++, tmp++) {
                        dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
                }
@@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
        if (!info->groups)
                return -ENOMEM;
 
-       dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks);
+       dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);
        dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
        dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
 
@@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
 {
        struct at91_pinctrl *info;
        struct pinctrl_pin_desc *pdesc;
-       int ret, i, j, k;
+       int ret, i, j, k, ngpio_chips_enabled = 0;
 
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
@@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
         * to obtain references to the struct gpio_chip * for them, and we
         * need this to proceed.
         */
-       for (i = 0; i < info->nbanks; i++) {
-               if (!gpio_chips[i]) {
-                       dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
-                       devm_kfree(&pdev->dev, info);
-                       return -EPROBE_DEFER;
-               }
+       for (i = 0; i < gpio_banks; i++)
+               if (gpio_chips[i])
+                       ngpio_chips_enabled++;
+
+       if (ngpio_chips_enabled < info->nactive_banks) {
+               dev_warn(&pdev->dev,
+                        "Not all GPIO chips are registered yet (%d/%d)\n",
+                        ngpio_chips_enabled, info->nactive_banks);
+               devm_kfree(&pdev->dev, info);
+               return -EPROBE_DEFER;
        }
 
        at91_pinctrl_desc.name = dev_name(&pdev->dev);
-       at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK;
+       at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
        at91_pinctrl_desc.pins = pdesc =
                devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);
 
        if (!at91_pinctrl_desc.pins)
                return -ENOMEM;
 
-       for (i = 0 , k = 0; i < info->nbanks; i++) {
+       for (i = 0, k = 0; i < gpio_banks; i++) {
                for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
                        pdesc->number = k;
                        pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j);
@@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
        }
 
        /* We will handle a range of GPIO pins */
-       for (i = 0; i < info->nbanks; i++)
-               pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+       for (i = 0; i < gpio_banks; i++)
+               if (gpio_chips[i])
+                       pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
 
        dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
 
@@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 static int at91_gpio_of_irq_setup(struct platform_device *pdev,
                                  struct at91_gpio_chip *at91_gpio)
 {
+       struct gpio_chip        *gpiochip_prev = NULL;
        struct at91_gpio_chip   *prev = NULL;
        struct irq_data         *d = irq_get_irq_data(at91_gpio->pioc_virq);
-       int ret;
+       int ret, i;
 
        at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
 
@@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
                return ret;
        }
 
-       /* Setup chained handler */
-       if (at91_gpio->pioc_idx)
-               prev = gpio_chips[at91_gpio->pioc_idx - 1];
-
        /* The top level handler handles one bank of GPIOs, except
         * on some SoC it can handle up to three...
         * We only set up the handler for the first of the list.
         */
-       if (prev && prev->next == at91_gpio)
+       gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
+       if (!gpiochip_prev) {
+               /* Then register the chain on the parent IRQ */
+               gpiochip_set_chained_irqchip(&at91_gpio->chip,
+                                            &gpio_irqchip,
+                                            at91_gpio->pioc_virq,
+                                            gpio_irq_handler);
                return 0;
+       }
 
-       /* Then register the chain on the parent IRQ */
-       gpiochip_set_chained_irqchip(&at91_gpio->chip,
-                                    &gpio_irqchip,
-                                    at91_gpio->pioc_virq,
-                                    gpio_irq_handler);
+       prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip);
 
-       return 0;
+       /* at most two earlier banks can share this IRQ line */
+       for (i = 0; i < 2; i++) {
+               if (prev->next) {
+                       prev = prev->next;
+               } else {
+                       prev->next = at91_gpio;
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
 }
 
 /* This structure is replicated for each GPIO block allocated at probe time */
@@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = {
        .ngpio                  = MAX_NB_GPIO_PER_BANK,
 };
 
-static void at91_gpio_probe_fixup(void)
-{
-       unsigned i;
-       struct at91_gpio_chip *at91_gpio, *last = NULL;
-
-       for (i = 0; i < gpio_banks; i++) {
-               at91_gpio = gpio_chips[i];
-
-               /*
-                * GPIO controller are grouped on some SoC:
-                * PIOC, PIOD and PIOE can share the same IRQ line
-                */
-               if (last && last->pioc_virq == at91_gpio->pioc_virq)
-                       last->next = at91_gpio;
-               last = at91_gpio;
-       }
-}
-
 static struct of_device_id at91_gpio_of_match[] = {
        { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
        { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
@@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
        gpio_chips[alias_idx] = at91_chip;
        gpio_banks = max(gpio_banks, alias_idx + 1);
 
-       at91_gpio_probe_fixup();
-
        ret = at91_gpio_of_irq_setup(pdev, at91_chip);
        if (ret)
                goto irq_setup_err;
index c3a60b57a865eae77b232ff6d94a2700df3fa5d7..a6f116aa523532b56194407438419c5bd5d238d9 100644 (file)
@@ -414,6 +414,14 @@ config REGULATOR_MAX77802
          Exynos5420/Exynos5800 SoCs to control various voltages.
          It includes support for control of voltage and ramp speed.
 
+config REGULATOR_MAX77843
+       tristate "Maxim 77843 regulator"
+       depends on MFD_MAX77843
+       help
+         This driver controls a Maxim 77843 regulator.
+         The regulator includes two 'SAFEOUT' outputs for USB (Universal Serial Bus).
+         This is suitable for Exynos5433 SoC chips.
+
 config REGULATOR_MC13XXX_CORE
        tristate
 
@@ -433,6 +441,15 @@ config REGULATOR_MC13892
          Say y here to support the regulators found on the Freescale MC13892
          PMIC.
 
+config REGULATOR_MT6397
+       tristate "MediaTek MT6397 PMIC"
+       depends on MFD_MT6397
+       help
+         Say y here to enable the power regulators of the MediaTek MT6397
+         PMIC.
+         This driver supports control of the device's different power rails
+         through the regulator interface.
+
 config REGULATOR_PALMAS
        tristate "TI Palmas PMIC Regulators"
        depends on MFD_PALMAS
index 1f28ebfc6f3a09b3b555a23a2633a6402d1c141b..2c4da15e1545a71076616fbd94f3648ad453473a 100644 (file)
@@ -55,9 +55,11 @@ obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
 obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
 obj-$(CONFIG_REGULATOR_MAX77693) += max77693.o
 obj-$(CONFIG_REGULATOR_MAX77802) += max77802.o
+obj-$(CONFIG_REGULATOR_MAX77843) += max77843.o
 obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
 obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
 obj-$(CONFIG_REGULATOR_MC13XXX_CORE) +=  mc13xxx-regulator-core.o
+obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
 obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
 obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
index f23d7e1f2ee7c3602cd23bd8d30f305e5bf0d886..e4331f5e5d7d065e25fa835201359696c01bb2e9 100644 (file)
 
 #define AXP20X_FREQ_DCDC_MASK          0x0f
 
-#define AXP20X_DESC_IO(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg,   \
-                      _emask, _enable_val, _disable_val)                       \
+#define AXP20X_DESC_IO(_id, _match, _supply, _min, _max, _step, _vreg, _vmask, \
+                      _ereg, _emask, _enable_val, _disable_val)                \
        [AXP20X_##_id] = {                                                      \
                .name           = #_id,                                         \
                .supply_name    = (_supply),                                    \
+               .of_match       = of_match_ptr(_match),                         \
+               .regulators_node = of_match_ptr("regulators"),                  \
                .type           = REGULATOR_VOLTAGE,                            \
                .id             = AXP20X_##_id,                                 \
                .n_voltages     = (((_max) - (_min)) / (_step) + 1),            \
                .ops            = &axp20x_ops,                                  \
        }
 
-#define AXP20X_DESC(_id, _supply, _min, _max, _step, _vreg, _vmask, _ereg,     \
-                   _emask)                                                     \
+#define AXP20X_DESC(_id, _match, _supply, _min, _max, _step, _vreg, _vmask,    \
+                   _ereg, _emask)                                              \
        [AXP20X_##_id] = {                                                      \
                .name           = #_id,                                         \
                .supply_name    = (_supply),                                    \
+               .of_match       = of_match_ptr(_match),                         \
+               .regulators_node = of_match_ptr("regulators"),                  \
                .type           = REGULATOR_VOLTAGE,                            \
                .id             = AXP20X_##_id,                                 \
                .n_voltages     = (((_max) - (_min)) / (_step) + 1),            \
                .ops            = &axp20x_ops,                                  \
        }
 
-#define AXP20X_DESC_FIXED(_id, _supply, _volt)                                 \
+#define AXP20X_DESC_FIXED(_id, _match, _supply, _volt)                         \
        [AXP20X_##_id] = {                                                      \
                .name           = #_id,                                         \
                .supply_name    = (_supply),                                    \
+               .of_match       = of_match_ptr(_match),                         \
+               .regulators_node = of_match_ptr("regulators"),                  \
                .type           = REGULATOR_VOLTAGE,                            \
                .id             = AXP20X_##_id,                                 \
                .n_voltages     = 1,                                            \
                .ops            = &axp20x_ops_fixed                             \
        }
 
-#define AXP20X_DESC_TABLE(_id, _supply, _table, _vreg, _vmask, _ereg, _emask)  \
+#define AXP20X_DESC_TABLE(_id, _match, _supply, _table, _vreg, _vmask, _ereg,  \
+                         _emask)                                               \
        [AXP20X_##_id] = {                                                      \
                .name           = #_id,                                         \
                .supply_name    = (_supply),                                    \
+               .of_match       = of_match_ptr(_match),                         \
+               .regulators_node = of_match_ptr("regulators"),                  \
                .type           = REGULATOR_VOLTAGE,                            \
                .id             = AXP20X_##_id,                                 \
                .n_voltages     = ARRAY_SIZE(_table),                           \
@@ -127,36 +136,20 @@ static struct regulator_ops axp20x_ops = {
 };
 
 static const struct regulator_desc axp20x_regulators[] = {
-       AXP20X_DESC(DCDC2, "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT, 0x3f,
-                   AXP20X_PWR_OUT_CTRL, 0x10),
-       AXP20X_DESC(DCDC3, "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT, 0x7f,
-                   AXP20X_PWR_OUT_CTRL, 0x02),
-       AXP20X_DESC_FIXED(LDO1, "acin", 1300),
-       AXP20X_DESC(LDO2, "ldo24in", 1800, 3300, 100, AXP20X_LDO24_V_OUT, 0xf0,
-                   AXP20X_PWR_OUT_CTRL, 0x04),
-       AXP20X_DESC(LDO3, "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT, 0x7f,
-                   AXP20X_PWR_OUT_CTRL, 0x40),
-       AXP20X_DESC_TABLE(LDO4, "ldo24in", axp20x_ldo4_data, AXP20X_LDO24_V_OUT, 0x0f,
-                         AXP20X_PWR_OUT_CTRL, 0x08),
-       AXP20X_DESC_IO(LDO5, "ldo5in", 1800, 3300, 100, AXP20X_LDO5_V_OUT, 0xf0,
-                      AXP20X_GPIO0_CTRL, 0x07, AXP20X_IO_ENABLED,
-                      AXP20X_IO_DISABLED),
-};
-
-#define AXP_MATCH(_name, _id) \
-       [AXP20X_##_id] = { \
-               .name           = #_name, \
-               .driver_data    = (void *) &axp20x_regulators[AXP20X_##_id], \
-       }
-
-static struct of_regulator_match axp20x_matches[] = {
-       AXP_MATCH(dcdc2, DCDC2),
-       AXP_MATCH(dcdc3, DCDC3),
-       AXP_MATCH(ldo1, LDO1),
-       AXP_MATCH(ldo2, LDO2),
-       AXP_MATCH(ldo3, LDO3),
-       AXP_MATCH(ldo4, LDO4),
-       AXP_MATCH(ldo5, LDO5),
+       AXP20X_DESC(DCDC2, "dcdc2", "vin2", 700, 2275, 25, AXP20X_DCDC2_V_OUT,
+                   0x3f, AXP20X_PWR_OUT_CTRL, 0x10),
+       AXP20X_DESC(DCDC3, "dcdc3", "vin3", 700, 3500, 25, AXP20X_DCDC3_V_OUT,
+                   0x7f, AXP20X_PWR_OUT_CTRL, 0x02),
+       AXP20X_DESC_FIXED(LDO1, "ldo1", "acin", 1300),
+       AXP20X_DESC(LDO2, "ldo2", "ldo24in", 1800, 3300, 100,
+                   AXP20X_LDO24_V_OUT, 0xf0, AXP20X_PWR_OUT_CTRL, 0x04),
+       AXP20X_DESC(LDO3, "ldo3", "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT,
+                   0x7f, AXP20X_PWR_OUT_CTRL, 0x40),
+       AXP20X_DESC_TABLE(LDO4, "ldo4", "ldo24in", axp20x_ldo4_data,
+                         AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL, 0x08),
+       AXP20X_DESC_IO(LDO5, "ldo5", "ldo5in", 1800, 3300, 100,
+                      AXP20X_LDO5_V_OUT, 0xf0, AXP20X_GPIO0_CTRL, 0x07,
+                      AXP20X_IO_ENABLED, AXP20X_IO_DISABLED),
 };
 
 static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
@@ -193,13 +186,6 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
        if (!regulators) {
                dev_warn(&pdev->dev, "regulators node not found\n");
        } else {
-               ret = of_regulator_match(&pdev->dev, regulators, axp20x_matches,
-                                        ARRAY_SIZE(axp20x_matches));
-               if (ret < 0) {
-                       dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
-                       return ret;
-               }
-
                dcdcfreq = 1500;
                of_property_read_u32(regulators, "x-powers,dcdc-freq", &dcdcfreq);
                ret = axp20x_set_dcdc_freq(pdev, dcdcfreq);
@@ -233,23 +219,17 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
 {
        struct regulator_dev *rdev;
        struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
-       struct regulator_config config = { };
-       struct regulator_init_data *init_data;
+       struct regulator_config config = {
+               .dev = pdev->dev.parent,
+               .regmap = axp20x->regmap,
+       };
        int ret, i;
        u32 workmode;
 
-       ret = axp20x_regulator_parse_dt(pdev);
-       if (ret)
-               return ret;
+       /* This only sets the dcdc freq. Ignore any errors */
+       axp20x_regulator_parse_dt(pdev);
 
        for (i = 0; i < AXP20X_REG_ID_MAX; i++) {
-               init_data = axp20x_matches[i].init_data;
-
-               config.dev = pdev->dev.parent;
-               config.init_data = init_data;
-               config.regmap = axp20x->regmap;
-               config.of_node = axp20x_matches[i].of_node;
-
                rdev = devm_regulator_register(&pdev->dev, &axp20x_regulators[i],
                                               &config);
                if (IS_ERR(rdev)) {
@@ -259,7 +239,8 @@ static int axp20x_regulator_probe(struct platform_device *pdev)
                        return PTR_ERR(rdev);
                }
 
-               ret = of_property_read_u32(axp20x_matches[i].of_node, "x-powers,dcdc-workmode",
+               ret = of_property_read_u32(rdev->dev.of_node,
+                                          "x-powers,dcdc-workmode",
                                           &workmode);
                if (!ret) {
                        if (axp20x_set_dcdc_workmode(rdev, i, workmode))
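
The axp20x conversion above is the pattern repeated by several drivers in
this pull: set .of_match and .regulators_node in the regulator_desc and
let the core resolve the DT child node and init data itself, instead of
walking a local of_regulator_match() table in probe. A minimal sketch of
the resulting driver shape (hypothetical "foo" regulator; all names here
are illustrative, not from the patch):

	static const struct regulator_desc foo_desc = {
		.name            = "LDO1",
		.of_match        = of_match_ptr("ldo1"),
		.regulators_node = of_match_ptr("regulators"),
		.type            = REGULATOR_VOLTAGE,
		.owner           = THIS_MODULE,
		/* voltage range, vsel/enable regmap fields, ops ... */
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct regulator_config config = {
			/* the "regulators" node lives on the MFD parent */
			.dev    = pdev->dev.parent,
			.regmap = dev_get_regmap(pdev->dev.parent, NULL),
		};
		struct regulator_dev *rdev;

		rdev = devm_regulator_register(&pdev->dev, &foo_desc, &config);
		return PTR_ERR_OR_ZERO(rdev);
	}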
index 9c48fb32f6601bf4065db65cd2a099bc57f3812d..b899947d839d87b03608d1f9bf4b4208cf57aa01 100644 (file)
@@ -632,49 +632,34 @@ static ssize_t regulator_bypass_show(struct device *dev,
 static DEVICE_ATTR(bypass, 0444,
                   regulator_bypass_show, NULL);
 
-/*
- * These are the only attributes are present for all regulators.
- * Other attributes are a function of regulator functionality.
- */
-static struct attribute *regulator_dev_attrs[] = {
-       &dev_attr_name.attr,
-       &dev_attr_num_users.attr,
-       &dev_attr_type.attr,
-       NULL,
-};
-ATTRIBUTE_GROUPS(regulator_dev);
-
-static void regulator_dev_release(struct device *dev)
-{
-       struct regulator_dev *rdev = dev_get_drvdata(dev);
-       kfree(rdev);
-}
-
-static struct class regulator_class = {
-       .name = "regulator",
-       .dev_release = regulator_dev_release,
-       .dev_groups = regulator_dev_groups,
-};
-
 /* Calculate the new optimum regulator operating mode based on the new total
  * consumer load. All locks held by caller */
-static void drms_uA_update(struct regulator_dev *rdev)
+static int drms_uA_update(struct regulator_dev *rdev)
 {
        struct regulator *sibling;
        int current_uA = 0, output_uV, input_uV, err;
        unsigned int mode;
 
+       /*
+        * first check to see if we can set modes at all, otherwise just
+        * tell the consumer everything is OK.
+        */
        err = regulator_check_drms(rdev);
-       if (err < 0 || !rdev->desc->ops->get_optimum_mode ||
-           (!rdev->desc->ops->get_voltage &&
-            !rdev->desc->ops->get_voltage_sel) ||
-           !rdev->desc->ops->set_mode)
-               return;
+       if (err < 0)
+               return 0;
+
+       if (!rdev->desc->ops->get_optimum_mode)
+               return 0;
+
+       if (!rdev->desc->ops->set_mode)
+               return -EINVAL;
 
        /* get output voltage */
        output_uV = _regulator_get_voltage(rdev);
-       if (output_uV <= 0)
-               return;
+       if (output_uV <= 0) {
+               rdev_err(rdev, "invalid output voltage found\n");
+               return -EINVAL;
+       }
 
        /* get input voltage */
        input_uV = 0;
@@ -682,8 +667,10 @@ static void drms_uA_update(struct regulator_dev *rdev)
                input_uV = regulator_get_voltage(rdev->supply);
        if (input_uV <= 0)
                input_uV = rdev->constraints->input_uV;
-       if (input_uV <= 0)
-               return;
+       if (input_uV <= 0) {
+               rdev_err(rdev, "invalid input voltage found\n");
+               return -EINVAL;
+       }
 
        /* calc total requested load */
        list_for_each_entry(sibling, &rdev->consumer_list, list)
@@ -695,8 +682,17 @@ static void drms_uA_update(struct regulator_dev *rdev)
 
        /* check the new mode is allowed */
        err = regulator_mode_constrain(rdev, &mode);
-       if (err == 0)
-               rdev->desc->ops->set_mode(rdev, mode);
+       if (err < 0) {
+               rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
+                        current_uA, input_uV, output_uV);
+               return err;
+       }
+
+       err = rdev->desc->ops->set_mode(rdev, mode);
+       if (err < 0)
+               rdev_err(rdev, "failed to set optimum mode %x\n", mode);
+
+       return err;
 }
 
 static int suspend_set_state(struct regulator_dev *rdev,
@@ -3026,75 +3022,13 @@ EXPORT_SYMBOL_GPL(regulator_get_mode);
 int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
 {
        struct regulator_dev *rdev = regulator->rdev;
-       struct regulator *consumer;
-       int ret, output_uV, input_uV = 0, total_uA_load = 0;
-       unsigned int mode;
-
-       if (rdev->supply)
-               input_uV = regulator_get_voltage(rdev->supply);
+       int ret;
 
        mutex_lock(&rdev->mutex);
-
-       /*
-        * first check to see if we can set modes at all, otherwise just
-        * tell the consumer everything is OK.
-        */
        regulator->uA_load = uA_load;
-       ret = regulator_check_drms(rdev);
-       if (ret < 0) {
-               ret = 0;
-               goto out;
-       }
-
-       if (!rdev->desc->ops->get_optimum_mode)
-               goto out;
-
-       /*
-        * we can actually do this so any errors are indicators of
-        * potential real failure.
-        */
-       ret = -EINVAL;
-
-       if (!rdev->desc->ops->set_mode)
-               goto out;
-
-       /* get output voltage */
-       output_uV = _regulator_get_voltage(rdev);
-       if (output_uV <= 0) {
-               rdev_err(rdev, "invalid output voltage found\n");
-               goto out;
-       }
-
-       /* No supply? Use constraint voltage */
-       if (input_uV <= 0)
-               input_uV = rdev->constraints->input_uV;
-       if (input_uV <= 0) {
-               rdev_err(rdev, "invalid input voltage found\n");
-               goto out;
-       }
-
-       /* calc total requested load for this regulator */
-       list_for_each_entry(consumer, &rdev->consumer_list, list)
-               total_uA_load += consumer->uA_load;
-
-       mode = rdev->desc->ops->get_optimum_mode(rdev,
-                                                input_uV, output_uV,
-                                                total_uA_load);
-       ret = regulator_mode_constrain(rdev, &mode);
-       if (ret < 0) {
-               rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
-                        total_uA_load, input_uV, output_uV);
-               goto out;
-       }
-
-       ret = rdev->desc->ops->set_mode(rdev, mode);
-       if (ret < 0) {
-               rdev_err(rdev, "failed to set optimum mode %x\n", mode);
-               goto out;
-       }
-       ret = mode;
-out:
+       ret = drms_uA_update(rdev);
        mutex_unlock(&rdev->mutex);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(regulator_set_optimum_mode);
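
One consumer-visible detail of the consolidation above: the old body
returned the selected mode on success (ret = mode), whereas
drms_uA_update() returns the set_mode() result, i.e. 0 on success rather
than the chosen mode, so callers should only test for negative errors.
Consumer usage itself is unchanged; a hedged sketch (hypothetical driver
already holding a struct regulator *supply):

	/* Declare the expected load so the core can pick an operating mode. */
	ret = regulator_set_optimum_mode(supply, 150000);	/* 150 mA */
	if (ret < 0)
		dev_warn(dev, "cannot set optimum mode: %d\n", ret);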
@@ -3436,126 +3370,136 @@ int regulator_mode_to_status(unsigned int mode)
 }
 EXPORT_SYMBOL_GPL(regulator_mode_to_status);
 
+static struct attribute *regulator_dev_attrs[] = {
+       &dev_attr_name.attr,
+       &dev_attr_num_users.attr,
+       &dev_attr_type.attr,
+       &dev_attr_microvolts.attr,
+       &dev_attr_microamps.attr,
+       &dev_attr_opmode.attr,
+       &dev_attr_state.attr,
+       &dev_attr_status.attr,
+       &dev_attr_bypass.attr,
+       &dev_attr_requested_microamps.attr,
+       &dev_attr_min_microvolts.attr,
+       &dev_attr_max_microvolts.attr,
+       &dev_attr_min_microamps.attr,
+       &dev_attr_max_microamps.attr,
+       &dev_attr_suspend_standby_state.attr,
+       &dev_attr_suspend_mem_state.attr,
+       &dev_attr_suspend_disk_state.attr,
+       &dev_attr_suspend_standby_microvolts.attr,
+       &dev_attr_suspend_mem_microvolts.attr,
+       &dev_attr_suspend_disk_microvolts.attr,
+       &dev_attr_suspend_standby_mode.attr,
+       &dev_attr_suspend_mem_mode.attr,
+       &dev_attr_suspend_disk_mode.attr,
+       NULL
+};
+
 /*
  * To avoid cluttering sysfs (and memory) with useless state, only
  * create attributes that can be meaningfully displayed.
  */
-static int add_regulator_attributes(struct regulator_dev *rdev)
+static umode_t regulator_attr_is_visible(struct kobject *kobj,
+                                        struct attribute *attr, int idx)
 {
-       struct device *dev = &rdev->dev;
+       struct device *dev = kobj_to_dev(kobj);
+       struct regulator_dev *rdev = container_of(dev, struct regulator_dev, dev);
        const struct regulator_ops *ops = rdev->desc->ops;
-       int status = 0;
+       umode_t mode = attr->mode;
+
+       /* these three are always present */
+       if (attr == &dev_attr_name.attr ||
+           attr == &dev_attr_num_users.attr ||
+           attr == &dev_attr_type.attr)
+               return mode;
 
        /* some attributes need specific methods to be displayed */
-       if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
-           (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) ||
-           (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) ||
-               (rdev->desc->fixed_uV && (rdev->desc->n_voltages == 1))) {
-               status = device_create_file(dev, &dev_attr_microvolts);
-               if (status < 0)
-                       return status;
-       }
-       if (ops->get_current_limit) {
-               status = device_create_file(dev, &dev_attr_microamps);
-               if (status < 0)
-                       return status;
-       }
-       if (ops->get_mode) {
-               status = device_create_file(dev, &dev_attr_opmode);
-               if (status < 0)
-                       return status;
-       }
-       if (rdev->ena_pin || ops->is_enabled) {
-               status = device_create_file(dev, &dev_attr_state);
-               if (status < 0)
-                       return status;
-       }
-       if (ops->get_status) {
-               status = device_create_file(dev, &dev_attr_status);
-               if (status < 0)
-                       return status;
-       }
-       if (ops->get_bypass) {
-               status = device_create_file(dev, &dev_attr_bypass);
-               if (status < 0)
-                       return status;
+       if (attr == &dev_attr_microvolts.attr) {
+               if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
+                   (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) ||
+                   (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0) ||
+                   (rdev->desc->fixed_uV && rdev->desc->n_voltages == 1))
+                       return mode;
+               return 0;
        }
 
+       if (attr == &dev_attr_microamps.attr)
+               return ops->get_current_limit ? mode : 0;
+
+       if (attr == &dev_attr_opmode.attr)
+               return ops->get_mode ? mode : 0;
+
+       if (attr == &dev_attr_state.attr)
+               return (rdev->ena_pin || ops->is_enabled) ? mode : 0;
+
+       if (attr == &dev_attr_status.attr)
+               return ops->get_status ? mode : 0;
+
+       if (attr == &dev_attr_bypass.attr)
+               return ops->get_bypass ? mode : 0;
+
        /* some attributes are type-specific */
-       if (rdev->desc->type == REGULATOR_CURRENT) {
-               status = device_create_file(dev, &dev_attr_requested_microamps);
-               if (status < 0)
-                       return status;
-       }
+       if (attr == &dev_attr_requested_microamps.attr)
+               return rdev->desc->type == REGULATOR_CURRENT ? mode : 0;
 
        /* all the other attributes exist to support constraints;
         * don't show them if there are no constraints, or if the
         * relevant supporting methods are missing.
         */
        if (!rdev->constraints)
-               return status;
+               return 0;
 
        /* constraints need specific supporting methods */
-       if (ops->set_voltage || ops->set_voltage_sel) {
-               status = device_create_file(dev, &dev_attr_min_microvolts);
-               if (status < 0)
-                       return status;
-               status = device_create_file(dev, &dev_attr_max_microvolts);
-               if (status < 0)
-                       return status;
-       }
-       if (ops->set_current_limit) {
-               status = device_create_file(dev, &dev_attr_min_microamps);
-               if (status < 0)
-                       return status;
-               status = device_create_file(dev, &dev_attr_max_microamps);
-               if (status < 0)
-                       return status;
-       }
-
-       status = device_create_file(dev, &dev_attr_suspend_standby_state);
-       if (status < 0)
-               return status;
-       status = device_create_file(dev, &dev_attr_suspend_mem_state);
-       if (status < 0)
-               return status;
-       status = device_create_file(dev, &dev_attr_suspend_disk_state);
-       if (status < 0)
-               return status;
+       if (attr == &dev_attr_min_microvolts.attr ||
+           attr == &dev_attr_max_microvolts.attr)
+               return (ops->set_voltage || ops->set_voltage_sel) ? mode : 0;
+
+       if (attr == &dev_attr_min_microamps.attr ||
+           attr == &dev_attr_max_microamps.attr)
+               return ops->set_current_limit ? mode : 0;
 
-       if (ops->set_suspend_voltage) {
-               status = device_create_file(dev,
-                               &dev_attr_suspend_standby_microvolts);
-               if (status < 0)
-                       return status;
-               status = device_create_file(dev,
-                               &dev_attr_suspend_mem_microvolts);
-               if (status < 0)
-                       return status;
-               status = device_create_file(dev,
-                               &dev_attr_suspend_disk_microvolts);
-               if (status < 0)
-                       return status;
-       }
-
-       if (ops->set_suspend_mode) {
-               status = device_create_file(dev,
-                               &dev_attr_suspend_standby_mode);
-               if (status < 0)
-                       return status;
-               status = device_create_file(dev,
-                               &dev_attr_suspend_mem_mode);
-               if (status < 0)
-                       return status;
-               status = device_create_file(dev,
-                               &dev_attr_suspend_disk_mode);
-               if (status < 0)
-                       return status;
-       }
-
-       return status;
+       if (attr == &dev_attr_suspend_standby_state.attr ||
+           attr == &dev_attr_suspend_mem_state.attr ||
+           attr == &dev_attr_suspend_disk_state.attr)
+               return mode;
+
+       if (attr == &dev_attr_suspend_standby_microvolts.attr ||
+           attr == &dev_attr_suspend_mem_microvolts.attr ||
+           attr == &dev_attr_suspend_disk_microvolts.attr)
+               return ops->set_suspend_voltage ? mode : 0;
+
+       if (attr == &dev_attr_suspend_standby_mode.attr ||
+           attr == &dev_attr_suspend_mem_mode.attr ||
+           attr == &dev_attr_suspend_disk_mode.attr)
+               return ops->set_suspend_mode ? mode : 0;
+
+       return mode;
 }
 
+static const struct attribute_group regulator_dev_group = {
+       .attrs = regulator_dev_attrs,
+       .is_visible = regulator_attr_is_visible,
+};
+
+static const struct attribute_group *regulator_dev_groups[] = {
+       &regulator_dev_group,
+       NULL
+};
+
+static void regulator_dev_release(struct device *dev)
+{
+       struct regulator_dev *rdev = dev_get_drvdata(dev);
+       kfree(rdev);
+}
+
+static struct class regulator_class = {
+       .name = "regulator",
+       .dev_release = regulator_dev_release,
+       .dev_groups = regulator_dev_groups,
+};
+
 static void rdev_init_debugfs(struct regulator_dev *rdev)
 {
        rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root);
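
The hunk above trades some twenty device_create_file() calls for a static
attribute list gated by an .is_visible() callback, which the driver core
evaluates once per attribute when the device is added. The pattern in
isolation (hypothetical "foo" device; names are assumptions):

	static umode_t foo_attr_is_visible(struct kobject *kobj,
					   struct attribute *attr, int idx)
	{
		struct device *dev = kobj_to_dev(kobj);
		struct foo *foo = dev_get_drvdata(dev);

		if (attr == &dev_attr_level.attr && !foo->has_level)
			return 0;		/* hide the attribute entirely */

		return attr->mode;		/* keep the declared permissions */
	}

	static struct attribute *foo_attrs[] = {
		&dev_attr_level.attr,
		NULL
	};

	static const struct attribute_group foo_group = {
		.attrs		= foo_attrs,
		.is_visible	= foo_attr_is_visible,
	};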
@@ -3575,7 +3519,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
 /**
  * regulator_register - register regulator
  * @regulator_desc: regulator to register
- * @config: runtime configuration for regulator
+ * @cfg: runtime configuration for regulator
  *
  * Called by regulator drivers to register a regulator.
  * Returns a valid pointer to struct regulator_dev on success
@@ -3583,20 +3527,21 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
  */
 struct regulator_dev *
 regulator_register(const struct regulator_desc *regulator_desc,
-                  const struct regulator_config *config)
+                  const struct regulator_config *cfg)
 {
        const struct regulation_constraints *constraints = NULL;
        const struct regulator_init_data *init_data;
-       static atomic_t regulator_no = ATOMIC_INIT(0);
+       struct regulator_config *config = NULL;
+       static atomic_t regulator_no = ATOMIC_INIT(-1);
        struct regulator_dev *rdev;
        struct device *dev;
        int ret, i;
        const char *supply = NULL;
 
-       if (regulator_desc == NULL || config == NULL)
+       if (regulator_desc == NULL || cfg == NULL)
                return ERR_PTR(-EINVAL);
 
-       dev = config->dev;
+       dev = cfg->dev;
        WARN_ON(!dev);
 
        if (regulator_desc->name == NULL || regulator_desc->ops == NULL)
@@ -3626,7 +3571,17 @@ regulator_register(const struct regulator_desc *regulator_desc,
        if (rdev == NULL)
                return ERR_PTR(-ENOMEM);
 
-       init_data = regulator_of_get_init_data(dev, regulator_desc,
+       /*
+        * Duplicate the config so the driver can override it after
+        * parsing init data, without mutating the caller's structure.
+        */
+       config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL);
+       if (config == NULL) {
+               kfree(rdev);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       init_data = regulator_of_get_init_data(dev, regulator_desc, config,
                                               &rdev->dev.of_node);
        if (!init_data) {
                init_data = config->init_data;
@@ -3660,8 +3615,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
        /* register with sysfs */
        rdev->dev.class = &regulator_class;
        rdev->dev.parent = dev;
-       dev_set_name(&rdev->dev, "regulator.%d",
-                    atomic_inc_return(&regulator_no) - 1);
+       dev_set_name(&rdev->dev, "regulator.%lu",
+                   (unsigned long) atomic_inc_return(&regulator_no));
        ret = device_register(&rdev->dev);
        if (ret != 0) {
                put_device(&rdev->dev);
@@ -3694,11 +3649,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
        if (ret < 0)
                goto scrub;
 
-       /* add attributes supported by this regulator */
-       ret = add_regulator_attributes(rdev);
-       if (ret < 0)
-               goto scrub;
-
        if (init_data && init_data->supply_regulator)
                supply = init_data->supply_regulator;
        else if (regulator_desc->supply_name)
@@ -3754,6 +3704,7 @@ add_dev:
        rdev_init_debugfs(rdev);
 out:
        mutex_unlock(&regulator_list_mutex);
+       kfree(config);
        return rdev;
 
 unset_supplies:
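
Two details in the regulator_register() changes above tie into the driver
updates below. The config is now duplicated with kmemdup() precisely so
that DT parsing (the of_parse_cb hook max77686 starts using further down)
can write into *config without mutating the caller's structure; the copy
is released by the kfree(config) added at the out: label. The
regulator_no change is only arithmetic cleanup: ATOMIC_INIT(-1) followed
by atomic_inc_return() yields 0, 1, 2, ... exactly as ATOMIC_INIT(0)
followed by "atomic_inc_return() - 1" did, so device names stay
regulator.0, regulator.1, and so on.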
index c78d2106d6cb66aa6b5ca1b8df25c244c73a3686..01343419555ee3363d7bc0d3c10df731fbd74587 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/regmap.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/of_gpio.h>
 #include <linux/regulator/of_regulator.h>
 #include <linux/regulator/da9211.h>
 #include "da9211-regulator.h"
@@ -276,7 +277,10 @@ static struct da9211_pdata *da9211_parse_regulators_dt(
                        continue;
 
                pdata->init_data[n] = da9211_matches[i].init_data;
-
+               pdata->reg_node[n] = da9211_matches[i].of_node;
+               pdata->gpio_ren[n] =
+                       of_get_named_gpio(da9211_matches[i].of_node,
+                               "enable-gpios", 0);
                n++;
        }
 
@@ -364,7 +368,15 @@ static int da9211_regulator_init(struct da9211 *chip)
                config.dev = chip->dev;
                config.driver_data = chip;
                config.regmap = chip->regmap;
-               config.of_node = chip->dev->of_node;
+               config.of_node = chip->pdata->reg_node[i];
+
+               if (gpio_is_valid(chip->pdata->gpio_ren[i])) {
+                       config.ena_gpio = chip->pdata->gpio_ren[i];
+                       config.ena_gpio_initialized = true;
+               } else {
+                       config.ena_gpio = -EINVAL;
+                       config.ena_gpio_initialized = false;
+               }
 
                chip->rdev[i] = devm_regulator_register(chip->dev,
                        &da9211_regulators[i], &config);
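
With config.ena_gpio valid and ena_gpio_initialized set, the regulator
core claims the GPIO itself and routes enable/disable through that pin
rather than through the chip's register ops. A condensed sketch of the
contract the da9211 hunk implements (property name "enable-gpios" as in
the hunk; np is the regulator's DT child node, error handling elided):

	int gpio = of_get_named_gpio(np, "enable-gpios", 0);

	if (gpio_is_valid(gpio)) {
		config.ena_gpio = gpio;
		config.ena_gpio_initialized = true;	/* core may claim the pin */
	} else {
		config.ena_gpio = -EINVAL;		/* core ignores the field */
		config.ena_gpio_initialized = false;
	}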
index 6c43ab2d51211653eb1dd2374ff7844f6ff1c634..3c25db89a021af927f183b9f363b270b76e691c4 100644 (file)
@@ -147,7 +147,7 @@ static unsigned int fan53555_get_mode(struct regulator_dev *rdev)
                return REGULATOR_MODE_NORMAL;
 }
 
-static int slew_rates[] = {
+static const int slew_rates[] = {
        64000,
        32000,
        16000,
@@ -296,7 +296,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
        return PTR_ERR_OR_ZERO(di->rdev);
 }
 
-static struct regmap_config fan53555_regmap_config = {
+static const struct regmap_config fan53555_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 };
index 80ba2a35a04bb3e6c7164380fe44d3220c54dafd..c74ac873402370b5057464b22a694672cccf4d73 100644 (file)
@@ -38,11 +38,13 @@ struct regulator {
 #ifdef CONFIG_OF
 struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
                                 const struct regulator_desc *desc,
+                                struct regulator_config *config,
                                 struct device_node **node);
 #else
 static inline struct regulator_init_data *
 regulator_of_get_init_data(struct device *dev,
                           const struct regulator_desc *desc,
+                          struct regulator_config *config,
                           struct device_node **node)
 {
        return NULL;
index 92fefd98da58e146755210cad01fa8639d1f5c02..6e3a15fe00f1ce37d51d47d0c66ee26c39a8b367 100644 (file)
@@ -177,8 +177,10 @@ static int isl9305_i2c_probe(struct i2c_client *i2c,
 
 #ifdef CONFIG_OF
 static const struct of_device_id isl9305_dt_ids[] = {
-       { .compatible = "isl,isl9305" },
-       { .compatible = "isl,isl9305h" },
+       { .compatible = "isl,isl9305" }, /* for backward compat., don't use */
+       { .compatible = "isil,isl9305" },
+       { .compatible = "isl,isl9305h" }, /* for backward compat., don't use */
+       { .compatible = "isil,isl9305h" },
        {},
 };
 #endif
index 021d64d856bb68d72745e83a8258d3c228412943..3de328ab41f3c69dde58349d6aa4fb0d18736617 100644 (file)
@@ -106,7 +106,6 @@ struct lp872x {
        struct device *dev;
        enum lp872x_id chipid;
        struct lp872x_platform_data *pdata;
-       struct regulator_dev **regulators;
        int num_regulators;
        enum lp872x_dvs_state dvs_pin;
        int dvs_gpio;
@@ -801,8 +800,6 @@ static int lp872x_regulator_register(struct lp872x *lp)
                        dev_err(lp->dev, "regulator register err");
                        return PTR_ERR(rdev);
                }
-
-               *(lp->regulators + i) = rdev;
        }
 
        return 0;
@@ -906,7 +903,7 @@ static struct lp872x_platform_data
 static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
 {
        struct lp872x *lp;
-       int ret, size, num_regulators;
+       int ret;
        const int lp872x_num_regulators[] = {
                [LP8720] = LP8720_NUM_REGULATORS,
                [LP8725] = LP8725_NUM_REGULATORS,
@@ -918,38 +915,27 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
 
        lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
        if (!lp)
-               goto err_mem;
-
-       num_regulators = lp872x_num_regulators[id->driver_data];
-       size = sizeof(struct regulator_dev *) * num_regulators;
+               return -ENOMEM;
 
-       lp->regulators = devm_kzalloc(&cl->dev, size, GFP_KERNEL);
-       if (!lp->regulators)
-               goto err_mem;
+       lp->num_regulators = lp872x_num_regulators[id->driver_data];
 
        lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config);
        if (IS_ERR(lp->regmap)) {
                ret = PTR_ERR(lp->regmap);
                dev_err(&cl->dev, "regmap init i2c err: %d\n", ret);
-               goto err_dev;
+               return ret;
        }
 
        lp->dev = &cl->dev;
        lp->pdata = dev_get_platdata(&cl->dev);
        lp->chipid = id->driver_data;
-       lp->num_regulators = num_regulators;
        i2c_set_clientdata(cl, lp);
 
        ret = lp872x_config(lp);
        if (ret)
-               goto err_dev;
+               return ret;
 
        return lp872x_regulator_register(lp);
-
-err_mem:
-       return -ENOMEM;
-err_dev:
-       return ret;
 }
 
 static const struct of_device_id lp872x_dt_ids[] = {
index bf9a44c5fdd299872a1015b50b097aa8dc0e9fc6..b3678d289619330ffca48f338b9ddf362cd76986 100644 (file)
@@ -103,6 +103,8 @@ static struct regulator_ops max14577_charger_ops = {
 static const struct regulator_desc max14577_supported_regulators[] = {
        [MAX14577_SAFEOUT] = {
                .name           = "SAFEOUT",
+               .of_match       = of_match_ptr("SAFEOUT"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = MAX14577_SAFEOUT,
                .ops            = &max14577_safeout_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -114,6 +116,8 @@ static const struct regulator_desc max14577_supported_regulators[] = {
        },
        [MAX14577_CHARGER] = {
                .name           = "CHARGER",
+               .of_match       = of_match_ptr("CHARGER"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = MAX14577_CHARGER,
                .ops            = &max14577_charger_ops,
                .type           = REGULATOR_CURRENT,
@@ -137,6 +141,8 @@ static struct regulator_ops max77836_ldo_ops = {
 static const struct regulator_desc max77836_supported_regulators[] = {
        [MAX14577_SAFEOUT] = {
                .name           = "SAFEOUT",
+               .of_match       = of_match_ptr("SAFEOUT"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = MAX14577_SAFEOUT,
                .ops            = &max14577_safeout_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -148,6 +154,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
        },
        [MAX14577_CHARGER] = {
                .name           = "CHARGER",
+               .of_match       = of_match_ptr("CHARGER"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = MAX14577_CHARGER,
                .ops            = &max14577_charger_ops,
                .type           = REGULATOR_CURRENT,
@@ -157,6 +165,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
        },
        [MAX77836_LDO1] = {
                .name           = "LDO1",
+               .of_match       = of_match_ptr("LDO1"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = MAX77836_LDO1,
                .ops            = &max77836_ldo_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -171,6 +181,8 @@ static const struct regulator_desc max77836_supported_regulators[] = {
        },
        [MAX77836_LDO2] = {
                .name           = "LDO2",
+               .of_match       = of_match_ptr("LDO2"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = MAX77836_LDO2,
                .ops            = &max77836_ldo_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -198,43 +210,6 @@ static struct of_regulator_match max77836_regulator_matches[] = {
        { .name = "LDO2", },
 };
 
-static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
-               enum maxim_device_type dev_type)
-{
-       int ret;
-       struct device_node *np;
-       struct of_regulator_match *regulator_matches;
-       unsigned int regulator_matches_size;
-
-       np = of_get_child_by_name(pdev->dev.parent->of_node, "regulators");
-       if (!np) {
-               dev_err(&pdev->dev, "Failed to get child OF node for regulators\n");
-               return -EINVAL;
-       }
-
-       switch (dev_type) {
-       case MAXIM_DEVICE_TYPE_MAX77836:
-               regulator_matches = max77836_regulator_matches;
-               regulator_matches_size = ARRAY_SIZE(max77836_regulator_matches);
-               break;
-       case MAXIM_DEVICE_TYPE_MAX14577:
-       default:
-               regulator_matches = max14577_regulator_matches;
-               regulator_matches_size = ARRAY_SIZE(max14577_regulator_matches);
-       }
-
-       ret = of_regulator_match(&pdev->dev, np, regulator_matches,
-                       regulator_matches_size);
-       if (ret < 0)
-               dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
-       else
-               ret = 0;
-
-       of_node_put(np);
-
-       return ret;
-}
-
 static inline struct regulator_init_data *match_init_data(int index,
                enum maxim_device_type dev_type)
 {
@@ -261,11 +236,6 @@ static inline struct device_node *match_of_node(int index,
        }
 }
 #else /* CONFIG_OF */
-static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev,
-               enum maxim_device_type dev_type)
-{
-       return 0;
-}
 static inline struct regulator_init_data *match_init_data(int index,
                enum maxim_device_type dev_type)
 {
@@ -308,16 +278,12 @@ static int max14577_regulator_probe(struct platform_device *pdev)
 {
        struct max14577 *max14577 = dev_get_drvdata(pdev->dev.parent);
        struct max14577_platform_data *pdata = dev_get_platdata(max14577->dev);
-       int i, ret;
+       int i, ret = 0;
        struct regulator_config config = {};
        const struct regulator_desc *supported_regulators;
        unsigned int supported_regulators_size;
        enum maxim_device_type dev_type = max14577->dev_type;
 
-       ret = max14577_regulator_dt_parse_pdata(pdev, dev_type);
-       if (ret)
-               return ret;
-
        switch (dev_type) {
        case MAXIM_DEVICE_TYPE_MAX77836:
                supported_regulators = max77836_supported_regulators;
@@ -329,7 +295,7 @@ static int max14577_regulator_probe(struct platform_device *pdev)
                supported_regulators_size = ARRAY_SIZE(max14577_supported_regulators);
        }
 
-       config.dev = &pdev->dev;
+       config.dev = max14577->dev;
        config.driver_data = max14577;
 
        for (i = 0; i < supported_regulators_size; i++) {
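
Note the config.dev switch from &pdev->dev to max14577->dev in the hunk
above: the core resolves .regulators_node relative to config.dev's
of_node, and for this MFD cell the "regulators" child sits under the
parent device's DT node, not under the platform device.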
index 10d206266ac27770ec25efa16e94f5eb658a891b..15fb1416bfbde99c9724644dd25bf106695fda97 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/bug.h>
 #include <linux/err.h>
 #include <linux/gpio.h>
+#include <linux/of_gpio.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #define MAX77686_DVS_MINUV     600000
 #define MAX77686_DVS_UVSTEP    12500
 
+/*
+ * Value for configuring buck[89] and LDO{20,21,22} as GPIO control.
+ * It is the same as 'off' for other regulators.
+ */
+#define MAX77686_GPIO_CONTROL          0x0
 /*
  * Values used for configuring LDOs and bucks.
  * Forcing low power mode: LDO1, 3-5, 9, 13, 17-26
@@ -82,6 +88,8 @@ enum max77686_ramp_rate {
 };
 
 struct max77686_data {
+       u64 gpio_enabled:MAX77686_REGULATORS;
+
        /* Array indexed by regulator id */
        unsigned int opmode[MAX77686_REGULATORS];
 };
@@ -100,6 +108,26 @@ static unsigned int max77686_get_opmode_shift(int id)
        }
 }
 
+/*
+ * When a regulator is configured for GPIO control, GPIO control takes
+ * the place of "normal" mode: any change from low-power mode back to
+ * normal must actually switch the regulator to GPIO control.
+ * Map normal mode to the proper value for such regulators.
+ */
+static unsigned int max77686_map_normal_mode(struct max77686_data *max77686,
+                                            int id)
+{
+       switch (id) {
+       case MAX77686_BUCK8:
+       case MAX77686_BUCK9:
+       case MAX77686_LDO20 ... MAX77686_LDO22:
+               if (max77686->gpio_enabled & (1ULL << id))
+                       return MAX77686_GPIO_CONTROL;
+       }
+
+       return MAX77686_NORMAL;
+}
+
 /* Some BUCKs and LDOs support Normal[ON/OFF] mode during suspend */
 static int max77686_set_suspend_disable(struct regulator_dev *rdev)
 {
@@ -136,7 +164,7 @@ static int max77686_set_suspend_mode(struct regulator_dev *rdev,
                val = MAX77686_LDO_LOWPOWER_PWRREQ;
                break;
        case REGULATOR_MODE_NORMAL:                     /* ON in Normal Mode */
-               val = MAX77686_NORMAL;
+               val = max77686_map_normal_mode(max77686, id);
                break;
        default:
                pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -160,7 +188,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
 {
        unsigned int val;
        struct max77686_data *max77686 = rdev_get_drvdata(rdev);
-       int ret;
+       int ret, id = rdev_get_id(rdev);
 
        switch (mode) {
        case REGULATOR_MODE_STANDBY:                    /* switch off */
@@ -170,7 +198,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
                val = MAX77686_LDO_LOWPOWER_PWRREQ;
                break;
        case REGULATOR_MODE_NORMAL:                     /* ON in Normal Mode */
-               val = MAX77686_NORMAL;
+               val = max77686_map_normal_mode(max77686, id);
                break;
        default:
                pr_warn("%s: regulator_suspend_mode : 0x%x not supported\n",
@@ -184,7 +212,7 @@ static int max77686_ldo_set_suspend_mode(struct regulator_dev *rdev,
        if (ret)
                return ret;
 
-       max77686->opmode[rdev_get_id(rdev)] = val;
+       max77686->opmode[id] = val;
        return 0;
 }
 
@@ -197,7 +225,7 @@ static int max77686_enable(struct regulator_dev *rdev)
        shift = max77686_get_opmode_shift(id);
 
        if (max77686->opmode[id] == MAX77686_OFF_PWRREQ)
-               max77686->opmode[id] = MAX77686_NORMAL;
+               max77686->opmode[id] = max77686_map_normal_mode(max77686, id);
 
        return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
                                  rdev->desc->enable_mask,
@@ -229,6 +257,36 @@ static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
                                  MAX77686_RAMP_RATE_MASK, ramp_value << 6);
 }
 
+static int max77686_of_parse_cb(struct device_node *np,
+               const struct regulator_desc *desc,
+               struct regulator_config *config)
+{
+       struct max77686_data *max77686 = config->driver_data;
+
+       switch (desc->id) {
+       case MAX77686_BUCK8:
+       case MAX77686_BUCK9:
+       case MAX77686_LDO20 ... MAX77686_LDO22:
+               config->ena_gpio = of_get_named_gpio(np,
+                                       "maxim,ena-gpios", 0);
+               config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
+               config->ena_gpio_initialized = true;
+               break;
+       default:
+               return 0;
+       }
+
+       if (gpio_is_valid(config->ena_gpio)) {
+               max77686->gpio_enabled |= (1ULL << desc->id);
+
+               return regmap_update_bits(config->regmap, desc->enable_reg,
+                                         desc->enable_mask,
+                                         MAX77686_GPIO_CONTROL);
+       }
+
+       return 0;
+}
+
 static struct regulator_ops max77686_ops = {
        .list_voltage           = regulator_list_voltage_linear,
        .map_voltage            = regulator_map_voltage_linear,
@@ -283,6 +341,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
        .name           = "LDO"#num,                                    \
        .of_match       = of_match_ptr("LDO"#num),                      \
        .regulators_node        = of_match_ptr("voltage-regulators"),   \
+       .of_parse_cb    = max77686_of_parse_cb,                         \
        .id             = MAX77686_LDO##num,                            \
        .ops            = &max77686_ops,                                \
        .type           = REGULATOR_VOLTAGE,                            \
@@ -355,6 +414,7 @@ static struct regulator_ops max77686_buck_dvs_ops = {
        .name           = "BUCK"#num,                                   \
        .of_match       = of_match_ptr("BUCK"#num),                     \
        .regulators_node        = of_match_ptr("voltage-regulators"),   \
+       .of_parse_cb    = max77686_of_parse_cb,                         \
        .id             = MAX77686_BUCK##num,                           \
        .ops            = &max77686_ops,                                \
        .type           = REGULATOR_VOLTAGE,                            \
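
Taken together, the max77686 hunks make of_parse_cb (invoked from the
core's DT parsing, and safe to write into *config thanks to the kmemdup
in the core hunk earlier) record which regulators are under GPIO control,
and max77686_map_normal_mode() then keeps those regulators in GPIO mode
whenever "normal" is requested. For illustration, with BUCK9 marked in
gpio_enabled:

	val = max77686_map_normal_mode(max77686, MAX77686_BUCK9);
	/* -> MAX77686_GPIO_CONTROL (0x0), not MAX77686_NORMAL, so the
	 * opmode field keeps the rail under external-pin control */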
diff --git a/drivers/regulator/max77843.c b/drivers/regulator/max77843.c
new file mode 100644 (file)
index 0000000..c132ef5
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+ * max77843.c - Regulator driver for the Maxim MAX77843
+ *
+ * Copyright (C) 2015 Samsung Electronics
+ * Author: Jaewon Kim <jaewon02.kim@samsung.com>
+ * Author: Beomho Seo <beomho.seo@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/max77843-private.h>
+#include <linux/regulator/of_regulator.h>
+
+enum max77843_regulator_type {
+       MAX77843_SAFEOUT1 = 0,
+       MAX77843_SAFEOUT2,
+       MAX77843_CHARGER,
+
+       MAX77843_NUM,
+};
+
+static const unsigned int max77843_safeout_voltage_table[] = {
+       4850000,
+       4900000,
+       4950000,
+       3300000,
+};
+
+static int max77843_reg_is_enabled(struct regulator_dev *rdev)
+{
+       struct regmap *regmap = rdev->regmap;
+       int ret;
+       unsigned int reg;
+
+       ret = regmap_read(regmap, rdev->desc->enable_reg, &reg);
+       if (ret) {
+               dev_err(&rdev->dev, "Failed to read charger register\n");
+               return ret;
+       }
+
+       return (reg & rdev->desc->enable_mask) == rdev->desc->enable_mask;
+}
+
+static int max77843_reg_get_current_limit(struct regulator_dev *rdev)
+{
+       struct regmap *regmap = rdev->regmap;
+       unsigned int chg_min_uA = rdev->constraints->min_uA;
+       unsigned int chg_max_uA = rdev->constraints->max_uA;
+       unsigned int val;
+       int ret;
+       unsigned int reg, sel;
+
+       ret = regmap_read(regmap, MAX77843_CHG_REG_CHG_CNFG_02, &reg);
+       if (ret) {
+               dev_err(&rdev->dev, "Failed to read charger register\n");
+               return ret;
+       }
+
+       sel = reg & MAX77843_CHG_FAST_CHG_CURRENT_MASK;
+
+       if (sel < 0x03)
+               sel = 0;
+       else
+               sel -= 2;
+
+       val = chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel;
+       if (val > chg_max_uA)
+               return -EINVAL;
+
+       return val;
+}
+
+static int max77843_reg_set_current_limit(struct regulator_dev *rdev,
+               int min_uA, int max_uA)
+{
+       struct regmap *regmap = rdev->regmap;
+       unsigned int chg_min_uA = rdev->constraints->min_uA;
+       int sel = 0;
+
+       while (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel < min_uA)
+               sel++;
+
+       if (chg_min_uA + MAX77843_CHG_FAST_CHG_CURRENT_STEP * sel > max_uA)
+               return -EINVAL;
+
+       sel += 2;
+
+       return regmap_write(regmap, MAX77843_CHG_REG_CHG_CNFG_02, sel);
+}
+
+static struct regulator_ops max77843_charger_ops = {
+       .is_enabled             = max77843_reg_is_enabled,
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
+       .get_current_limit      = max77843_reg_get_current_limit,
+       .set_current_limit      = max77843_reg_set_current_limit,
+};
+
+static struct regulator_ops max77843_regulator_ops = {
+       .is_enabled             = regulator_is_enabled_regmap,
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
+       .list_voltage           = regulator_list_voltage_table,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_desc max77843_supported_regulators[] = {
+       [MAX77843_SAFEOUT1] = {
+               .name           = "SAFEOUT1",
+               .id             = MAX77843_SAFEOUT1,
+               .ops            = &max77843_regulator_ops,
+               .of_match       = of_match_ptr("SAFEOUT1"),
+               .regulators_node = of_match_ptr("regulators"),
+               .type           = REGULATOR_VOLTAGE,
+               .owner          = THIS_MODULE,
+               .n_voltages     = ARRAY_SIZE(max77843_safeout_voltage_table),
+               .volt_table     = max77843_safeout_voltage_table,
+               .enable_reg     = MAX77843_SYS_REG_SAFEOUTCTRL,
+               .enable_mask    = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT1,
+               .vsel_reg       = MAX77843_SYS_REG_SAFEOUTCTRL,
+               .vsel_mask      = MAX77843_REG_SAFEOUTCTRL_SAFEOUT1_MASK,
+       },
+       [MAX77843_SAFEOUT2] = {
+               .name           = "SAFEOUT2",
+               .id             = MAX77843_SAFEOUT2,
+               .ops            = &max77843_regulator_ops,
+               .of_match       = of_match_ptr("SAFEOUT2"),
+               .regulators_node = of_match_ptr("regulators"),
+               .type           = REGULATOR_VOLTAGE,
+               .owner          = THIS_MODULE,
+               .n_voltages     = ARRAY_SIZE(max77843_safeout_voltage_table),
+               .volt_table     = max77843_safeout_voltage_table,
+               .enable_reg     = MAX77843_SYS_REG_SAFEOUTCTRL,
+               .enable_mask    = MAX77843_REG_SAFEOUTCTRL_ENSAFEOUT2,
+               .vsel_reg       = MAX77843_SYS_REG_SAFEOUTCTRL,
+               .vsel_mask      = MAX77843_REG_SAFEOUTCTRL_SAFEOUT2_MASK,
+       },
+       [MAX77843_CHARGER] = {
+               .name           = "CHARGER",
+               .id             = MAX77843_CHARGER,
+               .ops            = &max77843_charger_ops,
+               .of_match       = of_match_ptr("CHARGER"),
+               .regulators_node = of_match_ptr("regulators"),
+               .type           = REGULATOR_CURRENT,
+               .owner          = THIS_MODULE,
+               .enable_reg     = MAX77843_CHG_REG_CHG_CNFG_00,
+               .enable_mask    = MAX77843_CHG_MASK,
+       },
+};
+
+static struct regmap *max77843_get_regmap(struct max77843 *max77843, int reg_id)
+{
+       switch (reg_id) {
+       case MAX77843_SAFEOUT1:
+       case MAX77843_SAFEOUT2:
+               return max77843->regmap;
+       case MAX77843_CHARGER:
+               return max77843->regmap_chg;
+       default:
+               return max77843->regmap;
+       }
+}
+
+static int max77843_regulator_probe(struct platform_device *pdev)
+{
+       struct max77843 *max77843 = dev_get_drvdata(pdev->dev.parent);
+       struct regulator_config config = {};
+       int i;
+
+       config.dev = max77843->dev;
+       config.driver_data = max77843;
+
+       for (i = 0; i < ARRAY_SIZE(max77843_supported_regulators); i++) {
+               struct regulator_dev *regulator;
+
+               config.regmap = max77843_get_regmap(max77843,
+                               max77843_supported_regulators[i].id);
+
+               regulator = devm_regulator_register(&pdev->dev,
+                               &max77843_supported_regulators[i], &config);
+               if (IS_ERR(regulator)) {
+                       dev_err(&pdev->dev,
+                                       "Failed to register regulator-%d\n", i);
+                       return PTR_ERR(regulator);
+               }
+       }
+
+       return 0;
+}
+
+static const struct platform_device_id max77843_regulator_id[] = {
+       { "max77843-regulator", },
+       { /* sentinel */ },
+};
+
+static struct platform_driver max77843_regulator_driver = {
+       .driver = {
+               .name = "max77843-regulator",
+       },
+       .probe          = max77843_regulator_probe,
+       .id_table       = max77843_regulator_id,
+};
+
+static int __init max77843_regulator_init(void)
+{
+       return platform_driver_register(&max77843_regulator_driver);
+}
+subsys_initcall(max77843_regulator_init);
+
+static void __exit max77843_regulator_exit(void)
+{
+       platform_driver_unregister(&max77843_regulator_driver);
+}
+module_exit(max77843_regulator_exit);
+
+MODULE_AUTHOR("Jaewon Kim <jaewon02.kim@samsung.com>");
+MODULE_AUTHOR("Beomho Seo <beomho.seo@samsung.com>");
+MODULE_DESCRIPTION("Maxim MAX77843 regulator driver");
+MODULE_LICENSE("GPL");
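
The charger current-limit helpers above map between microamps and the
FAST_CHG_CURRENT selector with a two-step offset: selectors 0..2 all mean
the minimum current, so set_current_limit() adds 2 to the computed step
index before writing and get_current_limit() subtracts it again. A worked
example with assumed numbers (the real step and minimum come from
max77843-private.h, which is not part of this diff): with
chg_min_uA = 100000 and a 33300 uA step, a request of min_uA = 500000
walks sel up to 13 (100000 + 33300 * 12 = 499600 is still short), checks
100000 + 33300 * 13 = 532900 against max_uA, and writes 13 + 2 = 15.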
index c8bddcc8f911d11d90e32d18e0d191234bae1656..81229579ece9105846b4187d4e3ccacbfbf9a89c 100644 (file)
@@ -115,7 +115,7 @@ static unsigned int max8649_get_mode(struct regulator_dev *rdev)
        return REGULATOR_MODE_NORMAL;
 }
 
-static struct regulator_ops max8649_dcdc_ops = {
+static const struct regulator_ops max8649_dcdc_ops = {
        .set_voltage_sel = regulator_set_voltage_sel_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
        .list_voltage   = regulator_list_voltage_linear,
@@ -143,7 +143,7 @@ static struct regulator_desc dcdc_desc = {
        .enable_is_inverted = true,
 };
 
-static struct regmap_config max8649_regmap_config = {
+static const struct regmap_config max8649_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 };
diff --git a/drivers/regulator/mt6397-regulator.c b/drivers/regulator/mt6397-regulator.c
new file mode 100644 (file)
index 0000000..a5b2f47
--- /dev/null
@@ -0,0 +1,332 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu <flora.fu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/mt6397/core.h>
+#include <linux/mfd/mt6397/registers.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/mt6397-regulator.h>
+#include <linux/regulator/of_regulator.h>
+
+/*
+ * MT6397 regulator information
+ *
+ * @desc: standard fields of the regulator description
+ * @qi: mask for querying the enable-signal status of the regulator
+ * @vselon_reg: register section for the bucks' hardware control mode
+ * @vselctrl_reg: register for controlling the buck control mode
+ * @vselctrl_mask: mask for querying the buck's voltage control mode
+ */
+struct mt6397_regulator_info {
+       struct regulator_desc desc;
+       u32 qi;
+       u32 vselon_reg;
+       u32 vselctrl_reg;
+       u32 vselctrl_mask;
+};
+
+#define MT6397_BUCK(match, vreg, min, max, step, volt_ranges, enreg,   \
+               vosel, vosel_mask, voselon, vosel_ctrl)                 \
+[MT6397_ID_##vreg] = {                                                 \
+       .desc = {                                                       \
+               .name = #vreg,                                          \
+               .of_match = of_match_ptr(match),                        \
+               .ops = &mt6397_volt_range_ops,                          \
+               .type = REGULATOR_VOLTAGE,                              \
+               .id = MT6397_ID_##vreg,                                 \
+               .owner = THIS_MODULE,                                   \
+               .n_voltages = (max - min)/step + 1,                     \
+               .linear_ranges = volt_ranges,                           \
+               .n_linear_ranges = ARRAY_SIZE(volt_ranges),             \
+               .vsel_reg = vosel,                                      \
+               .vsel_mask = vosel_mask,                                \
+               .enable_reg = enreg,                                    \
+               .enable_mask = BIT(0),                                  \
+       },                                                              \
+       .qi = BIT(13),                                                  \
+       .vselon_reg = voselon,                                          \
+       .vselctrl_reg = vosel_ctrl,                                     \
+       .vselctrl_mask = BIT(1),                                        \
+}
+
+#define MT6397_LDO(match, vreg, ldo_volt_table, enreg, enbit, vosel,   \
+               vosel_mask)                                             \
+[MT6397_ID_##vreg] = {                                                 \
+       .desc = {                                                       \
+               .name = #vreg,                                          \
+               .of_match = of_match_ptr(match),                        \
+               .ops = &mt6397_volt_table_ops,                          \
+               .type = REGULATOR_VOLTAGE,                              \
+               .id = MT6397_ID_##vreg,                                 \
+               .owner = THIS_MODULE,                                   \
+               .n_voltages = ARRAY_SIZE(ldo_volt_table),               \
+               .volt_table = ldo_volt_table,                           \
+               .vsel_reg = vosel,                                      \
+               .vsel_mask = vosel_mask,                                \
+               .enable_reg = enreg,                                    \
+               .enable_mask = BIT(enbit),                              \
+       },                                                              \
+       .qi = BIT(15),                                                  \
+}
+
+#define MT6397_REG_FIXED(match, vreg, enreg, enbit, volt)              \
+[MT6397_ID_##vreg] = {                                                 \
+       .desc = {                                                       \
+               .name = #vreg,                                          \
+               .of_match = of_match_ptr(match),                        \
+               .ops = &mt6397_volt_fixed_ops,                          \
+               .type = REGULATOR_VOLTAGE,                              \
+               .id = MT6397_ID_##vreg,                                 \
+               .owner = THIS_MODULE,                                   \
+               .n_voltages = 1,                                        \
+               .enable_reg = enreg,                                    \
+               .enable_mask = BIT(enbit),                              \
+               .min_uV = volt,                                         \
+       },                                                              \
+       .qi = BIT(15),                                                  \
+}
+
+static const struct regulator_linear_range buck_volt_range1[] = {
+       REGULATOR_LINEAR_RANGE(700000, 0, 0x7f, 6250),
+};
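+/*
+ * Worked example: the MT6397_BUCK() macro computes
+ * n_voltages = (max - min) / step + 1, so for this range
+ * (1493750 - 700000) / 6250 + 1 = 128 selectors (0..0x7f), matching
+ * the 0x7f vosel_mask passed at the buck_volt_range1 call sites below.
+ */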
+
+static const struct regulator_linear_range buck_volt_range2[] = {
+       REGULATOR_LINEAR_RANGE(800000, 0, 0x7f, 6250),
+};
+
+static const struct regulator_linear_range buck_volt_range3[] = {
+       REGULATOR_LINEAR_RANGE(1500000, 0, 0x1f, 20000),
+};
+
+static const u32 ldo_volt_table1[] = {
+       1500000, 1800000, 2500000, 2800000,
+};
+
+static const u32 ldo_volt_table2[] = {
+       1800000, 3300000,
+};
+
+static const u32 ldo_volt_table3[] = {
+       3000000, 3300000,
+};
+
+static const u32 ldo_volt_table4[] = {
+       1220000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
+};
+
+static const u32 ldo_volt_table5[] = {
+       1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
+};
+
+static const u32 ldo_volt_table5_v2[] = {
+       1200000, 1000000, 1500000, 1800000, 2500000, 2800000, 3000000, 3300000,
+};
+
+static const u32 ldo_volt_table6[] = {
+       1200000, 1300000, 1500000, 1800000, 2500000, 2800000, 3000000, 2000000,
+};
+
+static const u32 ldo_volt_table7[] = {
+       1300000, 1500000, 1800000, 2000000, 2500000, 2800000, 3000000, 3300000,
+};
+
+static int mt6397_get_status(struct regulator_dev *rdev)
+{
+       int ret;
+       u32 regval;
+       struct mt6397_regulator_info *info = rdev_get_drvdata(rdev);
+
+       ret = regmap_read(rdev->regmap, info->desc.enable_reg, &regval);
+       if (ret != 0) {
+               dev_err(&rdev->dev, "Failed to get enable reg: %d\n", ret);
+               return ret;
+       }
+
+       return (regval & info->qi) ? REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
+}
+
+static struct regulator_ops mt6397_volt_range_ops = {
+       .list_voltage = regulator_list_voltage_linear_range,
+       .map_voltage = regulator_map_voltage_linear_range,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .get_status = mt6397_get_status,
+};
+
+static struct regulator_ops mt6397_volt_table_ops = {
+       .list_voltage = regulator_list_voltage_table,
+       .map_voltage = regulator_map_voltage_iterate,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .get_status = mt6397_get_status,
+};
+
+static struct regulator_ops mt6397_volt_fixed_ops = {
+       .list_voltage = regulator_list_voltage_linear,
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .get_status = mt6397_get_status,
+};
+
+/* The array is indexed by id (MT6397_ID_XXX). */
+static struct mt6397_regulator_info mt6397_regulators[] = {
+       MT6397_BUCK("buck_vpca15", VPCA15, 700000, 1493750, 6250,
+               buck_volt_range1, MT6397_VCA15_CON7, MT6397_VCA15_CON9, 0x7f,
+               MT6397_VCA15_CON10, MT6397_VCA15_CON5),
+       MT6397_BUCK("buck_vpca7", VPCA7, 700000, 1493750, 6250,
+               buck_volt_range1, MT6397_VPCA7_CON7, MT6397_VPCA7_CON9, 0x7f,
+               MT6397_VPCA7_CON10, MT6397_VPCA7_CON5),
+       MT6397_BUCK("buck_vsramca15", VSRAMCA15, 700000, 1493750, 6250,
+               buck_volt_range1, MT6397_VSRMCA15_CON7, MT6397_VSRMCA15_CON9,
+               0x7f, MT6397_VSRMCA15_CON10, MT6397_VSRMCA15_CON5),
+       MT6397_BUCK("buck_vsramca7", VSRAMCA7, 700000, 1493750, 6250,
+               buck_volt_range1, MT6397_VSRMCA7_CON7, MT6397_VSRMCA7_CON9,
+               0x7f, MT6397_VSRMCA7_CON10, MT6397_VSRMCA7_CON5),
+       MT6397_BUCK("buck_vcore", VCORE, 700000, 1493750, 6250,
+               buck_volt_range1, MT6397_VCORE_CON7, MT6397_VCORE_CON9, 0x7f,
+               MT6397_VCORE_CON10, MT6397_VCORE_CON5),
+       MT6397_BUCK("buck_vgpu", VGPU, 700000, 1493750, 6250, buck_volt_range1,
+               MT6397_VGPU_CON7, MT6397_VGPU_CON9, 0x7f,
+               MT6397_VGPU_CON10, MT6397_VGPU_CON5),
+       MT6397_BUCK("buck_vdrm", VDRM, 800000, 1593750, 6250, buck_volt_range2,
+               MT6397_VDRM_CON7, MT6397_VDRM_CON9, 0x7f,
+               MT6397_VDRM_CON10, MT6397_VDRM_CON5),
+       MT6397_BUCK("buck_vio18", VIO18, 1500000, 2120000, 20000,
+               buck_volt_range3, MT6397_VIO18_CON7, MT6397_VIO18_CON9, 0x1f,
+               MT6397_VIO18_CON10, MT6397_VIO18_CON5),
+       MT6397_REG_FIXED("ldo_vtcxo", VTCXO, MT6397_ANALDO_CON0, 10, 2800000),
+       MT6397_REG_FIXED("ldo_va28", VA28, MT6397_ANALDO_CON1, 14, 2800000),
+       MT6397_LDO("ldo_vcama", VCAMA, ldo_volt_table1,
+               MT6397_ANALDO_CON2, 15, MT6397_ANALDO_CON6, 0xC0),
+       MT6397_REG_FIXED("ldo_vio28", VIO28, MT6397_DIGLDO_CON0, 14, 2800000),
+       MT6397_REG_FIXED("ldo_vusb", VUSB, MT6397_DIGLDO_CON1, 14, 3300000),
+       MT6397_LDO("ldo_vmc", VMC, ldo_volt_table2,
+               MT6397_DIGLDO_CON2, 12, MT6397_DIGLDO_CON29, 0x10),
+       MT6397_LDO("ldo_vmch", VMCH, ldo_volt_table3,
+               MT6397_DIGLDO_CON3, 14, MT6397_DIGLDO_CON17, 0x80),
+       MT6397_LDO("ldo_vemc3v3", VEMC3V3, ldo_volt_table3,
+               MT6397_DIGLDO_CON4, 14, MT6397_DIGLDO_CON18, 0x10),
+       MT6397_LDO("ldo_vgp1", VGP1, ldo_volt_table4,
+               MT6397_DIGLDO_CON5, 15, MT6397_DIGLDO_CON19, 0xE0),
+       MT6397_LDO("ldo_vgp2", VGP2, ldo_volt_table5,
+               MT6397_DIGLDO_CON6, 15, MT6397_DIGLDO_CON20, 0xE0),
+       MT6397_LDO("ldo_vgp3", VGP3, ldo_volt_table5,
+               MT6397_DIGLDO_CON7, 15, MT6397_DIGLDO_CON21, 0xE0),
+       MT6397_LDO("ldo_vgp4", VGP4, ldo_volt_table5,
+               MT6397_DIGLDO_CON8, 15, MT6397_DIGLDO_CON22, 0xE0),
+       MT6397_LDO("ldo_vgp5", VGP5, ldo_volt_table6,
+               MT6397_DIGLDO_CON9, 15, MT6397_DIGLDO_CON23, 0xE0),
+       MT6397_LDO("ldo_vgp6", VGP6, ldo_volt_table5,
+               MT6397_DIGLDO_CON10, 15, MT6397_DIGLDO_CON33, 0xE0),
+       MT6397_LDO("ldo_vibr", VIBR, ldo_volt_table7,
+               MT6397_DIGLDO_CON24, 15, MT6397_DIGLDO_CON25, 0xE00),
+};
+
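+/*
+ * When the control bit in vselctrl_reg is set, the hardware takes its
+ * voltage selector from the "on" register, so repoint the descriptor's
+ * vsel_reg at vselon_reg for those bucks.
+ */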
+static int mt6397_set_buck_vosel_reg(struct platform_device *pdev)
+{
+       struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
+       int i;
+       u32 regval;
+
+       for (i = 0; i < MT6397_MAX_REGULATOR; i++) {
+               if (mt6397_regulators[i].vselctrl_reg) {
+                       if (regmap_read(mt6397->regmap,
+                               mt6397_regulators[i].vselctrl_reg,
+                               &regval) < 0) {
+                               dev_err(&pdev->dev,
+                                       "Failed to read buck ctrl\n");
+                               return -EIO;
+                       }
+
+                       if (regval & mt6397_regulators[i].vselctrl_mask) {
+                               mt6397_regulators[i].desc.vsel_reg =
+                                       mt6397_regulators[i].vselon_reg;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int mt6397_regulator_probe(struct platform_device *pdev)
+{
+       struct mt6397_chip *mt6397 = dev_get_drvdata(pdev->dev.parent);
+       struct regulator_config config = {};
+       struct regulator_dev *rdev;
+       int i;
+       u32 reg_value, version;
+
+       /* Query the buck controller to pick the active voltage-select register */
+       if (mt6397_set_buck_vosel_reg(pdev))
+               return -EIO;
+
+       /* Read PMIC chip revision to update constraints and voltage table */
+       if (regmap_read(mt6397->regmap, MT6397_CID, &reg_value) < 0) {
+               dev_err(&pdev->dev, "Failed to read Chip ID\n");
+               return -EIO;
+       }
+       dev_info(&pdev->dev, "Chip ID = 0x%x\n", reg_value);
+
+       version = (reg_value & 0xFF);
+       switch (version) {
+       case MT6397_REGULATOR_ID91:
+               mt6397_regulators[MT6397_ID_VGP2].desc.volt_table =
+                       ldo_volt_table5_v2;
+               break;
+       default:
+               break;
+       }
+
+       for (i = 0; i < MT6397_MAX_REGULATOR; i++) {
+               config.dev = &pdev->dev;
+               config.driver_data = &mt6397_regulators[i];
+               config.regmap = mt6397->regmap;
+               rdev = devm_regulator_register(&pdev->dev,
+                               &mt6397_regulators[i].desc, &config);
+               if (IS_ERR(rdev)) {
+                       dev_err(&pdev->dev, "failed to register %s\n",
+                               mt6397_regulators[i].desc.name);
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static struct platform_driver mt6397_regulator_driver = {
+       .driver = {
+               .name = "mt6397-regulator",
+       },
+       .probe = mt6397_regulator_probe,
+};
+
+module_platform_driver(mt6397_regulator_driver);
+
+MODULE_AUTHOR("Flora Fu <flora.fu@mediatek.com>");
+MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6397 PMIC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mt6397-regulator");
index 91eaaf01052494e6579a87890ac2875cce740018..24e812c48d93076a36039e991c51bb371fb26d6e 100644 (file)
@@ -270,6 +270,7 @@ EXPORT_SYMBOL_GPL(of_regulator_match);
 
 struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
                                            const struct regulator_desc *desc,
+                                           struct regulator_config *config,
                                            struct device_node **node)
 {
        struct device_node *search, *child;
@@ -307,6 +308,16 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
                        break;
                }
 
+               if (desc->of_parse_cb) {
+                       if (desc->of_parse_cb(child, desc, config)) {
+                               dev_err(dev,
+                                       "driver callback failed to parse DT for regulator %s\n",
+                                       child->name);
+                               init_data = NULL;
+                               break;
+                       }
+               }
+
                of_node_get(child);
                *node = child;
                break;
index c879dff597eeaba773468b66a71526a9ca1fb5e8..8cc8d1877c446a48737b5d337f9faf91dc6c37e1 100644 (file)
@@ -56,7 +56,7 @@
 #define PFUZE100_VGEN5VOL      0x70
 #define PFUZE100_VGEN6VOL      0x71
 
-enum chips { PFUZE100, PFUZE200 };
+enum chips { PFUZE100, PFUZE200, PFUZE3000 = 3 };
 
 struct pfuze_regulator {
        struct regulator_desc desc;
@@ -80,9 +80,18 @@ static const int pfuze100_vsnvs[] = {
        1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000,
 };
 
+static const int pfuze3000_sw2lo[] = {
+       1500000, 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000,
+};
+
+static const int pfuze3000_sw2hi[] = {
+       2500000, 2800000, 2850000, 3000000, 3100000, 3150000, 3200000, 3300000,
+};
+
 static const struct i2c_device_id pfuze_device_id[] = {
        {.name = "pfuze100", .driver_data = PFUZE100},
        {.name = "pfuze200", .driver_data = PFUZE200},
+       {.name = "pfuze3000", .driver_data = PFUZE3000},
        { }
 };
 MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
@@ -90,6 +99,7 @@ MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
 static const struct of_device_id pfuze_dt_ids[] = {
        { .compatible = "fsl,pfuze100", .data = (void *)PFUZE100},
        { .compatible = "fsl,pfuze200", .data = (void *)PFUZE200},
+       { .compatible = "fsl,pfuze3000", .data = (void *)PFUZE3000},
        { }
 };
 MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
@@ -219,6 +229,60 @@ static struct regulator_ops pfuze100_swb_regulator_ops = {
                .stby_mask = 0x20,      \
        }
 
+#define PFUZE3000_VCC_REG(_chip, _name, base, min, max, step)  {       \
+       .desc = {       \
+               .name = #_name, \
+               .n_voltages = ((max) - (min)) / (step) + 1,     \
+               .ops = &pfuze100_ldo_regulator_ops,     \
+               .type = REGULATOR_VOLTAGE,      \
+               .id = _chip ## _ ## _name,      \
+               .owner = THIS_MODULE,   \
+               .min_uV = (min),        \
+               .uV_step = (step),      \
+               .vsel_reg = (base),     \
+               .vsel_mask = 0x3,       \
+               .enable_reg = (base),   \
+               .enable_mask = 0x10,    \
+       },      \
+       .stby_reg = (base),     \
+       .stby_mask = 0x20,      \
+}
+
+#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step)  {       \
+       .desc = {       \
+               .name = #_name,\
+               .n_voltages = ((max) - (min)) / (step) + 1,     \
+               .ops = &pfuze100_sw_regulator_ops,      \
+               .type = REGULATOR_VOLTAGE,      \
+               .id = _chip ## _ ## _name,      \
+               .owner = THIS_MODULE,   \
+               .min_uV = (min),        \
+               .uV_step = (step),      \
+               .vsel_reg = (base) + PFUZE100_VOL_OFFSET,       \
+               .vsel_mask = 0x7,       \
+       },      \
+       .stby_reg = (base) + PFUZE100_STANDBY_OFFSET,   \
+       .stby_mask = 0x7,       \
+}
+
+#define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step)  {       \
+       .desc = {       \
+               .name = #_name,\
+               .n_voltages = ((max) - (min)) / (step) + 1,     \
+               .ops = &pfuze100_sw_regulator_ops,      \
+               .type = REGULATOR_VOLTAGE,      \
+               .id = _chip ## _ ## _name,      \
+               .owner = THIS_MODULE,   \
+               .min_uV = (min),        \
+               .uV_step = (step),      \
+               .vsel_reg = (base) + PFUZE100_VOL_OFFSET,       \
+               .vsel_mask = 0xf,       \
+       },      \
+       .stby_reg = (base) + PFUZE100_STANDBY_OFFSET,   \
+       .stby_mask = 0xf,       \
+}
+
 /* PFUZE100 */
 static struct pfuze_regulator pfuze100_regulators[] = {
        PFUZE100_SW_REG(PFUZE100, SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
@@ -254,6 +318,22 @@ static struct pfuze_regulator pfuze200_regulators[] = {
        PFUZE100_VGEN_REG(PFUZE200, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
 };
 
+static struct pfuze_regulator pfuze3000_regulators[] = {
+       PFUZE100_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 700000, 1475000, 25000),
+       PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
+       PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+       PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
+       PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
+       PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+       PFUZE100_FIXED_REG(PFUZE3000, VREFDDR, PFUZE100_VREFDDRCON, 750000),
+       PFUZE100_VGEN_REG(PFUZE3000, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
+       PFUZE100_VGEN_REG(PFUZE3000, VLDO2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
+       PFUZE3000_VCC_REG(PFUZE3000, VCCSD, PFUZE100_VGEN3VOL, 2850000, 3300000, 150000),
+       PFUZE3000_VCC_REG(PFUZE3000, V33, PFUZE100_VGEN4VOL, 2850000, 3300000, 150000),
+       PFUZE100_VGEN_REG(PFUZE3000, VLDO3, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
+       PFUZE100_VGEN_REG(PFUZE3000, VLDO4, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+};
+
 static struct pfuze_regulator *pfuze_regulators;
 
 #ifdef CONFIG_OF
@@ -294,6 +374,24 @@ static struct of_regulator_match pfuze200_matches[] = {
        { .name = "vgen6",      },
 };
 
+/* PFUZE3000 */
+static struct of_regulator_match pfuze3000_matches[] = {
+       { .name = "sw1a",       },
+       { .name = "sw1b",       },
+       { .name = "sw2",        },
+       { .name = "sw3",        },
+       { .name = "swbst",      },
+       { .name = "vsnvs",      },
+       { .name = "vrefddr",    },
+       { .name = "vldo1",      },
+       { .name = "vldo2",      },
+       { .name = "vccsd",      },
+       { .name = "v33",        },
+       { .name = "vldo3",      },
+       { .name = "vldo4",      },
+};
+
 static struct of_regulator_match *pfuze_matches;
 
 static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
@@ -313,6 +411,11 @@ static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
        }
 
        switch (chip->chip_id) {
+       case PFUZE3000:
+               pfuze_matches = pfuze3000_matches;
+               ret = of_regulator_match(dev, parent, pfuze3000_matches,
+                                        ARRAY_SIZE(pfuze3000_matches));
+               break;
        case PFUZE200:
                pfuze_matches = pfuze200_matches;
                ret = of_regulator_match(dev, parent, pfuze200_matches,
@@ -378,7 +481,8 @@ static int pfuze_identify(struct pfuze_chip *pfuze_chip)
                 * as ID=8 in PFUZE100
                 */
                dev_info(pfuze_chip->dev, "Assuming misprogrammed ID=0x8");
-       } else if ((value & 0x0f) != pfuze_chip->chip_id) {
+       } else if ((value & 0x0f) != pfuze_chip->chip_id &&
+                  (value & 0xf0) >> 4 != pfuze_chip->chip_id) {
                /* device ID does not match the configured chip */
                dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
                return -ENODEV;
@@ -417,7 +521,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
        int i, ret;
        const struct of_device_id *match;
        u32 regulator_num;
-       u32 sw_check_start, sw_check_end;
+       u32 sw_check_start, sw_check_end, sw_hi = 0x40;
 
        pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip),
                        GFP_KERNEL);
@@ -458,13 +562,19 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
 
        /* use the right regulators after identify the right device */
        switch (pfuze_chip->chip_id) {
+       case PFUZE3000:
+               pfuze_regulators = pfuze3000_regulators;
+               regulator_num = ARRAY_SIZE(pfuze3000_regulators);
+               sw_check_start = PFUZE3000_SW2;
+               sw_check_end = PFUZE3000_SW2;
+               sw_hi = 1 << 3;
+               break;
        case PFUZE200:
                pfuze_regulators = pfuze200_regulators;
                regulator_num = ARRAY_SIZE(pfuze200_regulators);
                sw_check_start = PFUZE200_SW2;
                sw_check_end = PFUZE200_SW3B;
                break;
-
        case PFUZE100:
        default:
                pfuze_regulators = pfuze100_regulators;
@@ -474,7 +584,8 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
                break;
        }
        dev_info(&client->dev, "pfuze%s found.\n",
-               (pfuze_chip->chip_id == PFUZE100) ? "100" : "200");
+               (pfuze_chip->chip_id == PFUZE100) ? "100" :
+               ((pfuze_chip->chip_id == PFUZE200) ? "200" : "3000"));
 
        memcpy(pfuze_chip->regulator_descs, pfuze_regulators,
                sizeof(pfuze_chip->regulator_descs));
@@ -498,10 +609,15 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
                /* SW2~SW4 high bit check and modify the voltage value table */
                if (i >= sw_check_start && i <= sw_check_end) {
                        regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
-                       if (val & 0x40) {
-                               desc->min_uV = 800000;
-                               desc->uV_step = 50000;
-                               desc->n_voltages = 51;
+                       if (val & sw_hi) {
+                               if (pfuze_chip->chip_id == PFUZE3000) {
+                                       desc->volt_table = pfuze3000_sw2hi;
+                                       desc->n_voltages = ARRAY_SIZE(pfuze3000_sw2hi);
+                               } else {
+                                       desc->min_uV = 800000;
+                                       desc->uV_step = 50000;
+                                       desc->n_voltages = 51;
+                               }
                        }
                }
 
index 8364ff331a81838c6e84fe822b17d434bfb74aa7..e8647f7cf25e27378af00ec7799195c2a29e995a 100644 (file)
@@ -227,9 +227,11 @@ static int rpm_reg_set_mV_sel(struct regulator_dev *rdev,
                return uV;
 
        mutex_lock(&vreg->lock);
-       vreg->uV = uV;
        if (vreg->is_enabled)
-               ret = rpm_reg_write(vreg, req, vreg->uV / 1000);
+               ret = rpm_reg_write(vreg, req, uV / 1000);
+
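+       /* Don't cache the new voltage if the RPM write failed */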
+       if (!ret)
+               vreg->uV = uV;
        mutex_unlock(&vreg->lock);
 
        return ret;
@@ -252,9 +254,11 @@ static int rpm_reg_set_uV_sel(struct regulator_dev *rdev,
                return uV;
 
        mutex_lock(&vreg->lock);
-       vreg->uV = uV;
        if (vreg->is_enabled)
-               ret = rpm_reg_write(vreg, req, vreg->uV);
+               ret = rpm_reg_write(vreg, req, uV);
+
+       if (!ret)
+               vreg->uV = uV;
        mutex_unlock(&vreg->lock);
 
        return ret;
@@ -674,6 +678,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
        vreg->desc.owner = THIS_MODULE;
        vreg->desc.type = REGULATOR_VOLTAGE;
        vreg->desc.name = pdev->dev.of_node->name;
+       vreg->desc.supply_name = "vin";
 
        vreg->rpm = dev_get_drvdata(pdev->dev.parent);
        if (!vreg->rpm) {
@@ -768,7 +773,7 @@ static int rpm_reg_probe(struct platform_device *pdev)
                        break;
                }
 
-               if (force_mode < 0) {
+               if (force_mode == -1) {
                        dev_err(&pdev->dev, "invalid force mode\n");
                        return -EINVAL;
                }
index c94a3e0f3b91b4af039740a32a883c55d836a51f..1f93b752a81cdc36a824459ebf66354aea275af8 100644 (file)
@@ -97,7 +97,7 @@ static int rk808_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
                                  RK808_RAMP_RATE_MASK, ramp_value);
 }
 
-int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
+static int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
 {
        unsigned int reg;
        int sel = regulator_map_voltage_linear_range(rdev, uv, uv);
@@ -112,7 +112,7 @@ int rk808_set_suspend_voltage(struct regulator_dev *rdev, int uv)
                                  sel);
 }
 
-int rk808_set_suspend_enable(struct regulator_dev *rdev)
+static int rk808_set_suspend_enable(struct regulator_dev *rdev)
 {
        unsigned int reg;
 
@@ -123,7 +123,7 @@ int rk808_set_suspend_enable(struct regulator_dev *rdev)
                                  0);
 }
 
-int rk808_set_suspend_disable(struct regulator_dev *rdev)
+static int rk808_set_suspend_disable(struct regulator_dev *rdev)
 {
        unsigned int reg;
 
index 870cc49438dbe55d237dde1602010bb20105e104..96d2c18e051a071de50a490181e88151a310bed8 100644 (file)
@@ -36,6 +36,8 @@ static struct regulator_ops rt5033_buck_ops = {
 static const struct regulator_desc rt5033_supported_regulators[] = {
        [RT5033_BUCK] = {
                .name           = "BUCK",
+               .of_match       = of_match_ptr("BUCK"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = RT5033_BUCK,
                .ops            = &rt5033_buck_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -50,6 +52,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
        },
        [RT5033_LDO] = {
                .name           = "LDO",
+               .of_match       = of_match_ptr("LDO"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = RT5033_LDO,
                .ops            = &rt5033_buck_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -64,6 +68,8 @@ static const struct regulator_desc rt5033_supported_regulators[] = {
        },
        [RT5033_SAFE_LDO] = {
                .name           = "SAFE_LDO",
+               .of_match       = of_match_ptr("SAFE_LDO"),
+               .regulators_node = of_match_ptr("regulators"),
                .id             = RT5033_SAFE_LDO,
                .ops            = &rt5033_safe_ldo_ops,
                .type           = REGULATOR_VOLTAGE,
@@ -81,7 +87,7 @@ static int rt5033_regulator_probe(struct platform_device *pdev)
        int ret, i;
        struct regulator_config config = {};
 
-       config.dev = &pdev->dev;
+       config.dev = rt5033->dev;
        config.driver_data = rt5033;
 
        for (i = 0; i < ARRAY_SIZE(rt5033_supported_regulators); i++) {
index 7380af8bd50d4e8d699f7564c6ab4bb3e4edc84b..b941e564b3f3405a647b0d13794eed5d3dffa3fc 100644 (file)
@@ -173,7 +173,7 @@ static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev,
 }
 
 /* Operations permitted on VDCDCx */
-static struct regulator_ops tps65023_dcdc_ops = {
+static const struct regulator_ops tps65023_dcdc_ops = {
        .is_enabled = regulator_is_enabled_regmap,
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
@@ -184,7 +184,7 @@ static struct regulator_ops tps65023_dcdc_ops = {
 };
 
 /* Operations permitted on LDOx */
-static struct regulator_ops tps65023_ldo_ops = {
+static const struct regulator_ops tps65023_ldo_ops = {
        .is_enabled = regulator_is_enabled_regmap,
        .enable = regulator_enable_regmap,
        .disable = regulator_disable_regmap,
@@ -194,7 +194,7 @@ static struct regulator_ops tps65023_ldo_ops = {
        .map_voltage = regulator_map_voltage_ascend,
 };
 
-static struct regmap_config tps65023_regmap_config = {
+static const struct regmap_config tps65023_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
 };
index 4aa60d74004e41ffdd7be050728f9cf63a7fa48c..6c719f23520aa776743dc15411971708406119ec 100644 (file)
@@ -26,7 +26,7 @@ static int __init rtc_hctosys(void)
 {
        int err = -ENODEV;
        struct rtc_time tm;
-       struct timespec tv = {
+       struct timespec64 tv64 = {
                .tv_nsec = NSEC_PER_SEC >> 1,
        };
        struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
@@ -45,25 +45,17 @@ static int __init rtc_hctosys(void)
 
        }
 
-       err = rtc_valid_tm(&tm);
-       if (err) {
-               dev_err(rtc->dev.parent,
-                       "hctosys: invalid date/time\n");
-               goto err_invalid;
-       }
-
-       rtc_tm_to_time(&tm, &tv.tv_sec);
+       tv64.tv_sec = rtc_tm_to_time64(&tm);
 
-       err = do_settimeofday(&tv);
+       err = do_settimeofday64(&tv64);
 
        dev_info(rtc->dev.parent,
                "setting system clock to "
-               "%d-%02d-%02d %02d:%02d:%02d UTC (%u)\n",
+               "%d-%02d-%02d %02d:%02d:%02d UTC (%lld)\n",
                tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                tm.tm_hour, tm.tm_min, tm.tm_sec,
-               (unsigned int) tv.tv_sec);
+               (long long) tv64.tv_sec);
 
-err_invalid:
 err_read:
        rtc_class_close(rtc);
 
index 45bfc28ee3aa8e09602427ae915c11bb43ac0c59..37215cf983e92926653d1f3206aa8c4c8842a55a 100644 (file)
@@ -73,10 +73,8 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
        else if (rtc->ops->set_time)
                err = rtc->ops->set_time(rtc->dev.parent, tm);
        else if (rtc->ops->set_mmss) {
-               unsigned long secs;
-               err = rtc_tm_to_time(tm, &secs);
-               if (err == 0)
-                       err = rtc->ops->set_mmss(rtc->dev.parent, secs);
+               time64_t secs64 = rtc_tm_to_time64(tm);
+               err = rtc->ops->set_mmss(rtc->dev.parent, secs64);
        } else
                err = -EINVAL;
 
@@ -105,7 +103,7 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
 
                err = rtc->ops->read_time(rtc->dev.parent, &old);
                if (err == 0) {
-                       rtc_time_to_tm(secs, &new);
+                       rtc_time64_to_tm(secs, &new);
 
                        /*
                         * avoid writing when we're going to change the day of
@@ -157,7 +155,7 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
        int err;
        struct rtc_time before, now;
        int first_time = 1;
-       unsigned long t_now, t_alm;
+       time64_t t_now, t_alm;
        enum { none, day, month, year } missing = none;
        unsigned days;
 
@@ -258,8 +256,8 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
        }
 
        /* with luck, no rollover is needed */
-       rtc_tm_to_time(&now, &t_now);
-       rtc_tm_to_time(&alarm->time, &t_alm);
+       t_now = rtc_tm_to_time64(&now);
+       t_alm = rtc_tm_to_time64(&alarm->time);
        if (t_now < t_alm)
                goto done;
 
@@ -273,7 +271,7 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
        case day:
                dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
                t_alm += 24 * 60 * 60;
-               rtc_time_to_tm(t_alm, &alarm->time);
+               rtc_time64_to_tm(t_alm, &alarm->time);
                break;
 
        /* Month rollover ... if it's the 31st, an alarm on the 3rd will
@@ -346,19 +344,19 @@ EXPORT_SYMBOL_GPL(rtc_read_alarm);
 static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
        struct rtc_time tm;
-       long now, scheduled;
+       time64_t now, scheduled;
        int err;
 
        err = rtc_valid_tm(&alarm->time);
        if (err)
                return err;
-       rtc_tm_to_time(&alarm->time, &scheduled);
+       scheduled = rtc_tm_to_time64(&alarm->time);
 
        /* Make sure we're not setting alarms in the past */
        err = __rtc_read_time(rtc, &tm);
        if (err)
                return err;
-       rtc_tm_to_time(&tm, &now);
+       now = rtc_tm_to_time64(&tm);
        if (scheduled <= now)
                return -ETIME;
        /*
index d049393692517bfea7229f6f0c673de3d5f57847..799c34bcb26f3b54cfc45100775990918727bb53 100644 (file)
@@ -304,12 +304,12 @@ static long rtc_dev_ioctl(struct file *file,
                 * Not supported here.
                 */
                {
-                       unsigned long now, then;
+                       time64_t now, then;
 
                        err = rtc_read_time(rtc, &tm);
                        if (err < 0)
                                return err;
-                       rtc_tm_to_time(&tm, &now);
+                       now = rtc_tm_to_time64(&tm);
 
                        alarm.time.tm_mday = tm.tm_mday;
                        alarm.time.tm_mon = tm.tm_mon;
@@ -317,11 +317,11 @@ static long rtc_dev_ioctl(struct file *file,
                        err  = rtc_valid_tm(&alarm.time);
                        if (err < 0)
                                return err;
-                       rtc_tm_to_time(&alarm.time, &then);
+                       then = rtc_tm_to_time64(&alarm.time);
 
                        /* alarm may need to wrap into tomorrow */
                        if (then < now) {
-                               rtc_time_to_tm(now + 24 * 60 * 60, &tm);
+                               rtc_time64_to_tm(now + 24 * 60 * 60, &tm);
                                alarm.time.tm_mday = tm.tm_mday;
                                alarm.time.tm_mon = tm.tm_mon;
                                alarm.time.tm_year = tm.tm_year;
index bf3e242ccc5cdaf1231c091eb48310c1e61cf88e..eb71872d0361c0dbedd8f994aae72ce6bd0d2c7e 100644 (file)
  *
  * If temporary failure is indicated the caller should try again 'soon'
  */
-int rtc_set_ntp_time(struct timespec now)
+int rtc_set_ntp_time(struct timespec64 now)
 {
        struct rtc_device *rtc;
        struct rtc_time tm;
        int err = -ENODEV;
 
        if (now.tv_nsec < (NSEC_PER_SEC >> 1))
-               rtc_time_to_tm(now.tv_sec, &tm);
+               rtc_time64_to_tm(now.tv_sec, &tm);
        else
-               rtc_time_to_tm(now.tv_sec + 1, &tm);
+               rtc_time64_to_tm(now.tv_sec + 1, &tm);
 
        rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
        if (rtc) {
index 1dba62c5cf6a0b364df79805f5e8ee857ba36b46..1efebc9eedfb384312de87e0bd4548599524b0dc 100644 (file)
@@ -136,11 +136,12 @@ static void __detach_handler (struct kref *kref)
        struct scsi_device_handler *scsi_dh = scsi_dh_data->scsi_dh;
        struct scsi_device *sdev = scsi_dh_data->sdev;
 
+       scsi_dh->detach(sdev);
+
        spin_lock_irq(sdev->request_queue->queue_lock);
        sdev->scsi_dh_data = NULL;
        spin_unlock_irq(sdev->request_queue->queue_lock);
 
-       scsi_dh->detach(sdev);
        sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", scsi_dh->name);
        module_put(scsi_dh->module);
 }
index 399516925d802fea379938b9958778daa7ee37f2..05ea0d49a3a3ddf2f039670db88c4d9f126662f4 100644 (file)
@@ -2800,9 +2800,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
         */
        sd_set_flush_flag(sdkp);
 
-       max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
-                               sdkp->max_xfer_blocks);
+       max_xfer = sdkp->max_xfer_blocks;
        max_xfer <<= ilog2(sdp->sector_size) - 9;
+
+       max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
+                               max_xfer);
        blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
        set_capacity(disk, sdkp->capacity);
        sd_config_write_same(sdkp);
index 99829985c1a194ebe6edc0e73912077bade693d3..95ccedabba4f9dca37dbd4909e3bc578ec619cd3 100644 (file)
@@ -185,6 +185,16 @@ config SPI_DAVINCI
        help
          SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
 
+config SPI_DLN2
+       tristate "Diolan DLN-2 USB SPI adapter"
+       depends on MFD_DLN2
+       help
+         If you say yes to this option, support will be included for Diolan
+         DLN2, a USB to SPI interface.
+
+         This driver can also be built as a module.  If so, the module
+         will be called spi-dln2.
+
 config SPI_EFM32
        tristate "EFM32 SPI controller"
        depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
@@ -279,7 +289,7 @@ config SPI_FSL_CPM
        depends on FSL_SOC
 
 config SPI_FSL_SPI
-       bool "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller"
+       tristate "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller"
        depends on OF
        select SPI_FSL_LIB
        select SPI_FSL_CPM if FSL_SOC
@@ -292,7 +302,6 @@ config SPI_FSL_SPI
 
 config SPI_FSL_DSPI
        tristate "Freescale DSPI controller"
-       select SPI_BITBANG
        select REGMAP_MMIO
        depends on SOC_VF610 || COMPILE_TEST
        help
@@ -300,7 +309,7 @@ config SPI_FSL_DSPI
          mode. VF610 platform uses the controller.
 
 config SPI_FSL_ESPI
-       bool "Freescale eSPI controller"
+       tristate "Freescale eSPI controller"
        depends on FSL_SOC
        select SPI_FSL_LIB
        help
@@ -460,7 +469,6 @@ config SPI_S3C24XX_FIQ
 config SPI_S3C64XX
        tristate "Samsung S3C64XX series type SPI"
        depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
-       select S3C64XX_PL080 if ARCH_S3C64XX
        help
          SPI driver for Samsung S3C64XX and newer SoCs.
 
@@ -503,6 +511,13 @@ config SPI_SIRF
        help
          SPI driver for CSR SiRFprimaII SoCs
 
+config SPI_ST_SSC4
+       tristate "STMicroelectronics SPI SSC-based driver"
+       depends on ARCH_STI
+       help
+         SPI support for STMicroelectronics SoCs. If you say yes to
+         this option, support will be included for the SSC-driven SPI.
+
 config SPI_SUN4I
        tristate "Allwinner A10 SoCs SPI controller"
        depends on ARCH_SUNXI || COMPILE_TEST
@@ -595,7 +610,6 @@ config SPI_XTENSA_XTFPGA
          16 bit words in SPI mode 0, automatically asserting CS on transfer
          start and deasserting on end.
 
-
 config SPI_NUC900
        tristate "Nuvoton NUC900 series SPI"
        depends on ARCH_W90X900
index 6b9d2ac629cce6ca7e886df22c2fb5b02874373c..d8cbf654976b5296aaa7561eeef6ba480631eb4f 100644 (file)
@@ -27,6 +27,7 @@ obj-$(CONFIG_SPI_CADENCE)             += spi-cadence.o
 obj-$(CONFIG_SPI_CLPS711X)             += spi-clps711x.o
 obj-$(CONFIG_SPI_COLDFIRE_QSPI)                += spi-coldfire-qspi.o
 obj-$(CONFIG_SPI_DAVINCI)              += spi-davinci.o
+obj-$(CONFIG_SPI_DLN2)                 += spi-dln2.o
 obj-$(CONFIG_SPI_DESIGNWARE)           += spi-dw.o
 obj-$(CONFIG_SPI_DW_MMIO)              += spi-dw-mmio.o
 obj-$(CONFIG_SPI_DW_PCI)               += spi-dw-midpci.o
@@ -76,6 +77,7 @@ obj-$(CONFIG_SPI_SH_HSPI)             += spi-sh-hspi.o
 obj-$(CONFIG_SPI_SH_MSIOF)             += spi-sh-msiof.o
 obj-$(CONFIG_SPI_SH_SCI)               += spi-sh-sci.o
 obj-$(CONFIG_SPI_SIRF)         += spi-sirf.o
+obj-$(CONFIG_SPI_ST_SSC4)              += spi-st-ssc4.o
 obj-$(CONFIG_SPI_SUN4I)                        += spi-sun4i.o
 obj-$(CONFIG_SPI_SUN6I)                        += spi-sun6i.o
 obj-$(CONFIG_SPI_TEGRA114)             += spi-tegra114.o
index 23d8f5f56579a83d21040ff8625e69c0c554da3b..9af7841f2e8c6010060c17a038bc8c86c792a729 100644 (file)
@@ -1046,6 +1046,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
        struct atmel_spi_device *asd;
        int                     timeout;
        int                     ret;
+       unsigned long           dma_timeout;
 
        as = spi_master_get_devdata(master);
 
@@ -1103,15 +1104,12 @@ static int atmel_spi_one_transfer(struct spi_master *master,
 
                /* interrupts are disabled, so free the lock for schedule */
                atmel_spi_unlock(as);
-               ret = wait_for_completion_timeout(&as->xfer_completion,
-                                                       SPI_DMA_TIMEOUT);
+               dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
+                                                         SPI_DMA_TIMEOUT);
                atmel_spi_lock(as);
-               if (WARN_ON(ret == 0)) {
-                       dev_err(&spi->dev,
-                               "spi trasfer timeout, err %d\n", ret);
+               if (WARN_ON(dma_timeout == 0)) {
+                       dev_err(&spi->dev, "spi transfer timeout\n");
                        as->done_status = -EIO;
-               } else {
-                       ret = 0;
                }
 
                if (as->done_status)
index 326f47973684aa454c5a69d201150a88bf72763f..f45e085c01a616436f7a6b115e69bb8144012463 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/init.h>
index 98aab457b24d987330280eb70bbf0dbb49f61a81..419a782ab6d50541809f0c8b10b3d8eaeeb1ae89 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
 #include <linux/clk.h>
index c20530982e2610c136f57c8b4ad4d343707dc7af..e73e2b052c9ccf0ca3ff2ef46950600aa5ff9f3f 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the
- * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  */
 
 #include <linux/kernel.h>
index dc7d2c2d643e80e3b6fa65564cc76b435d76ce50..5ef6638d5e8a2698a6c8e85fad41c46d1f03fd06 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/spinlock.h>
index ee4f91ccd8fd53bac3c77a04de9e898d690085bc..9a95862986c8381a82ea394e0a2e90dc5457d5f6 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index 41b5dc4445f622d9c29110318698ef0b89128850..688956ff5095c26a8c1101dc4740b310627ed294 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
- *
 */
 
 #include <linux/kernel.h>
index b3707badb1e58c559350919bac55b9451325a6e6..5e991065f5b0166437aceb3f313ed7bd65f9bcbd 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/interrupt.h>
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
new file mode 100644 (file)
index 0000000..3b7d91d
--- /dev/null
@@ -0,0 +1,881 @@
+/*
+ * Driver for the Diolan DLN-2 USB-SPI adapter
+ *
+ * Copyright (c) 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/dln2.h>
+#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
+#include <asm/unaligned.h>
+
+#define DLN2_SPI_MODULE_ID             0x02
+#define DLN2_SPI_CMD(cmd)              DLN2_CMD(cmd, DLN2_SPI_MODULE_ID)
+
+/* SPI commands */
+#define DLN2_SPI_GET_PORT_COUNT                        DLN2_SPI_CMD(0x00)
+#define DLN2_SPI_ENABLE                                DLN2_SPI_CMD(0x11)
+#define DLN2_SPI_DISABLE                       DLN2_SPI_CMD(0x12)
+#define DLN2_SPI_IS_ENABLED                    DLN2_SPI_CMD(0x13)
+#define DLN2_SPI_SET_MODE                      DLN2_SPI_CMD(0x14)
+#define DLN2_SPI_GET_MODE                      DLN2_SPI_CMD(0x15)
+#define DLN2_SPI_SET_FRAME_SIZE                        DLN2_SPI_CMD(0x16)
+#define DLN2_SPI_GET_FRAME_SIZE                        DLN2_SPI_CMD(0x17)
+#define DLN2_SPI_SET_FREQUENCY                 DLN2_SPI_CMD(0x18)
+#define DLN2_SPI_GET_FREQUENCY                 DLN2_SPI_CMD(0x19)
+#define DLN2_SPI_READ_WRITE                    DLN2_SPI_CMD(0x1A)
+#define DLN2_SPI_READ                          DLN2_SPI_CMD(0x1B)
+#define DLN2_SPI_WRITE                         DLN2_SPI_CMD(0x1C)
+#define DLN2_SPI_SET_DELAY_BETWEEN_SS          DLN2_SPI_CMD(0x20)
+#define DLN2_SPI_GET_DELAY_BETWEEN_SS          DLN2_SPI_CMD(0x21)
+#define DLN2_SPI_SET_DELAY_AFTER_SS            DLN2_SPI_CMD(0x22)
+#define DLN2_SPI_GET_DELAY_AFTER_SS            DLN2_SPI_CMD(0x23)
+#define DLN2_SPI_SET_DELAY_BETWEEN_FRAMES      DLN2_SPI_CMD(0x24)
+#define DLN2_SPI_GET_DELAY_BETWEEN_FRAMES      DLN2_SPI_CMD(0x25)
+#define DLN2_SPI_SET_SS                                DLN2_SPI_CMD(0x26)
+#define DLN2_SPI_GET_SS                                DLN2_SPI_CMD(0x27)
+#define DLN2_SPI_RELEASE_SS                    DLN2_SPI_CMD(0x28)
+#define DLN2_SPI_SS_VARIABLE_ENABLE            DLN2_SPI_CMD(0x2B)
+#define DLN2_SPI_SS_VARIABLE_DISABLE           DLN2_SPI_CMD(0x2C)
+#define DLN2_SPI_SS_VARIABLE_IS_ENABLED                DLN2_SPI_CMD(0x2D)
+#define DLN2_SPI_SS_AAT_ENABLE                 DLN2_SPI_CMD(0x2E)
+#define DLN2_SPI_SS_AAT_DISABLE                        DLN2_SPI_CMD(0x2F)
+#define DLN2_SPI_SS_AAT_IS_ENABLED             DLN2_SPI_CMD(0x30)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_ENABLE      DLN2_SPI_CMD(0x31)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_DISABLE     DLN2_SPI_CMD(0x32)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_IS_ENABLED  DLN2_SPI_CMD(0x33)
+#define DLN2_SPI_SET_CPHA                      DLN2_SPI_CMD(0x34)
+#define DLN2_SPI_GET_CPHA                      DLN2_SPI_CMD(0x35)
+#define DLN2_SPI_SET_CPOL                      DLN2_SPI_CMD(0x36)
+#define DLN2_SPI_GET_CPOL                      DLN2_SPI_CMD(0x37)
+#define DLN2_SPI_SS_MULTI_ENABLE               DLN2_SPI_CMD(0x38)
+#define DLN2_SPI_SS_MULTI_DISABLE              DLN2_SPI_CMD(0x39)
+#define DLN2_SPI_SS_MULTI_IS_ENABLED           DLN2_SPI_CMD(0x3A)
+#define DLN2_SPI_GET_SUPPORTED_MODES           DLN2_SPI_CMD(0x40)
+#define DLN2_SPI_GET_SUPPORTED_CPHA_VALUES     DLN2_SPI_CMD(0x41)
+#define DLN2_SPI_GET_SUPPORTED_CPOL_VALUES     DLN2_SPI_CMD(0x42)
+#define DLN2_SPI_GET_SUPPORTED_FRAME_SIZES     DLN2_SPI_CMD(0x43)
+#define DLN2_SPI_GET_SS_COUNT                  DLN2_SPI_CMD(0x44)
+#define DLN2_SPI_GET_MIN_FREQUENCY             DLN2_SPI_CMD(0x45)
+#define DLN2_SPI_GET_MAX_FREQUENCY             DLN2_SPI_CMD(0x46)
+#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_SS      DLN2_SPI_CMD(0x47)
+#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_SS      DLN2_SPI_CMD(0x48)
+#define DLN2_SPI_GET_MIN_DELAY_AFTER_SS                DLN2_SPI_CMD(0x49)
+#define DLN2_SPI_GET_MAX_DELAY_AFTER_SS                DLN2_SPI_CMD(0x4A)
+#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_FRAMES  DLN2_SPI_CMD(0x4B)
+#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_FRAMES  DLN2_SPI_CMD(0x4C)
+
+#define DLN2_SPI_MAX_XFER_SIZE                 256
+#define DLN2_SPI_BUF_SIZE                      (DLN2_SPI_MAX_XFER_SIZE + 16)
+#define DLN2_SPI_ATTR_LEAVE_SS_LOW             BIT(0)
+#define DLN2_TRANSFERS_WAIT_COMPLETE           1
+#define DLN2_TRANSFERS_CANCEL                  0
+#define DLN2_RPM_AUTOSUSPEND_TIMEOUT           2000
+
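+/* Per-port driver state, kept in the spi_master's devdata. */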
+struct dln2_spi {
+       struct platform_device *pdev;
+       struct spi_master *master;
+       u8 port;
+
+       /*
+        * This buffer will be used mainly for read/write operations. Since
+        * they're quite large, we cannot use the stack. Protection is not
+        * needed because all SPI communication is serialized by the SPI core.
+        */
+       void *buf;
+
+       u8 bpw;
+       u32 speed;
+       u16 mode;
+       u8 cs;
+};
+
+/*
+ * Enable/Disable SPI module. The disable command will wait for transfers to
+ * complete first.
+ */
+static int dln2_spi_enable(struct dln2_spi *dln2, bool enable)
+{
+       u16 cmd;
+       struct {
+               u8 port;
+               u8 wait_for_completion;
+       } tx;
+       unsigned len = sizeof(tx);
+
+       tx.port = dln2->port;
+
+       if (enable) {
+               cmd = DLN2_SPI_ENABLE;
+               len -= sizeof(tx.wait_for_completion);
+       } else {
+               tx.wait_for_completion = DLN2_TRANSFERS_WAIT_COMPLETE;
+               cmd = DLN2_SPI_DISABLE;
+       }
+
+       return dln2_transfer_tx(dln2->pdev, cmd, &tx, len);
+}
+
+/*
+ * Select/unselect multiple CS lines. The selected lines will be automatically
+ * toggled LOW/HIGH by the board firmware during transfers, provided they're
+ * enabled first.
+ *
+ * Ex: cs_mask = 0x03 -> CS0 & CS1 will be selected and the next WR/RD operation
+ *                       will toggle the lines LOW/HIGH automatically.
+ */
+static int dln2_spi_cs_set(struct dln2_spi *dln2, u8 cs_mask)
+{
+       struct {
+               u8 port;
+               u8 cs;
+       } tx;
+
+       tx.port = dln2->port;
+
+       /*
+        * According to Diolan docs, "a slave device can be selected by changing
+        * the corresponding bit value to 0". The rest must be set to 1. Hence
+        * the bitwise NOT in front.
+        */
+       tx.cs = ~cs_mask;
+
+       return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_SS, &tx, sizeof(tx));
+}
+
+/*
+ * Select one CS line. The other lines will be un-selected.
+ */
+static int dln2_spi_cs_set_one(struct dln2_spi *dln2, u8 cs)
+{
+       return dln2_spi_cs_set(dln2, BIT(cs));
+}
+
+/*
+ * Enable/disable CS lines for usage. The module has to be disabled first.
+ */
+static int dln2_spi_cs_enable(struct dln2_spi *dln2, u8 cs_mask, bool enable)
+{
+       struct {
+               u8 port;
+               u8 cs;
+       } tx;
+       u16 cmd;
+
+       tx.port = dln2->port;
+       tx.cs = cs_mask;
+       cmd = enable ? DLN2_SPI_SS_MULTI_ENABLE : DLN2_SPI_SS_MULTI_DISABLE;
+
+       return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx));
+}
+
+static int dln2_spi_cs_enable_all(struct dln2_spi *dln2, bool enable)
+{
+       u8 cs_mask = GENMASK(dln2->master->num_chipselect - 1, 0);
+
+       return dln2_spi_cs_enable(dln2, cs_mask, enable);
+}
+
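+/* Query how many chip-select pins this port provides. */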
+static int dln2_spi_get_cs_num(struct dln2_spi *dln2, u16 *cs_num)
+{
+       int ret;
+       struct {
+               u8 port;
+       } tx;
+       struct {
+               __le16 cs_count;
+       } rx;
+       unsigned rx_len = sizeof(rx);
+
+       tx.port = dln2->port;
+       ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SS_COUNT, &tx, sizeof(tx),
+                           &rx, &rx_len);
+       if (ret < 0)
+               return ret;
+       if (rx_len < sizeof(rx))
+               return -EPROTO;
+
+       *cs_num = le16_to_cpu(rx.cs_count);
+
+       dev_dbg(&dln2->pdev->dev, "cs_num = %d\n", *cs_num);
+
+       return 0;
+}
+
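+/* Read one bus-frequency attribute; cmd selects the min or max query. */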
+static int dln2_spi_get_speed(struct dln2_spi *dln2, u16 cmd, u32 *freq)
+{
+       int ret;
+       struct {
+               u8 port;
+       } tx;
+       struct {
+               __le32 speed;
+       } rx;
+       unsigned rx_len = sizeof(rx);
+
+       tx.port = dln2->port;
+
+       ret = dln2_transfer(dln2->pdev, cmd, &tx, sizeof(tx), &rx, &rx_len);
+       if (ret < 0)
+               return ret;
+       if (rx_len < sizeof(rx))
+               return -EPROTO;
+
+       *freq = le32_to_cpu(rx.speed);
+
+       return 0;
+}
+
+/*
+ * Get bus min/max frequencies.
+ */
+static int dln2_spi_get_speed_range(struct dln2_spi *dln2, u32 *fmin, u32 *fmax)
+{
+       int ret;
+
+       ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MIN_FREQUENCY, fmin);
+       if (ret < 0)
+               return ret;
+
+       ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MAX_FREQUENCY, fmax);
+       if (ret < 0)
+               return ret;
+
+       dev_dbg(&dln2->pdev->dev, "freq_min = %d, freq_max = %d\n",
+               *fmin, *fmax);
+
+       return 0;
+}
+
+/*
+ * Set the bus speed. The module will automatically round down to the closest
+ * available frequency and return it. The module has to be disabled first.
+ */
+static int dln2_spi_set_speed(struct dln2_spi *dln2, u32 speed)
+{
+       int ret;
+       struct {
+               u8 port;
+               __le32 speed;
+       } __packed tx;
+       struct {
+               __le32 speed;
+       } rx;
+       int rx_len = sizeof(rx);
+
+       tx.port = dln2->port;
+       tx.speed = cpu_to_le32(speed);
+
+       ret = dln2_transfer(dln2->pdev, DLN2_SPI_SET_FREQUENCY, &tx, sizeof(tx),
+                           &rx, &rx_len);
+       if (ret < 0)
+               return ret;
+       if (rx_len < sizeof(rx))
+               return -EPROTO;
+
+       return 0;
+}
+
+/*
+ * Change CPOL & CPHA. The module has to be disabled first.
+ */
+static int dln2_spi_set_mode(struct dln2_spi *dln2, u8 mode)
+{
+       struct {
+               u8 port;
+               u8 mode;
+       } tx;
+
+       tx.port = dln2->port;
+       tx.mode = mode;
+
+       return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_MODE, &tx, sizeof(tx));
+}
+
+/*
+ * Change frame size. The module has to be disabled first.
+ */
+static int dln2_spi_set_bpw(struct dln2_spi *dln2, u8 bpw)
+{
+       struct {
+               u8 port;
+               u8 bpw;
+       } tx;
+
+       tx.port = dln2->port;
+       tx.bpw = bpw;
+
+       return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_FRAME_SIZE,
+                               &tx, sizeof(tx));
+}
+
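+/*
+ * Query the supported frame sizes and fold them into a bits_per_word mask
+ * for the SPI core (bit N-1 set means N-bit words are supported).
+ */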
+static int dln2_spi_get_supported_frame_sizes(struct dln2_spi *dln2,
+                                             u32 *bpw_mask)
+{
+       int ret;
+       struct {
+               u8 port;
+       } tx;
+       struct {
+               u8 count;
+               u8 frame_sizes[36];
+       } *rx = dln2->buf;
+       unsigned rx_len = sizeof(*rx);
+       int i;
+
+       tx.port = dln2->port;
+
+       ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SUPPORTED_FRAME_SIZES,
+                           &tx, sizeof(tx), rx, &rx_len);
+       if (ret < 0)
+               return ret;
+       if (rx_len < sizeof(*rx))
+               return -EPROTO;
+       if (rx->count > ARRAY_SIZE(rx->frame_sizes))
+               return -EPROTO;
+
+       *bpw_mask = 0;
+       for (i = 0; i < rx->count; i++)
+               *bpw_mask |= BIT(rx->frame_sizes[i] - 1);
+
+       dev_dbg(&dln2->pdev->dev, "bpw_mask = 0x%X\n", *bpw_mask);
+
+       return 0;
+}
+
+/*
+ * Copy the data to the DLN2 buffer and change the byte order to LE, as
+ * required by the DLN2 module. The SPI core makes sure that the data length
+ * is a multiple of the word size.
+ */
+static int dln2_spi_copy_to_buf(u8 *dln2_buf, const u8 *src, u16 len, u8 bpw)
+{
+#ifdef __LITTLE_ENDIAN
+       memcpy(dln2_buf, src, len);
+#else
+       if (bpw <= 8) {
+               memcpy(dln2_buf, src, len);
+       } else if (bpw <= 16) {
+               __le16 *d = (__le16 *)dln2_buf;
+               u16 *s = (u16 *)src;
+
+               len = len / 2;
+               while (len--)
+                       *d++ = cpu_to_le16p(s++);
+       } else {
+               __le32 *d = (__le32 *)dln2_buf;
+               u32 *s = (u32 *)src;
+
+               len = len / 4;
+               while (len--)
+                       *d++ = cpu_to_le32p(s++);
+       }
+#endif
+
+       return 0;
+}
+
+/*
+ * Copy the data from the DLN2 buffer and convert it to CPU byte order, since
+ * the DLN2 buffer is LE ordered. The SPI core makes sure that the data length
+ * is a multiple of the word size. The RX dln2_buf is 2-byte aligned, so for
+ * BE we have to avoid unaligned accesses in the 32-bit case.
+ */
+static int dln2_spi_copy_from_buf(u8 *dest, const u8 *dln2_buf, u16 len, u8 bpw)
+{
+#ifdef __LITTLE_ENDIAN
+       memcpy(dest, dln2_buf, len);
+#else
+       if (bpw <= 8) {
+               memcpy(dest, dln2_buf, len);
+       } else if (bpw <= 16) {
+               u16 *d = (u16 *)dest;
+               __le16 *s = (__le16 *)dln2_buf;
+
+               len = len / 2;
+               while (len--)
+                       *d++ = le16_to_cpup(s++);
+       } else {
+               u32 *d = (u32 *)dest;
+               __le32 *s = (__le32 *)dln2_buf;
+
+               len = len / 4;
+               while (len--)
+                       *d++ = get_unaligned_le32(s++);
+       }
+#endif
+
+       return 0;
+}
+
+/*
+ * Perform one write operation.
+ */
+static int dln2_spi_write_one(struct dln2_spi *dln2, const u8 *data,
+                             u16 data_len, u8 attr)
+{
+       struct {
+               u8 port;
+               __le16 size;
+               u8 attr;
+               u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+       } __packed *tx = dln2->buf;
+       unsigned tx_len;
+
+       BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE);
+
+       if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+               return -EINVAL;
+
+       tx->port = dln2->port;
+       tx->size = cpu_to_le16(data_len);
+       tx->attr = attr;
+
+       dln2_spi_copy_to_buf(tx->buf, data, data_len, dln2->bpw);
+
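+       /* Send the fixed header plus only the bytes actually used in tx->buf */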
+       tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
+       return dln2_transfer_tx(dln2->pdev, DLN2_SPI_WRITE, tx, tx_len);
+}
+
+/*
+ * Perform one read operation.
+ */
+static int dln2_spi_read_one(struct dln2_spi *dln2, u8 *data,
+                            u16 data_len, u8 attr)
+{
+       int ret;
+       struct {
+               u8 port;
+               __le16 size;
+               u8 attr;
+       } __packed tx;
+       struct {
+               __le16 size;
+               u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+       } __packed *rx = dln2->buf;
+       unsigned rx_len = sizeof(*rx);
+
+       BUILD_BUG_ON(sizeof(*rx) > DLN2_SPI_BUF_SIZE);
+
+       if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+               return -EINVAL;
+
+       tx.port = dln2->port;
+       tx.size = cpu_to_le16(data_len);
+       tx.attr = attr;
+
+       ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ, &tx, sizeof(tx),
+                           rx, &rx_len);
+       if (ret < 0)
+               return ret;
+       if (rx_len < sizeof(rx->size) + data_len)
+               return -EPROTO;
+       if (le16_to_cpu(rx->size) != data_len)
+               return -EPROTO;
+
+       dln2_spi_copy_from_buf(data, rx->buf, data_len, dln2->bpw);
+
+       return 0;
+}
+
+/*
+ * Perform one write & read operation.
+ */
+static int dln2_spi_read_write_one(struct dln2_spi *dln2, const u8 *tx_data,
+                                  u8 *rx_data, u16 data_len, u8 attr)
+{
+       int ret;
+       struct {
+               u8 port;
+               __le16 size;
+               u8 attr;
+               u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+       } __packed *tx;
+       struct {
+               __le16 size;
+               u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+       } __packed *rx;
+       unsigned tx_len, rx_len;
+
+       BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE ||
+                    sizeof(*rx) > DLN2_SPI_BUF_SIZE);
+
+       if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+               return -EINVAL;
+
+       /*
+        * Since this is a pseudo full-duplex communication, it is safe to use
+        * the same buffer for both tx and rx: by the time the DLN2 sends the
+        * response back with the rx data, the tx buffer is no longer needed.
+        */
+       tx = dln2->buf;
+       rx = dln2->buf;
+
+       tx->port = dln2->port;
+       tx->size = cpu_to_le16(data_len);
+       tx->attr = attr;
+
+       dln2_spi_copy_to_buf(tx->buf, tx_data, data_len, dln2->bpw);
+
+       tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
+       rx_len = sizeof(*rx);
+
+       ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ_WRITE, tx, tx_len,
+                           rx, &rx_len);
+       if (ret < 0)
+               return ret;
+       if (rx_len < sizeof(rx->size) + data_len)
+               return -EPROTO;
+       if (le16_to_cpu(rx->size) != data_len)
+               return -EPROTO;
+
+       dln2_spi_copy_from_buf(rx_data, rx->buf, data_len, dln2->bpw);
+
+       return 0;
+}
+
+/*
+ * Read/write wrapper. It automatically splits an operation into multiple
+ * single transfers when the length exceeds the device buffer size.
+ */
+static int dln2_spi_rdwr(struct dln2_spi *dln2, const u8 *tx_data,
+                        u8 *rx_data, u16 data_len, u8 attr)
+{
+       int ret;
+       u16 len;
+       u8 temp_attr;
+       u16 remaining = data_len;
+       u16 offset;
+
+       do {
+               if (remaining > DLN2_SPI_MAX_XFER_SIZE) {
+                       len = DLN2_SPI_MAX_XFER_SIZE;
+                       temp_attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
+               } else {
+                       len = remaining;
+                       temp_attr = attr;
+               }
+
+               offset = data_len - remaining;
+
+               if (tx_data && rx_data) {
+                       ret = dln2_spi_read_write_one(dln2,
+                                                     tx_data + offset,
+                                                     rx_data + offset,
+                                                     len, temp_attr);
+               } else if (tx_data) {
+                       ret = dln2_spi_write_one(dln2,
+                                                tx_data + offset,
+                                                len, temp_attr);
+               } else if (rx_data) {
+                       ret = dln2_spi_read_one(dln2,
+                                               rx_data + offset,
+                                               len, temp_attr);
+               } else {
+                       return -EINVAL;
+               }
+
+               if (ret < 0)
+                       return ret;
+
+               remaining -= len;
+       } while (remaining);
+
+       return 0;
+}
+
+static int dln2_spi_prepare_message(struct spi_master *master,
+                                   struct spi_message *message)
+{
+       int ret;
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+       struct spi_device *spi = message->spi;
+
+       if (dln2->cs != spi->chip_select) {
+               ret = dln2_spi_cs_set_one(dln2, spi->chip_select);
+               if (ret < 0)
+                       return ret;
+
+               dln2->cs = spi->chip_select;
+       }
+
+       return 0;
+}
+
+static int dln2_spi_transfer_setup(struct dln2_spi *dln2, u32 speed,
+                                  u8 bpw, u8 mode)
+{
+       int ret;
+       bool bus_setup_change;
+
+       bus_setup_change = dln2->speed != speed || dln2->mode != mode ||
+                          dln2->bpw != bpw;
+
+       if (!bus_setup_change)
+               return 0;
+
+       ret = dln2_spi_enable(dln2, false);
+       if (ret < 0)
+               return ret;
+
+       if (dln2->speed != speed) {
+               ret = dln2_spi_set_speed(dln2, speed);
+               if (ret < 0)
+                       return ret;
+
+               dln2->speed = speed;
+       }
+
+       if (dln2->mode != mode) {
+               ret = dln2_spi_set_mode(dln2, mode & 0x3);
+               if (ret < 0)
+                       return ret;
+
+               dln2->mode = mode;
+       }
+
+       if (dln2->bpw != bpw) {
+               ret = dln2_spi_set_bpw(dln2, bpw);
+               if (ret < 0)
+                       return ret;
+
+               dln2->bpw = bpw;
+       }
+
+       return dln2_spi_enable(dln2, true);
+}
+
+static int dln2_spi_transfer_one(struct spi_master *master,
+                                struct spi_device *spi,
+                                struct spi_transfer *xfer)
+{
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+       int status;
+       u8 attr = 0;
+
+       status = dln2_spi_transfer_setup(dln2, xfer->speed_hz,
+                                        xfer->bits_per_word,
+                                        spi->mode);
+       if (status < 0) {
+               dev_err(&dln2->pdev->dev, "Cannot setup transfer\n");
+               return status;
+       }
+
+       if (!xfer->cs_change && !spi_transfer_is_last(master, xfer))
+               attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
+
+       status = dln2_spi_rdwr(dln2, xfer->tx_buf, xfer->rx_buf,
+                              xfer->len, attr);
+       if (status < 0)
+               dev_err(&dln2->pdev->dev, "write/read failed!\n");
+
+       return status;
+}
+
+static int dln2_spi_probe(struct platform_device *pdev)
+{
+       struct spi_master *master;
+       struct dln2_spi *dln2;
+       struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+       int ret;
+
+       master = spi_alloc_master(&pdev->dev, sizeof(*dln2));
+       if (!master)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, master);
+
+       dln2 = spi_master_get_devdata(master);
+
+       dln2->buf = devm_kmalloc(&pdev->dev, DLN2_SPI_BUF_SIZE, GFP_KERNEL);
+       if (!dln2->buf) {
+               ret = -ENOMEM;
+               goto exit_free_master;
+       }
+
+       dln2->master = master;
+       dln2->pdev = pdev;
+       dln2->port = pdata->port;
+       /* cs/mode can never be 0xff, so the first transfer will set them */
+       dln2->cs = 0xff;
+       dln2->mode = 0xff;
+
+       /* disable SPI module before continuing with the setup */
+       ret = dln2_spi_enable(dln2, false);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to disable SPI module\n");
+               goto exit_free_master;
+       }
+
+       ret = dln2_spi_get_cs_num(dln2, &master->num_chipselect);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to get number of CS pins\n");
+               goto exit_free_master;
+       }
+
+       ret = dln2_spi_get_speed_range(dln2,
+                                      &master->min_speed_hz,
+                                      &master->max_speed_hz);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to read bus min/max freqs\n");
+               goto exit_free_master;
+       }
+
+       ret = dln2_spi_get_supported_frame_sizes(dln2,
+                                                &master->bits_per_word_mask);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to read supported frame sizes\n");
+               goto exit_free_master;
+       }
+
+       ret = dln2_spi_cs_enable_all(dln2, true);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to enable CS pins\n");
+               goto exit_free_master;
+       }
+
+       master->bus_num = -1;
+       master->mode_bits = SPI_CPOL | SPI_CPHA;
+       master->prepare_message = dln2_spi_prepare_message;
+       master->transfer_one = dln2_spi_transfer_one;
+       master->auto_runtime_pm = true;
+
+       /* enable SPI module, we're good to go */
+       ret = dln2_spi_enable(dln2, true);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to enable SPI module\n");
+               goto exit_free_master;
+       }
+
+       pm_runtime_set_autosuspend_delay(&pdev->dev,
+                                        DLN2_RPM_AUTOSUSPEND_TIMEOUT);
+       pm_runtime_use_autosuspend(&pdev->dev);
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
+       ret = devm_spi_register_master(&pdev->dev, master);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to register master\n");
+               goto exit_register;
+       }
+
+       return ret;
+
+exit_register:
+       pm_runtime_disable(&pdev->dev);
+       pm_runtime_set_suspended(&pdev->dev);
+
+       if (dln2_spi_enable(dln2, false) < 0)
+               dev_err(&pdev->dev, "Failed to disable SPI module\n");
+exit_free_master:
+       spi_master_put(master);
+
+       return ret;
+}
+
+static int dln2_spi_remove(struct platform_device *pdev)
+{
+       struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+       pm_runtime_disable(&pdev->dev);
+
+       if (dln2_spi_enable(dln2, false) < 0)
+               dev_err(&pdev->dev, "Failed to disable SPI module\n");
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dln2_spi_suspend(struct device *dev)
+{
+       int ret;
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+       ret = spi_master_suspend(master);
+       if (ret < 0)
+               return ret;
+
+       if (!pm_runtime_suspended(dev)) {
+               ret = dln2_spi_enable(dln2, false);
+               if (ret < 0)
+                       return ret;
+       }
+
+       /*
+        * USB power may be cut off during sleep. Resetting the following
+        * parameters will force the board to be set up before first transfer.
+        */
+       dln2->cs = 0xff;
+       dln2->speed = 0;
+       dln2->bpw = 0;
+       dln2->mode = 0xff;
+
+       return 0;
+}
+
+static int dln2_spi_resume(struct device *dev)
+{
+       int ret;
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+       if (!pm_runtime_suspended(dev)) {
+               ret = dln2_spi_cs_enable_all(dln2, true);
+               if (ret < 0)
+                       return ret;
+
+               ret = dln2_spi_enable(dln2, true);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int dln2_spi_runtime_suspend(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+       return dln2_spi_enable(dln2, false);
+}
+
+static int dln2_spi_runtime_resume(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+       return dln2_spi_enable(dln2, true);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops dln2_spi_pm = {
+       SET_SYSTEM_SLEEP_PM_OPS(dln2_spi_suspend, dln2_spi_resume)
+       SET_RUNTIME_PM_OPS(dln2_spi_runtime_suspend,
+                          dln2_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver spi_dln2_driver = {
+       .driver = {
+               .name   = "dln2-spi",
+               .pm     = &dln2_spi_pm,
+       },
+       .probe          = dln2_spi_probe,
+       .remove         = dln2_spi_remove,
+};
+module_platform_driver(spi_dln2_driver);
+
+MODULE_DESCRIPTION("Driver for the Diolan DLN2 SPI master interface");
+MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dln2-spi");
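
For reference, the chunking performed by dln2_spi_rdwr() above is plain offset arithmetic, with the slave-select kept low for every chunk except the last. A minimal standalone sketch of the same loop (XFER_MAX is a stand-in for DLN2_SPI_MAX_XFER_SIZE, whose real value is defined in the driver; not driver code):

/* Standalone sketch of the dln2_spi_rdwr() chunking loop; illustrative only.
 * XFER_MAX stands in for DLN2_SPI_MAX_XFER_SIZE; ATTR_LEAVE_SS_LOW mirrors
 * DLN2_SPI_ATTR_LEAVE_SS_LOW.
 */
#include <stdio.h>

#define XFER_MAX 256
#define ATTR_LEAVE_SS_LOW 1

int main(void)
{
	unsigned int data_len = 600, remaining = data_len;

	do {
		unsigned int len = remaining > XFER_MAX ? XFER_MAX : remaining;
		/* keep SS asserted for all but the last chunk; the driver
		 * passes the caller's attr for the final one */
		int attr = remaining > XFER_MAX ? ATTR_LEAVE_SS_LOW : 0;
		unsigned int offset = data_len - remaining;

		printf("chunk at offset %u, len %u, attr %d\n",
		       offset, len, attr);
		remaining -= len;
	} while (remaining);

	return 0;
}
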
index a67d37c7e3c00f9518694f267aa75efd5c73398a..a0197fd4e95c40b26817fe942d2069a0381a41e5 100644 (file)
@@ -247,9 +247,9 @@ static struct dw_spi_dma_ops mid_dma_ops = {
 
 /* Some specific info for SPI0 controller on Intel MID */
 
-/* HW info for MRST CLk Control Unit, one 32b reg */
+/* HW info for MRST Clk Control Unit, 32b reg per controller */
 #define MRST_SPI_CLK_BASE      100000000       /* 100 MHz */
-#define MRST_CLK_SPI0_REG      0xff11d86c
+#define MRST_CLK_SPI_REG       0xff11d86c
 #define CLK_SPI_BDIV_OFFSET    0
 #define CLK_SPI_BDIV_MASK      0x00000007
 #define CLK_SPI_CDIV_OFFSET    9
@@ -261,16 +261,17 @@ int dw_spi_mid_init(struct dw_spi *dws)
        void __iomem *clk_reg;
        u32 clk_cdiv;
 
-       clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
+       clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
        if (!clk_reg)
                return -ENOMEM;
 
-       /* get SPI controller operating freq info */
-       clk_cdiv  = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET;
+       /* Get SPI controller operating freq info */
+       clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
+       clk_cdiv &= CLK_SPI_CDIV_MASK;
+       clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
        dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
-       iounmap(clk_reg);
 
-       dws->num_cs = 16;
+       iounmap(clk_reg);
 
 #ifdef CONFIG_SPI_DW_MID_DMA
        dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
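
The hunk above switches from a single SPI0 clock register to indexing one 32-bit register per controller (clk_reg + dws->bus_num * sizeof(u32)). A runnable sketch of the resulting divider math; the register value and the CDIV mask (not shown in this hunk) are assumptions for illustration:

/* Sketch of the per-controller clock lookup in dw_spi_mid_init(). The
 * register value is faked; on hardware it comes from
 * readl(clk_reg + dws->bus_num * sizeof(u32)).
 */
#include <stdio.h>
#include <stdint.h>

#define MRST_SPI_CLK_BASE	100000000	/* 100 MHz */
#define CLK_SPI_CDIV_MASK	0x00000e00	/* assumed: bits 11:9 */
#define CLK_SPI_CDIV_OFFSET	9

int main(void)
{
	uint32_t reg = 0x00000400;	/* pretend readl() result */
	uint32_t cdiv = (reg & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET;

	printf("max_freq = %u Hz\n", MRST_SPI_CLK_BASE / (cdiv + 1));
	return 0;
}
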
index ba68da12cdf090e11b1d013bcc48315c74c44013..5ba331047cbefdea6d948ca20c8d6d3898cafbb0 100644 (file)
@@ -30,10 +30,20 @@ struct dw_spi_pci {
 
 struct spi_pci_desc {
        int     (*setup)(struct dw_spi *);
+       u16     num_cs;
+       u16     bus_num;
 };
 
-static struct spi_pci_desc spi_pci_mid_desc = {
+static struct spi_pci_desc spi_pci_mid_desc_1 = {
        .setup = dw_spi_mid_init,
+       .num_cs = 32,
+       .bus_num = 0,
+};
+
+static struct spi_pci_desc spi_pci_mid_desc_2 = {
+       .setup = dw_spi_mid_init,
+       .num_cs = 4,
+       .bus_num = 1,
 };
 
 static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -65,18 +75,23 @@ static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        dws->regs = pcim_iomap_table(pdev)[pci_bar];
 
-       dws->bus_num = 0;
-       dws->num_cs = 4;
        dws->irq = pdev->irq;
 
        /*
         * Specific handling for platforms, like DMA setup,
         * clock rate, FIFO depth.
         */
-       if (desc && desc->setup) {
-               ret = desc->setup(dws);
-               if (ret)
-                       return ret;
+       if (desc) {
+               dws->num_cs = desc->num_cs;
+               dws->bus_num = desc->bus_num;
+
+               if (desc->setup) {
+                       ret = desc->setup(dws);
+                       if (ret)
+                               return ret;
+               }
+       } else {
+               return -ENODEV;
        }
 
        ret = dw_spi_add_host(&pdev->dev, dws);
@@ -121,7 +136,14 @@ static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, spi_suspend, spi_resume);
 
 static const struct pci_device_id pci_ids[] = {
        /* Intel MID platform SPI controller 0 */
-       { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc},
+       /*
+        * Access to device 8086:0801 is disabled by the HW, since it is
+        * used exclusively by the SCU to communicate with the MSIC.
+        */
+       /* Intel MID platform SPI controller 1 */
+       { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc_1},
+       /* Intel MID platform SPI controller 2 */
+       { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&spi_pci_mid_desc_2},
        {},
 };
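
Each PCI id entry now carries its per-controller descriptor in driver_data, which spi_pci_probe() casts back to recover num_cs and bus_num. A userspace analogue of that lookup pattern (struct names and id table type are trimmed stand-ins for spi_pci_desc and pci_device_id):

/* Userspace analogue of the driver_data descriptor lookup; not driver code. */
#include <stdio.h>
#include <stdint.h>

struct desc { uint16_t num_cs, bus_num; };
struct id { unsigned int vendor, device; uintptr_t driver_data; };

static const struct desc desc1 = { .num_cs = 32, .bus_num = 0 };
static const struct desc desc2 = { .num_cs = 4,  .bus_num = 1 };

static const struct id ids[] = {
	{ 0x8086, 0x0800, (uintptr_t)&desc1 },
	{ 0x8086, 0x0812, (uintptr_t)&desc2 },
};

int main(void)
{
	for (unsigned int i = 0; i < 2; i++) {
		/* probe recovers the descriptor and bails out if none */
		const struct desc *d = (const struct desc *)ids[i].driver_data;

		printf("%04x:%04x -> num_cs=%u bus_num=%u\n",
		       ids[i].vendor, ids[i].device, d->num_cs, d->bus_num);
	}
	return 0;
}
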
 
index 8edcd1b84562109799281fb48867df8ce73ac7b2..5a97a62b298ac1a526d4c1bde932f3eb2d5ad574 100644 (file)
@@ -608,7 +608,7 @@ static void dw_spi_cleanup(struct spi_device *spi)
 }
 
 /* Restart the controller, disable all interrupts, clean rx fifo */
-static void spi_hw_init(struct dw_spi *dws)
+static void spi_hw_init(struct device *dev, struct dw_spi *dws)
 {
        spi_enable_chip(dws, 0);
        spi_mask_intr(dws, 0xff);
@@ -626,9 +626,10 @@ static void spi_hw_init(struct dw_spi *dws)
                        if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
                                break;
                }
+               dw_writew(dws, DW_SPI_TXFLTR, 0);
 
                dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
-               dw_writew(dws, DW_SPI_TXFLTR, 0);
+               dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
        }
 }
 
@@ -668,7 +669,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
        master->dev.of_node = dev->of_node;
 
        /* Basic HW init */
-       spi_hw_init(dws);
+       spi_hw_init(dev, dws);
 
        if (dws->dma_ops && dws->dma_ops->dma_init) {
                ret = dws->dma_ops->dma_init(dws);
@@ -731,7 +732,7 @@ int dw_spi_resume_host(struct dw_spi *dws)
 {
        int ret;
 
-       spi_hw_init(dws);
+       spi_hw_init(&dws->master->dev, dws);
        ret = spi_master_resume(dws->master);
        if (ret)
                dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
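
The FIFO-size detection touched in spi_hw_init() above works by writing increasing threshold values to TXFLTR until one no longer reads back. A mocked, runnable sketch of the probe loop; the depth, the register's saturating behaviour, and the loop bounds are modelled here as assumptions, not taken from the driver:

/* Mocked version of the spi_hw_init() FIFO probe; illustrative only. */
#include <stdio.h>

#define HW_FIFO_DEPTH 16	/* pretend hardware depth */

static unsigned int txfltr;

static void writew(unsigned int v)
{
	if (v <= HW_FIFO_DEPTH)	/* values past the depth don't stick */
		txfltr = v;
}

static unsigned int readw(void)
{
	return txfltr;
}

int main(void)
{
	unsigned int fifo;

	for (fifo = 2; fifo <= 257; fifo++) {
		writew(fifo);
		if (fifo != readw())
			break;
	}
	writew(0);

	printf("fifo_len = %u\n", fifo == 2 ? 0 : fifo - 1);
	return 0;
}
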
index 912b9037e9cf511985ca15ce0364f64f2c57fbff..286b2c81fc6bb0d7500568df47753b47094da8a2 100644 (file)
@@ -353,16 +353,6 @@ static int falcon_sflash_setup(struct spi_device *spi)
        return 0;
 }
 
-static int falcon_sflash_prepare_xfer(struct spi_master *master)
-{
-       return 0;
-}
-
-static int falcon_sflash_unprepare_xfer(struct spi_master *master)
-{
-       return 0;
-}
-
 static int falcon_sflash_xfer_one(struct spi_master *master,
                                        struct spi_message *m)
 {
@@ -420,9 +410,7 @@ static int falcon_sflash_probe(struct platform_device *pdev)
        master->mode_bits = SPI_MODE_3;
        master->flags = SPI_MASTER_HALF_DUPLEX;
        master->setup = falcon_sflash_setup;
-       master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
        master->transfer_one_message = falcon_sflash_xfer_one;
-       master->unprepare_transfer_hardware = falcon_sflash_unprepare_xfer;
        master->dev.of_node = pdev->dev.of_node;
 
        ret = devm_spi_register_master(&pdev->dev, master);
index e85ab1cb17a24a4decb61d28fc592a82f5b314d8..9c46a3058743b75228256f55e64b8419b49c1f3d 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/fsl_devices.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/spi/spi.h>
 #include <linux/types.h>
@@ -68,6 +69,7 @@ void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
                }
        }
 }
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);
 
 static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
 {
@@ -162,6 +164,7 @@ err_rx_dma:
                dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
        return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);
 
 void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
 {
@@ -174,6 +177,7 @@ void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
                dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
        mspi->xfer_in_progress = NULL;
 }
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
 
 void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
 {
@@ -198,6 +202,7 @@ void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
        else
                complete(&mspi->done);
 }
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);
 
 static void *fsl_spi_alloc_dummy_rx(void)
 {
@@ -375,6 +380,7 @@ err_pram:
        fsl_spi_free_dummy_rx();
        return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);
 
 void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
 {
@@ -389,3 +395,6 @@ void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
        cpm_muram_free(cpm_muram_offset(mspi->pram));
        fsl_spi_free_dummy_rx();
 }
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
+
+MODULE_LICENSE("GPL");
index 4cda994d3f40cf86116b85ff21e71f7d8e33ed4d..d1a39249704a7e3a16bcd861bc7f16f5a9ed02dd 100644 (file)
@@ -106,7 +106,7 @@ struct chip_data {
 };
 
 struct fsl_dspi {
-       struct spi_bitbang      bitbang;
+       struct spi_master       *master;
        struct platform_device  *pdev;
 
        struct regmap           *regmap;
@@ -114,6 +114,7 @@ struct fsl_dspi {
        struct clk              *clk;
 
        struct spi_transfer     *cur_transfer;
+       struct spi_message      *cur_msg;
        struct chip_data        *cur_chip;
        size_t                  len;
        void                    *tx;
@@ -123,6 +124,7 @@ struct fsl_dspi {
        char                    dataflags;
        u8                      cs;
        u16                     void_write_data;
+       u32                     cs_change;
 
        wait_queue_head_t       waitq;
        u32                     waitflags;
@@ -225,6 +227,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
                if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
                        /* last transfer in the transfer */
                        dspi_pushr |= SPI_PUSHR_EOQ;
+                       if (dspi->cs_change && !dspi->len)
+                               dspi_pushr &= ~SPI_PUSHR_CONT;
                } else if (tx_word && (dspi->len == 1))
                        dspi_pushr |= SPI_PUSHR_EOQ;
 
@@ -246,6 +250,7 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
        int rx_count = 0;
        int rx_word = is_double_byte_mode(dspi);
        u16 d;
+
        while ((dspi->rx < dspi->rx_end)
                        && (rx_count < DSPI_FIFO_SIZE)) {
                if (rx_word) {
@@ -276,86 +281,89 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
        return rx_count;
 }
 
-static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t)
+static int dspi_transfer_one_message(struct spi_master *master,
+               struct spi_message *message)
 {
-       struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
-       dspi->cur_transfer = t;
-       dspi->cur_chip = spi_get_ctldata(spi);
-       dspi->cs = spi->chip_select;
-       dspi->void_write_data = dspi->cur_chip->void_write_data;
-
-       dspi->dataflags = 0;
-       dspi->tx = (void *)t->tx_buf;
-       dspi->tx_end = dspi->tx + t->len;
-       dspi->rx = t->rx_buf;
-       dspi->rx_end = dspi->rx + t->len;
-       dspi->len = t->len;
-
-       if (!dspi->rx)
-               dspi->dataflags |= TRAN_STATE_RX_VOID;
-
-       if (!dspi->tx)
-               dspi->dataflags |= TRAN_STATE_TX_VOID;
-
-       regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
-       regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), dspi->cur_chip->ctar_val);
-       regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
-
-       if (t->speed_hz)
+       struct fsl_dspi *dspi = spi_master_get_devdata(master);
+       struct spi_device *spi = message->spi;
+       struct spi_transfer *transfer;
+       int status = 0;
+
+       message->actual_length = 0;
+
+       list_for_each_entry(transfer, &message->transfers, transfer_list) {
+               dspi->cur_transfer = transfer;
+               dspi->cur_msg = message;
+               dspi->cur_chip = spi_get_ctldata(spi);
+               dspi->cs = spi->chip_select;
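+               /* force CS deassert after the last transfer in the message */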
+               if (dspi->cur_transfer->transfer_list.next
+                               == &dspi->cur_msg->transfers)
+                       transfer->cs_change = 1;
+               dspi->cs_change = transfer->cs_change;
+               dspi->void_write_data = dspi->cur_chip->void_write_data;
+
+               dspi->dataflags = 0;
+               dspi->tx = (void *)transfer->tx_buf;
+               dspi->tx_end = dspi->tx + transfer->len;
+               dspi->rx = transfer->rx_buf;
+               dspi->rx_end = dspi->rx + transfer->len;
+               dspi->len = transfer->len;
+
+               if (!dspi->rx)
+                       dspi->dataflags |= TRAN_STATE_RX_VOID;
+
+               if (!dspi->tx)
+                       dspi->dataflags |= TRAN_STATE_TX_VOID;
+
+               regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
+               regmap_update_bits(dspi->regmap, SPI_MCR,
+                               SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
+                               SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
                regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
                                dspi->cur_chip->ctar_val);
+               if (transfer->speed_hz)
+                       regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
+                                       dspi->cur_chip->ctar_val);
 
-       dspi_transfer_write(dspi);
+               regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
+               message->actual_length += dspi_transfer_write(dspi);
 
-       if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
-               dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
-       dspi->waitflags = 0;
-
-       return t->len - dspi->len;
-}
+               if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
+                       dev_err(&dspi->pdev->dev, "waiting for transfer to complete failed!\n");
+               dspi->waitflags = 0;
 
-static void dspi_chipselect(struct spi_device *spi, int value)
-{
-       struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
-       unsigned int pushr;
-
-       regmap_read(dspi->regmap, SPI_PUSHR, &pushr);
-
-       switch (value) {
-       case BITBANG_CS_ACTIVE:
-               pushr |= SPI_PUSHR_CONT;
-               break;
-       case BITBANG_CS_INACTIVE:
-               pushr &= ~SPI_PUSHR_CONT;
-               break;
+               if (transfer->delay_usecs)
+                       udelay(transfer->delay_usecs);
        }
 
-       regmap_write(dspi->regmap, SPI_PUSHR, pushr);
+       message->status = status;
+       spi_finalize_current_message(master);
+
+       return status;
 }
 
-static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+static int dspi_setup(struct spi_device *spi)
 {
        struct chip_data *chip;
        struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
        unsigned char br = 0, pbr = 0, fmsz = 0;
 
+       if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
+               fmsz = spi->bits_per_word - 1;
+       } else {
+               pr_err("Invalid wordsize\n");
+               return -ENODEV;
+       }
+
        /* Only alloc on first setup */
        chip = spi_get_ctldata(spi);
        if (chip == NULL) {
-               chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
-                                   GFP_KERNEL);
+               chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
                if (!chip)
                        return -ENOMEM;
        }
 
        chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
                SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
-       if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
-               fmsz = spi->bits_per_word - 1;
-       } else {
-               pr_err("Invalid wordsize\n");
-               return -ENODEV;
-       }
 
        chip->void_write_data = 0;
 
@@ -374,34 +382,34 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
        return 0;
 }
 
-static int dspi_setup(struct spi_device *spi)
+static void dspi_cleanup(struct spi_device *spi)
 {
-       if (!spi->max_speed_hz)
-               return -EINVAL;
+       struct chip_data *chip = spi_get_ctldata(spi);
+
+       dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
+                       spi->master->bus_num, spi->chip_select);
 
-       return dspi_setup_transfer(spi, NULL);
+       kfree(chip);
 }
 
 static irqreturn_t dspi_interrupt(int irq, void *dev_id)
 {
        struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
 
-       regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
+       struct spi_message *msg = dspi->cur_msg;
 
+       regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
        dspi_transfer_read(dspi);
 
        if (!dspi->len) {
                if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
                        regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
-                               SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
+                       SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
 
                dspi->waitflags = 1;
                wake_up_interruptible(&dspi->waitq);
-       } else {
-               dspi_transfer_write(dspi);
-
-               return IRQ_HANDLED;
-       }
+       } else {
+               msg->actual_length += dspi_transfer_write(dspi);
+       }
 
        return IRQ_HANDLED;
 }
@@ -460,13 +468,14 @@ static int dspi_probe(struct platform_device *pdev)
 
        dspi = spi_master_get_devdata(master);
        dspi->pdev = pdev;
-       dspi->bitbang.master = master;
-       dspi->bitbang.chipselect = dspi_chipselect;
-       dspi->bitbang.setup_transfer = dspi_setup_transfer;
-       dspi->bitbang.txrx_bufs = dspi_txrx_transfer;
-       dspi->bitbang.master->setup = dspi_setup;
-       dspi->bitbang.master->dev.of_node = pdev->dev.of_node;
+       dspi->master = master;
+
+       master->transfer = NULL;
+       master->setup = dspi_setup;
+       master->transfer_one_message = dspi_transfer_one_message;
+       master->dev.of_node = pdev->dev.of_node;
 
+       master->cleanup = dspi_cleanup;
        master->mode_bits = SPI_CPOL | SPI_CPHA;
        master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
                                        SPI_BPW_MASK(16);
@@ -525,7 +534,7 @@ static int dspi_probe(struct platform_device *pdev)
        init_waitqueue_head(&dspi->waitq);
        platform_set_drvdata(pdev, master);
 
-       ret = spi_bitbang_start(&dspi->bitbang);
+       ret = spi_register_master(master);
        if (ret != 0) {
                dev_err(&pdev->dev, "Problem registering DSPI master\n");
                goto out_clk_put;
@@ -547,9 +556,9 @@ static int dspi_remove(struct platform_device *pdev)
        struct fsl_dspi *dspi = spi_master_get_devdata(master);
 
        /* Disconnect from the SPI framework */
-       spi_bitbang_stop(&dspi->bitbang);
        clk_disable_unprepare(dspi->clk);
-       spi_master_put(dspi->bitbang.master);
+       spi_unregister_master(dspi->master);
+       spi_master_put(dspi->master);
 
        return 0;
 }
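
The new dspi_transfer_one_message() recognises the final transfer by checking whether its transfer_list.next points back at the message's list head, then forces cs_change so the last word clears SPI_PUSHR_CONT and CS deasserts. A tiny standalone illustration of that circular-list test (stand-in types, not the kernel list_head API):

/* "Last entry" test on a circular list: an entry is last when its ->next
 * points back at the list head. Illustrative only.
 */
#include <stdio.h>

struct node { struct node *next; const char *name; };

int main(void)
{
	struct node head = { .name = "msg" };
	struct node t0 = { .name = "xfer0" }, t1 = { .name = "xfer1" };

	head.next = &t0;
	t0.next = &t1;
	t1.next = &head;	/* ring closes back at the head */

	for (struct node *t = head.next; t != &head; t = t->next) {
		int last = (t->next == &head);	/* the driver's test */

		printf("%s%s\n", t->name,
		       last ? " (last: force cs_change)" : "");
	}
	return 0;
}
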
index 446b737e153261f473008cb1ac774cc68e94dc1f..cb35d2f0d0e63cf0d5dcf2d7aaddb22c54590c4a 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/of_platform.h>
 #include <linux/spi/spi.h>
 #ifdef CONFIG_FSL_SOC
@@ -35,7 +36,8 @@ void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
        type *rx = mpc8xxx_spi->rx;                                       \
        *rx++ = (type)(data >> mpc8xxx_spi->rx_shift);                    \
        mpc8xxx_spi->rx = rx;                                             \
-}
+}                                                                        \
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_rx_buf_##type);
 
 #define MPC8XXX_SPI_TX_BUF(type)                               \
 u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
@@ -47,7 +49,8 @@ u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi)        \
        data = *tx++ << mpc8xxx_spi->tx_shift;                  \
        mpc8xxx_spi->tx = tx;                                   \
        return data;                                            \
-}
+}                                                              \
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_tx_buf_##type);
 
 MPC8XXX_SPI_RX_BUF(u8)
 MPC8XXX_SPI_RX_BUF(u16)
@@ -60,6 +63,7 @@ struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
 {
        return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
 }
+EXPORT_SYMBOL_GPL(to_of_pinfo);
 
 const char *mpc8xxx_spi_strmode(unsigned int flags)
 {
@@ -75,6 +79,7 @@ const char *mpc8xxx_spi_strmode(unsigned int flags)
        }
        return "CPU";
 }
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_strmode);
 
 void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
                        unsigned int irq)
@@ -102,13 +107,12 @@ void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
        mpc8xxx_spi->rx_shift = 0;
        mpc8xxx_spi->tx_shift = 0;
 
-       init_completion(&mpc8xxx_spi->done);
-
        master->bus_num = pdata->bus_num;
        master->num_chipselect = pdata->max_chipselect;
 
        init_completion(&mpc8xxx_spi->done);
 }
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_probe);
 
 int mpc8xxx_spi_remove(struct device *dev)
 {
@@ -127,6 +131,7 @@ int mpc8xxx_spi_remove(struct device *dev)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_remove);
 
 int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
 {
@@ -173,3 +178,6 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(of_mpc8xxx_spi_probe);
+
+MODULE_LICENSE("GPL");
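
The MPC8XXX_SPI_RX_BUF/TX_BUF macros now end with an EXPORT_SYMBOL_GPL() line, so each generated per-type helper is exported to the other fsl-spi modules. A userspace illustration of this generator-macro pattern, with EXPORT() as an empty stand-in and the rx_shift handling dropped:

/* Generator macro stamping out one helper per type plus its export marker;
 * EXPORT() is an empty stand-in for EXPORT_SYMBOL_GPL(). Illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define EXPORT(sym)

#define RX_BUF(type)						\
static void rx_buf_##type(uint32_t data, type **rxp)		\
{								\
	*(*rxp)++ = (type)data;					\
}								\
EXPORT(rx_buf_##type)

RX_BUF(uint8_t)
RX_BUF(uint16_t)

int main(void)
{
	uint8_t b8[2], *p8 = b8;
	uint16_t b16[1], *p16 = b16;

	rx_buf_uint8_t(0xab, &p8);
	rx_buf_uint8_t(0xcd, &p8);
	rx_buf_uint16_t(0x1234, &p16);

	printf("%02x %02x %04x\n", b8[0], b8[1], b16[0]);
	return 0;
}
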
index b4ed04e8862fd82017d42374d27c6aa4261d763d..1326a392adcad162dd303617a03bf675d40a7a68 100644 (file)
@@ -28,7 +28,7 @@ struct mpc8xxx_spi {
        /* rx & tx bufs from the spi_transfer */
        const void *tx;
        void *rx;
-#ifdef CONFIG_SPI_FSL_ESPI
+#if IS_ENABLED(CONFIG_SPI_FSL_ESPI)
        int len;
 #endif
 
@@ -68,7 +68,7 @@ struct mpc8xxx_spi {
 
        unsigned int flags;
 
-#ifdef CONFIG_SPI_FSL_SPI
+#if IS_ENABLED(CONFIG_SPI_FSL_SPI)
        int type;
        int native_chipselects;
        u8 max_bits_per_word;
index aee4e7589568c7cb2e3f69c02e9f44db4ba3ffb5..1c34c9314c8a1fab3c5bae9948775467f66017a9 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -92,7 +88,7 @@ struct spi_gpio {
 
 /*----------------------------------------------------------------------*/
 
-static inline struct spi_gpio * __pure
+static inline struct spi_gpio *__pure
 spi_to_spi_gpio(const struct spi_device *spi)
 {
        const struct spi_bitbang        *bang;
@@ -103,7 +99,7 @@ spi_to_spi_gpio(const struct spi_device *spi)
        return spi_gpio;
 }
 
-static inline struct spi_gpio_platform_data * __pure
+static inline struct spi_gpio_platform_data *__pure
 spi_to_pdata(const struct spi_device *spi)
 {
        return &spi_to_spi_gpio(spi)->pdata;
index aad6683db81b9a0154d12d3fc71fd5d45fb8bfc3..c01567d53581c0dcdfc6fcb61e7f6e821d0e0b39 100644 (file)
@@ -160,16 +160,16 @@ static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
        unsigned int count = 0;
        u32 status;
 
-       while (count < max) {
+       while (count < max / 4) {
                spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
                status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
                if (status & SPFI_INTERRUPT_SDFUL)
                        break;
-               spfi_writel(spfi, buf[count / 4], SPFI_TX_32BIT_VALID_DATA);
-               count += 4;
+               spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
+               count++;
        }
 
-       return count;
+       return count * 4;
 }
 
 static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
@@ -196,17 +196,17 @@ static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
        unsigned int count = 0;
        u32 status;
 
-       while (count < max) {
+       while (count < max / 4) {
                spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
                            SPFI_INTERRUPT_CLEAR);
                status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
                if (!(status & SPFI_INTERRUPT_GDEX32BIT))
                        break;
-               buf[count / 4] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
-               count += 4;
+               buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
+               count++;
        }
 
-       return count;
+       return count * 4;
 }
 
 static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
@@ -251,17 +251,15 @@ static int img_spfi_start_pio(struct spi_master *master,
               time_before(jiffies, timeout)) {
                unsigned int tx_count, rx_count;
 
-               switch (xfer->bits_per_word) {
-               case 32:
+               if (tx_bytes >= 4)
                        tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
-                       rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
-                       break;
-               case 8:
-               default:
+               else
                        tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
+
+               if (rx_bytes >= 4)
+                       rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
+               else
                        rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
-                       break;
-               }
 
                tx_buf += tx_count;
                rx_buf += rx_count;
@@ -331,14 +329,11 @@ static int img_spfi_start_dma(struct spi_master *master,
 
        if (xfer->rx_buf) {
                rxconf.direction = DMA_DEV_TO_MEM;
-               switch (xfer->bits_per_word) {
-               case 32:
+               if (xfer->len % 4 == 0) {
                        rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
                        rxconf.src_addr_width = 4;
                        rxconf.src_maxburst = 4;
-                       break;
-               case 8:
-               default:
+               } else {
                        rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
                        rxconf.src_addr_width = 1;
                        rxconf.src_maxburst = 4;
@@ -358,18 +353,14 @@ static int img_spfi_start_dma(struct spi_master *master,
 
        if (xfer->tx_buf) {
                txconf.direction = DMA_MEM_TO_DEV;
-               switch (xfer->bits_per_word) {
-               case 32:
+               if (xfer->len % 4 == 0) {
                        txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
                        txconf.dst_addr_width = 4;
                        txconf.dst_maxburst = 4;
-                       break;
-               case 8:
-               default:
+               } else {
                        txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
                        txconf.dst_addr_width = 1;
                        txconf.dst_maxburst = 4;
-                       break;
                }
                dmaengine_slave_config(spfi->tx_ch, &txconf);
 
@@ -508,9 +499,7 @@ static void img_spfi_set_cs(struct spi_device *spi, bool enable)
 static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
                             struct spi_transfer *xfer)
 {
-       if (xfer->bits_per_word == 8 && xfer->len > SPFI_8BIT_FIFO_SIZE)
-               return true;
-       if (xfer->bits_per_word == 32 && xfer->len > SPFI_32BIT_FIFO_SIZE)
+       if (xfer->len > SPFI_32BIT_FIFO_SIZE)
                return true;
        return false;
 }
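
The spfi_pio_write32()/read32() change above fixes the loop accounting: the index now counts 32-bit words while the function still returns bytes, so callers keep advancing their buffers correctly. A runnable sketch of that counting scheme (the FIFO-full condition is faked):

/* Sketch of the corrected counting: iterate in 32-bit words, return bytes. */
#include <stdio.h>
#include <stdint.h>

static unsigned int write32_words(const uint32_t *buf, unsigned int max_bytes,
				  unsigned int fifo_room_words)
{
	unsigned int count = 0;

	while (count < max_bytes / 4) {
		if (count == fifo_room_words)	/* stand-in for SDFUL check */
			break;
		(void)buf[count];		/* would be spfi_writel() */
		count++;
	}

	return count * 4;	/* callers advance their pointers in bytes */
}

int main(void)
{
	uint32_t data[8] = { 0 };

	printf("wrote %u bytes\n", write32_words(data, sizeof(data), 6));
	return 0;
}
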
index 961b97d43b430914ed317b9623d06f940e39d797..6fea4af51c413f27640c626ad61a2bcdca0b6bac 100644 (file)
@@ -89,7 +89,6 @@ struct spi_imx_data {
 
        struct completion xfer_done;
        void __iomem *base;
-       int irq;
        struct clk *clk_per;
        struct clk *clk_ipg;
        unsigned long spi_clk;
@@ -823,6 +822,10 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
        struct dma_slave_config slave_config = {};
        int ret;
 
+       /* use PIO mode on the i.MX6DL (erratum TKT238285) */
+       if (of_machine_is_compatible("fsl,imx6dl"))
+               return 0;
+
        /* Prepare for TX DMA: */
        master->dma_tx = dma_request_slave_channel(dev, "tx");
        if (!master->dma_tx) {
@@ -892,6 +895,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 {
        struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
        int ret;
+       unsigned long timeout;
        u32 dma;
        int left;
        struct spi_master *master = spi_imx->bitbang.master;
@@ -939,17 +943,17 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
        dma_async_issue_pending(master->dma_tx);
        dma_async_issue_pending(master->dma_rx);
        /* Wait for SDMA to finish the data transfer. */
-       ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
+       timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
                                                IMX_DMA_TIMEOUT);
-       if (!ret) {
+       if (!timeout) {
                pr_warn("%s %s: I/O Error in DMA TX\n",
                        dev_driver_string(&master->dev),
                        dev_name(&master->dev));
                dmaengine_terminate_all(master->dma_tx);
        } else {
-               ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
-                               IMX_DMA_TIMEOUT);
-               if (!ret) {
+               timeout = wait_for_completion_timeout(
+                               &spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
+               if (!timeout) {
                        pr_warn("%s %s: I/O Error in DMA RX\n",
                                dev_driver_string(&master->dev),
                                dev_name(&master->dev));
@@ -964,9 +968,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
        spi_imx->dma_finished = 1;
        spi_imx->devtype_data->trigger(spi_imx);
 
-       if (!ret)
+       if (!timeout)
                ret = -ETIMEDOUT;
-       else if (ret > 0)
+       else
                ret = transfer->len;
 
        return ret;
@@ -1076,7 +1080,7 @@ static int spi_imx_probe(struct platform_device *pdev)
        struct spi_master *master;
        struct spi_imx_data *spi_imx;
        struct resource *res;
-       int i, ret, num_cs;
+       int i, ret, num_cs, irq;
 
        if (!np && !mxc_platform_info) {
                dev_err(&pdev->dev, "can't get the platform data\n");
@@ -1143,16 +1147,16 @@ static int spi_imx_probe(struct platform_device *pdev)
                goto out_master_put;
        }
 
-       spi_imx->irq = platform_get_irq(pdev, 0);
-       if (spi_imx->irq < 0) {
-               ret = spi_imx->irq;
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               ret = irq;
                goto out_master_put;
        }
 
-       ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0,
+       ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
                               dev_name(&pdev->dev), spi_imx);
        if (ret) {
-               dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
+               dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
                goto out_master_put;
        }
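
The spi_imx_dma_transfer() change keeps the unsigned long remaining-jiffies result of wait_for_completion_timeout() apart from the int error code, instead of overloading ret for both. A minimal sketch of the corrected pattern, with fake_wait() standing in for the kernel API:

/* wait_for_completion_timeout() returns remaining jiffies (0 on timeout),
 * so it belongs in an unsigned long separate from the int error code.
 * fake_wait() is a stand-in; illustrative only.
 */
#include <stdio.h>

static unsigned long fake_wait(int completed, unsigned long timeout)
{
	return completed ? timeout / 2 : 0;	/* jiffies left; 0 = timed out */
}

int main(void)
{
	unsigned long timeout;
	int ret;

	timeout = fake_wait(0, 1000);	/* simulate a TX that times out */
	if (!timeout)
		ret = -1;		/* -ETIMEDOUT in the driver */
	else
		ret = 42;		/* transfer->len in the driver */

	printf("ret = %d\n", ret);
	return 0;
}
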
 
index 41c5765be7469de4283ed1c2d9d47802abe17e48..ba72347cb99d9242742792f2c3692ff150b9a921 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/init.h>
index 1bbac0378bf7bcdc00d336428cfec4440f0701ce..5468fc70dbf8d06432ce1cc1e8b2ef20c96a5a72 100644 (file)
@@ -85,7 +85,7 @@ struct meson_spifc {
        struct device *dev;
 };
 
-static struct regmap_config spifc_regmap_config = {
+static const struct regmap_config spifc_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
index 4045a1e580e1c20f7a85b5cc27c98d4fc5dda51a..5b0e9a3e83f6944d90f23de597a3a9ff504e3a7b 100644 (file)
@@ -282,9 +282,8 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi,
        dmaengine_submit(desc);
        dma_async_issue_pending(ssp->dmach);
 
-       ret = wait_for_completion_timeout(&spi->c,
-                               msecs_to_jiffies(SSP_TIMEOUT));
-       if (!ret) {
+       if (!wait_for_completion_timeout(&spi->c,
+                                        msecs_to_jiffies(SSP_TIMEOUT))) {
                dev_err(ssp->dev, "DMA transfer timeout\n");
                ret = -ETIMEDOUT;
                dmaengine_terminate_all(ssp->dmach);
index 79399ae9c84c485718d6390c234278b8b5a4f235..d890d309dff9b553364654ae343f322c65d1c51a 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index daf1ada5cd11a89e73be62ff059db220a76eac39..3c0844457c075d0c5f3ed98fce96dd7951888f05 100644 (file)
  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #include <linux/kernel.h>
 #include <linux/init.h>
index 3bc3cbabbbc0f350d8db5a370e6fb45d18a5468f..4df8942058deed3928e61a4b2bc4061c56eec7d7 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
  */
 
 #include <linux/kernel.h>
index 3dec9e0b99b83c242a68456aa88c2c10f485bb71..861664776672cfab25200b22dba1227d28af08d6 100644 (file)
 /* Runtime PM autosuspend timeout: PM is fairly light on this driver */
 #define SPI_AUTOSUSPEND_TIMEOUT                200
 
-#define ORION_NUM_CHIPSELECTS          1 /* only one slave is supported*/
+/*
+ * Some SoCs using this driver support up to 8 chip selects.
+ * It is up to the implementer to use only the chip selects
+ * that are actually available.
+ */
+#define ORION_NUM_CHIPSELECTS          8
+
 #define ORION_SPI_WAIT_RDY_MAX_LOOP    2000 /* in usec */
 
 #define ORION_SPI_IF_CTRL_REG          0x00
 #define ARMADA_SPI_CLK_PRESCALE_MASK   0xDF
 #define ORION_SPI_MODE_MASK            (ORION_SPI_MODE_CPOL | \
                                         ORION_SPI_MODE_CPHA)
+#define ORION_SPI_CS_MASK      0x1C
+#define ORION_SPI_CS_SHIFT     2
+#define ORION_SPI_CS(cs)       ((cs << ORION_SPI_CS_SHIFT) & \
+                                       ORION_SPI_CS_MASK)
 
 enum orion_spi_type {
        ORION_SPI,
@@ -215,9 +224,18 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
        return 0;
 }
 
-static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable)
+static void orion_spi_set_cs(struct spi_device *spi, bool enable)
 {
-       if (enable)
+       struct orion_spi *orion_spi;
+
+       orion_spi = spi_master_get_devdata(spi->master);
+
+       orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
+       orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
+                               ORION_SPI_CS(spi->chip_select));
+
+       /* Chip select logic is inverted from spi_set_cs */
+       if (!enable)
                orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
        else
                orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
@@ -332,64 +350,31 @@ out:
        return xfer->len - count;
 }
 
-static int orion_spi_transfer_one_message(struct spi_master *master,
-                                          struct spi_message *m)
+static int orion_spi_transfer_one(struct spi_master *master,
+                                       struct spi_device *spi,
+                                       struct spi_transfer *t)
 {
-       struct orion_spi *orion_spi = spi_master_get_devdata(master);
-       struct spi_device *spi = m->spi;
-       struct spi_transfer *t = NULL;
-       int par_override = 0;
        int status = 0;
-       int cs_active = 0;
-
-       /* Load defaults */
-       status = orion_spi_setup_transfer(spi, NULL);
 
+       status = orion_spi_setup_transfer(spi, t);
        if (status < 0)
-               goto msg_done;
-
-       list_for_each_entry(t, &m->transfers, transfer_list) {
-               if (par_override || t->speed_hz || t->bits_per_word) {
-                       par_override = 1;
-                       status = orion_spi_setup_transfer(spi, t);
-                       if (status < 0)
-                               break;
-                       if (!t->speed_hz && !t->bits_per_word)
-                               par_override = 0;
-               }
-
-               if (!cs_active) {
-                       orion_spi_set_cs(orion_spi, 1);
-                       cs_active = 1;
-               }
+               return status;
 
-               if (t->len)
-                       m->actual_length += orion_spi_write_read(spi, t);
+       if (t->len)
+               orion_spi_write_read(spi, t);
 
-               if (t->delay_usecs)
-                       udelay(t->delay_usecs);
-
-               if (t->cs_change) {
-                       orion_spi_set_cs(orion_spi, 0);
-                       cs_active = 0;
-               }
-       }
-
-msg_done:
-       if (cs_active)
-               orion_spi_set_cs(orion_spi, 0);
-
-       m->status = status;
-       spi_finalize_current_message(master);
+       return status;
+}
 
-       return 0;
+static int orion_spi_setup(struct spi_device *spi)
+{
+       return orion_spi_setup_transfer(spi, NULL);
 }
 
 static int orion_spi_reset(struct orion_spi *orion_spi)
 {
        /* Verify that the CS is deasserted */
-       orion_spi_set_cs(orion_spi, 0);
-
+       orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
        return 0;
 }
 
@@ -442,9 +427,10 @@ static int orion_spi_probe(struct platform_device *pdev)
 
        /* we support only mode 0, and no options */
        master->mode_bits = SPI_CPHA | SPI_CPOL;
-
-       master->transfer_one_message = orion_spi_transfer_one_message;
+       master->set_cs = orion_spi_set_cs;
+       master->transfer_one = orion_spi_transfer_one;
        master->num_chipselect = ORION_NUM_CHIPSELECTS;
+       master->setup = orion_spi_setup;
        master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
        master->auto_runtime_pm = true;
 
index 62a9297e96acdb74576378b1d2e9e2184cf2127e..66a173939be81e5f4b944b287991c7c99957ae49 100644 (file)
@@ -111,23 +111,24 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
         * by using ->dma_running.
         */
        if (atomic_dec_and_test(&drv_data->dma_running)) {
-               void __iomem *reg = drv_data->ioaddr;
-
                /*
                 * If the other CPU is still handling the ROR interrupt we
                 * might not know about the error yet. So we re-check the
                 * ROR bit here before we clear the status register.
                 */
                if (!error) {
-                       u32 status = read_SSSR(reg) & drv_data->mask_sr;
+                       u32 status = pxa2xx_spi_read(drv_data, SSSR)
+                                    & drv_data->mask_sr;
                        error = status & SSSR_ROR;
                }
 
                /* Clear status & disable interrupts */
-               write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+               pxa2xx_spi_write(drv_data, SSCR1,
+                                pxa2xx_spi_read(drv_data, SSCR1)
+                                & ~drv_data->dma_cr1);
                write_SSSR_CS(drv_data, drv_data->clear_sr);
                if (!pxa25x_ssp_comp(drv_data))
-                       write_SSTO(0, reg);
+                       pxa2xx_spi_write(drv_data, SSTO, 0);
 
                if (!error) {
                        pxa2xx_spi_unmap_dma_buffers(drv_data);
@@ -139,7 +140,9 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
                        msg->state = pxa2xx_spi_next_transfer(drv_data);
                } else {
                        /* In case we got an error we disable the SSP now */
-                       write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+                       pxa2xx_spi_write(drv_data, SSCR0,
+                                        pxa2xx_spi_read(drv_data, SSCR0)
+                                        & ~SSCR0_SSE);
 
                        msg->state = ERROR_STATE;
                }
@@ -247,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
 {
        u32 status;
 
-       status = read_SSSR(drv_data->ioaddr) & drv_data->mask_sr;
+       status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
        if (status & SSSR_ROR) {
                dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
 
index e8a26f25d5c0a1464ab259c545e2b1d2031a9741..2e0796a0003f470a56508180be7c75a7a505983f 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/delay.h>
@@ -25,6 +21,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/pxa2xx_spi.h>
 
+#include <mach/dma.h>
 #include "spi-pxa2xx.h"
 
 #define DMA_INT_MASK           (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
@@ -118,11 +115,11 @@ static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
        drv_data->dma_mapped = 0;
 }
 
-static int wait_ssp_rx_stall(void const __iomem *ioaddr)
+static int wait_ssp_rx_stall(struct driver_data *drv_data)
 {
        unsigned long limit = loops_per_jiffy << 1;
 
-       while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
+       while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit)
                cpu_relax();
 
        return limit;
@@ -141,17 +138,18 @@ static int wait_dma_channel_stop(int channel)
 static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
                                      const char *msg)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        /* Stop and reset */
        DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
        DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
        write_SSSR_CS(drv_data, drv_data->clear_sr);
-       write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+       pxa2xx_spi_write(drv_data, SSCR1,
+                        pxa2xx_spi_read(drv_data, SSCR1)
+                        & ~drv_data->dma_cr1);
        if (!pxa25x_ssp_comp(drv_data))
-               write_SSTO(0, reg);
+               pxa2xx_spi_write(drv_data, SSTO, 0);
        pxa2xx_spi_flush(drv_data);
-       write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+       pxa2xx_spi_write(drv_data, SSCR0,
+                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
 
        pxa2xx_spi_unmap_dma_buffers(drv_data);
 
@@ -163,11 +161,12 @@ static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
 
 static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
        struct spi_message *msg = drv_data->cur_msg;
 
        /* Clear and disable interrupts on SSP and DMA channels*/
-       write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
+       pxa2xx_spi_write(drv_data, SSCR1,
+                        pxa2xx_spi_read(drv_data, SSCR1)
+                        & ~drv_data->dma_cr1);
        write_SSSR_CS(drv_data, drv_data->clear_sr);
        DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
        DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
@@ -228,7 +227,7 @@ void pxa2xx_spi_dma_handler(int channel, void *data)
                && (drv_data->ssp_type == PXA25x_SSP)) {
 
                /* Wait for rx to stall */
-               if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
+               if (wait_ssp_rx_stall(drv_data) == 0)
                        dev_err(&drv_data->pdev->dev,
                                "dma_handler: ssp rx stall failed\n");
 
@@ -240,9 +239,8 @@ void pxa2xx_spi_dma_handler(int channel, void *data)
 irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
 {
        u32 irq_status;
-       void __iomem *reg = drv_data->ioaddr;
 
-       irq_status = read_SSSR(reg) & drv_data->mask_sr;
+       irq_status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
        if (irq_status & SSSR_ROR) {
                pxa2xx_spi_dma_error_stop(drv_data,
                                          "dma_transfer: fifo overrun");
@@ -252,7 +250,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
        /* Check for false positive timeout */
        if ((irq_status & SSSR_TINT)
                && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
-               write_SSSR(SSSR_TINT, reg);
+               pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
                return IRQ_HANDLED;
        }
 
@@ -261,7 +259,7 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
                /* Clear and disable timeout interrupt, do the rest in
                 * dma_transfer_complete */
                if (!pxa25x_ssp_comp(drv_data))
-                       write_SSTO(0, reg);
+                       pxa2xx_spi_write(drv_data, SSTO, 0);
 
                /* finish this transfer, start the next */
                pxa2xx_spi_dma_transfer_complete(drv_data);
index 23822e7df6c1c6e1e2caa18ea19cfcb069c3796d..6f72ad01e0410257a42bc8739f8962abfbaf3b5e 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/init.h>
@@ -45,8 +41,6 @@ MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:pxa2xx-spi");
 
-#define MAX_BUSES 3
-
 #define TIMOUT_DFLT            1000
 
 /*
@@ -162,7 +156,6 @@ pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
 
 static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
        u32 mask;
 
        switch (drv_data->ssp_type) {
@@ -174,7 +167,7 @@ static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
                break;
        }
 
-       return (read_SSSR(reg) & mask) == mask;
+       return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
 }
 
 static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
@@ -253,9 +246,6 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
        unsigned offset = 0x400;
        u32 value, orig;
 
-       if (!is_lpss_ssp(drv_data))
-               return;
-
        /*
         * Perform auto-detection of the LPSS SSP private registers. They
         * can be either at 1k or 2k offset from the base address.
@@ -304,9 +294,6 @@ static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
 {
        u32 value;
 
-       if (!is_lpss_ssp(drv_data))
-               return;
-
        value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
        if (enable)
                value &= ~SPI_CS_CONTROL_CS_HIGH;
@@ -320,7 +307,7 @@ static void cs_assert(struct driver_data *drv_data)
        struct chip_data *chip = drv_data->cur_chip;
 
        if (drv_data->ssp_type == CE4100_SSP) {
-               write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr);
+               pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm);
                return;
        }
 
@@ -334,7 +321,8 @@ static void cs_assert(struct driver_data *drv_data)
                return;
        }
 
-       lpss_ssp_cs_control(drv_data, true);
+       if (is_lpss_ssp(drv_data))
+               lpss_ssp_cs_control(drv_data, true);
 }
 
 static void cs_deassert(struct driver_data *drv_data)
@@ -354,20 +342,18 @@ static void cs_deassert(struct driver_data *drv_data)
                return;
        }
 
-       lpss_ssp_cs_control(drv_data, false);
+       if (is_lpss_ssp(drv_data))
+               lpss_ssp_cs_control(drv_data, false);
 }
 
 int pxa2xx_spi_flush(struct driver_data *drv_data)
 {
        unsigned long limit = loops_per_jiffy << 1;
 
-       void __iomem *reg = drv_data->ioaddr;
-
        do {
-               while (read_SSSR(reg) & SSSR_RNE) {
-                       read_SSDR(reg);
-               }
-       } while ((read_SSSR(reg) & SSSR_BSY) && --limit);
+               while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+                       pxa2xx_spi_read(drv_data, SSDR);
+       } while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
        write_SSSR_CS(drv_data, SSSR_ROR);
 
        return limit;
@@ -375,14 +361,13 @@ int pxa2xx_spi_flush(struct driver_data *drv_data)
 
 static int null_writer(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
        u8 n_bytes = drv_data->n_bytes;
 
        if (pxa2xx_spi_txfifo_full(drv_data)
                || (drv_data->tx == drv_data->tx_end))
                return 0;
 
-       write_SSDR(0, reg);
+       pxa2xx_spi_write(drv_data, SSDR, 0);
        drv_data->tx += n_bytes;
 
        return 1;
@@ -390,12 +375,11 @@ static int null_writer(struct driver_data *drv_data)
 
 static int null_reader(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
        u8 n_bytes = drv_data->n_bytes;
 
-       while ((read_SSSR(reg) & SSSR_RNE)
-               && (drv_data->rx < drv_data->rx_end)) {
-               read_SSDR(reg);
+       while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+              && (drv_data->rx < drv_data->rx_end)) {
+               pxa2xx_spi_read(drv_data, SSDR);
                drv_data->rx += n_bytes;
        }
 
@@ -404,13 +388,11 @@ static int null_reader(struct driver_data *drv_data)
 
 static int u8_writer(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        if (pxa2xx_spi_txfifo_full(drv_data)
                || (drv_data->tx == drv_data->tx_end))
                return 0;
 
-       write_SSDR(*(u8 *)(drv_data->tx), reg);
+       pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
        ++drv_data->tx;
 
        return 1;
@@ -418,11 +400,9 @@ static int u8_writer(struct driver_data *drv_data)
 
 static int u8_reader(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
-       while ((read_SSSR(reg) & SSSR_RNE)
-               && (drv_data->rx < drv_data->rx_end)) {
-               *(u8 *)(drv_data->rx) = read_SSDR(reg);
+       while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+              && (drv_data->rx < drv_data->rx_end)) {
+               *(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
                ++drv_data->rx;
        }
 
@@ -431,13 +411,11 @@ static int u8_reader(struct driver_data *drv_data)
 
 static int u16_writer(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        if (pxa2xx_spi_txfifo_full(drv_data)
                || (drv_data->tx == drv_data->tx_end))
                return 0;
 
-       write_SSDR(*(u16 *)(drv_data->tx), reg);
+       pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
        drv_data->tx += 2;
 
        return 1;
@@ -445,11 +423,9 @@ static int u16_writer(struct driver_data *drv_data)
 
 static int u16_reader(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
-       while ((read_SSSR(reg) & SSSR_RNE)
-               && (drv_data->rx < drv_data->rx_end)) {
-               *(u16 *)(drv_data->rx) = read_SSDR(reg);
+       while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+              && (drv_data->rx < drv_data->rx_end)) {
+               *(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
                drv_data->rx += 2;
        }
 
@@ -458,13 +434,11 @@ static int u16_reader(struct driver_data *drv_data)
 
 static int u32_writer(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        if (pxa2xx_spi_txfifo_full(drv_data)
                || (drv_data->tx == drv_data->tx_end))
                return 0;
 
-       write_SSDR(*(u32 *)(drv_data->tx), reg);
+       pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
        drv_data->tx += 4;
 
        return 1;
@@ -472,11 +446,9 @@ static int u32_writer(struct driver_data *drv_data)
 
 static int u32_reader(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
-       while ((read_SSSR(reg) & SSSR_RNE)
-               && (drv_data->rx < drv_data->rx_end)) {
-               *(u32 *)(drv_data->rx) = read_SSDR(reg);
+       while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+              && (drv_data->rx < drv_data->rx_end)) {
+               *(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
                drv_data->rx += 4;
        }
 
@@ -552,27 +524,25 @@ static void giveback(struct driver_data *drv_data)
 
 static void reset_sccr1(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
        struct chip_data *chip = drv_data->cur_chip;
        u32 sccr1_reg;
 
-       sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1;
+       sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
        sccr1_reg &= ~SSCR1_RFT;
        sccr1_reg |= chip->threshold;
-       write_SSCR1(sccr1_reg, reg);
+       pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
 }
 
 static void int_error_stop(struct driver_data *drv_data, const char* msg)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        /* Stop and reset SSP */
        write_SSSR_CS(drv_data, drv_data->clear_sr);
        reset_sccr1(drv_data);
        if (!pxa25x_ssp_comp(drv_data))
-               write_SSTO(0, reg);
+               pxa2xx_spi_write(drv_data, SSTO, 0);
        pxa2xx_spi_flush(drv_data);
-       write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
+       pxa2xx_spi_write(drv_data, SSCR0,
+                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
 
        dev_err(&drv_data->pdev->dev, "%s\n", msg);
 
@@ -582,13 +552,11 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
 
 static void int_transfer_complete(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        /* Stop SSP */
        write_SSSR_CS(drv_data, drv_data->clear_sr);
        reset_sccr1(drv_data);
        if (!pxa25x_ssp_comp(drv_data))
-               write_SSTO(0, reg);
+               pxa2xx_spi_write(drv_data, SSTO, 0);
 
        /* Update total bytes transferred; count only the bytes actually read */
        drv_data->cur_msg->actual_length += drv_data->len -
@@ -607,12 +575,10 @@ static void int_transfer_complete(struct driver_data *drv_data)
 
 static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 {
-       void __iomem *reg = drv_data->ioaddr;
+       u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
+                      drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
 
-       u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
-                       drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
-
-       u32 irq_status = read_SSSR(reg) & irq_mask;
+       u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;
 
        if (irq_status & SSSR_ROR) {
                int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
@@ -620,7 +586,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
        }
 
        if (irq_status & SSSR_TINT) {
-               write_SSSR(SSSR_TINT, reg);
+               pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
                if (drv_data->read(drv_data)) {
                        int_transfer_complete(drv_data);
                        return IRQ_HANDLED;
@@ -644,7 +610,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
                u32 bytes_left;
                u32 sccr1_reg;
 
-               sccr1_reg = read_SSCR1(reg);
+               sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
                sccr1_reg &= ~SSCR1_TIE;
 
                /*
@@ -670,7 +636,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 
                        pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
                }
-               write_SSCR1(sccr1_reg, reg);
+               pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
        }
 
        /* We did something */
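
A note on the mask logic preserved above: irq_mask is derived from SSCR1, so when the transmit interrupt enable (SSCR1_TIE) is clear, SSSR_TFS is dropped from mask_sr and a transmit-FIFO service request alone is not treated as a handled event. The conversion only reroutes the register accesses through the new helpers; the masking behaviour is unchanged.
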
@@ -680,7 +646,6 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 static irqreturn_t ssp_int(int irq, void *dev_id)
 {
        struct driver_data *drv_data = dev_id;
-       void __iomem *reg = drv_data->ioaddr;
        u32 sccr1_reg;
        u32 mask = drv_data->mask_sr;
        u32 status;
@@ -700,11 +665,11 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
         * are all set to one. That means that the device is already
         * powered off.
         */
-       status = read_SSSR(reg);
+       status = pxa2xx_spi_read(drv_data, SSSR);
        if (status == ~0)
                return IRQ_NONE;
 
-       sccr1_reg = read_SSCR1(reg);
+       sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
 
        /* Ignore possible writes if we don't need to write */
        if (!(sccr1_reg & SSCR1_TIE))
@@ -715,10 +680,14 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
 
        if (!drv_data->cur_msg) {
 
-               write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
-               write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
+               pxa2xx_spi_write(drv_data, SSCR0,
+                                pxa2xx_spi_read(drv_data, SSCR0)
+                                & ~SSCR0_SSE);
+               pxa2xx_spi_write(drv_data, SSCR1,
+                                pxa2xx_spi_read(drv_data, SSCR1)
+                                & ~drv_data->int_cr1);
                if (!pxa25x_ssp_comp(drv_data))
-                       write_SSTO(0, reg);
+                       pxa2xx_spi_write(drv_data, SSTO, 0);
                write_SSSR_CS(drv_data, drv_data->clear_sr);
 
                dev_err(&drv_data->pdev->dev,
@@ -787,7 +756,6 @@ static void pump_transfers(unsigned long data)
        struct spi_transfer *transfer = NULL;
        struct spi_transfer *previous = NULL;
        struct chip_data *chip = NULL;
-       void __iomem *reg = drv_data->ioaddr;
        u32 clk_div = 0;
        u8 bits = 0;
        u32 speed = 0;
@@ -931,7 +899,7 @@ static void pump_transfers(unsigned long data)
 
                /* Clear status and start DMA engine */
                cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
-               write_SSSR(drv_data->clear_sr, reg);
+               pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);
 
                pxa2xx_spi_dma_start(drv_data);
        } else {
@@ -944,39 +912,43 @@ static void pump_transfers(unsigned long data)
        }
 
        if (is_lpss_ssp(drv_data)) {
-               if ((read_SSIRF(reg) & 0xff) != chip->lpss_rx_threshold)
-                       write_SSIRF(chip->lpss_rx_threshold, reg);
-               if ((read_SSITF(reg) & 0xffff) != chip->lpss_tx_threshold)
-                       write_SSITF(chip->lpss_tx_threshold, reg);
+               if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
+                   != chip->lpss_rx_threshold)
+                       pxa2xx_spi_write(drv_data, SSIRF,
+                                        chip->lpss_rx_threshold);
+               if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
+                   != chip->lpss_tx_threshold)
+                       pxa2xx_spi_write(drv_data, SSITF,
+                                        chip->lpss_tx_threshold);
        }
 
        if (is_quark_x1000_ssp(drv_data) &&
-           (read_DDS_RATE(reg) != chip->dds_rate))
-               write_DDS_RATE(chip->dds_rate, reg);
+           (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
+               pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);
 
        /* see if we need to reload the config registers */
-       if ((read_SSCR0(reg) != cr0) ||
-           (read_SSCR1(reg) & change_mask) != (cr1 & change_mask)) {
-
+       if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
+           || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
+           != (cr1 & change_mask)) {
                /* stop the SSP, and update the other bits */
-               write_SSCR0(cr0 & ~SSCR0_SSE, reg);
+               pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
                if (!pxa25x_ssp_comp(drv_data))
-                       write_SSTO(chip->timeout, reg);
+                       pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
                /* first set CR1 without interrupt and service enables */
-               write_SSCR1(cr1 & change_mask, reg);
+               pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
                /* restart the SSP */
-               write_SSCR0(cr0, reg);
+               pxa2xx_spi_write(drv_data, SSCR0, cr0);
 
        } else {
                if (!pxa25x_ssp_comp(drv_data))
-                       write_SSTO(chip->timeout, reg);
+                       pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
        }
 
        cs_assert(drv_data);
 
        /* after chip select, release the data by enabling service
         * requests and interrupts, without changing any mode bits */
-       write_SSCR1(cr1, reg);
+       pxa2xx_spi_write(drv_data, SSCR1, cr1);
 }
 
 static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
@@ -1005,8 +977,8 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
        struct driver_data *drv_data = spi_master_get_devdata(master);
 
        /* Disable the SSP now */
-       write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE,
-                   drv_data->ioaddr);
+       pxa2xx_spi_write(drv_data, SSCR0,
+                        pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
 
        return 0;
 }
@@ -1289,6 +1261,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
        struct driver_data *drv_data;
        struct ssp_device *ssp;
        int status;
+       u32 tmp;
 
        platform_info = dev_get_platdata(dev);
        if (!platform_info) {
@@ -1386,38 +1359,35 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
        drv_data->max_clk_rate = clk_get_rate(ssp->clk);
 
        /* Load default SSP configuration */
-       write_SSCR0(0, drv_data->ioaddr);
+       pxa2xx_spi_write(drv_data, SSCR0, 0);
        switch (drv_data->ssp_type) {
        case QUARK_X1000_SSP:
-               write_SSCR1(QUARK_X1000_SSCR1_RxTresh(
-                                       RX_THRESH_QUARK_X1000_DFLT) |
-                           QUARK_X1000_SSCR1_TxTresh(
-                                       TX_THRESH_QUARK_X1000_DFLT),
-                           drv_data->ioaddr);
+               tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT)
+                     | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
+               pxa2xx_spi_write(drv_data, SSCR1, tmp);
 
                /* use the Motorola SPI protocol with 8-bit frames */
-               write_SSCR0(QUARK_X1000_SSCR0_Motorola
-                           | QUARK_X1000_SSCR0_DataSize(8),
-                           drv_data->ioaddr);
+               pxa2xx_spi_write(drv_data, SSCR0,
+                                QUARK_X1000_SSCR0_Motorola
+                                | QUARK_X1000_SSCR0_DataSize(8));
                break;
        default:
-               write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
-                           SSCR1_TxTresh(TX_THRESH_DFLT),
-                           drv_data->ioaddr);
-               write_SSCR0(SSCR0_SCR(2)
-                           | SSCR0_Motorola
-                           | SSCR0_DataSize(8),
-                           drv_data->ioaddr);
+               tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
+                     SSCR1_TxTresh(TX_THRESH_DFLT);
+               pxa2xx_spi_write(drv_data, SSCR1, tmp);
+               tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
+               pxa2xx_spi_write(drv_data, SSCR0, tmp);
                break;
        }
 
        if (!pxa25x_ssp_comp(drv_data))
-               write_SSTO(0, drv_data->ioaddr);
+               pxa2xx_spi_write(drv_data, SSTO, 0);
 
        if (!is_quark_x1000_ssp(drv_data))
-               write_SSPSP(0, drv_data->ioaddr);
+               pxa2xx_spi_write(drv_data, SSPSP, 0);
 
-       lpss_ssp_setup(drv_data);
+       if (is_lpss_ssp(drv_data))
+               lpss_ssp_setup(drv_data);
 
        tasklet_init(&drv_data->pump_transfers, pump_transfers,
                     (unsigned long)drv_data);
@@ -1460,7 +1430,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
        pm_runtime_get_sync(&pdev->dev);
 
        /* Disable the SSP at the peripheral and SOC level */
-       write_SSCR0(0, drv_data->ioaddr);
+       pxa2xx_spi_write(drv_data, SSCR0, 0);
        clk_disable_unprepare(ssp->clk);
 
        /* Release DMA */
@@ -1497,7 +1467,7 @@ static int pxa2xx_spi_suspend(struct device *dev)
        status = spi_master_suspend(drv_data->master);
        if (status != 0)
                return status;
-       write_SSCR0(0, drv_data->ioaddr);
+       pxa2xx_spi_write(drv_data, SSCR0, 0);
 
        if (!pm_runtime_suspended(dev))
                clk_disable_unprepare(ssp->clk);
@@ -1518,7 +1488,8 @@ static int pxa2xx_spi_resume(struct device *dev)
                clk_prepare_enable(ssp->clk);
 
        /* Restore LPSS private register bits */
-       lpss_ssp_setup(drv_data);
+       if (is_lpss_ssp(drv_data))
+               lpss_ssp_setup(drv_data);
 
        /* Start the queue running */
        status = spi_master_resume(drv_data->master);
index 6bec59c90cd4be52d4772b5f953c6fa00bb7c173..85a58c9068694fa7a8b80629a455f3b3125efe12 100644 (file)
@@ -115,23 +115,17 @@ struct chip_data {
        void (*cs_control)(u32 command);
 };
 
-#define DEFINE_SSP_REG(reg, off) \
-static inline u32 read_##reg(void const __iomem *p) \
-{ return __raw_readl(p + (off)); } \
-\
-static inline void write_##reg(u32 v, void __iomem *p) \
-{ __raw_writel(v, p + (off)); }
-
-DEFINE_SSP_REG(SSCR0, 0x00)
-DEFINE_SSP_REG(SSCR1, 0x04)
-DEFINE_SSP_REG(SSSR, 0x08)
-DEFINE_SSP_REG(SSITR, 0x0c)
-DEFINE_SSP_REG(SSDR, 0x10)
-DEFINE_SSP_REG(DDS_RATE, 0x28)  /* DDS Clock Rate */
-DEFINE_SSP_REG(SSTO, 0x28)
-DEFINE_SSP_REG(SSPSP, 0x2c)
-DEFINE_SSP_REG(SSITF, SSITF)
-DEFINE_SSP_REG(SSIRF, SSIRF)
+static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data,
+                                 unsigned reg)
+{
+       return __raw_readl(drv_data->ioaddr + reg);
+}
+
+static  inline void pxa2xx_spi_write(const struct driver_data *drv_data,
+                                    unsigned reg, u32 val)
+{
+       __raw_writel(val, drv_data->ioaddr + reg);
+}
 
 #define START_STATE ((void *)0)
 #define RUNNING_STATE ((void *)1)
@@ -155,13 +149,11 @@ static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
 
 static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
 {
-       void __iomem *reg = drv_data->ioaddr;
-
        if (drv_data->ssp_type == CE4100_SSP ||
            drv_data->ssp_type == QUARK_X1000_SSP)
-               val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
+               val |= pxa2xx_spi_read(drv_data, SSSR) & SSSR_ALT_FRM_MASK;
 
-       write_SSSR(val, reg);
+       pxa2xx_spi_write(drv_data, SSSR, val);
 }
 
 extern int pxa2xx_spi_flush(struct driver_data *drv_data);
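
The hunk above replaces the per-register DEFINE_SSP_REG() wrappers, which baked each offset into a generated function name, with two generic accessors keyed by register offset. A minimal sketch of the resulting read-modify-write idiom (the helper below is hypothetical and assumes the driver's internal header for struct driver_data, SSCR1 and SSCR1_RFT; it is not part of the patch):

static void example_clear_rx_threshold(struct driver_data *drv_data)
{
        u32 sccr1 = pxa2xx_spi_read(drv_data, SSCR1);

        sccr1 &= ~SSCR1_RFT;            /* clear the RX threshold field */
        pxa2xx_spi_write(drv_data, SSCR1, sccr1);
}

This is the shape reset_sccr1() takes after the conversion, with the offset passed as an argument instead of selecting one of the ten generated wrappers.
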
index e7fb5a0d2e8dc35900099cbc471f66b58b3fa088..ff9cdbdb6672371df54b6c59ce54579a46758602 100644 (file)
@@ -337,7 +337,7 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
 static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
 {
        struct spi_qup *controller = spi_master_get_devdata(spi->master);
-       u32 config, iomode, mode;
+       u32 config, iomode, mode, control;
        int ret, n_words, w_size;
 
        if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
@@ -392,6 +392,15 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
 
        writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
 
+       control = readl_relaxed(controller->base + SPI_IO_CONTROL);
+
+       if (spi->mode & SPI_CPOL)
+               control |= SPI_IO_C_CLK_IDLE_HIGH;
+       else
+               control &= ~SPI_IO_C_CLK_IDLE_HIGH;
+
+       writel_relaxed(control, controller->base + SPI_IO_CONTROL);
+
        config = readl_relaxed(controller->base + SPI_CONFIG);
 
        if (spi->mode & SPI_LOOP)
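
The spi-qup hunk above makes the controller honour SPI_CPOL by programming the clock idle level in SPI_IO_CONTROL during per-transfer I/O configuration. A hedged sketch of a client requesting an idle-high clock, i.e. SPI mode 3 (the helper name is hypothetical; the flags come from <linux/spi/spi.h>):

#include <linux/spi/spi.h>

/* Hypothetical client helper: select SPI mode 3 so the new
 * SPI_IO_C_CLK_IDLE_HIGH path above is exercised (CPOL = 1, CPHA = 1). */
static int example_use_mode3(struct spi_device *spi)
{
        spi->mode |= SPI_CPOL | SPI_CPHA;
        return spi_setup(spi);
}
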
index daabbabd26b051744fcc07417c53f4d0a8b03a4f..1a777dc261d6f5bfa2e56dc437fb7d957d2b0891 100644 (file)
@@ -437,6 +437,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
        rs->state &= ~TXBUSY;
        spin_unlock_irqrestore(&rs->lock, flags);
 
+       rxdesc = NULL;
        if (rs->rx) {
                rxconf.direction = rs->dma_rx.direction;
                rxconf.src_addr = rs->dma_rx.addr;
@@ -453,6 +454,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
                rxdesc->callback_param = rs;
        }
 
+       txdesc = NULL;
        if (rs->tx) {
                txconf.direction = rs->dma_tx.direction;
                txconf.dst_addr = rs->dma_tx.addr;
@@ -470,7 +472,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
        }
 
        /* rx must be started before tx: the master clocks rx data in as soon as tx data starts shifting out */
-       if (rs->rx) {
+       if (rxdesc) {
                spin_lock_irqsave(&rs->lock, flags);
                rs->state |= RXBUSY;
                spin_unlock_irqrestore(&rs->lock, flags);
@@ -478,7 +480,7 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
                dma_async_issue_pending(rs->dma_rx.ch);
        }
 
-       if (rs->tx) {
+       if (txdesc) {
                spin_lock_irqsave(&rs->lock, flags);
                rs->state |= TXBUSY;
                spin_unlock_irqrestore(&rs->lock, flags);
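
The rockchip fix above initialises rxdesc and txdesc to NULL and gates submission on the descriptor itself rather than on rs->rx/rs->tx, so a direction that was skipped, or whose prep call failed, can never be issued to the dmaengine. The pattern in isolation, as a hypothetical self-contained helper:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* Sketch only: submit a DMA descriptor iff one was actually prepared. */
static void example_issue_rx(struct dma_chan *ch, struct scatterlist *sgl,
                             unsigned int nents, bool want_rx)
{
        struct dma_async_tx_descriptor *rxdesc = NULL;

        if (want_rx)
                rxdesc = dmaengine_prep_slave_sg(ch, sgl, nents,
                                                 DMA_DEV_TO_MEM,
                                                 DMA_PREP_INTERRUPT);
        if (rxdesc) {
                dmaengine_submit(rxdesc);
                dma_async_issue_pending(ch);
        }
}
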
index 2071f788c6fb3b8b376cade7b7319f6779a5f1be..46ce47076e63d143f10b298f386877bb960b8f56 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/module.h>
index 37b19836f5cb45fe66eb484b398f84b8c775daa8..9231c34b5a5c73bc9c32175d346cdb4e9c232cd6 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/init.h>
index 237f2e7a717999087e464c41e329ad43e3ff3455..5a56acf8a43e697f6e569e1ee768bc3a9cf6120a 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 #include <linux/kernel.h>
index fc29233d0650904f648a2d08ee9a37fcb5e25a7c..20e800e70442b59753f90957a67eb845abab9dbe 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/clk.h>
index 3ab7a21445fc253406eaf92abb87ce99974ce828..e57eec0b2f46a64f99ad104251baa505396b0555 100644 (file)
@@ -82,6 +82,8 @@ struct sh_msiof_spi_priv {
 #define MDR1_SYNCMD_LR  0x30000000 /*   L/R mode */
 #define MDR1_SYNCAC_SHIFT       25 /* Sync Polarity (1 = Active-low) */
 #define MDR1_BITLSB_SHIFT       24 /* MSB/LSB First (1 = LSB first) */
+#define MDR1_DTDL_SHIFT                 20 /* Data Pin Bit Delay for MSIOF_SYNC */
+#define MDR1_SYNCDL_SHIFT       16 /* Frame Sync Signal Timing Delay */
 #define MDR1_FLD_MASK   0x0000000c /* Frame Sync Signal Interval (0-3) */
 #define MDR1_FLD_SHIFT           2
 #define MDR1_XXSTP      0x00000001 /* Transmission/Reception Stop on FIFO */
@@ -241,42 +243,80 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
 
 static struct {
        unsigned short div;
-       unsigned short scr;
-} const sh_msiof_spi_clk_table[] = {
-       { 1,    SCR_BRPS( 1) | SCR_BRDV_DIV_1 },
-       { 2,    SCR_BRPS( 1) | SCR_BRDV_DIV_2 },
-       { 4,    SCR_BRPS( 1) | SCR_BRDV_DIV_4 },
-       { 8,    SCR_BRPS( 1) | SCR_BRDV_DIV_8 },
-       { 16,   SCR_BRPS( 1) | SCR_BRDV_DIV_16 },
-       { 32,   SCR_BRPS( 1) | SCR_BRDV_DIV_32 },
-       { 64,   SCR_BRPS(32) | SCR_BRDV_DIV_2 },
-       { 128,  SCR_BRPS(32) | SCR_BRDV_DIV_4 },
-       { 256,  SCR_BRPS(32) | SCR_BRDV_DIV_8 },
-       { 512,  SCR_BRPS(32) | SCR_BRDV_DIV_16 },
-       { 1024, SCR_BRPS(32) | SCR_BRDV_DIV_32 },
+       unsigned short brdv;
+} const sh_msiof_spi_div_table[] = {
+       { 1,    SCR_BRDV_DIV_1 },
+       { 2,    SCR_BRDV_DIV_2 },
+       { 4,    SCR_BRDV_DIV_4 },
+       { 8,    SCR_BRDV_DIV_8 },
+       { 16,   SCR_BRDV_DIV_16 },
+       { 32,   SCR_BRDV_DIV_32 },
 };
 
 static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
                                      unsigned long parent_rate, u32 spi_hz)
 {
        unsigned long div = 1024;
+       u32 brps, scr;
        size_t k;
 
        if (!WARN_ON(!spi_hz || !parent_rate))
                div = DIV_ROUND_UP(parent_rate, spi_hz);
 
-       /* TODO: make more fine grained */
-
-       for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) {
-               if (sh_msiof_spi_clk_table[k].div >= div)
+       for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
+               brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
+               if (brps <= 32) /* max of brps is 32 */
                        break;
        }
 
-       k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1);
+       k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
 
-       sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr);
+       scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
+       sh_msiof_write(p, TSCR, scr);
        if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX))
-               sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
+               sh_msiof_write(p, RSCR, scr);
+}
+
+static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
+{
+       /*
+        * DTDL/SYNCDL bit      : p->info->dtdl or p->info->syncdl
+        * b'000                : 0
+        * b'001                : 100
+        * b'010                : 200
+        * b'011 (SYNCDL only)  : 300
+        * b'101                : 50
+        * b'110                : 150
+        */
+       if (dtdl_or_syncdl % 100)
+               return dtdl_or_syncdl / 100 + 5;
+       else
+               return dtdl_or_syncdl / 100;
+}
+
+static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
+{
+       u32 val;
+
+       if (!p->info)
+               return 0;
+
+       /* check that DTDL and SYNCDL are within the allowed range */
+       if (p->info->dtdl > 200 || p->info->syncdl > 300) {
+               dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
+               return 0;
+       }
+
+       /* check that the sum of DTDL and SYNCDL is a multiple of 100 */
+       if ((p->info->dtdl + p->info->syncdl) % 100) {
+               dev_warn(&p->pdev->dev,
+                        "the sum of DTDL and SYNCDL must be a multiple of 100\n");
+               return 0;
+       }
+
+       val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
+       val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
+
+       return val;
 }
 
 static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
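
To see the new divider selection at work with hypothetical numbers: for parent_rate = 52 MHz and spi_hz = 1 MHz, div = DIV_ROUND_UP(52000000, 1000000) = 52. The first table entry (div = 1) gives brps = 52 > 32, so the loop advances to div = 2, where brps = DIV_ROUND_UP(52, 2) = 26 fits, and the value written is SCR_BRPS(26) | SCR_BRDV_DIV_2, i.e. 52 MHz / (2 * 26) = 1 MHz exactly. The old fixed table would have fallen back to the next power-of-two divider (64); the prescaler now tracks intermediate rates. The delay encoding follows the table in the comment above: dtdl = 150 is not a multiple of 100, so sh_msiof_get_delay_bit() returns 150/100 + 5 = 6 (b'110), while dtdl = 200 returns 2 (b'010).
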
@@ -296,6 +336,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
        tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
        tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
        tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+       tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
        sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
        if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) {
                /* These bits are reserved if RX needs TX */
@@ -501,7 +542,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
                gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
 
 
-       pm_runtime_put_sync(&p->pdev->dev);
+       pm_runtime_put(&p->pdev->dev);
 
        return 0;
 }
@@ -595,8 +636,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
        }
 
        /* wait for tx fifo to be emptied / rx fifo to be filled */
-       ret = wait_for_completion_timeout(&p->done, HZ);
-       if (!ret) {
+       if (!wait_for_completion_timeout(&p->done, HZ)) {
                dev_err(&p->pdev->dev, "PIO timeout\n");
                ret = -ETIMEDOUT;
                goto stop_reset;
@@ -706,8 +746,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
        }
 
        /* wait for tx fifo to be emptied / rx fifo to be filled */
-       ret = wait_for_completion_timeout(&p->done, HZ);
-       if (!ret) {
+       if (!wait_for_completion_timeout(&p->done, HZ)) {
                dev_err(&p->pdev->dev, "DMA timeout\n");
                ret = -ETIMEDOUT;
                goto stop_reset;
@@ -957,6 +996,8 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
                                        &info->tx_fifo_override);
        of_property_read_u32(np, "renesas,rx-fifo-size",
                                        &info->rx_fifo_override);
+       of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
+       of_property_read_u32(np, "renesas,syncdl", &info->syncdl);
 
        info->num_chipselect = num_cs;
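
With the parse hook above, board device trees can supply the delays directly, e.g. renesas,dtdl = <200>; or renesas,syncdl = <300>;. Per the validation in sh_msiof_spi_get_dtdl_and_syncdl(), the useful values are multiples of 50 expressed in hundredths of a clock cycle (dtdl up to 200, syncdl up to 300, and the sum must be a multiple of 100); out-of-range combinations are warned about and both delays fall back to 0.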
 
index 1cfc906dd1741a4faaabb445fb2a2f0b586bb2a3..502501187c9e839ea1222bbffedf5e57acb17a6a 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #include <linux/module.h>
index d075191476f00218b271c6466c47a60614cb9bc6..f5715c9f68b0e0cb3dd2f7f568fed38798820796 100644 (file)
@@ -818,7 +818,6 @@ static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
 
 static const struct of_device_id spi_sirfsoc_of_match[] = {
        { .compatible = "sirf,prima2-spi", },
-       { .compatible = "sirf,marco-spi", },
        {}
 };
 MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
new file mode 100644 (file)
index 0000000..2faeaa7
--- /dev/null
@@ -0,0 +1,504 @@
+/*
+ *  Copyright (c) 2008-2014 STMicroelectronics Limited
+ *
+ *  Author: Angus Clark <Angus.Clark@st.com>
+ *          Patrice Chotard <patrice.chotard@st.com>
+ *          Lee Jones <lee.jones@linaro.org>
+ *
+ *  SPI master mode controller driver, used in STMicroelectronics devices.
+ *
+ *  May be copied or modified under the terms of the GNU General Public
+ *  License Version 2.0 only.  See linux/COPYING for more information.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+/* SSC registers */
+#define SSC_BRG                                0x000
+#define SSC_TBUF                       0x004
+#define SSC_RBUF                       0x008
+#define SSC_CTL                                0x00C
+#define SSC_IEN                                0x010
+#define SSC_I2C                                0x018
+
+/* SSC Control */
+#define SSC_CTL_DATA_WIDTH_9           0x8
+#define SSC_CTL_DATA_WIDTH_MSK         0xf
+#define SSC_CTL_BM                     0xf
+#define SSC_CTL_HB                     BIT(4)
+#define SSC_CTL_PH                     BIT(5)
+#define SSC_CTL_PO                     BIT(6)
+#define SSC_CTL_SR                     BIT(7)
+#define SSC_CTL_MS                     BIT(8)
+#define SSC_CTL_EN                     BIT(9)
+#define SSC_CTL_LPB                    BIT(10)
+#define SSC_CTL_EN_TX_FIFO             BIT(11)
+#define SSC_CTL_EN_RX_FIFO             BIT(12)
+#define SSC_CTL_EN_CLST_RX             BIT(13)
+
+/* SSC Interrupt Enable */
+#define SSC_IEN_TEEN                   BIT(2)
+
+#define FIFO_SIZE                      8
+
+struct spi_st {
+       /* SSC SPI Controller */
+       void __iomem            *base;
+       struct clk              *clk;
+       struct device           *dev;
+
+       /* SSC SPI current transaction */
+       const u8                *tx_ptr;
+       u8                      *rx_ptr;
+       u16                     bytes_per_word;
+       unsigned int            words_remaining;
+       unsigned int            baud;
+       struct completion       done;
+};
+
+static int spi_st_clk_enable(struct spi_st *spi_st)
+{
+       /*
+        * Current platforms use one of the core clocks for SPI and I2C.
+        * If we attempt to disable the clock, the system will hang.
+        *
+        * TODO: Remove this when platform supports power domains.
+        */
+       return 0;
+
+       return clk_prepare_enable(spi_st->clk);
+}
+
+static void spi_st_clk_disable(struct spi_st *spi_st)
+{
+       /*
+        * Current platforms use one of the core clocks for SPI and I2C.
+        * If we attempt to disable the clock, the system will hang.
+        *
+        * TODO: Remove this when platform supports power domains.
+        */
+       return;
+
+       clk_disable_unprepare(spi_st->clk);
+}
+
+/* Load the TX FIFO */
+static void ssc_write_tx_fifo(struct spi_st *spi_st)
+{
+       unsigned int count, i;
+       uint32_t word = 0;
+
+       if (spi_st->words_remaining > FIFO_SIZE)
+               count = FIFO_SIZE;
+       else
+               count = spi_st->words_remaining;
+
+       for (i = 0; i < count; i++) {
+               if (spi_st->tx_ptr) {
+                       if (spi_st->bytes_per_word == 1) {
+                               word = *spi_st->tx_ptr++;
+                       } else {
+                               word = *spi_st->tx_ptr++;
+                               word = *spi_st->tx_ptr++ | (word << 8);
+                       }
+               }
+               writel_relaxed(word, spi_st->base + SSC_TBUF);
+       }
+}
+
+/* Read the RX FIFO */
+static void ssc_read_rx_fifo(struct spi_st *spi_st)
+{
+       unsigned int count, i;
+       uint32_t word = 0;
+
+       if (spi_st->words_remaining > FIFO_SIZE)
+               count = FIFO_SIZE;
+       else
+               count = spi_st->words_remaining;
+
+       for (i = 0; i < count; i++) {
+               word = readl_relaxed(spi_st->base + SSC_RBUF);
+
+               if (spi_st->rx_ptr) {
+                       if (spi_st->bytes_per_word == 1) {
+                               *spi_st->rx_ptr++ = (uint8_t)word;
+                       } else {
+                               *spi_st->rx_ptr++ = (word >> 8);
+                               *spi_st->rx_ptr++ = word & 0xff;
+                       }
+               }
+       }
+       spi_st->words_remaining -= count;
+}
+
+static int spi_st_transfer_one(struct spi_master *master,
+                              struct spi_device *spi, struct spi_transfer *t)
+{
+       struct spi_st *spi_st = spi_master_get_devdata(master);
+       uint32_t ctl = 0;
+
+       /* Setup transfer */
+       spi_st->tx_ptr = t->tx_buf;
+       spi_st->rx_ptr = t->rx_buf;
+
+       if (spi->bits_per_word > 8) {
+               /*
+                * Anything greater than 8 bits-per-word requires 2
+                * bytes-per-word in the RX/TX buffers
+                */
+               spi_st->bytes_per_word = 2;
+               spi_st->words_remaining = t->len / 2;
+
+       } else if (spi->bits_per_word == 8 && !(t->len & 0x1)) {
+               /*
+                * If transfer is even-length, and 8 bits-per-word, then
+                * implement as half-length 16 bits-per-word transfer
+                */
+               spi_st->bytes_per_word = 2;
+               spi_st->words_remaining = t->len / 2;
+
+               /* Set SSC_CTL to 16 bits-per-word */
+               ctl = readl_relaxed(spi_st->base + SSC_CTL);
+               writel_relaxed((ctl | 0xf), spi_st->base + SSC_CTL);
+
+               readl_relaxed(spi_st->base + SSC_RBUF);
+
+       } else {
+               spi_st->bytes_per_word = 1;
+               spi_st->words_remaining = t->len;
+       }
+
+       reinit_completion(&spi_st->done);
+
+       /* Start transfer by writing to the TX FIFO */
+       ssc_write_tx_fifo(spi_st);
+       writel_relaxed(SSC_IEN_TEEN, spi_st->base + SSC_IEN);
+
+       /* Wait for transfer to complete */
+       wait_for_completion(&spi_st->done);
+
+       /* Restore SSC_CTL if necessary */
+       if (ctl)
+               writel_relaxed(ctl, spi_st->base + SSC_CTL);
+
+       spi_finalize_current_transfer(spi->master);
+
+       return t->len;
+}
+
+static void spi_st_cleanup(struct spi_device *spi)
+{
+       int cs = spi->cs_gpio;
+
+       if (gpio_is_valid(cs))
+               devm_gpio_free(&spi->dev, cs);
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS  (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH)
+static int spi_st_setup(struct spi_device *spi)
+{
+       struct spi_st *spi_st = spi_master_get_devdata(spi->master);
+       u32 spi_st_clk, sscbrg, var;
+       u32 hz = spi->max_speed_hz;
+       int cs = spi->cs_gpio;
+       int ret;
+
+       if (!hz) {
+               dev_err(&spi->dev, "max_speed_hz unspecified\n");
+               return -EINVAL;
+       }
+
+       if (!gpio_is_valid(cs)) {
+               dev_err(&spi->dev, "%d is not a valid gpio\n", cs);
+               return -EINVAL;
+       }
+
+       if (devm_gpio_request(&spi->dev, cs, dev_name(&spi->dev))) {
+               dev_err(&spi->dev, "could not request gpio:%d\n", cs);
+               return -EINVAL;
+       }
+
+       ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH);
+       if (ret)
+               return ret;
+
+       spi_st_clk = clk_get_rate(spi_st->clk);
+
+       /* Set SSC_BRG (baud rate generator) */
+       sscbrg = spi_st_clk / (2 * hz);
+       if (sscbrg < 0x07 || sscbrg > BIT(16)) {
+               dev_err(&spi->dev,
+                       "baud rate divider %u (for %u Hz) outside valid range\n",
+                       sscbrg, hz);
+               return -EINVAL;
+       }
+
+       spi_st->baud = spi_st_clk / (2 * sscbrg);
+       if (sscbrg == BIT(16)) /* 16-bit counter wraps */
+               sscbrg = 0x0;
+
+       writel_relaxed(sscbrg, spi_st->base + SSC_BRG);
+
+       dev_dbg(&spi->dev,
+               "setting baud rate: target = %u Hz, actual = %u Hz, sscbrg = %u\n",
+               hz, spi_st->baud, sscbrg);
+
+       /* Set SSC_CTL and enable SSC */
+       var = readl_relaxed(spi_st->base + SSC_CTL);
+       var |= SSC_CTL_MS;
+
+       if (spi->mode & SPI_CPOL)
+               var |= SSC_CTL_PO;
+       else
+               var &= ~SSC_CTL_PO;
+
+       if (spi->mode & SPI_CPHA)
+               var |= SSC_CTL_PH;
+       else
+               var &= ~SSC_CTL_PH;
+
+       if ((spi->mode & SPI_LSB_FIRST) == 0)
+               var |= SSC_CTL_HB;
+       else
+               var &= ~SSC_CTL_HB;
+
+       if (spi->mode & SPI_LOOP)
+               var |= SSC_CTL_LPB;
+       else
+               var &= ~SSC_CTL_LPB;
+
+       var &= ~SSC_CTL_DATA_WIDTH_MSK;
+       var |= (spi->bits_per_word - 1);
+
+       var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO;
+       var |= SSC_CTL_EN;
+
+       writel_relaxed(var, spi_st->base + SSC_CTL);
+
+       /* Clear the status register */
+       readl_relaxed(spi_st->base + SSC_RBUF);
+
+       return 0;
+}
+
+/* Interrupt fired when TX shift register becomes empty */
+static irqreturn_t spi_st_irq(int irq, void *dev_id)
+{
+       struct spi_st *spi_st = (struct spi_st *)dev_id;
+
+       /* Read RX FIFO */
+       ssc_read_rx_fifo(spi_st);
+
+       /* Fill TX FIFO */
+       if (spi_st->words_remaining) {
+               ssc_write_tx_fifo(spi_st);
+       } else {
+               /* TX/RX complete */
+               writel_relaxed(0x0, spi_st->base + SSC_IEN);
+               /*
+                * read SSC_IEN to ensure that this bit is set
+                * before re-enabling interrupt
+                */
+               readl(spi_st->base + SSC_IEN);
+               complete(&spi_st->done);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int spi_st_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct spi_master *master;
+       struct resource *res;
+       struct spi_st *spi_st;
+       int irq, ret = 0;
+       u32 var;
+
+       master = spi_alloc_master(&pdev->dev, sizeof(*spi_st));
+       if (!master)
+               return -ENOMEM;
+
+       master->dev.of_node             = np;
+       master->mode_bits               = MODEBITS;
+       master->setup                   = spi_st_setup;
+       master->cleanup                 = spi_st_cleanup;
+       master->transfer_one            = spi_st_transfer_one;
+       master->bits_per_word_mask      = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+       master->auto_runtime_pm         = true;
+       master->bus_num                 = pdev->id;
+       spi_st                          = spi_master_get_devdata(master);
+
+       spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
+       if (IS_ERR(spi_st->clk)) {
+               dev_err(&pdev->dev, "Unable to request clock\n");
+               return PTR_ERR(spi_st->clk);
+       }
+
+       ret = spi_st_clk_enable(spi_st);
+       if (ret)
+               return ret;
+
+       init_completion(&spi_st->done);
+
+       /* Get resources */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       spi_st->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(spi_st->base)) {
+               ret = PTR_ERR(spi_st->base);
+               goto clk_disable;
+       }
+
+       /* Disable I2C and Reset SSC */
+       writel_relaxed(0x0, spi_st->base + SSC_I2C);
+       var = readw_relaxed(spi_st->base + SSC_CTL);
+       var |= SSC_CTL_SR;
+       writel_relaxed(var, spi_st->base + SSC_CTL);
+
+       udelay(1);
+       var = readl_relaxed(spi_st->base + SSC_CTL);
+       var &= ~SSC_CTL_SR;
+       writel_relaxed(var, spi_st->base + SSC_CTL);
+
+       /* Set SSC into slave mode before reconfiguring PIO pins */
+       var = readl_relaxed(spi_st->base + SSC_CTL);
+       var &= ~SSC_CTL_MS;
+       writel_relaxed(var, spi_st->base + SSC_CTL);
+
+       irq = irq_of_parse_and_map(np, 0);
+       if (!irq) {
+               dev_err(&pdev->dev, "IRQ missing or invalid\n");
+               ret = -EINVAL;
+               goto clk_disable;
+       }
+
+       ret = devm_request_irq(&pdev->dev, irq, spi_st_irq, 0,
+                              pdev->name, spi_st);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to request irq %d\n", irq);
+               goto clk_disable;
+       }
+
+       /* by default the device is on */
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
+       platform_set_drvdata(pdev, master);
+
+       ret = devm_spi_register_master(&pdev->dev, master);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to register master\n");
+               goto clk_disable;
+       }
+
+       return 0;
+
+clk_disable:
+       spi_st_clk_disable(spi_st);
+
+       return ret;
+}
+
+static int spi_st_remove(struct platform_device *pdev)
+{
+       struct spi_master *master = platform_get_drvdata(pdev);
+       struct spi_st *spi_st = spi_master_get_devdata(master);
+
+       spi_st_clk_disable(spi_st);
+
+       pinctrl_pm_select_sleep_state(&pdev->dev);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int spi_st_runtime_suspend(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct spi_st *spi_st = spi_master_get_devdata(master);
+
+       writel_relaxed(0, spi_st->base + SSC_IEN);
+       pinctrl_pm_select_sleep_state(dev);
+
+       spi_st_clk_disable(spi_st);
+
+       return 0;
+}
+
+static int spi_st_runtime_resume(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct spi_st *spi_st = spi_master_get_devdata(master);
+       int ret;
+
+       ret = spi_st_clk_enable(spi_st);
+       pinctrl_pm_select_default_state(dev);
+
+       return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int spi_st_suspend(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       int ret;
+
+       ret = spi_master_suspend(master);
+       if (ret)
+               return ret;
+
+       return pm_runtime_force_suspend(dev);
+}
+
+static int spi_st_resume(struct device *dev)
+{
+       struct spi_master *master = dev_get_drvdata(dev);
+       int ret;
+
+       ret = spi_master_resume(master);
+       if (ret)
+               return ret;
+
+       return pm_runtime_force_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops spi_st_pm = {
+       SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume)
+       SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL)
+};
+
+static struct of_device_id stm_spi_match[] = {
+       { .compatible = "st,comms-ssc4-spi", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, stm_spi_match);
+
+static struct platform_driver spi_st_driver = {
+       .driver = {
+               .name = "spi-st",
+               .pm = &spi_st_pm,
+               .of_match_table = of_match_ptr(stm_spi_match),
+       },
+       .probe = spi_st_probe,
+       .remove = spi_st_remove,
+};
+module_platform_driver(spi_st_driver);
+
+MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>");
+MODULE_DESCRIPTION("STM SSC SPI driver");
+MODULE_LICENSE("GPL v2");
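
Two details of the new driver are worth spelling out. First, the baud-rate generator in spi_st_setup(): with a hypothetical 133 MHz SSC clock and max_speed_hz = 5 MHz, sscbrg = 133000000 / (2 * 5000000) = 13, giving an actual rate of 133 MHz / (2 * 13), roughly 5.12 MHz; the divider must land in 0x07..BIT(16), and a value of exactly BIT(16) is written as 0 because the hardware counter is 16 bits wide and wraps. Second, spi_st_transfer_one() packs even-length 8-bit transfers as half as many 16-bit words, temporarily forcing the SSC_CTL data-width field to 0xf so each FIFO slot carries two bytes, and restores the saved SSC_CTL once the transfer completes.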
index 6146c4cd6583df60f72f0c526b4becb7e682db08..884a716e50cb822ce49dcb0e925a2c1856130bcb 100644 (file)
@@ -201,7 +201,7 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
 
 static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
 {
-       int wlen, count, ret;
+       int wlen, count;
        unsigned int cmd;
        const u8 *txbuf;
 
@@ -230,9 +230,8 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
                }
 
                ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
-               ret = wait_for_completion_timeout(&qspi->transfer_complete,
-                                                 QSPI_COMPLETION_TIMEOUT);
-               if (ret == 0) {
+               if (!wait_for_completion_timeout(&qspi->transfer_complete,
+                                                QSPI_COMPLETION_TIMEOUT)) {
                        dev_err(qspi->dev, "write timed out\n");
                        return -ETIMEDOUT;
                }
@@ -245,7 +244,7 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
 
 static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
 {
-       int wlen, count, ret;
+       int wlen, count;
        unsigned int cmd;
        u8 *rxbuf;
 
@@ -268,9 +267,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
        while (count) {
                dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
                ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
-               ret = wait_for_completion_timeout(&qspi->transfer_complete,
-                               QSPI_COMPLETION_TIMEOUT);
-               if (ret == 0) {
+               if (!wait_for_completion_timeout(&qspi->transfer_complete,
+                                                QSPI_COMPLETION_TIMEOUT)) {
                        dev_err(qspi->dev, "read timed out\n");
                        return -ETIMEDOUT;
                }
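
Both ti-qspi hunks above drop the intermediate ret variable: wait_for_completion_timeout() returns an unsigned long (0 on timeout, otherwise the remaining jiffies), so funnelling it through a signed int just to compare against 0 added noise. The resulting idiom, as a self-contained sketch (the helper and the 100 ms timeout are hypothetical):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hypothetical wrapper showing the idiom adopted above. */
static int example_wait(struct completion *done)
{
        if (!wait_for_completion_timeout(done, msecs_to_jiffies(100)))
                return -ETIMEDOUT;      /* zero return means timeout */
        return 0;
}
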
index be692ad504423e42beed4800fb9fadc90a3b3b3f..93dfcee0f987b705be1329893c950ad19989ccd8 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
  */
 
 #include <linux/delay.h>
index 79bd84f43430d8c718a1369f167337053b328309..133f53a9c1d4eb9b5235b88de03dac9c5d2ea27e 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/spi/xilinx_spi.h>
 #include <linux/io.h>
 
+#define XILINX_SPI_MAX_CS      32
+
 #define XILINX_SPI_NAME "xilinx_spi"
 
 /* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
@@ -34,7 +36,8 @@
 #define XSPI_CR_MASTER_MODE    0x04
 #define XSPI_CR_CPOL           0x08
 #define XSPI_CR_CPHA           0x10
-#define XSPI_CR_MODE_MASK      (XSPI_CR_CPHA | XSPI_CR_CPOL)
+#define XSPI_CR_MODE_MASK      (XSPI_CR_CPHA | XSPI_CR_CPOL | \
+                                XSPI_CR_LSB_FIRST | XSPI_CR_LOOP)
 #define XSPI_CR_TXFIFO_RESET   0x20
 #define XSPI_CR_RXFIFO_RESET   0x40
 #define XSPI_CR_MANUAL_SSELECT 0x80
@@ -85,12 +88,11 @@ struct xilinx_spi {
 
        u8 *rx_ptr;             /* pointer in the Rx buffer */
        const u8 *tx_ptr;       /* pointer in the Tx buffer */
-       int remaining_bytes;    /* the number of bytes left to transfer */
-       u8 bits_per_word;
+       u8 bytes_per_word;
+       int buffer_size;        /* buffer size in words */
+       u32 cs_inactive;        /* Level of the CS pins when inactive */
        unsigned int (*read_fn)(void __iomem *);
        void (*write_fn)(u32, void __iomem *);
-       void (*tx_fn)(struct xilinx_spi *);
-       void (*rx_fn)(struct xilinx_spi *);
 };
 
 static void xspi_write32(u32 val, void __iomem *addr)
@@ -113,49 +115,51 @@ static unsigned int xspi_read32_be(void __iomem *addr)
        return ioread32be(addr);
 }
 
-static void xspi_tx8(struct xilinx_spi *xspi)
+static void xilinx_spi_tx(struct xilinx_spi *xspi)
 {
-       xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET);
-       xspi->tx_ptr++;
-}
-
-static void xspi_tx16(struct xilinx_spi *xspi)
-{
-       xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
-       xspi->tx_ptr += 2;
-}
+       u32 data = 0;
 
-static void xspi_tx32(struct xilinx_spi *xspi)
-{
-       xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
-       xspi->tx_ptr += 4;
-}
-
-static void xspi_rx8(struct xilinx_spi *xspi)
-{
-       u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
-       if (xspi->rx_ptr) {
-               *xspi->rx_ptr = data & 0xff;
-               xspi->rx_ptr++;
+       if (!xspi->tx_ptr) {
+               xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
+               return;
        }
-}
 
-static void xspi_rx16(struct xilinx_spi *xspi)
-{
-       u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
-       if (xspi->rx_ptr) {
-               *(u16 *)(xspi->rx_ptr) = data & 0xffff;
-               xspi->rx_ptr += 2;
+       switch (xspi->bytes_per_word) {
+       case 1:
+               data = *(u8 *)(xspi->tx_ptr);
+               break;
+       case 2:
+               data = *(u16 *)(xspi->tx_ptr);
+               break;
+       case 4:
+               data = *(u32 *)(xspi->tx_ptr);
+               break;
        }
+
+       xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET);
+       xspi->tx_ptr += xspi->bytes_per_word;
 }
 
-static void xspi_rx32(struct xilinx_spi *xspi)
+static void xilinx_spi_rx(struct xilinx_spi *xspi)
 {
        u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
-       if (xspi->rx_ptr) {
+
+       if (!xspi->rx_ptr)
+               return;
+
+       switch (xspi->bytes_per_word) {
+       case 1:
+               *(u8 *)(xspi->rx_ptr) = data;
+               break;
+       case 2:
+               *(u16 *)(xspi->rx_ptr) = data;
+               break;
+       case 4:
                *(u32 *)(xspi->rx_ptr) = data;
-               xspi->rx_ptr += 4;
+               break;
        }
+
+       xspi->rx_ptr += xspi->bytes_per_word;
 }
 
 static void xspi_init_hw(struct xilinx_spi *xspi)
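
The two hunks above collapse the xspi_tx8/16/32 and xspi_rx8/16/32 callback sets into single xilinx_spi_tx()/xilinx_spi_rx() functions that switch on bytes_per_word (1, 2 or 4, i.e. 8-, 16- or 32-bit transfer words) and fold the NULL-buffer case into the same path, writing zeros on tx and discarding data on rx. That in turn lets the tx_fn/rx_fn function pointers be dropped from struct xilinx_spi, as seen in the struct hunk earlier.
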
@@ -165,46 +169,56 @@ static void xspi_init_hw(struct xilinx_spi *xspi)
        /* Reset the SPI device */
        xspi->write_fn(XIPIF_V123B_RESET_MASK,
                regs_base + XIPIF_V123B_RESETR_OFFSET);
-       /* Disable all the interrupts just in case */
-       xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
-       /* Enable the global IPIF interrupt */
-       xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
-               regs_base + XIPIF_V123B_DGIER_OFFSET);
+       /* Enable the transmit empty interrupt, which we use to determine
+        * progress on the transmission.
+        */
+       xspi->write_fn(XSPI_INTR_TX_EMPTY,
+                       regs_base + XIPIF_V123B_IIER_OFFSET);
+       /* Disable the global IPIF interrupt */
+       xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
        /* Deselect the slave on the SPI bus */
        xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
        /* Enable Manual Slave Select Assertion, put the SPI controller
         * into master mode, and enable it */
-       xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT |
-               XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET |
-               XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET);
+       xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE |
+               XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET,
+               regs_base + XSPI_CR_OFFSET);
 }
 
 static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
 {
        struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+       u16 cr;
+       u32 cs;
 
        if (is_on == BITBANG_CS_INACTIVE) {
                /* Deselect the slave on the SPI bus */
-               xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET);
-       } else if (is_on == BITBANG_CS_ACTIVE) {
-               /* Set the SPI clock phase and polarity */
-               u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET)
-                        & ~XSPI_CR_MODE_MASK;
-               if (spi->mode & SPI_CPHA)
-                       cr |= XSPI_CR_CPHA;
-               if (spi->mode & SPI_CPOL)
-                       cr |= XSPI_CR_CPOL;
-               xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
-
-               /* We do not check spi->max_speed_hz here as the SPI clock
-                * frequency is not software programmable (the IP block design
-                * parameter)
-                */
-
-               /* Activate the chip select */
-               xspi->write_fn(~(0x0001 << spi->chip_select),
-                       xspi->regs + XSPI_SSR_OFFSET);
+               xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
+               return;
        }
+
+       /* Set the SPI clock phase and polarity */
+       cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK;
+       if (spi->mode & SPI_CPHA)
+               cr |= XSPI_CR_CPHA;
+       if (spi->mode & SPI_CPOL)
+               cr |= XSPI_CR_CPOL;
+       if (spi->mode & SPI_LSB_FIRST)
+               cr |= XSPI_CR_LSB_FIRST;
+       if (spi->mode & SPI_LOOP)
+               cr |= XSPI_CR_LOOP;
+       xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+
+       /* We do not check spi->max_speed_hz here as the SPI clock
+        * frequency is not software programmable (the IP block design
+        * parameter)
+        */
+
+       cs = xspi->cs_inactive;
+       cs ^= BIT(spi->chip_select);
+
+       /* Activate the chip select */
+       xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET);
 }
 
 /* spi_bitbang requires custom setup_transfer() to be defined if there is a
@@ -213,85 +227,85 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
 static int xilinx_spi_setup_transfer(struct spi_device *spi,
                struct spi_transfer *t)
 {
-       return 0;
-}
+       struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
 
-static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
-{
-       u8 sr;
+       if (spi->mode & SPI_CS_HIGH)
+               xspi->cs_inactive &= ~BIT(spi->chip_select);
+       else
+               xspi->cs_inactive |= BIT(spi->chip_select);
 
-       /* Fill the Tx FIFO with as many bytes as possible */
-       sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
-       while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
-               if (xspi->tx_ptr)
-                       xspi->tx_fn(xspi);
-               else
-                       xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
-               xspi->remaining_bytes -= xspi->bits_per_word / 8;
-               sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
-       }
+       return 0;
 }
 
 static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
 {
        struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
-       u32 ipif_ier;
+       int remaining_words;    /* the number of words left to transfer */
+       bool use_irq = false;
+       u16 cr = 0;
 
        /* We get here with transmitter inhibited */
 
        xspi->tx_ptr = t->tx_buf;
        xspi->rx_ptr = t->rx_buf;
-       xspi->remaining_bytes = t->len;
+       remaining_words = t->len / xspi->bytes_per_word;
        reinit_completion(&xspi->done);
 
+       if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
+               use_irq = true;
+               xspi->write_fn(XSPI_INTR_TX_EMPTY,
+                               xspi->regs + XIPIF_V123B_IISR_OFFSET);
+               /* Enable the global IPIF interrupt */
+               xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+                               xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+               /* Inhibit the transmitter to avoid spurious tx_empty irqs */
+               cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+               xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+                              xspi->regs + XSPI_CR_OFFSET);
+       }
 
-       /* Enable the transmit empty interrupt, which we use to determine
-        * progress on the transmission.
-        */
-       ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET);
-       xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
-               xspi->regs + XIPIF_V123B_IIER_OFFSET);
+       while (remaining_words) {
+               int n_words, tx_words, rx_words;
 
-       for (;;) {
-               u16 cr;
-               u8 sr;
+               n_words = min(remaining_words, xspi->buffer_size);
 
-               xilinx_spi_fill_tx_fifo(xspi);
+               tx_words = n_words;
+               while (tx_words--)
+                       xilinx_spi_tx(xspi);
 
                /* Start the transfer by not inhibiting the transmitter any
                 * longer
                 */
-               cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
-                                                       ~XSPI_CR_TRANS_INHIBIT;
-               xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
 
-               wait_for_completion(&xspi->done);
+               if (use_irq) {
+                       xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+                       wait_for_completion(&xspi->done);
+               } else
+                       while (!(xspi->read_fn(xspi->regs + XSPI_SR_OFFSET) &
+                                               XSPI_SR_TX_EMPTY_MASK))
+                               ;
 
                /* A transmit has just completed. Process received data and
                 * check for more data to transmit. Always inhibit the
                 * transmitter while the Isr refills the transmit register/FIFO,
                 * or make sure it is stopped if we're done.
                 */
-               cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
-               xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+               if (use_irq)
+                       xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
                               xspi->regs + XSPI_CR_OFFSET);
 
                /* Read out all the data from the Rx FIFO */
-               sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
-               while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
-                       xspi->rx_fn(xspi);
-                       sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
-               }
-
-               /* See if there is more data to send */
-               if (xspi->remaining_bytes <= 0)
-                       break;
+               rx_words = n_words;
+               while (rx_words--)
+                       xilinx_spi_rx(xspi);
+
+               remaining_words -= n_words;
        }
 
-       /* Disable the transmit empty interrupt */
-       xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);
+       if (use_irq)
+               xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
 
-       return t->len - xspi->remaining_bytes;
+       return t->len;
 }
 
 
@@ -316,6 +330,28 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
+{
+       u8 sr;
+       int n_words = 0;
+
+       /*
+        * Before detecting the buffer size, reset the core so that
+        * the detection starts from a clean state.
+        */
+       xspi->write_fn(XIPIF_V123B_RESET_MASK,
+               xspi->regs + XIPIF_V123B_RESETR_OFFSET);
+
+       /* Fill the Tx FIFO with as many words as possible */
+       do {
+               xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
+               sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+               n_words++;
+       } while (!(sr & XSPI_SR_TX_FULL_MASK));
+
+       return n_words;
+}
+
 static const struct of_device_id xilinx_spi_of_match[] = {
        { .compatible = "xlnx,xps-spi-2.00.a", },
        { .compatible = "xlnx,xps-spi-2.00.b", },
@@ -348,14 +384,21 @@ static int xilinx_spi_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       if (num_cs > XILINX_SPI_MAX_CS) {
+               dev_err(&pdev->dev, "Invalid number of spi slaves\n");
+               return -EINVAL;
+       }
+
        master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
        if (!master)
                return -ENODEV;
 
        /* the spi->mode bits understood by this driver: */
-       master->mode_bits = SPI_CPOL | SPI_CPHA;
+       master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
+                           SPI_CS_HIGH;
 
        xspi = spi_master_get_devdata(master);
+       xspi->cs_inactive = 0xffffffff;
        xspi->bitbang.master = master;
        xspi->bitbang.chipselect = xilinx_spi_chipselect;
        xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
@@ -392,35 +435,20 @@ static int xilinx_spi_probe(struct platform_device *pdev)
        }
 
        master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
-       xspi->bits_per_word = bits_per_word;
-       if (xspi->bits_per_word == 8) {
-               xspi->tx_fn = xspi_tx8;
-               xspi->rx_fn = xspi_rx8;
-       } else if (xspi->bits_per_word == 16) {
-               xspi->tx_fn = xspi_tx16;
-               xspi->rx_fn = xspi_rx16;
-       } else if (xspi->bits_per_word == 32) {
-               xspi->tx_fn = xspi_tx32;
-               xspi->rx_fn = xspi_rx32;
-       } else {
-               ret = -EINVAL;
-               goto put_master;
-       }
-
-       /* SPI controller initializations */
-       xspi_init_hw(xspi);
+       xspi->bytes_per_word = bits_per_word / 8;
+       xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
 
        xspi->irq = platform_get_irq(pdev, 0);
-       if (xspi->irq < 0) {
-               ret = xspi->irq;
-               goto put_master;
+       if (xspi->irq >= 0) {
+               /* Register for SPI Interrupt */
+               ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
+                               dev_name(&pdev->dev), xspi);
+               if (ret)
+                       goto put_master;
        }
 
-       /* Register for SPI Interrupt */
-       ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
-                              dev_name(&pdev->dev), xspi);
-       if (ret)
-               goto put_master;
+       /* SPI controller initializations */
+       xspi_init_hw(xspi);
 
        ret = spi_bitbang_start(&xspi->bitbang);
        if (ret) {
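
The chip-select handling in the spi-xilinx.c diff above reduces to simple bit arithmetic: setup_transfer() records the idle level of every CS line in the cs_inactive mask (active-low lines idle high, SPI_CS_HIGH lines idle low), and chipselect() XORs exactly one bit to drive the selected line to its active level. A minimal userspace sketch of that arithmetic — BIT() and the SPI_CS_HIGH value are copied here for illustration, everything else is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)      (1u << (n))
    #define SPI_CS_HIGH 0x04            /* as in include/linux/spi/spi.h */

    static uint32_t cs_inactive = 0xffffffff;   /* all lines idle high */

    static void setup_cs(unsigned int chip_select, unsigned int mode)
    {
            if (mode & SPI_CS_HIGH)
                    cs_inactive &= ~BIT(chip_select);   /* idles low  */
            else
                    cs_inactive |= BIT(chip_select);    /* idles high */
    }

    static uint32_t activate_cs(unsigned int chip_select)
    {
            /* Flip only the selected line away from its idle level */
            return cs_inactive ^ BIT(chip_select);
    }

    int main(void)
    {
            setup_cs(0, 0);             /* active-low device on CS0  */
            setup_cs(1, SPI_CS_HIGH);   /* active-high device on CS1 */
            printf("SSR for CS0: 0x%08x\n", (unsigned int)activate_cs(0));
            printf("SSR for CS1: 0x%08x\n", (unsigned int)activate_cs(1));
            return 0;
    }
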
index 66a70e9bc7438d0090b90a815ccea59812603110..c64a3e59fce30a7f9658afcca246a6d6872627da 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/kernel.h>
@@ -788,7 +784,7 @@ static int spi_transfer_one_message(struct spi_master *master,
        struct spi_transfer *xfer;
        bool keep_cs = false;
        int ret = 0;
-       int ms = 1;
+       unsigned long ms = 1;
 
        spi_set_cs(msg->spi, true);
 
@@ -875,31 +871,59 @@ void spi_finalize_current_transfer(struct spi_master *master)
 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
 
 /**
- * spi_pump_messages - kthread work function which processes spi message queue
- * @work: pointer to kthread work struct contained in the master struct
+ * __spi_pump_messages - function which processes spi message queue
+ * @master: master to process queue for
+ * @in_kthread: true if we are in the context of the message pump thread
  *
  * This function checks if there is any spi message in the queue that
  * needs processing and if so call out to the driver to initialize hardware
  * and transfer each message.
  *
+ * Note that it is called both from the kthread itself and also from
+ * inside spi_sync(); the queue extraction handling at the top of the
+ * function should deal with this safely.
  */
-static void spi_pump_messages(struct kthread_work *work)
+static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 {
-       struct spi_master *master =
-               container_of(work, struct spi_master, pump_messages);
        unsigned long flags;
        bool was_busy = false;
        int ret;
 
-       /* Lock queue and check for queue work */
+       /* Lock queue */
        spin_lock_irqsave(&master->queue_lock, flags);
+
+       /* Make sure we are not already running a message */
+       if (master->cur_msg) {
+               spin_unlock_irqrestore(&master->queue_lock, flags);
+               return;
+       }
+
+       /* If another context is idling the device then defer */
+       if (master->idling) {
+               queue_kthread_work(&master->kworker, &master->pump_messages);
+               spin_unlock_irqrestore(&master->queue_lock, flags);
+               return;
+       }
+
+       /* Check if the queue is idle */
        if (list_empty(&master->queue) || !master->running) {
                if (!master->busy) {
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
                }
+
+               /* Only do teardown in the thread */
+               if (!in_kthread) {
+                       queue_kthread_work(&master->kworker,
+                                          &master->pump_messages);
+                       spin_unlock_irqrestore(&master->queue_lock, flags);
+                       return;
+               }
+
                master->busy = false;
+               master->idling = true;
                spin_unlock_irqrestore(&master->queue_lock, flags);
+
                kfree(master->dummy_rx);
                master->dummy_rx = NULL;
                kfree(master->dummy_tx);
@@ -913,14 +937,13 @@ static void spi_pump_messages(struct kthread_work *work)
                        pm_runtime_put_autosuspend(master->dev.parent);
                }
                trace_spi_master_idle(master);
-               return;
-       }
 
-       /* Make sure we are not already running a message */
-       if (master->cur_msg) {
+               spin_lock_irqsave(&master->queue_lock, flags);
+               master->idling = false;
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }
+
        /* Extract head of queue */
        master->cur_msg =
                list_first_entry(&master->queue, struct spi_message, queue);
@@ -985,13 +1008,22 @@ static void spi_pump_messages(struct kthread_work *work)
        }
 }
 
+/**
+ * spi_pump_messages - kthread work function which processes spi message queue
+ * @work: pointer to kthread work struct contained in the master struct
+ */
+static void spi_pump_messages(struct kthread_work *work)
+{
+       struct spi_master *master =
+               container_of(work, struct spi_master, pump_messages);
+
+       __spi_pump_messages(master, true);
+}
+
 static int spi_init_queue(struct spi_master *master)
 {
        struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 
-       INIT_LIST_HEAD(&master->queue);
-       spin_lock_init(&master->queue_lock);
-
        master->running = false;
        master->busy = false;
 
@@ -1161,12 +1193,9 @@ static int spi_destroy_queue(struct spi_master *master)
        return 0;
 }
 
-/**
- * spi_queued_transfer - transfer function for queued transfers
- * @spi: spi device which is requesting transfer
- * @msg: spi message which is to handled is queued to driver queue
- */
-static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+static int __spi_queued_transfer(struct spi_device *spi,
+                                struct spi_message *msg,
+                                bool need_pump)
 {
        struct spi_master *master = spi->master;
        unsigned long flags;
@@ -1181,13 +1210,23 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
        msg->status = -EINPROGRESS;
 
        list_add_tail(&msg->queue, &master->queue);
-       if (!master->busy)
+       if (!master->busy && need_pump)
                queue_kthread_work(&master->kworker, &master->pump_messages);
 
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return 0;
 }
 
+/**
+ * spi_queued_transfer - transfer function for queued transfers
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message to be handled and queued to the driver queue
+ */
+static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+       return __spi_queued_transfer(spi, msg, true);
+}
+
 static int spi_master_initialize_queue(struct spi_master *master)
 {
        int ret;
@@ -1609,6 +1648,8 @@ int spi_register_master(struct spi_master *master)
                dynamic = 1;
        }
 
+       INIT_LIST_HEAD(&master->queue);
+       spin_lock_init(&master->queue_lock);
        spin_lock_init(&master->bus_lock_spinlock);
        mutex_init(&master->bus_lock_mutex);
        master->bus_lock_flag = 0;
@@ -2114,19 +2155,46 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
        DECLARE_COMPLETION_ONSTACK(done);
        int status;
        struct spi_master *master = spi->master;
+       unsigned long flags;
+
+       status = __spi_validate(spi, message);
+       if (status != 0)
+               return status;
 
        message->complete = spi_complete;
        message->context = &done;
+       message->spi = spi;
 
        if (!bus_locked)
                mutex_lock(&master->bus_lock_mutex);
 
-       status = spi_async_locked(spi, message);
+       /* If we're not using the legacy transfer method then we will
+        * try to transfer in the calling context, so we special-case
+        * it here. This code would be less tricky if we could remove
+        * the support for driver-implemented message queues.
+        */
+       if (master->transfer == spi_queued_transfer) {
+               spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+               trace_spi_message_submit(message);
+
+               status = __spi_queued_transfer(spi, message, false);
+
+               spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+       } else {
+               status = spi_async_locked(spi, message);
+       }
 
        if (!bus_locked)
                mutex_unlock(&master->bus_lock_mutex);
 
        if (status == 0) {
+               /* Push out the messages in the calling context if we
+                * can.
+                */
+               if (master->transfer == spi_queued_transfer)
+                       __spi_pump_messages(master, false);
+
                wait_for_completion(&done);
                status = message->status;
        }
index 6941e04afb8c4526e329a8096601d0e8098f9721..4eb7a980e67075a018a9be2dd4146927a3f38a6c 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/init.h>
@@ -317,6 +313,37 @@ done:
        return status;
 }
 
+static struct spi_ioc_transfer *
+spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
+               unsigned *n_ioc)
+{
+       struct spi_ioc_transfer *ioc;
+       u32     tmp;
+
+       /* Check type, command number and direction */
+       if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
+                       || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
+                       || _IOC_DIR(cmd) != _IOC_WRITE)
+               return ERR_PTR(-ENOTTY);
+
+       tmp = _IOC_SIZE(cmd);
+       if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
+               return ERR_PTR(-EINVAL);
+       *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
+       if (*n_ioc == 0)
+               return NULL;
+
+       /* copy into scratch area */
+       ioc = kmalloc(tmp, GFP_KERNEL);
+       if (!ioc)
+               return ERR_PTR(-ENOMEM);
+       if (__copy_from_user(ioc, u_ioc, tmp)) {
+               kfree(ioc);
+               return ERR_PTR(-EFAULT);
+       }
+       return ioc;
+}
+
 static long
 spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
@@ -456,32 +483,15 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
        default:
                /* segmented and/or full-duplex I/O request */
-               if (_IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
-                               || _IOC_DIR(cmd) != _IOC_WRITE) {
-                       retval = -ENOTTY;
-                       break;
-               }
-
-               tmp = _IOC_SIZE(cmd);
-               if ((tmp % sizeof(struct spi_ioc_transfer)) != 0) {
-                       retval = -EINVAL;
-                       break;
-               }
-               n_ioc = tmp / sizeof(struct spi_ioc_transfer);
-               if (n_ioc == 0)
-                       break;
-
-               /* copy into scratch area */
-               ioc = kmalloc(tmp, GFP_KERNEL);
-               if (!ioc) {
-                       retval = -ENOMEM;
-                       break;
-               }
-               if (__copy_from_user(ioc, (void __user *)arg, tmp)) {
-                       kfree(ioc);
-                       retval = -EFAULT;
+               /* Check message and copy into scratch area */
+               ioc = spidev_get_ioc_message(cmd,
+                               (struct spi_ioc_transfer __user *)arg, &n_ioc);
+               if (IS_ERR(ioc)) {
+                       retval = PTR_ERR(ioc);
                        break;
                }
+               if (!ioc)
+                       break;  /* n_ioc is also 0 */
 
                /* translate to spi_message, execute */
                retval = spidev_message(spidev, ioc, n_ioc);
@@ -495,9 +505,68 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 }
 
 #ifdef CONFIG_COMPAT
+static long
+spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
+               unsigned long arg)
+{
+       struct spi_ioc_transfer __user  *u_ioc;
+       int                             retval = 0;
+       struct spidev_data              *spidev;
+       struct spi_device               *spi;
+       unsigned                        n_ioc, n;
+       struct spi_ioc_transfer         *ioc;
+
+       u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);
+       if (!access_ok(VERIFY_READ, u_ioc, _IOC_SIZE(cmd)))
+               return -EFAULT;
+
+       /* guard against device removal before, or while,
+        * we issue this ioctl.
+        */
+       spidev = filp->private_data;
+       spin_lock_irq(&spidev->spi_lock);
+       spi = spi_dev_get(spidev->spi);
+       spin_unlock_irq(&spidev->spi_lock);
+
+       if (spi == NULL)
+               return -ESHUTDOWN;
+
+       /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
+       mutex_lock(&spidev->buf_lock);
+
+       /* Check message and copy into scratch area */
+       ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
+       if (IS_ERR(ioc)) {
+               retval = PTR_ERR(ioc);
+               goto done;
+       }
+       if (!ioc)
+               goto done;      /* n_ioc is also 0 */
+
+       /* Convert buffer pointers */
+       for (n = 0; n < n_ioc; n++) {
+               ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
+               ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
+       }
+
+       /* translate to spi_message, execute */
+       retval = spidev_message(spidev, ioc, n_ioc);
+       kfree(ioc);
+
+done:
+       mutex_unlock(&spidev->buf_lock);
+       spi_dev_put(spi);
+       return retval;
+}
+
 static long
 spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
+       if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
+                       && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
+                       && _IOC_DIR(cmd) == _IOC_WRITE)
+               return spidev_compat_ioc_message(filp, cmd, arg);
+
        return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
 }
 #else
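
For reference, this is the request the new compat path routes: a 32-bit process on a 64-bit kernel issues SPI_IOC_MESSAGE with pointers packed into the 64-bit tx_buf/rx_buf fields, and spidev_compat_ioc_message() rewrites them with compat_ptr() before handing off to the common spidev_message() path. A userspace sketch of a single transfer (device path and opcode are illustrative):

    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/spi/spidev.h>

    int spidev_example(void)
    {
            uint8_t tx[4] = { 0x9f, 0, 0, 0 };  /* e.g. JEDEC read-ID */
            uint8_t rx[4] = { 0 };
            struct spi_ioc_transfer tr;
            int fd, ret;

            fd = open("/dev/spidev0.0", O_RDWR);
            if (fd < 0)
                    return -1;

            memset(&tr, 0, sizeof(tr));
            tr.tx_buf = (uintptr_t)tx;  /* user pointers carried as u64 */
            tr.rx_buf = (uintptr_t)rx;
            tr.len = sizeof(tx);

            /* _IOC_SIZE(cmd) encodes 1 * sizeof(struct spi_ioc_transfer) */
            ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
            close(fd);
            return ret;
    }
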
index 930f6010203e96d6aadb3602fea868bf2a1b5bfd..65d610abe06e53fc0ec8cb34ccd367b7492b40f4 100644 (file)
@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
                return 0;
        }
 
-       if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
+       if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
                CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
                return -EFAULT;
        }
index 093535c6217b46000007f370fb047ea8ee24919c..120b70d72d79849e79fb52f2c8580cf7bf0538a1 100644 (file)
@@ -85,23 +85,20 @@ static struct nvec_chip *nvec_power_handle;
 static const struct mfd_cell nvec_devices[] = {
        {
                .name = "nvec-kbd",
-               .id = 1,
        },
        {
                .name = "nvec-mouse",
-               .id = 1,
        },
        {
                .name = "nvec-power",
-               .id = 1,
+               .id = 0,
        },
        {
                .name = "nvec-power",
-               .id = 2,
+               .id = 1,
        },
        {
                .name = "nvec-paz00",
-               .id = 1,
        },
 };
 
@@ -891,7 +888,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
                nvec_msg_free(nvec, msg);
        }
 
-       ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
+       ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
                              ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
        if (ret)
                dev_err(nvec->dev, "error adding subdevices\n");
index de0c9c9d7091903af17a388a99ac7da963374a21..a6315abe7b7cec272f28265157f1e65a24f71ddb 100644 (file)
@@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev)
             le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
                return 0;
 
+       /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
+       if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
+            le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
+               return 1;
+
        /* NOTE: can't use usb_match_id() since interface caches
         * aren't set up yet. this is cut/paste from that code.
         */
index 0ffb4ed0a9451af2465beed78e0e30e560299f06..41e510ae8c837ea337135c4ec8ddbe26fccac275 100644 (file)
@@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
                        USB_QUIRK_IGNORE_REMOTE_WAKEUP },
 
+       /* Protocol and OTG Electrical Test Device */
+       { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+                       USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+
        { }  /* terminating entry must be last */
 };
 
index ad43c5bc1ef19eadfc074f42690183fcbcf37cef..02e3e2d4ea5658c0ddd6de8b1cb1729425a3c0ab 100644 (file)
@@ -476,13 +476,13 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
        u32 gintsts;
        irqreturn_t retval = IRQ_NONE;
 
+       spin_lock(&hsotg->lock);
+
        if (!dwc2_is_controller_alive(hsotg)) {
                dev_warn(hsotg->dev, "Controller is dead\n");
                goto out;
        }
 
-       spin_lock(&hsotg->lock);
-
        gintsts = dwc2_read_common_intr(hsotg);
        if (gintsts & ~GINTSTS_PRTINT)
                retval = IRQ_HANDLED;
@@ -515,8 +515,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
                }
        }
 
-       spin_unlock(&hsotg->lock);
 out:
+       spin_unlock(&hsotg->lock);
        return retval;
 }
 EXPORT_SYMBOL_GPL(dwc2_handle_common_intr);
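
The dwc2 hunk widens the lock so the controller-alive check itself runs under hsotg->lock, and rebalances the exit path to match. The resulting shape is the usual single-exit pattern for IRQ handlers; a generic sketch, with hypothetical types and helpers:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct example_state {
            spinlock_t lock;
            bool alive;
    };

    static irqreturn_t example_irq(int irq, void *dev)
    {
            struct example_state *s = dev;
            irqreturn_t ret = IRQ_NONE;

            spin_lock(&s->lock);    /* taken before the alive check */
            if (!s->alive)
                    goto out;       /* single exit, still holding lock */

            /* ... read and service interrupt sources ... */
            ret = IRQ_HANDLED;
    out:
            spin_unlock(&s->lock);  /* balanced on every path */
            return ret;
    }
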
index ccfdfb24b24017e8eda929fb30fb8363311ab7d2..2f9735b3533891c85dc907d8b1cc95a1e50d2559 100644 (file)
@@ -34,7 +34,7 @@ static struct usb_phy *__usb_find_phy(struct list_head *list,
                return phy;
        }
 
-       return ERR_PTR(-EPROBE_DEFER);
+       return ERR_PTR(-ENODEV);
 }
 
 static struct usb_phy *__usb_find_phy_dev(struct device *dev,
index 11c7a96764415c4c299c77ddd4a2bc7b0b911967..d684b4b8108ff34a5c4023088d9e927dcb378f6b 100644 (file)
@@ -507,7 +507,7 @@ UNUSUAL_DEV(  0x04e6, 0x000c, 0x0100, 0x0100,
 UNUSUAL_DEV(  0x04e6, 0x000f, 0x0000, 0x9999,
                "SCM Microsystems",
                "eUSB SCSI Adapter (Bus Powered)",
-               USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+               USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
                US_FL_SCM_MULT_TARG ),
 
 UNUSUAL_DEV(  0x04e6, 0x0101, 0x0200, 0x0200,
@@ -1995,6 +1995,13 @@ UNUSUAL_DEV(  0x152d, 0x2329, 0x0100, 0x0100,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
 
+/* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */
+UNUSUAL_DEV(  0x152d, 0x2566, 0x0114, 0x0114,
+               "JMicron",
+               "USB to ATA/ATAPI Bridge",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BROKEN_FUA ),
+
 /* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
  * and Mac USB Dock USB-SCSI */
 UNUSUAL_DEV(  0x1645, 0x0007, 0x0100, 0x0133,
index 6df4357d9ee358b36d33961507e8677bd346d432..dbc00e56c7f5c106a67028e1b6da7dfee2c39e5e 100644 (file)
@@ -140,3 +140,10 @@ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
                "External HDD",
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_UAS),
+
+/* Reported-by: Richard Henderson <rth@redhat.com> */
+UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
+               "SimpleTech",
+               "External HDD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_REPORT_OPCODES),
index d415d69dc2378cbc7568bbcdcc53e601770ee761..9484d5652ca5a23e051c54b1b0544ce8868f3876 100644 (file)
@@ -650,8 +650,10 @@ static void handle_rx(struct vhost_net *net)
                        break;
                }
                /* TODO: Should check and handle checksum. */
+
+               hdr.num_buffers = cpu_to_vhost16(vq, headcount);
                if (likely(mergeable) &&
-                   memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
+                   memcpy_toiovecend(nvq->hdr, (void *)&hdr.num_buffers,
                                      offsetof(typeof(hdr), num_buffers),
                                      sizeof hdr.num_buffers)) {
                        vq_err(vq, "Failed num_buffers write");
index 1b7893ecc29654f6d20cf420b24ee20dddbf9038..c428871f10934a87aa4b5a7038b8c0519b6bc80a 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1140,6 +1140,13 @@ static long aio_read_events_ring(struct kioctx *ctx,
        long ret = 0;
        int copy_ret;
 
+       /*
+        * The mutex can block and wake us up and that will cause
+        * wait_event_interruptible_hrtimeout() to schedule without sleeping
+        * and repeat. This should be rare enough that it doesn't cause
+        * performance issues. See the comment in read_events() for more detail.
+        */
+       sched_annotate_sleep();
        mutex_lock(&ctx->ring_lock);
 
        /* Access to ->ring_pages here is protected by ctx->ring_lock. */
index a66768ebc8d19d394f2cd0818d56178a50f84803..80e9c18ea64f69b68f84e3953256654774bd0b7e 100644 (file)
@@ -8,6 +8,7 @@ config BTRFS_FS
        select LZO_DECOMPRESS
        select RAID6_PQ
        select XOR_BLOCKS
+       select SRCU
 
        help
          Btrfs is a general purpose copy-on-write filesystem with extents,
index 2f0fbc374e876f90ee1e07ecfe5704bd4021c570..e427cb7ee12c7d848cd16402d78854e22db969dd 100644 (file)
@@ -3065,6 +3065,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        path->search_commit_root = 1;
        path->skip_locking = 1;
 
+       ppath->search_commit_root = 1;
+       ppath->skip_locking = 1;
        /*
         * trigger the readahead for extent tree csum tree and wait for
         * completion. During readahead, the scrub is officially paused
index 9a02da16f2beee18f53d7244cde93f3f11cc9866..1a9585d4380a330f96ba7d82f63cb87314d1701e 100644 (file)
@@ -2591,6 +2591,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        }
 
        if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
+               blk_finish_plug(&plug);
                mutex_unlock(&log_root_tree->log_mutex);
                ret = root_log_ctx.log_ret;
                goto out;
index 9c56ef776407ad28e30760e2a0f0c1caede8fea0..7febcf2475c5ab675c04dfd2fddaa3ed574522a0 100644 (file)
@@ -606,9 +606,11 @@ cifs_security_flags_handle_must_flags(unsigned int *flags)
                *flags = CIFSSEC_MUST_NTLMV2;
        else if ((*flags & CIFSSEC_MUST_NTLM) == CIFSSEC_MUST_NTLM)
                *flags = CIFSSEC_MUST_NTLM;
-       else if ((*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
+       else if (CIFSSEC_MUST_LANMAN &&
+                (*flags & CIFSSEC_MUST_LANMAN) == CIFSSEC_MUST_LANMAN)
                *flags = CIFSSEC_MUST_LANMAN;
-       else if ((*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
+       else if (CIFSSEC_MUST_PLNTXT &&
+                (*flags & CIFSSEC_MUST_PLNTXT) == CIFSSEC_MUST_PLNTXT)
                *flags = CIFSSEC_MUST_PLNTXT;
 
        *flags |= signflags;
index 96b7e9b7706dc58b767863fdeadf3cadfb4e5324..74f12877493ac6c3f87792192b8aa8c1190f3c05 100644 (file)
@@ -366,6 +366,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
        struct cifsLockInfo *li, *tmp;
        struct cifs_fid fid;
        struct cifs_pending_open open;
+       bool oplock_break_cancelled;
 
        spin_lock(&cifs_file_list_lock);
        if (--cifs_file->count > 0) {
@@ -397,7 +398,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
        }
        spin_unlock(&cifs_file_list_lock);
 
-       cancel_work_sync(&cifs_file->oplock_break);
+       oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
 
        if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
                struct TCP_Server_Info *server = tcon->ses->server;
@@ -409,6 +410,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
                _free_xid(xid);
        }
 
+       if (oplock_break_cancelled)
+               cifs_done_oplock_break(cifsi);
+
        cifs_del_pending_open(&open);
 
        /*
index 6c1566366a6613cbc494fe46344c8fd00342a82b..a4232ec4f2ba45386b4f25db484f7f30135b01c2 100644 (file)
@@ -221,7 +221,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
        }
 
        rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
-       memset(wpwd, 0, 129 * sizeof(__le16));
+       memzero_explicit(wpwd, sizeof(wpwd));
 
        return rc;
 }
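
The E_md4hash() change swaps memset() for memzero_explicit() when scrubbing the password buffer, and derives the length from sizeof(wpwd) rather than a hand-counted constant. The motivation: a compiler may prove the buffer is dead after the final store and eliminate a plain memset() via dead-store elimination, while memzero_explicit() contains a barrier that keeps the clear in place. A sketch of the hazard (derive_key/use_key are hypothetical):

    #include <string.h>

    void derive_key(unsigned char *out, const unsigned char *in, size_t len);
    int use_key(const unsigned char *key);

    int check_key(const unsigned char *input, size_t len)
    {
            unsigned char secret[64];
            int ok;

            derive_key(secret, input, len);
            ok = use_key(secret);

            /* 'secret' is never read again, so the compiler is free to
             * drop this store; in the kernel, memzero_explicit() (or
             * explicit_bzero() in userspace) prevents that.
             */
            memset(secret, 0, sizeof(secret));
            return ok;
    }
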
index c8b148bbdc8b574f77660eb35c8feba4baec2e5e..3e193cb36996d06520e2ed98e49a838fdfbab90d 100644 (file)
@@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 
 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             s64 change, struct gfs2_quota_data *qd,
-                            struct fs_disk_quota *fdq)
+                            struct qc_dqblk *fdq)
 {
        struct inode *inode = &ip->i_inode;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
        be64_add_cpu(&q.qu_value, change);
        qd->qd_qb.qb_value = q.qu_value;
        if (fdq) {
-               if (fdq->d_fieldmask & FS_DQ_BSOFT) {
-                       q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
+               if (fdq->d_fieldmask & QC_SPC_SOFT) {
+                       q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_warn = q.qu_warn;
                }
-               if (fdq->d_fieldmask & FS_DQ_BHARD) {
-                       q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
+               if (fdq->d_fieldmask & QC_SPC_HARD) {
+                       q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_limit = q.qu_limit;
                }
-               if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
-                       q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
+               if (fdq->d_fieldmask & QC_SPACE) {
+                       q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_value = q.qu_value;
                }
        }
@@ -1497,7 +1497,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
 }
 
 static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
-                         struct fs_disk_quota *fdq)
+                         struct qc_dqblk *fdq)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_lvb *qlvb;
@@ -1505,7 +1505,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
        struct gfs2_holder q_gh;
        int error;
 
-       memset(fdq, 0, sizeof(struct fs_disk_quota));
+       memset(fdq, 0, sizeof(*fdq));
 
        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return -ESRCH; /* Crazy XFS error code */
@@ -1522,12 +1522,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
                goto out;
 
        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
-       fdq->d_version = FS_DQUOT_VERSION;
-       fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
-       fdq->d_id = from_kqid_munged(current_user_ns(), qid);
-       fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
-       fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
-       fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
+       fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
+       fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
+       fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
 
        gfs2_glock_dq_uninit(&q_gh);
 out:
@@ -1536,10 +1533,10 @@ out:
 }
 
 /* GFS2 only supports a subset of the XFS fields */
-#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
+#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
 
 static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
-                         struct fs_disk_quota *fdq)
+                         struct qc_dqblk *fdq)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1583,17 +1580,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
                goto out_i;
 
        /* If nothing has changed, this is a no-op */
-       if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
-           ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
-               fdq->d_fieldmask ^= FS_DQ_BSOFT;
+       if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
+           ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
+               fdq->d_fieldmask ^= QC_SPC_SOFT;
 
-       if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
-           ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
-               fdq->d_fieldmask ^= FS_DQ_BHARD;
+       if ((fdq->d_fieldmask & QC_SPC_HARD) &&
+           ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
+               fdq->d_fieldmask ^= QC_SPC_HARD;
 
-       if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
-           ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
-               fdq->d_fieldmask ^= FS_DQ_BCOUNT;
+       if ((fdq->d_fieldmask & QC_SPACE) &&
+           ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
+               fdq->d_fieldmask ^= QC_SPACE;
 
        if (fdq->d_fieldmask == 0)
                goto out_i;
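
The mechanical change running through this gfs2 hunk is a change of units: the old fs_disk_quota interface carried space limits in 512-byte basic blocks (hence the sd_fsb2bb_shift conversions), while the new qc_dqblk interface carries them in bytes (hence sd_sb.sb_bsize_shift). Both shifts start from the same on-disk filesystem-block counts; a standalone arithmetic check with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long qb_limit = 25;   /* limit in fs blocks      */
            unsigned int bsize_shift = 12;      /* 4096-byte fs blocks     */
            unsigned int fsb2bb_shift = 3;      /* fs blocks -> 512B units */

            printf("old d_blk_hardlimit: %llu 512-byte blocks\n",
                   qb_limit << fsb2bb_shift);
            printf("new d_spc_hardlimit: %llu bytes\n",
                   qb_limit << bsize_shift);
            return 0;    /* 200 blocks == 102400 bytes: same limit */
    }
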
index 10bf07280f4ab2715845003334b73d80bde15f44..294692ff83b1a4e024ff217727fe958edabb0e3d 100644 (file)
@@ -212,6 +212,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
  */
 ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
+       struct inode *inode = iocb->ki_filp->f_mapping->host;
+
+       /* we only support swap files calling nfs_direct_IO */
+       if (!IS_SWAPFILE(inode))
+               return 0;
+
 #ifndef CONFIG_NFS_SWAP
        dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
                        iocb->ki_filp, (long long) pos, iter->nr_segs);
index 4bffe637ea3255fd2e1e6812556e3907791f4152..2211f6ba873628485fcabf874adf0c3c985a6d84 100644 (file)
@@ -352,8 +352,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
 
        nfs_attr_check_mountpoint(sb, fattr);
 
-       if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
-           !nfs_attr_use_mounted_on_fileid(fattr))
+       if (nfs_attr_use_mounted_on_fileid(fattr))
+               fattr->fileid = fattr->mounted_on_fileid;
+       else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
                goto out_no_inode;
        if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
                goto out_no_inode;
index efaa31c70fbe1c43d265c51bc542a8270348c339..b6f34bfa6fe83b271a3b1f6c66e63f9e21587f6d 100644 (file)
@@ -31,8 +31,6 @@ static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
            (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
             ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
                return 0;
-
-       fattr->fileid = fattr->mounted_on_fileid;
        return 1;
 }
 
index 953daa44a28232d6863da375e59d44a0b42f49b6..706ad10b8186d4401eb4da48ebf0e7fba7686481 100644 (file)
@@ -639,7 +639,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
                        prev = pos;
 
                        status = nfs_wait_client_init_complete(pos);
-                       if (status == 0) {
+                       if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
                                nfs4_schedule_lease_recovery(pos);
                                status = nfs4_wait_clnt_recover(pos);
                        }
index 91093cd74f0da15e7f972d9c695f221e5afb454d..385704027575525474673b78d385d7ead580ef6d 100644 (file)
@@ -141,7 +141,6 @@ enum {
  * @ti_save: Backup of journal_info field of task_struct
  * @ti_flags: Flags
  * @ti_count: Nest level
- * @ti_garbage:        List of inode to be put when releasing semaphore
  */
 struct nilfs_transaction_info {
        u32                     ti_magic;
@@ -150,7 +149,6 @@ struct nilfs_transaction_info {
                                   one of other filesystems has a bug. */
        unsigned short          ti_flags;
        unsigned short          ti_count;
-       struct list_head        ti_garbage;
 };
 
 /* ti_magic */
index 7ef18fc656c28568047c30c24adb0bc029bada5c..469086b9f99bc8e20053e492237d48a3373bdb37 100644 (file)
@@ -305,7 +305,6 @@ static void nilfs_transaction_lock(struct super_block *sb,
        ti->ti_count = 0;
        ti->ti_save = cur_ti;
        ti->ti_magic = NILFS_TI_MAGIC;
-       INIT_LIST_HEAD(&ti->ti_garbage);
        current->journal_info = ti;
 
        for (;;) {
@@ -332,8 +331,6 @@ static void nilfs_transaction_unlock(struct super_block *sb)
 
        up_write(&nilfs->ns_segctor_sem);
        current->journal_info = ti->ti_save;
-       if (!list_empty(&ti->ti_garbage))
-               nilfs_dispose_list(nilfs, &ti->ti_garbage, 0);
 }
 
 static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
@@ -746,6 +743,15 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs,
        }
 }
 
+static void nilfs_iput_work_func(struct work_struct *work)
+{
+       struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
+                                                sc_iput_work);
+       struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
+
+       nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
+}
+
 static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
                                     struct nilfs_root *root)
 {
@@ -1900,8 +1906,8 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
 static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
                                             struct the_nilfs *nilfs)
 {
-       struct nilfs_transaction_info *ti = current->journal_info;
        struct nilfs_inode_info *ii, *n;
+       int defer_iput = false;
 
        spin_lock(&nilfs->ns_inode_lock);
        list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
@@ -1912,9 +1918,24 @@ static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
                clear_bit(NILFS_I_BUSY, &ii->i_state);
                brelse(ii->i_bh);
                ii->i_bh = NULL;
-               list_move_tail(&ii->i_dirty, &ti->ti_garbage);
+               list_del_init(&ii->i_dirty);
+               if (!ii->vfs_inode.i_nlink) {
+                       /*
+                        * Defer calling iput() to avoid a deadlock
+                        * over I_SYNC flag for inodes with i_nlink == 0
+                        */
+                       list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
+                       defer_iput = true;
+               } else {
+                       spin_unlock(&nilfs->ns_inode_lock);
+                       iput(&ii->vfs_inode);
+                       spin_lock(&nilfs->ns_inode_lock);
+               }
        }
        spin_unlock(&nilfs->ns_inode_lock);
+
+       if (defer_iput)
+               schedule_work(&sci->sc_iput_work);
 }
 
 /*
@@ -2583,6 +2604,8 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
        INIT_LIST_HEAD(&sci->sc_segbufs);
        INIT_LIST_HEAD(&sci->sc_write_logs);
        INIT_LIST_HEAD(&sci->sc_gc_inodes);
+       INIT_LIST_HEAD(&sci->sc_iput_queue);
+       INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
        init_timer(&sci->sc_timer);
 
        sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
@@ -2609,6 +2632,8 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
                ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
                nilfs_transaction_unlock(sci->sc_super);
 
+               flush_work(&sci->sc_iput_work);
+
        } while (ret && retrycount-- > 0);
 }
 
@@ -2633,6 +2658,9 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
                || sci->sc_seq_request != sci->sc_seq_done);
        spin_unlock(&sci->sc_state_lock);
 
+       if (flush_work(&sci->sc_iput_work))
+               flag = true;
+
        if (flag || !nilfs_segctor_confirm(sci))
                nilfs_segctor_write_out(sci);
 
@@ -2642,6 +2670,12 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
                nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
        }
 
+       if (!list_empty(&sci->sc_iput_queue)) {
+               nilfs_warning(sci->sc_super, __func__,
+                             "iput queue is not empty\n");
+               nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
+       }
+
        WARN_ON(!list_empty(&sci->sc_segbufs));
        WARN_ON(!list_empty(&sci->sc_write_logs));
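
The nilfs2 change replaces the per-transaction ti_garbage list with an sc_iput_queue drained from a work item, so that iput() for unlinked inodes (i_nlink == 0) never runs while the segment constructor holds its locks, avoiding the I_SYNC deadlock noted in the comment. The underlying pattern is a standard deferred-cleanup workqueue; a sketch with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct deferred_put {
            struct list_head queue;     /* items awaiting cleanup */
            spinlock_t lock;
            struct work_struct work;
    };

    static void deferred_put_worker(struct work_struct *work)
    {
            struct deferred_put *dp =
                    container_of(work, struct deferred_put, work);
            LIST_HEAD(tmp);

            /* Detach the whole queue, then clean up outside the lock */
            spin_lock(&dp->lock);
            list_splice_init(&dp->queue, &tmp);
            spin_unlock(&dp->lock);

            /* ... walk tmp and iput() each entry in process context ... */
    }

    static void defer_put(struct deferred_put *dp, struct list_head *item)
    {
            spin_lock(&dp->lock);
            list_add_tail(item, &dp->queue);
            spin_unlock(&dp->lock);
            schedule_work(&dp->work);   /* INIT_WORK'd with the worker */
    }
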
 
index 38a1d0013314395938ceb78fd1f1e906705206f1..a48d6de1e02cc276019fb150fee8e0bc6c41bcbe 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
+#include <linux/workqueue.h>
 #include <linux/nilfs2_fs.h>
 #include "nilfs.h"
 
@@ -92,6 +93,8 @@ struct nilfs_segsum_pointer {
  * @sc_nblk_inc: Block count of current generation
  * @sc_dirty_files: List of files to be written
  * @sc_gc_inodes: List of GC inodes having blocks to be written
+ * @sc_iput_queue: list of inodes for which iput should be done
+ * @sc_iput_work: work struct to defer iput call
  * @sc_freesegs: array of segment numbers to be freed
  * @sc_nfreesegs: number of segments on @sc_freesegs
  * @sc_dsync_inode: inode whose data pages are written for a sync operation
@@ -135,6 +138,8 @@ struct nilfs_sc_info {
 
        struct list_head        sc_dirty_files;
        struct list_head        sc_gc_inodes;
+       struct list_head        sc_iput_queue;
+       struct work_struct      sc_iput_work;
 
        __u64                  *sc_freesegs;
        size_t                  sc_nfreesegs;
index 22c629eedd82d70425704ee86b4ddf816b7bd174..2a24249b30af845d4514552da47fb94e70765db3 100644 (file)
@@ -1,5 +1,6 @@
 config FSNOTIFY
        def_bool n
+       select SRCU
 
 source "fs/notify/dnotify/Kconfig"
 source "fs/notify/inotify/Kconfig"
index c51df1dd237e74a0127da81f95d77f570d463b84..4a09975aac907e563182879362f816d36773f481 100644 (file)
@@ -5,6 +5,7 @@
 config QUOTA
        bool "Quota support"
        select QUOTACTL
+       select SRCU
        help
          If you say Y here, you will be able to set per user limits for disk
          usage (also called disk quotas). Currently, it works for the
index 8f0acef3d18481647507d32e1861420007f38e3d..69df5b239844f9395f38d2e3acb8f93f34142c0a 100644 (file)
@@ -2396,30 +2396,25 @@ static inline qsize_t stoqb(qsize_t space)
 }
 
 /* Generic routine for getting common part of quota structure */
-static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
 {
        struct mem_dqblk *dm = &dquot->dq_dqb;
 
        memset(di, 0, sizeof(*di));
-       di->d_version = FS_DQUOT_VERSION;
-       di->d_flags = dquot->dq_id.type == USRQUOTA ?
-                       FS_USER_QUOTA : FS_GROUP_QUOTA;
-       di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
-
        spin_lock(&dq_data_lock);
-       di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
-       di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
+       di->d_spc_hardlimit = dm->dqb_bhardlimit;
+       di->d_spc_softlimit = dm->dqb_bsoftlimit;
        di->d_ino_hardlimit = dm->dqb_ihardlimit;
        di->d_ino_softlimit = dm->dqb_isoftlimit;
-       di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
-       di->d_icount = dm->dqb_curinodes;
-       di->d_btimer = dm->dqb_btime;
-       di->d_itimer = dm->dqb_itime;
+       di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
+       di->d_ino_count = dm->dqb_curinodes;
+       di->d_spc_timer = dm->dqb_btime;
+       di->d_ino_timer = dm->dqb_itime;
        spin_unlock(&dq_data_lock);
 }
 
 int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
-                   struct fs_disk_quota *di)
+                   struct qc_dqblk *di)
 {
        struct dquot *dquot;
 
@@ -2433,70 +2428,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
 }
 EXPORT_SYMBOL(dquot_get_dqblk);
 
-#define VFS_FS_DQ_MASK \
-       (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
-        FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
-        FS_DQ_BTIMER | FS_DQ_ITIMER)
+#define VFS_QC_MASK \
+       (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
+        QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
+        QC_SPC_TIMER | QC_INO_TIMER)
 
 /* Generic routine for setting common part of quota structure */
-static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
 {
        struct mem_dqblk *dm = &dquot->dq_dqb;
        int check_blim = 0, check_ilim = 0;
        struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
 
-       if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
+       if (di->d_fieldmask & ~VFS_QC_MASK)
                return -EINVAL;
 
-       if (((di->d_fieldmask & FS_DQ_BSOFT) &&
-            (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
-           ((di->d_fieldmask & FS_DQ_BHARD) &&
-            (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
-           ((di->d_fieldmask & FS_DQ_ISOFT) &&
+       if (((di->d_fieldmask & QC_SPC_SOFT) &&
+            stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
+           ((di->d_fieldmask & QC_SPC_HARD) &&
+            stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
+           ((di->d_fieldmask & QC_INO_SOFT) &&
             (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
-           ((di->d_fieldmask & FS_DQ_IHARD) &&
+           ((di->d_fieldmask & QC_INO_HARD) &&
             (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
                return -ERANGE;
 
        spin_lock(&dq_data_lock);
-       if (di->d_fieldmask & FS_DQ_BCOUNT) {
-               dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
+       if (di->d_fieldmask & QC_SPACE) {
+               dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_BSOFT)
-               dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
-       if (di->d_fieldmask & FS_DQ_BHARD)
-               dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
-       if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
+       if (di->d_fieldmask & QC_SPC_SOFT)
+               dm->dqb_bsoftlimit = di->d_spc_softlimit;
+       if (di->d_fieldmask & QC_SPC_HARD)
+               dm->dqb_bhardlimit = di->d_spc_hardlimit;
+       if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_ICOUNT) {
-               dm->dqb_curinodes = di->d_icount;
+       if (di->d_fieldmask & QC_INO_COUNT) {
+               dm->dqb_curinodes = di->d_ino_count;
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_ISOFT)
+       if (di->d_fieldmask & QC_INO_SOFT)
                dm->dqb_isoftlimit = di->d_ino_softlimit;
-       if (di->d_fieldmask & FS_DQ_IHARD)
+       if (di->d_fieldmask & QC_INO_HARD)
                dm->dqb_ihardlimit = di->d_ino_hardlimit;
-       if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
+       if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_BTIMER) {
-               dm->dqb_btime = di->d_btimer;
+       if (di->d_fieldmask & QC_SPC_TIMER) {
+               dm->dqb_btime = di->d_spc_timer;
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_ITIMER) {
-               dm->dqb_itime = di->d_itimer;
+       if (di->d_fieldmask & QC_INO_TIMER) {
+               dm->dqb_itime = di->d_ino_timer;
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        }
@@ -2506,7 +2501,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
                    dm->dqb_curspace < dm->dqb_bsoftlimit) {
                        dm->dqb_btime = 0;
                        clear_bit(DQ_BLKS_B, &dquot->dq_flags);
-               } else if (!(di->d_fieldmask & FS_DQ_BTIMER))
+               } else if (!(di->d_fieldmask & QC_SPC_TIMER))
                        /* Set grace only if user hasn't provided his own... */
                        dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
        }
@@ -2515,7 +2510,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
                    dm->dqb_curinodes < dm->dqb_isoftlimit) {
                        dm->dqb_itime = 0;
                        clear_bit(DQ_INODES_B, &dquot->dq_flags);
-               } else if (!(di->d_fieldmask & FS_DQ_ITIMER))
+               } else if (!(di->d_fieldmask & QC_INO_TIMER))
                        /* Set grace only if user hasn't provided his own... */
                        dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
        }
@@ -2531,7 +2526,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 }
 
 int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
-                 struct fs_disk_quota *di)
+                 struct qc_dqblk *di)
 {
        struct dquot *dquot;
        int rc;
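
The reworked interface carries space limits in bytes, so do_set_dqblk() above stores d_spc_softlimit/d_spc_hardlimit directly and only converts with stoqb() for the range check against dqi_maxblimit, which stays in 1 KiB quota blocks. A standalone sketch of the two conversion helpers (QIF_DQBLKSIZE_BITS is 10 in the quota headers), showing that stoqb() rounds up so a partial block still counts against the limit:

#include <stdint.h>
#include <stdio.h>

#define QIF_DQBLKSIZE_BITS 10
#define QIF_DQBLKSIZE (1ULL << QIF_DQBLKSIZE_BITS)

/* bytes -> 1 KiB quota blocks, rounding up */
static uint64_t stoqb(uint64_t space)
{
        return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
}

/* 1 KiB quota blocks -> bytes */
static uint64_t qbtos(uint64_t blocks)
{
        return blocks << QIF_DQBLKSIZE_BITS;
}

int main(void)
{
        /* 1025 bytes occupy two quota blocks; limits must round up */
        printf("stoqb(1025) = %llu\n", (unsigned long long)stoqb(1025)); /* 2 */
        printf("qbtos(2)    = %llu\n", (unsigned long long)qbtos(2));    /* 2048 */
        return 0;
}
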
index 2aa4151f99d2e5e9183adfb34e2c31da24e2faa2..6f3856328eeabd4bbc57b09ca9c8fe01a9827c9c 100644
@@ -118,17 +118,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
        return sb->s_qcop->set_info(sb, type, &info);
 }
 
-static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
+static inline qsize_t qbtos(qsize_t blocks)
+{
+       return blocks << QIF_DQBLKSIZE_BITS;
+}
+
+static inline qsize_t stoqb(qsize_t space)
+{
+       return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+}
+
+static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
 {
        memset(dst, 0, sizeof(*dst));
-       dst->dqb_bhardlimit = src->d_blk_hardlimit;
-       dst->dqb_bsoftlimit = src->d_blk_softlimit;
-       dst->dqb_curspace = src->d_bcount;
+       dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
+       dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
+       dst->dqb_curspace = src->d_space;
        dst->dqb_ihardlimit = src->d_ino_hardlimit;
        dst->dqb_isoftlimit = src->d_ino_softlimit;
-       dst->dqb_curinodes = src->d_icount;
-       dst->dqb_btime = src->d_btimer;
-       dst->dqb_itime = src->d_itimer;
+       dst->dqb_curinodes = src->d_ino_count;
+       dst->dqb_btime = src->d_spc_timer;
+       dst->dqb_itime = src->d_ino_timer;
        dst->dqb_valid = QIF_ALL;
 }
 
@@ -136,7 +146,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
                          void __user *addr)
 {
        struct kqid qid;
-       struct fs_disk_quota fdq;
+       struct qc_dqblk fdq;
        struct if_dqblk idq;
        int ret;
 
@@ -154,36 +164,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
        return 0;
 }
 
-static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
+static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
 {
-       dst->d_blk_hardlimit = src->dqb_bhardlimit;
-       dst->d_blk_softlimit  = src->dqb_bsoftlimit;
-       dst->d_bcount = src->dqb_curspace;
+       dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
+       dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
+       dst->d_space = src->dqb_curspace;
        dst->d_ino_hardlimit = src->dqb_ihardlimit;
        dst->d_ino_softlimit = src->dqb_isoftlimit;
-       dst->d_icount = src->dqb_curinodes;
-       dst->d_btimer = src->dqb_btime;
-       dst->d_itimer = src->dqb_itime;
+       dst->d_ino_count = src->dqb_curinodes;
+       dst->d_spc_timer = src->dqb_btime;
+       dst->d_ino_timer = src->dqb_itime;
 
        dst->d_fieldmask = 0;
        if (src->dqb_valid & QIF_BLIMITS)
-               dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
+               dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
        if (src->dqb_valid & QIF_SPACE)
-               dst->d_fieldmask |= FS_DQ_BCOUNT;
+               dst->d_fieldmask |= QC_SPACE;
        if (src->dqb_valid & QIF_ILIMITS)
-               dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
+               dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
        if (src->dqb_valid & QIF_INODES)
-               dst->d_fieldmask |= FS_DQ_ICOUNT;
+               dst->d_fieldmask |= QC_INO_COUNT;
        if (src->dqb_valid & QIF_BTIME)
-               dst->d_fieldmask |= FS_DQ_BTIMER;
+               dst->d_fieldmask |= QC_SPC_TIMER;
        if (src->dqb_valid & QIF_ITIME)
-               dst->d_fieldmask |= FS_DQ_ITIMER;
+               dst->d_fieldmask |= QC_INO_TIMER;
 }
 
 static int quota_setquota(struct super_block *sb, int type, qid_t id,
                          void __user *addr)
 {
-       struct fs_disk_quota fdq;
+       struct qc_dqblk fdq;
        struct if_dqblk idq;
        struct kqid qid;
 
@@ -247,10 +257,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
        return ret;
 }
 
+/*
+ * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
+ * out of there as xfsprogs rely on definitions being in that header file. So
+ * just define same functions here for quota purposes.
+ */
+#define XFS_BB_SHIFT 9
+
+static inline u64 quota_bbtob(u64 blocks)
+{
+       return blocks << XFS_BB_SHIFT;
+}
+
+static inline u64 quota_btobb(u64 bytes)
+{
+       return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
+}
+
+static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
+{
+       dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
+       dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
+       dst->d_ino_hardlimit = src->d_ino_hardlimit;
+       dst->d_ino_softlimit = src->d_ino_softlimit;
+       dst->d_space = quota_bbtob(src->d_bcount);
+       dst->d_ino_count = src->d_icount;
+       dst->d_ino_timer = src->d_itimer;
+       dst->d_spc_timer = src->d_btimer;
+       dst->d_ino_warns = src->d_iwarns;
+       dst->d_spc_warns = src->d_bwarns;
+       dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
+       dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
+       dst->d_rt_space = quota_bbtob(src->d_rtbcount);
+       dst->d_rt_spc_timer = src->d_rtbtimer;
+       dst->d_rt_spc_warns = src->d_rtbwarns;
+       dst->d_fieldmask = 0;
+       if (src->d_fieldmask & FS_DQ_ISOFT)
+               dst->d_fieldmask |= QC_INO_SOFT;
+       if (src->d_fieldmask & FS_DQ_IHARD)
+               dst->d_fieldmask |= QC_INO_HARD;
+       if (src->d_fieldmask & FS_DQ_BSOFT)
+               dst->d_fieldmask |= QC_SPC_SOFT;
+       if (src->d_fieldmask & FS_DQ_BHARD)
+               dst->d_fieldmask |= QC_SPC_HARD;
+       if (src->d_fieldmask & FS_DQ_RTBSOFT)
+               dst->d_fieldmask |= QC_RT_SPC_SOFT;
+       if (src->d_fieldmask & FS_DQ_RTBHARD)
+               dst->d_fieldmask |= QC_RT_SPC_HARD;
+       if (src->d_fieldmask & FS_DQ_BTIMER)
+               dst->d_fieldmask |= QC_SPC_TIMER;
+       if (src->d_fieldmask & FS_DQ_ITIMER)
+               dst->d_fieldmask |= QC_INO_TIMER;
+       if (src->d_fieldmask & FS_DQ_RTBTIMER)
+               dst->d_fieldmask |= QC_RT_SPC_TIMER;
+       if (src->d_fieldmask & FS_DQ_BWARNS)
+               dst->d_fieldmask |= QC_SPC_WARNS;
+       if (src->d_fieldmask & FS_DQ_IWARNS)
+               dst->d_fieldmask |= QC_INO_WARNS;
+       if (src->d_fieldmask & FS_DQ_RTBWARNS)
+               dst->d_fieldmask |= QC_RT_SPC_WARNS;
+       if (src->d_fieldmask & FS_DQ_BCOUNT)
+               dst->d_fieldmask |= QC_SPACE;
+       if (src->d_fieldmask & FS_DQ_ICOUNT)
+               dst->d_fieldmask |= QC_INO_COUNT;
+       if (src->d_fieldmask & FS_DQ_RTBCOUNT)
+               dst->d_fieldmask |= QC_RT_SPACE;
+}
+
 static int quota_setxquota(struct super_block *sb, int type, qid_t id,
                           void __user *addr)
 {
        struct fs_disk_quota fdq;
+       struct qc_dqblk qdq;
        struct kqid qid;
 
        if (copy_from_user(&fdq, addr, sizeof(fdq)))
@@ -260,13 +338,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
        qid = make_kqid(current_user_ns(), type, id);
        if (!qid_valid(qid))
                return -EINVAL;
-       return sb->s_qcop->set_dqblk(sb, qid, &fdq);
+       copy_from_xfs_dqblk(&qdq, &fdq);
+       return sb->s_qcop->set_dqblk(sb, qid, &qdq);
+}
+
+static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
+                             int type, qid_t id)
+{
+       memset(dst, 0, sizeof(*dst));
+       dst->d_version = FS_DQUOT_VERSION;
+       dst->d_id = id;
+       if (type == USRQUOTA)
+               dst->d_flags = FS_USER_QUOTA;
+       else if (type == PRJQUOTA)
+               dst->d_flags = FS_PROJ_QUOTA;
+       else
+               dst->d_flags = FS_GROUP_QUOTA;
+       dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
+       dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
+       dst->d_ino_hardlimit = src->d_ino_hardlimit;
+       dst->d_ino_softlimit = src->d_ino_softlimit;
+       dst->d_bcount = quota_btobb(src->d_space);
+       dst->d_icount = src->d_ino_count;
+       dst->d_itimer = src->d_ino_timer;
+       dst->d_btimer = src->d_spc_timer;
+       dst->d_iwarns = src->d_ino_warns;
+       dst->d_bwarns = src->d_spc_warns;
+       dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
+       dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
+       dst->d_rtbcount = quota_btobb(src->d_rt_space);
+       dst->d_rtbtimer = src->d_rt_spc_timer;
+       dst->d_rtbwarns = src->d_rt_spc_warns;
 }
 
 static int quota_getxquota(struct super_block *sb, int type, qid_t id,
                           void __user *addr)
 {
        struct fs_disk_quota fdq;
+       struct qc_dqblk qdq;
        struct kqid qid;
        int ret;
 
@@ -275,8 +384,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
        qid = make_kqid(current_user_ns(), type, id);
        if (!qid_valid(qid))
                return -EINVAL;
-       ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
-       if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
+       ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
+       if (ret)
+               return ret;
+       copy_to_xfs_dqblk(&fdq, &qdq, type, id);
+       if (copy_to_user(addr, &fdq, sizeof(fdq)))
                return -EFAULT;
        return ret;
 }
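
The helpers above convert at XFS's 512-byte basic-block granularity, and quota_btobb() deliberately rounds up so a byte value that is not block-aligned never understates the resulting block count. A minimal standalone check of that round-trip:

#include <stdint.h>
#include <stdio.h>

#define XFS_BB_SHIFT 9  /* 512-byte basic blocks */

static uint64_t quota_bbtob(uint64_t blocks)
{
        return blocks << XFS_BB_SHIFT;
}

static uint64_t quota_btobb(uint64_t bytes)
{
        return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
}

int main(void)
{
        /* 513 bytes round up to 2 basic blocks (1024 bytes), never down to 1 */
        printf("btobb(513) = %llu\n", (unsigned long long)quota_btobb(513));
        printf("bbtob(2)   = %llu\n", (unsigned long long)quota_bbtob(2));
        return 0;
}
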
index bb15771b92ae32ae02390179492647a1d87c9dc2..08f3555fbeac3f6ceeda033cb8f6ec82557623d0 100644
@@ -224,7 +224,7 @@ out:
 static int udf_release_file(struct inode *inode, struct file *filp)
 {
        if (filp->f_mode & FMODE_WRITE &&
-           atomic_read(&inode->i_writecount) > 1) {
+           atomic_read(&inode->i_writecount) == 1) {
                /*
                 * Grab i_mutex to avoid races with writes changing i_size
                 * while we are running.
index 3a07a937e232a7e51bf089f981a664f782f5a75f..41f6c0b9d51cd3dc3d8b22b2fe37fcf78cd064bf 100644
@@ -166,9 +166,9 @@ extern void         xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
 /* quota ops */
 extern int             xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
 extern int             xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
-                                       uint, struct fs_disk_quota *);
+                                       uint, struct qc_dqblk *);
 extern int             xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
-                                       struct fs_disk_quota *);
+                                       struct qc_dqblk *);
 extern int             xfs_qm_scall_getqstat(struct xfs_mount *,
                                        struct fs_quota_stat *);
 extern int             xfs_qm_scall_getqstatv(struct xfs_mount *,
index 74fca68e43b6b4e98a0d203204d4ce2f646ff8fa..cb6168ec92c9e0763640d5cc4eeff12fc89f6ef0 100644
@@ -39,7 +39,6 @@ STATIC int    xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
 STATIC int     xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
                                        uint);
 STATIC uint    xfs_qm_export_flags(uint);
-STATIC uint    xfs_qm_export_qtype_flags(uint);
 
 /*
  * Turn off quota accounting and/or enforcement for all udquots and/or
@@ -573,8 +572,8 @@ xfs_qm_scall_getqstatv(
        return 0;
 }
 
-#define XFS_DQ_MASK \
-       (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
+#define XFS_QC_MASK \
+       (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
 
 /*
  * Adjust quota limits, and start/stop timers accordingly.
@@ -584,7 +583,7 @@ xfs_qm_scall_setqlim(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
-       fs_disk_quota_t         *newlim)
+       struct qc_dqblk         *newlim)
 {
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_disk_dquot   *ddq;
@@ -593,9 +592,9 @@ xfs_qm_scall_setqlim(
        int                     error;
        xfs_qcnt_t              hard, soft;
 
-       if (newlim->d_fieldmask & ~XFS_DQ_MASK)
+       if (newlim->d_fieldmask & ~XFS_QC_MASK)
                return -EINVAL;
-       if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
+       if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
                return 0;
 
        /*
@@ -633,11 +632,11 @@ xfs_qm_scall_setqlim(
        /*
         * Make sure that hardlimits are >= soft limits before changing.
         */
-       hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
-               (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
+       hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
+               (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
                        be64_to_cpu(ddq->d_blk_hardlimit);
-       soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
-               (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
+       soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
+               (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
                        be64_to_cpu(ddq->d_blk_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_blk_hardlimit = cpu_to_be64(hard);
@@ -650,11 +649,11 @@ xfs_qm_scall_setqlim(
        } else {
                xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
        }
-       hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
-               (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
+       hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
+               (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
                        be64_to_cpu(ddq->d_rtb_hardlimit);
-       soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
-               (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
+       soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
+               (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
                        be64_to_cpu(ddq->d_rtb_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_rtb_hardlimit = cpu_to_be64(hard);
@@ -667,10 +666,10 @@ xfs_qm_scall_setqlim(
                xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
        }
 
-       hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
+       hard = (newlim->d_fieldmask & QC_INO_HARD) ?
                (xfs_qcnt_t) newlim->d_ino_hardlimit :
                        be64_to_cpu(ddq->d_ino_hardlimit);
-       soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
+       soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
                (xfs_qcnt_t) newlim->d_ino_softlimit :
                        be64_to_cpu(ddq->d_ino_softlimit);
        if (hard == 0 || hard >= soft) {
@@ -687,12 +686,12 @@ xfs_qm_scall_setqlim(
        /*
         * Update warnings counter(s) if requested
         */
-       if (newlim->d_fieldmask & FS_DQ_BWARNS)
-               ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
-       if (newlim->d_fieldmask & FS_DQ_IWARNS)
-               ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
-       if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
-               ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
+       if (newlim->d_fieldmask & QC_SPC_WARNS)
+               ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
+       if (newlim->d_fieldmask & QC_INO_WARNS)
+               ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
+       if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+               ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
 
        if (id == 0) {
                /*
@@ -702,24 +701,24 @@ xfs_qm_scall_setqlim(
                 * soft and hard limit values (already done, above), and
                 * for warnings.
                 */
-               if (newlim->d_fieldmask & FS_DQ_BTIMER) {
-                       q->qi_btimelimit = newlim->d_btimer;
-                       ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
+               if (newlim->d_fieldmask & QC_SPC_TIMER) {
+                       q->qi_btimelimit = newlim->d_spc_timer;
+                       ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
                }
-               if (newlim->d_fieldmask & FS_DQ_ITIMER) {
-                       q->qi_itimelimit = newlim->d_itimer;
-                       ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
+               if (newlim->d_fieldmask & QC_INO_TIMER) {
+                       q->qi_itimelimit = newlim->d_ino_timer;
+                       ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
                }
-               if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
-                       q->qi_rtbtimelimit = newlim->d_rtbtimer;
-                       ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
+               if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
+                       q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
+                       ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
                }
-               if (newlim->d_fieldmask & FS_DQ_BWARNS)
-                       q->qi_bwarnlimit = newlim->d_bwarns;
-               if (newlim->d_fieldmask & FS_DQ_IWARNS)
-                       q->qi_iwarnlimit = newlim->d_iwarns;
-               if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
-                       q->qi_rtbwarnlimit = newlim->d_rtbwarns;
+               if (newlim->d_fieldmask & QC_SPC_WARNS)
+                       q->qi_bwarnlimit = newlim->d_spc_warns;
+               if (newlim->d_fieldmask & QC_INO_WARNS)
+                       q->qi_iwarnlimit = newlim->d_ino_warns;
+               if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+                       q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
        } else {
                /*
                 * If the user is now over quota, start the timelimit.
@@ -824,7 +823,7 @@ xfs_qm_scall_getquota(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
-       struct fs_disk_quota    *dst)
+       struct qc_dqblk         *dst)
 {
        struct xfs_dquot        *dqp;
        int                     error;
@@ -848,28 +847,25 @@ xfs_qm_scall_getquota(
        }
 
        memset(dst, 0, sizeof(*dst));
-       dst->d_version = FS_DQUOT_VERSION;
-       dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
-       dst->d_id = be32_to_cpu(dqp->q_core.d_id);
-       dst->d_blk_hardlimit =
-               XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
-       dst->d_blk_softlimit =
-               XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
+       dst->d_spc_hardlimit =
+               XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
+       dst->d_spc_softlimit =
+               XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
        dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
        dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
-       dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
-       dst->d_icount = dqp->q_res_icount;
-       dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
-       dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
-       dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
-       dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
-       dst->d_rtb_hardlimit =
-               XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
-       dst->d_rtb_softlimit =
-               XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
-       dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
-       dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
-       dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
+       dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
+       dst->d_ino_count = dqp->q_res_icount;
+       dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
+       dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
+       dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
+       dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
+       dst->d_rt_spc_hardlimit =
+               XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
+       dst->d_rt_spc_softlimit =
+               XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
+       dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
+       dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
+       dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
 
        /*
         * Internally, we don't reset all the timers when quota enforcement
@@ -882,23 +878,23 @@ xfs_qm_scall_getquota(
             dqp->q_core.d_flags == XFS_DQ_GROUP) ||
            (!XFS_IS_PQUOTA_ENFORCED(mp) &&
             dqp->q_core.d_flags == XFS_DQ_PROJ)) {
-               dst->d_btimer = 0;
-               dst->d_itimer = 0;
-               dst->d_rtbtimer = 0;
+               dst->d_spc_timer = 0;
+               dst->d_ino_timer = 0;
+               dst->d_rt_spc_timer = 0;
        }
 
 #ifdef DEBUG
-       if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
-            (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) ||
-            (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) &&
-           dst->d_id != 0) {
-               if ((dst->d_bcount > dst->d_blk_softlimit) &&
-                   (dst->d_blk_softlimit > 0)) {
-                       ASSERT(dst->d_btimer != 0);
+       if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
+            (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
+            (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
+           id != 0) {
+               if ((dst->d_space > dst->d_spc_softlimit) &&
+                   (dst->d_spc_softlimit > 0)) {
+                       ASSERT(dst->d_spc_timer != 0);
                }
-               if ((dst->d_icount > dst->d_ino_softlimit) &&
+               if ((dst->d_ino_count > dst->d_ino_softlimit) &&
                    (dst->d_ino_softlimit > 0)) {
-                       ASSERT(dst->d_itimer != 0);
+                       ASSERT(dst->d_ino_timer != 0);
                }
        }
 #endif
@@ -907,26 +903,6 @@ out_put:
        return error;
 }
 
-STATIC uint
-xfs_qm_export_qtype_flags(
-       uint flags)
-{
-       /*
-        * Can't be more than one, or none.
-        */
-       ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
-               (FS_PROJ_QUOTA | FS_USER_QUOTA));
-       ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
-               (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
-       ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
-               (FS_USER_QUOTA | FS_GROUP_QUOTA));
-       ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
-
-       return (flags & XFS_DQ_USER) ?
-               FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
-                       FS_PROJ_QUOTA : FS_GROUP_QUOTA;
-}
-
 STATIC uint
 xfs_qm_export_flags(
        uint flags)
index 7542bbeca6a12b18ae1162a51a8dd97f6c35790e..801a84c1cdc3c76d86ae413cf4a1dfddd89cdc92 100644
@@ -131,7 +131,7 @@ STATIC int
 xfs_fs_get_dqblk(
        struct super_block      *sb,
        struct kqid             qid,
-       struct fs_disk_quota    *fdq)
+       struct qc_dqblk         *qdq)
 {
        struct xfs_mount        *mp = XFS_M(sb);
 
@@ -141,14 +141,14 @@ xfs_fs_get_dqblk(
                return -ESRCH;
 
        return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
-                                     xfs_quota_type(qid.type), fdq);
+                                     xfs_quota_type(qid.type), qdq);
 }
 
 STATIC int
 xfs_fs_set_dqblk(
        struct super_block      *sb,
        struct kqid             qid,
-       struct fs_disk_quota    *fdq)
+       struct qc_dqblk         *qdq)
 {
        struct xfs_mount        *mp = XFS_M(sb);
 
@@ -160,7 +160,7 @@ xfs_fs_set_dqblk(
                return -ESRCH;
 
        return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
-                                    xfs_quota_type(qid.type), fdq);
+                                    xfs_quota_type(qid.type), qdq);
 }
 
 const struct quotactl_ops xfs_quotactl_operations = {
index 33063f872ee3cd698a233a0f0defa67aa1b6bd01..176bf816875edcb76fe9dd39470c5e1c139a4551 100644
@@ -385,7 +385,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 
 /* Is this type a native word size -- useful for atomic operations */
 #ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 #endif
 
 /* Compile time object size, -1 for unknown */
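
With char- and short-sized types now counting as native words, compile-time checks built on __native_word() (for example the assertion behind smp_load_acquire()/smp_store_release()) stop rejecting 8- and 16-bit accesses. A standalone illustration of the widened macro:

#include <stdio.h>

#define __native_word(t) \
        (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
         sizeof(t) == sizeof(int)  || sizeof(t) == sizeof(long))

/* roughly what compiletime_assert_atomic_type() reduces to */
_Static_assert(__native_word(char),  "char-sized accesses now pass");
_Static_assert(__native_word(short), "short-sized accesses now pass");

int main(void)
{
        printf("short is a native word: %d\n", (int)__native_word(short));
        return 0;
}
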
index 0bebb5c348b8268eff6135f24f1635c0adf2dbb1..d36f68b08acc1fc95c0df5867df545ac2976d0d6 100644
@@ -595,7 +595,7 @@ extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                     char *filter_str);
 extern void ftrace_profile_free_filter(struct perf_event *event);
 extern void *perf_trace_buf_prepare(int size, unsigned short type,
-                                   struct pt_regs *regs, int *rctxp);
+                                   struct pt_regs **regs, int *rctxp);
 
 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr,
index a036d058a249c99d289a721027a529e1c0245040..05f6df1fdf5bbfc70880f188c40e61264f764cc7 100644
@@ -170,6 +170,7 @@ enum  hrtimer_base_type {
  * @clock_was_set:     Indicates that clock was set from irq context.
  * @expires_next:      absolute time of the next event which was scheduled
  *                     via clock_set_next_event()
+ * @in_hrtirq:         hrtimer_interrupt() is currently executing
  * @hres_active:       State of high resolution mode
  * @hang_detected:     The last hrtimer interrupt detected a hang
  * @nr_events:         Total number of hrtimer interrupt events
@@ -185,6 +186,7 @@ struct hrtimer_cpu_base {
        unsigned int                    clock_was_set;
 #ifdef CONFIG_HIGH_RES_TIMERS
        ktime_t                         expires_next;
+       int                             in_hrtirq;
        int                             hres_active;
        int                             hang_detected;
        unsigned long                   nr_events;
index e3a1721c8354b98d6c613d536e4704ff52fa18d4..7c7695940dddeae9d3d22129ce4a14eaf70e1a5e 100644
@@ -228,7 +228,9 @@ struct i2c_client {
        struct device dev;              /* the device structure         */
        int irq;                        /* irq issued by device         */
        struct list_head detected;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
        i2c_slave_cb_t slave_cb;        /* callback for slave mode      */
+#endif
 };
 #define to_i2c_client(d) container_of(d, struct i2c_client, dev)
 
@@ -253,6 +255,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
 
 /* I2C slave support */
 
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
 enum i2c_slave_event {
        I2C_SLAVE_REQ_READ_START,
        I2C_SLAVE_REQ_READ_END,
@@ -269,6 +272,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
 {
        return client->slave_cb(client, event, val);
 }
+#endif
 
 /**
  * struct i2c_board_info - template for device creation
@@ -404,8 +408,10 @@ struct i2c_algorithm {
        /* To determine what the adapter supports */
        u32 (*functionality) (struct i2c_adapter *);
 
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
        int (*reg_slave)(struct i2c_client *client);
        int (*unreg_slave)(struct i2c_client *client);
+#endif
 };
 
 /**
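Guarding the member and the hooks with IS_ENABLED(CONFIG_I2C_SLAVE) compiles slave support out entirely on kernels that don't select it, so struct i2c_client and struct i2c_algorithm don't grow for everyone. IS_ENABLED() itself is a token-pasting trick from kconfig.h; a simplified standalone replica covering the built-in (=y) case:

#include <stdio.h>

/* when CONFIG_FOO=y, kconfig defines CONFIG_FOO as 1 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
/* the real kernel macro also accepts option##_MODULE (=m) */
#define IS_ENABLED(option) __is_defined(option)

#define CONFIG_I2C_SLAVE 1      /* pretend the option is =y */

int main(void)
{
        printf("I2C_SLAVE enabled: %d\n", IS_ENABLED(CONFIG_I2C_SLAVE));
#if IS_ENABLED(CONFIG_I2C_SLAVE)
        printf("slave_cb member would be compiled in\n");
#endif
        return 0;
}
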
index 515a35e2a48ab7e55d550fcd164466080773b3ce..960e666c51e44620686b6aad9fdbfbc3d325b95e 100644
@@ -472,27 +472,59 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
 /**
  * vlan_get_protocol - get protocol EtherType.
  * @skb: skbuff to query
+ * @type: first vlan protocol
+ * @depth: buffer to store length of eth and vlan tags in bytes
  *
  * Returns the EtherType of the packet, regardless of whether it is
  * vlan encapsulated (normal or hardware accelerated) or not.
  */
-static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
+static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+                                        int *depth)
 {
-       __be16 protocol = 0;
-
-       if (vlan_tx_tag_present(skb) ||
-            skb->protocol != cpu_to_be16(ETH_P_8021Q))
-               protocol = skb->protocol;
-       else {
-               __be16 proto, *protop;
-               protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr,
-                                               h_vlan_encapsulated_proto),
-                                               sizeof(proto), &proto);
-               if (likely(protop))
-                       protocol = *protop;
+       unsigned int vlan_depth = skb->mac_len;
+
+       /* if type is 802.1Q/AD then the header should already be
+        * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+        * ETH_HLEN otherwise
+        */
+       if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+               if (vlan_depth) {
+                       if (WARN_ON(vlan_depth < VLAN_HLEN))
+                               return 0;
+                       vlan_depth -= VLAN_HLEN;
+               } else {
+                       vlan_depth = ETH_HLEN;
+               }
+               do {
+                       struct vlan_hdr *vh;
+
+                       if (unlikely(!pskb_may_pull(skb,
+                                                   vlan_depth + VLAN_HLEN)))
+                               return 0;
+
+                       vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+                       type = vh->h_vlan_encapsulated_proto;
+                       vlan_depth += VLAN_HLEN;
+               } while (type == htons(ETH_P_8021Q) ||
+                        type == htons(ETH_P_8021AD));
        }
 
-       return protocol;
+       if (depth)
+               *depth = vlan_depth;
+
+       return type;
+}
+
+/**
+ * vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+{
+       return __vlan_get_protocol(skb, skb->protocol, NULL);
 }
 
 static inline void vlan_set_encap_proto(struct sk_buff *skb,
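
The rewritten helper walks an arbitrary stack of 802.1Q/802.1ad tags rather than peeking at a single encapsulated header, and reports the total header depth for callers that need the payload offset. A standalone userspace sketch of the same walk over a raw frame buffer (plain arrays here stand in for skb accessors such as pskb_may_pull()):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8
#define ETH_HLEN     14
#define VLAN_HLEN    4

/* return the innermost EtherType, skipping stacked VLAN tags;
 * 0 means the frame was truncated, as the kernel helper reports */
static uint16_t inner_ethertype(const uint8_t *frame, size_t len, int *depth)
{
        size_t off = ETH_HLEN - 2;      /* offset of the outer EtherType */
        uint16_t type;

        if (len < ETH_HLEN)
                return 0;
        type = (uint16_t)(frame[off] << 8 | frame[off + 1]);
        while (type == ETH_P_8021Q || type == ETH_P_8021AD) {
                off += VLAN_HLEN;       /* skip TCI + next EtherType */
                if (len < off + 2)
                        return 0;
                type = (uint16_t)(frame[off] << 8 | frame[off + 1]);
        }
        if (depth)
                *depth = (int)(off + 2);
        return type;
}

int main(void)
{
        uint8_t frame[64] = { 0 };      /* MAC addresses left zeroed */
        int depth = 0;
        uint16_t type;

        frame[12] = 0x88; frame[13] = 0xa8;     /* outer 802.1ad tag */
        frame[16] = 0x81; frame[17] = 0x00;     /* inner 802.1Q tag */
        frame[20] = 0x08; frame[21] = 0x00;     /* encapsulated IPv4 */
        type = inner_ethertype(frame, sizeof(frame), &depth);
        printf("type=0x%04x depth=%d\n", type, depth);  /* 0x0800, 22 */
        return 0;
}
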
index 5449d2f4a1efa51203ecd27237ddf9899af2d59a..64ce58bee6f5a74356f612a48730c35455ae6662 100644
@@ -176,7 +176,7 @@ extern int _cond_resched(void);
  */
 # define might_sleep() \
        do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
-# define sched_annotate_sleep()        __set_current_state(TASK_RUNNING)
+# define sched_annotate_sleep()        (current->task_state_change = 0)
 #else
   static inline void ___might_sleep(const char *file, int line,
                                   int preempt_offset) { }
index c9d645ad98ff7ac9919f0fee00808cfd0e4963f7..5fc3d1083071ca24a96a6da038324fafae997667 100644
@@ -166,7 +166,17 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
 }
 
 #if BITS_PER_LONG < 64
-extern u64 ktime_divns(const ktime_t kt, s64 div);
+extern u64 __ktime_divns(const ktime_t kt, s64 div);
+static inline u64 ktime_divns(const ktime_t kt, s64 div)
+{
+       if (__builtin_constant_p(div) && !(div >> 32)) {
+               u64 ns = kt.tv64;
+               do_div(ns, div);
+               return ns;
+       } else {
+               return __ktime_divns(kt, div);
+       }
+}
 #else /* BITS_PER_LONG < 64 */
 # define ktime_divns(kt, div)          (u64)((kt).tv64 / (div))
 #endif
@@ -186,6 +196,11 @@ static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
        return ktime_to_us(ktime_sub(later, earlier));
 }
 
+static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
+{
+       return ktime_to_ms(ktime_sub(later, earlier));
+}
+
 static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
 {
        return ktime_add_ns(kt, usec * NSEC_PER_USEC);
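
Splitting ktime_divns() this way lets the compiler resolve __builtin_constant_p() at the call site: a constant divisor that fits in 32 bits takes a cheap do_div()-style 64-by-32 division inline, and everything else falls back to the out-of-line __ktime_divns(). A standalone sketch of the dispatch (a plain 64-bit divide stands in for the kernel's slow path):

#include <stdint.h>
#include <stdio.h>

static uint64_t slow_divns(uint64_t ns, int64_t div)
{
        return ns / (uint64_t)div;      /* stand-in for __ktime_divns() */
}

/* statement-expression macro so __builtin_constant_p() sees the call site */
#define divns(ns, div)                                                  \
({                                                                      \
        uint64_t __n = (ns);                                            \
        (__builtin_constant_p(div) && !((int64_t)(div) >> 32)) ?        \
                __n / (uint32_t)(div) : /* cheap 64/32 divide */        \
                slow_divns(__n, (div));                                 \
})

int main(void)
{
        int64_t runtime_div = 1000;

        /* constant divisor: fast path; variable divisor: slow path */
        printf("%llu\n", (unsigned long long)divns(1000000000ULL, 1000));
        printf("%llu\n", (unsigned long long)divns(1000000000ULL, runtime_div));
        return 0;
}
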
index 25c791e295fd5355650a3db42bc69ddc1adf2cc8..5f3a9aa7225d917d5d769d565d3ceb58ad5c4aa0 100644
@@ -97,7 +97,7 @@ enum {
        MLX4_MAX_NUM_PF         = 16,
        MLX4_MAX_NUM_VF         = 126,
        MLX4_MAX_NUM_VF_P_PORT  = 64,
-       MLX4_MFUNC_MAX          = 80,
+       MLX4_MFUNC_MAX          = 128,
        MLX4_MAX_EQ_NUM         = 1024,
        MLX4_MFUNC_EQ_NUM       = 4,
        MLX4_MFUNC_MAX_EQES     = 8,
index 80fc92a49649cf66ed87ac1343c153debd36619c..dd5ea3016fc4e854ded6b1e7c2e096224d83317f 100644
@@ -1070,6 +1070,7 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_WRITE 0x0008  /* Special case for get_user_pages */
 #define VM_FAULT_HWPOISON 0x0010       /* Hit poisoned small page */
 #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
+#define VM_FAULT_SIGSEGV 0x0040
 
 #define VM_FAULT_NOPAGE        0x0100  /* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED        0x0200  /* ->fault locked the returned page */
@@ -1078,8 +1079,9 @@ static inline int page_mapped(struct page *page)
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
 
-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
-                        VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+                        VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
+                        VM_FAULT_FALLBACK)
 
 /* Encode hstate index for a hwpoisoned large page */
 #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
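
The new VM_FAULT_SIGSEGV bit (now part of VM_FAULT_ERROR) lets the core fault path tell architectures to deliver SIGSEGV instead of SIGBUS, so arch fault handlers need a matching branch. A standalone sketch of the dispatch, simplified from the common arch/*/mm/fault.c shape (VM_FAULT_SIGSEGV is 0x0040 as above; the OOM/SIGBUS values come from the same header):

#include <stdio.h>

#define VM_FAULT_OOM     0x0001
#define VM_FAULT_SIGBUS  0x0002
#define VM_FAULT_SIGSEGV 0x0040 /* new in this series */

static const char *fault_disposition(unsigned int fault)
{
        if (fault & VM_FAULT_OOM)
                return "OOM path";
        if (fault & VM_FAULT_SIGSEGV)
                return "SIGSEGV";       /* bad address, not a bus error */
        if (fault & VM_FAULT_SIGBUS)
                return "SIGBUS";
        return "handled";
}

int main(void)
{
        printf("%s\n", fault_disposition(VM_FAULT_SIGSEGV));
        return 0;
}
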
index 90230d5811c5aab49b8755fceda1e1d5e6d326aa..3a6490e81b2856821ca190f438c039136a7fa207 100644
@@ -5,8 +5,11 @@
  * An MCS like lock especially tailored for optimistic spinning for sleeping
  * lock implementations (mutex, rwsem, etc).
  */
-
-#define OSQ_UNLOCKED_VAL (0)
+struct optimistic_spin_node {
+       struct optimistic_spin_node *next, *prev;
+       int locked; /* 1 if lock acquired */
+       int cpu; /* encoded CPU # + 1 value */
+};
 
 struct optimistic_spin_queue {
        /*
@@ -16,6 +19,8 @@ struct optimistic_spin_queue {
        atomic_t tail;
 };
 
+#define OSQ_UNLOCKED_VAL (0)
+
 /* Init macro and function. */
 #define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
 
@@ -24,4 +29,7 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
        atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
 }
 
+extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern void osq_unlock(struct optimistic_spin_queue *lock);
+
 #endif
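
optimistic_spin_node.cpu holds the CPU number plus one so that 0 can keep meaning OSQ_UNLOCKED_VAL in the tail word; the osq_lock()/osq_unlock() declared here live out of line in kernel/locking/. A standalone sketch of that encoding:

#include <stdio.h>

#define OSQ_UNLOCKED_VAL 0

/* CPU numbers start at 0, so bias by one to reserve 0 for "no tail" */
static int encode_cpu(int cpu_nr)  { return cpu_nr + 1; }
static int decode_cpu(int encoded) { return encoded - 1; }

int main(void)
{
        int tail = encode_cpu(0);       /* CPU 0 queues itself */

        printf("tail=%d unlocked=%d owner=cpu%d\n",
               tail, tail == OSQ_UNLOCKED_VAL, decode_cpu(tail));
        return 0;
}
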
index 4f7a61ca4b393dc837cb4ad278c4a66306247cbd..5cad0e6f35524b454ec691e1787848d322893b3b 100644
@@ -450,11 +450,6 @@ struct perf_event {
 #endif /* CONFIG_PERF_EVENTS */
 };
 
-enum perf_event_context_type {
-       task_context,
-       cpu_context,
-};
-
 /**
  * struct perf_event_context - event context structure
  *
@@ -462,7 +457,6 @@ enum perf_event_context_type {
  */
 struct perf_event_context {
        struct pmu                      *pmu;
-       enum perf_event_context_type    type;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
@@ -475,6 +469,7 @@ struct perf_event_context {
         */
        struct mutex                    mutex;
 
+       struct list_head                active_ctx_list;
        struct list_head                pinned_groups;
        struct list_head                flexible_groups;
        struct list_head                event_list;
@@ -525,7 +520,6 @@ struct perf_cpu_context {
        int                             exclusive;
        struct hrtimer                  hrtimer;
        ktime_t                         hrtimer_interval;
-       struct list_head                rotation_list;
        struct pmu                      *unique_pmu;
        struct perf_cgroup              *cgrp;
 };
@@ -665,6 +659,7 @@ static inline int is_software_event(struct perf_event *event)
 
 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
+extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
@@ -689,14 +684,25 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 static __always_inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
-       struct pt_regs hot_regs;
+       if (static_key_false(&perf_swevent_enabled[event_id]))
+               __perf_sw_event(event_id, nr, regs, addr);
+}
+
+DECLARE_PER_CPU(struct pt_regs, __perf_regs[4]);
 
+/*
+ * 'Special' version for the scheduler, it hard assumes no recursion,
+ * which is guaranteed by us not actually scheduling inside other swevents
+ * because those disable preemption.
+ */
+static __always_inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
+{
        if (static_key_false(&perf_swevent_enabled[event_id])) {
-               if (!regs) {
-                       perf_fetch_caller_regs(&hot_regs);
-                       regs = &hot_regs;
-               }
-               __perf_sw_event(event_id, nr, regs, addr);
+               struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+               perf_fetch_caller_regs(regs);
+               ___perf_sw_event(event_id, nr, regs, addr);
        }
 }
 
@@ -712,7 +718,7 @@ static inline void perf_event_task_sched_in(struct task_struct *prev,
 static inline void perf_event_task_sched_out(struct task_struct *prev,
                                             struct task_struct *next)
 {
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
+       perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
 
        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_out(prev, next);
@@ -823,6 +829,8 @@ static inline int perf_event_refresh(struct perf_event *event, int refresh)
 static inline void
 perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)    { }
 static inline void
+perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)                    { }
+static inline void
 perf_bp_event(struct perf_event *event, void *data)                    { }
 
 static inline int perf_register_guest_info_callbacks
index 77aed9ea1d264386376bf3694b87ae3dab07275a..dab545bb66b3114597da2d4a851539d0ab0d89a8 100644
@@ -37,6 +37,7 @@
 #define SSDR           (0x10)  /* SSP Data Write/Data Read Register */
 
 #define SSTO           (0x28)  /* SSP Time Out Register */
+#define DDS_RATE       (0x28)  /* SSP DDS Clock Rate Register (Intel Quark) */
 #define SSPSP          (0x2C)  /* SSP Programmable Serial Protocol */
 #define SSTSA          (0x30)  /* SSP Tx Timeslot Active */
 #define SSRSA          (0x34)  /* SSP Rx Timeslot Active */
index 50978b781a19c4d82c60c44918f5049c4cd25272..097d7eb2441e529f139822b6bf4fe5031dc5e710 100644
@@ -321,6 +321,49 @@ struct dquot_operations {
 
 struct path;
 
+/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */
+struct qc_dqblk {
+       int d_fieldmask;        /* mask of fields to change in ->set_dqblk() */
+       u64 d_spc_hardlimit;    /* absolute limit on used space */
+       u64 d_spc_softlimit;    /* preferred limit on used space */
+       u64 d_ino_hardlimit;    /* maximum # allocated inodes */
+       u64 d_ino_softlimit;    /* preferred inode limit */
+       u64 d_space;            /* Space owned by the user */
+       u64 d_ino_count;        /* # inodes owned by the user */
+       s64 d_ino_timer;        /* zero if within inode limits */
+                               /* if not, we refuse service */
+       s64 d_spc_timer;        /* similar to above; for space */
+       int d_ino_warns;        /* # warnings issued wrt num inodes */
+       int d_spc_warns;        /* # warnings issued wrt used space */
+       u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */
+       u64 d_rt_spc_softlimit; /* preferred limit on RT space */
+       u64 d_rt_space;         /* realtime space owned */
+       s64 d_rt_spc_timer;     /* similar to above; for RT space */
+       int d_rt_spc_warns;     /* # warnings issued wrt RT space */
+};
+
+/* Field specifiers for ->set_dqblk() in struct qc_dqblk */
+#define        QC_INO_SOFT     (1<<0)
+#define        QC_INO_HARD     (1<<1)
+#define        QC_SPC_SOFT     (1<<2)
+#define        QC_SPC_HARD     (1<<3)
+#define        QC_RT_SPC_SOFT  (1<<4)
+#define        QC_RT_SPC_HARD  (1<<5)
+#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \
+                      QC_RT_SPC_SOFT | QC_RT_SPC_HARD)
+#define        QC_SPC_TIMER    (1<<6)
+#define        QC_INO_TIMER    (1<<7)
+#define        QC_RT_SPC_TIMER (1<<8)
+#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER)
+#define        QC_SPC_WARNS    (1<<9)
+#define        QC_INO_WARNS    (1<<10)
+#define        QC_RT_SPC_WARNS (1<<11)
+#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS)
+#define        QC_SPACE        (1<<12)
+#define        QC_INO_COUNT    (1<<13)
+#define        QC_RT_SPACE     (1<<14)
+#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
+
 /* Operations handling requests from userspace */
 struct quotactl_ops {
        int (*quota_on)(struct super_block *, int, int, struct path *);
@@ -329,8 +372,8 @@ struct quotactl_ops {
        int (*quota_sync)(struct super_block *, int);
        int (*get_info)(struct super_block *, int, struct if_dqinfo *);
        int (*set_info)(struct super_block *, int, struct if_dqinfo *);
-       int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
-       int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
+       int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+       int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
        int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
        int (*set_xstate)(struct super_block *, unsigned int, int);
        int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
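
The struct above makes the ->get_dqblk()/->set_dqblk() contract explicit: space fields are plain bytes rather than 512-byte basic blocks, and setters honour only the fields flagged in d_fieldmask. A compilable userspace mock of that contract, using the field and mask names from this header (the fake_dquot accounting is invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* trimmed copy of the new interface: space fields are in bytes */
struct qc_dqblk {
        int      d_fieldmask;
        uint64_t d_spc_hardlimit;
        uint64_t d_spc_softlimit;
        uint64_t d_space;
};

#define QC_SPC_SOFT (1 << 2)
#define QC_SPC_HARD (1 << 3)

/* invented per-user state, standing in for a filesystem's dquot */
struct fake_dquot {
        uint64_t bytes_used, soft_bytes, hard_bytes;
};

static void fake_get_dqblk(const struct fake_dquot *dq, struct qc_dqblk *di)
{
        memset(di, 0, sizeof(*di));
        di->d_space         = dq->bytes_used;   /* bytes, not blocks */
        di->d_spc_softlimit = dq->soft_bytes;
        di->d_spc_hardlimit = dq->hard_bytes;
}

static void fake_set_dqblk(struct fake_dquot *dq, const struct qc_dqblk *di)
{
        if (di->d_fieldmask & QC_SPC_SOFT)      /* only touch requested fields */
                dq->soft_bytes = di->d_spc_softlimit;
        if (di->d_fieldmask & QC_SPC_HARD)
                dq->hard_bytes = di->d_spc_hardlimit;
}

int main(void)
{
        struct fake_dquot dq = { .bytes_used = 4096 };
        struct qc_dqblk di = { .d_fieldmask = QC_SPC_HARD,
                               .d_spc_hardlimit = 1 << 20 };

        fake_set_dqblk(&dq, &di);       /* soft limit left untouched */
        fake_get_dqblk(&dq, &di);
        printf("used=%llu soft=%llu hard=%llu\n",
               (unsigned long long)di.d_space,
               (unsigned long long)di.d_spc_softlimit,
               (unsigned long long)di.d_spc_hardlimit);
        return 0;
}
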
index f23538a6e411f4e1d5700ebbed96480f1425a9e3..29e3455f7d41f7f951e95d544acefd970388e767 100644
@@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block *sb, int type);
 int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
 int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
 int dquot_get_dqblk(struct super_block *sb, struct kqid id,
-               struct fs_disk_quota *di);
+               struct qc_dqblk *di);
 int dquot_set_dqblk(struct super_block *sb, struct kqid id,
-               struct fs_disk_quota *di);
+               struct qc_dqblk *di);
 
 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
 int dquot_transfer(struct inode *inode, struct iattr *iattr);
index 529bc946f450359158503332e6d563fa46ce6f47..a18b16f1dc0e44f7f5a3b99ea4f43d64c67b8a0c 100644
@@ -524,11 +524,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  * @member:    the name of the hlist_node within the struct.
  */
 #define hlist_for_each_entry_continue_rcu(pos, member)                 \
-       for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
-                       typeof(*(pos)), member);                        \
+       for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+                       &(pos)->member)), typeof(*(pos)), member);      \
             pos;                                                       \
-            pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
-                       typeof(*(pos)), member))
+            pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+                       &(pos)->member)), typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
@@ -536,11 +536,11 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  * @member:    the name of the hlist_node within the struct.
  */
 #define hlist_for_each_entry_continue_rcu_bh(pos, member)              \
-       for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
-                       typeof(*(pos)), member);                        \
+       for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
+                       &(pos)->member)), typeof(*(pos)), member);      \
             pos;                                                       \
-            pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
-                       typeof(*(pos)), member))
+            pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
+                       &(pos)->member)), typeof(*(pos)), member))
 
 /**
  * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
index ed4f5939a452cb424671f87dc9911fe318cb1fc3..78097491cd99a693f41b45e2b12ea1a4119cb52d 100644
@@ -331,12 +331,13 @@ static inline void rcu_init_nohz(void)
 extern struct srcu_struct tasks_rcu_exit_srcu;
 #define rcu_note_voluntary_context_switch(t) \
        do { \
+               rcu_all_qs(); \
                if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
                        ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
        } while (0)
 #else /* #ifdef CONFIG_TASKS_RCU */
 #define TASKS_RCU(x) do { } while (0)
-#define rcu_note_voluntary_context_switch(t)   do { } while (0)
+#define rcu_note_voluntary_context_switch(t)   rcu_all_qs()
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
 
 /**
@@ -582,11 +583,11 @@ static inline void rcu_preempt_sleep_check(void)
 })
 #define __rcu_dereference_check(p, c, space) \
 ({ \
-       typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+       /* Dependency order vs. p above. */ \
+       typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
        rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
        rcu_dereference_sparse(p, space); \
-       smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
-       ((typeof(*p) __force __kernel *)(_________p1)); \
+       ((typeof(*p) __force __kernel *)(________p1)); \
 })
 #define __rcu_dereference_protected(p, c, space) \
 ({ \
@@ -603,10 +604,10 @@ static inline void rcu_preempt_sleep_check(void)
 })
 #define __rcu_dereference_index_check(p, c) \
 ({ \
-       typeof(p) _________p1 = ACCESS_ONCE(p); \
+       /* Dependency order vs. p above. */ \
+       typeof(p) _________p1 = lockless_dereference(p); \
        rcu_lockdep_assert(c, \
                           "suspicious rcu_dereference_index_check() usage"); \
-       smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
        (_________p1); \
 })
 
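The two conversions drop their explicit smp_read_barrier_depends() because lockless_dereference() already pairs the volatile load with the dependency barrier. For reference, it is defined in compiler.h at this point roughly as:

#define lockless_dereference(p) \
({ \
        typeof(p) _________p1 = ACCESS_ONCE(p); \
        smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
        (_________p1); \
})
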
index 0e5366200154e87e394b61c52a60fad72f2050ff..937edaeb150deb17759a9c0c715630fd0cc9a729 100644
@@ -92,17 +92,49 @@ static inline void rcu_virt_note_context_switch(int cpu)
 }
 
 /*
- * Return the number of grace periods.
+ * Return the number of grace periods started.
  */
-static inline long rcu_batches_completed(void)
+static inline unsigned long rcu_batches_started(void)
 {
        return 0;
 }
 
 /*
- * Return the number of bottom-half grace periods.
+ * Return the number of bottom-half grace periods started.
  */
-static inline long rcu_batches_completed_bh(void)
+static inline unsigned long rcu_batches_started_bh(void)
+{
+       return 0;
+}
+
+/*
+ * Return the number of sched grace periods started.
+ */
+static inline unsigned long rcu_batches_started_sched(void)
+{
+       return 0;
+}
+
+/*
+ * Return the number of grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed(void)
+{
+       return 0;
+}
+
+/*
+ * Return the number of bottom-half grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_bh(void)
+{
+       return 0;
+}
+
+/*
+ * Return the number of sched grace periods completed.
+ */
+static inline unsigned long rcu_batches_completed_sched(void)
 {
        return 0;
 }
@@ -154,7 +186,10 @@ static inline bool rcu_is_watching(void)
        return true;
 }
 
-
 #endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
+static inline void rcu_all_qs(void)
+{
+}
+
 #endif /* __LINUX_RCUTINY_H */
index 52953790dcca2089527ec9f48725fd7d31fb0da0..d2e583a6aacacf09ee9dc3bf3646b6a3cff3494e 100644
@@ -81,9 +81,12 @@ void cond_synchronize_rcu(unsigned long oldstate);
 
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
-long rcu_batches_completed(void);
-long rcu_batches_completed_bh(void);
-long rcu_batches_completed_sched(void);
+unsigned long rcu_batches_started(void);
+unsigned long rcu_batches_started_bh(void);
+unsigned long rcu_batches_started_sched(void);
+unsigned long rcu_batches_completed(void);
+unsigned long rcu_batches_completed_bh(void);
+unsigned long rcu_batches_completed_sched(void);
 void show_rcu_gp_kthreads(void);
 
 void rcu_force_quiescent_state(void);
@@ -97,4 +100,6 @@ extern int rcu_scheduler_active __read_mostly;
 
 bool rcu_is_watching(void);
 
+void rcu_all_qs(void);
+
 #endif /* __LINUX_RCUTREE_H */
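
Returning unsigned long makes wraparound of the grace-period counters well defined, so callers (rcutorture in particular) can compare snapshots with modular, time_after()-style arithmetic instead of risking signed overflow. A standalone illustration using the kernel's ULONG_CMP_GE() definition:

#include <limits.h>
#include <stdio.h>

/* "a is at or after b", correct even when the counter has wrapped */
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
        unsigned long before = ULONG_MAX - 1;   /* about to wrap */
        unsigned long after  = before + 3;      /* wrapped around to 1 */

        printf("%lu -> %lu, at-or-after=%d\n",
               before, after, (int)ULONG_CMP_GE(after, before));
        return 0;
}
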
index 4419b99d8d6ec19010815c7ed759e513abccb95d..116655d922691b3143b8c9d840b944fed3a70adb 100644
@@ -468,7 +468,7 @@ bool regmap_reg_in_ranges(unsigned int reg,
  *
  * @reg: Offset of the register within the regmap bank
  * @lsb: lsb of the register field.
- * @reg: msb of the register field.
+ * @msb: msb of the register field.
  * @id_size: port size if it has some ports
  * @id_offset: address offset for each ports
  */
index 5479394fefcec75bce9a090093d9d5d6e81e5b5d..5dd65acc2a69e5188bb1f5d95ac1223c67fd36bc 100644
@@ -32,6 +32,8 @@ struct da9211_pdata {
         * 2 : 2 phase 2 buck
         */
        int num_buck;
+       int gpio_ren[DA9211_MAX_REGULATORS];
+       struct device_node *reg_node[DA9211_MAX_REGULATORS];
        struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
 };
 #endif
index 5f1e9ca47417febff0811df407abe43d99563786..d4ad5b5a02bb478a422b349406efba00997bab76 100644
@@ -21,6 +21,7 @@
 
 struct regmap;
 struct regulator_dev;
+struct regulator_config;
 struct regulator_init_data;
 struct regulator_enable_gpio;
 
@@ -205,6 +206,15 @@ enum regulator_type {
  * @supply_name: Identifying the regulator supply
  * @of_match: Name used to identify regulator in DT.
  * @regulators_node: Name of node containing regulator definitions in DT.
+ * @of_parse_cb: Optional callback called only if of_match is present.
+ *               Will be called for each regulator parsed from DT, during
+ *               init_data parsing.
+ *               The regulator_config passed as argument to the callback will
+ *               be a copy of config passed to regulator_register, valid only
+ *               for this particular call. Callback may freely change the
+ *               config but it cannot store it for later usage.
+ *               Callback should return 0 on success or negative ERRNO
+ *               indicating failure.
  * @id: Numerical identifier for the regulator.
  * @ops: Regulator operations table.
  * @irq: Interrupt number for the regulator.
@@ -251,6 +261,9 @@ struct regulator_desc {
        const char *supply_name;
        const char *of_match;
        const char *regulators_node;
+       int (*of_parse_cb)(struct device_node *,
+                           const struct regulator_desc *,
+                           struct regulator_config *);
        int id;
        bool continuous_voltage_range;
        unsigned n_voltages;
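
The @of_parse_cb kernel-doc above defines a narrow contract: called once per regulator parsed from DT, free to patch the per-call copy of regulator_config, forbidden from keeping the pointer, and returning 0 or a negative errno. A hedged driver-side sketch (the vendor property, FOO_MAX_RAMP_US and struct foo_regulator are invented for illustration):

#include <linux/of.h>
#include <linux/regulator/driver.h>

#define FOO_MAX_RAMP_US 10000           /* invented driver limit */

struct foo_regulator {                  /* invented driver state */
        unsigned int ramp_us;
};

static int foo_of_parse_cb(struct device_node *np,
                           const struct regulator_desc *desc,
                           struct regulator_config *config)
{
        struct foo_regulator *foo = config->driver_data;
        u32 ramp;

        /* optional vendor knob; absence is not an error */
        if (of_property_read_u32(np, "vendor,ramp-delay-us", &ramp))
                return 0;
        if (ramp > FOO_MAX_RAMP_US)
                return -EINVAL;         /* fails registration of this regulator */
        foo->ramp_us = ramp;
        /* 'config' must not be stored: it is valid only for this call */
        return 0;
}
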
index 0b08d05d470b56cacec467d5368810517d4a1752..b07562e082c416a2b98fc08818f35111c8237288 100644
@@ -191,15 +191,22 @@ struct regulator_init_data {
        void *driver_data;      /* core does not touch this */
 };
 
-int regulator_suspend_prepare(suspend_state_t state);
-int regulator_suspend_finish(void);
-
 #ifdef CONFIG_REGULATOR
 void regulator_has_full_constraints(void);
+int regulator_suspend_prepare(suspend_state_t state);
+int regulator_suspend_finish(void);
 #else
 static inline void regulator_has_full_constraints(void)
 {
 }
+static inline int regulator_suspend_prepare(suspend_state_t state)
+{
+       return 0;
+}
+static inline int regulator_suspend_finish(void)
+{
+       return 0;
+}
 #endif
 
 #endif
diff --git a/include/linux/regulator/mt6397-regulator.h b/include/linux/regulator/mt6397-regulator.h
new file mode 100644
index 0000000..30cc596
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Flora Fu <flora.fu@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_REGULATOR_MT6397_H
+#define __LINUX_REGULATOR_MT6397_H
+
+enum {
+       MT6397_ID_VPCA15 = 0,
+       MT6397_ID_VPCA7,
+       MT6397_ID_VSRAMCA15,
+       MT6397_ID_VSRAMCA7,
+       MT6397_ID_VCORE,
+       MT6397_ID_VGPU,
+       MT6397_ID_VDRM,
+       MT6397_ID_VIO18 = 7,
+       MT6397_ID_VTCXO,
+       MT6397_ID_VA28,
+       MT6397_ID_VCAMA,
+       MT6397_ID_VIO28,
+       MT6397_ID_VUSB,
+       MT6397_ID_VMC,
+       MT6397_ID_VMCH,
+       MT6397_ID_VEMC3V3,
+       MT6397_ID_VGP1,
+       MT6397_ID_VGP2,
+       MT6397_ID_VGP3,
+       MT6397_ID_VGP4,
+       MT6397_ID_VGP5,
+       MT6397_ID_VGP6,
+       MT6397_ID_VIBR,
+       MT6397_ID_RG_MAX,
+};
+
+#define MT6397_MAX_REGULATOR   MT6397_ID_RG_MAX
+#define MT6397_REGULATOR_ID97  0x97
+#define MT6397_REGULATOR_ID91  0x91
+
+#endif /* __LINUX_REGULATOR_MT6397_H */
index 364f7a7c43db3db23e67805983e90b3dd0187837..70c6c66c5bcf16cc4be324a1aaf40b1981e56413 100644
 #define PFUZE200_VGEN5         11
 #define PFUZE200_VGEN6         12
 
+#define PFUZE3000_SW1A         0
+#define PFUZE3000_SW1B         1
+#define PFUZE3000_SW2          2
+#define PFUZE3000_SW3          3
+#define PFUZE3000_SWBST                4
+#define PFUZE3000_VSNVS                5
+#define PFUZE3000_VREFDDR      6
+#define PFUZE3000_VLDO1                7
+#define PFUZE3000_VLDO2                8
+#define PFUZE3000_VCCSD                9
+#define PFUZE3000_V33          10
+#define PFUZE3000_VLDO3                11
+#define PFUZE3000_VLDO4                12
+
 struct regulator_init_data;
 
 struct pfuze_regulator_platform_data {
index 6d6be09a2fe50fff38f54d582aa547eccf24296c..dcad7ee0d7466c8e7ffd7a050e814db551c95cac 100644
@@ -161,7 +161,7 @@ extern void devm_rtc_device_unregister(struct device *dev,
 extern int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm);
 extern int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm);
 extern int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs);
-extern int rtc_set_ntp_time(struct timespec now);
+extern int rtc_set_ntp_time(struct timespec64 now);
 int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm);
 extern int rtc_read_alarm(struct rtc_device *rtc,
                        struct rtc_wkalrm *alrm);
index 93dff5fff524b720e9af7ff84098bb7760e0cd6d..be91db2a701702265a2fb013a0ab878dfd467822 100644 (file)
@@ -151,6 +151,13 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
 static inline void kick_all_cpus_sync(void) {  }
 static inline void wake_up_all_idle_cpus(void) {  }
 
+#ifdef CONFIG_UP_LATE_INIT
+extern void __init up_late_init(void);
+static inline void smp_init(void) { up_late_init(); }
+#else
+static inline void smp_init(void) { }
+#endif
+
 #endif /* !SMP */
 
 /*
index b2b1afbb32024ebbb80be196ea276a7ff53ae43e..cd519a11c2c6723d5d679ffed8cb4320eba6e318 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
  * Written by:
  * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
  */
index bc8677c8eba92c2ed22e23a1089d19486062a512..e69e9b51b21a39b1eacb0284cc08f95c7e56fb53 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 
 #ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_
index 555d254e660662483b8b09ecd075554295848ea6..fdd1d1d51da5d89efc352039f8ec2333fec5872a 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 
 #ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_
index 4835486f58e5abbe71d2535a6f563241ae4d3126..381d368b91b413d1fe99462e13a11e61e8dda5b1 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  */
 
 #ifndef __LINUX_SPI_MXS_SPI_H__
index d5a316550177299fefaee37d6bf3e6a0e3322a8a..6d36dacec4baa1cd88a6bfebd259c4b685a34876 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 #ifndef __linux_pxa2xx_spi_h
 #define __linux_pxa2xx_spi_h
@@ -57,7 +53,6 @@ struct pxa2xx_spi_chip {
 #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
 
 #include <linux/clk.h>
-#include <mach/dma.h>
 
 extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
 
index e546b2ceb6231ddfa83298cfae0dcabee910c1e8..a693188cc08b40a7663dcfff821191d88df3ffb3 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
  */
 
 #ifndef __LINUX_SPI_RENESAS_SPI_H__
index a1121f872ac1482fc5a96c96462d7d63afda2d86..aa0d440ab4f060a38f4ed7f1c59e48b4c349b736 100644 (file)
@@ -9,10 +9,6 @@
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #ifndef SH_HSPI_H
 #define SH_HSPI_H
index 88a14d81c49e3f5a19e89e26b93c41e09af2f427..b087a85f5f72a3511c0c97fa36b22f27b0b6cc1a 100644 (file)
@@ -7,6 +7,8 @@ struct sh_msiof_spi_info {
        u16 num_chipselect;
        unsigned int dma_tx_id;
        unsigned int dma_rx_id;
+       u32 dtdl;
+       u32 syncdl;
 };
 
 #endif /* __SPI_SH_MSIOF_H__ */
index a6ef2a8e6de4bc47b976494c0f59b9b4fa9d2c9d..ed9489d893a487f250868f8de603c84e707feaf8 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #ifndef __LINUX_SPI_H
@@ -260,6 +256,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @pump_messages: work struct for scheduling work to the message pump
  * @queue_lock: spinlock to synchronise access to message queue
  * @queue: message queue
+ * @idling: the device is entering idle state
  * @cur_msg: the currently in-flight message
  * @cur_msg_prepared: spi_prepare_message was called for the currently
  *                    in-flight message
@@ -425,6 +422,7 @@ struct spi_master {
        spinlock_t                      queue_lock;
        struct list_head                queue;
        struct spi_message              *cur_msg;
+       bool                            idling;
        bool                            busy;
        bool                            running;
        bool                            rt;
index 60b59187e590a9cb2c01cc99d6ea422c7d4d998f..414c6fddfcf097433515a9783b34200656046f96 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 
 struct tle62x0_pdata {
index 8f721e465e05b968ed1b5223c4b1e7eb61be78e2..563b3b1799a86cca2c09501028fbad990809b104 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
  */
 
 #ifndef _LINUX_SPI_TSC2005_H
index a2783cb5d2753f6c6eb04e48086f461abedb8b6f..9cfd9623fb0325a692c4929cac048912644dc0ac 100644 (file)
@@ -45,7 +45,7 @@ struct rcu_batch {
 #define RCU_BATCH_INIT(name) { NULL, &(name.head) }
 
 struct srcu_struct {
-       unsigned completed;
+       unsigned long completed;
        struct srcu_struct_array __percpu *per_cpu_ref;
        spinlock_t queue_lock; /* protect ->batch_queue, ->running */
        bool running;
@@ -102,13 +102,11 @@ void process_srcu(struct work_struct *work);
  * define and init an srcu struct at build time.
  * don't call init_srcu_struct() or cleanup_srcu_struct() on it.
  */
-#define DEFINE_SRCU(name)                                              \
+#define __DEFINE_SRCU(name, is_static)                                 \
        static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
-       struct srcu_struct name = __SRCU_STRUCT_INIT(name);
-
-#define DEFINE_STATIC_SRCU(name)                                       \
-       static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
-       static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+       is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_SRCU(name)              __DEFINE_SRCU(name, /* not static */)
+#define DEFINE_STATIC_SRCU(name)       __DEFINE_SRCU(name, static)
 
 /**
  * call_srcu() - Queue a callback for invocation after an SRCU grace period
@@ -135,7 +133,7 @@ int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
 void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
 void synchronize_srcu(struct srcu_struct *sp);
 void synchronize_srcu_expedited(struct srcu_struct *sp);
-long srcu_batches_completed(struct srcu_struct *sp);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
 void srcu_barrier(struct srcu_struct *sp);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
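
The refactor above folds two nearly identical definitions into one __DEFINE_SRCU() helper, passing the storage class as a macro argument (including an empty one). The same trick in a self-contained sketch with illustrative names:

#include <stdio.h>

struct counter { int val; };

/* The storage class is a macro parameter; the comment expands to nothing,
 * yielding an empty (non-static) argument for the external-linkage case. */
#define __DEFINE_COUNTER(name, is_static) \
	is_static struct counter name = { .val = 0 }

#define DEFINE_COUNTER(name)		__DEFINE_COUNTER(name, /* not static */)
#define DEFINE_STATIC_COUNTER(name)	__DEFINE_COUNTER(name, static)

DEFINE_COUNTER(shared_counter);		/* external linkage */
DEFINE_STATIC_COUNTER(private_counter);	/* internal linkage */

int main(void)
{
	shared_counter.val++;
	private_counter.val += 2;
	printf("%d %d\n", shared_counter.val, private_counter.val);
	return 0;
}
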
index 9b63d13ba82b3748cc2fd5b61992fde76d8859a2..3eaae47542751962579a3c6736f18917e4da7ad3 100644 (file)
@@ -33,6 +33,7 @@ extern time64_t ktime_get_real_seconds(void);
 
 extern int __getnstimeofday64(struct timespec64 *tv);
 extern void getnstimeofday64(struct timespec64 *tv);
+extern void getboottime64(struct timespec64 *ts);
 
 #if BITS_PER_LONG == 64
 /**
@@ -72,6 +73,11 @@ static inline struct timespec get_monotonic_coarse(void)
 {
        return get_monotonic_coarse64();
 }
+
+static inline void getboottime(struct timespec *ts)
+{
+       return getboottime64(ts);
+}
 #else
 /**
  * Deprecated. Use do_settimeofday64().
@@ -129,9 +135,15 @@ static inline struct timespec get_monotonic_coarse(void)
 {
        return timespec64_to_timespec(get_monotonic_coarse64());
 }
-#endif
 
-extern void getboottime(struct timespec *ts);
+static inline void getboottime(struct timespec *ts)
+{
+       struct timespec64 ts64;
+
+       getboottime64(&ts64);
+       *ts = timespec64_to_timespec(ts64);
+}
+#endif
 
 #define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
 #define ktime_get_real_ts64(ts)        getnstimeofday64(ts)
@@ -217,6 +229,11 @@ static inline void get_monotonic_boottime(struct timespec *ts)
        *ts = ktime_to_timespec(ktime_get_boottime());
 }
 
+static inline void get_monotonic_boottime64(struct timespec64 *ts)
+{
+       *ts = ktime_to_timespec64(ktime_get_boottime());
+}
+
 static inline void timekeeping_clocktai(struct timespec *ts)
 {
        *ts = ktime_to_timespec(ktime_get_clocktai());
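
The hunks above make getboottime64() the primary interface and turn getboottime() into a wrapper that narrows the result for legacy 32-bit callers. A standalone sketch of that shape, using stand-in types (not the kernel's timespec definitions):

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the kernel's timespec/timespec64 (illustrative only). */
struct ts32 { int32_t tv_sec; long tv_nsec; };
struct ts64 { int64_t tv_sec; long tv_nsec; };

static void getboottime64(struct ts64 *ts)
{
	ts->tv_sec  = 1423526033;	/* placeholder boot time */
	ts->tv_nsec = 0;
}

/* Legacy wrapper: fetch the 64-bit value, then narrow it. A 32-bit
 * tv_sec truncates after 2038, which is why new callers should use the
 * 64-bit interface directly. */
static void getboottime(struct ts32 *ts)
{
	struct ts64 ts64;

	getboottime64(&ts64);
	ts->tv_sec  = (int32_t)ts64.tv_sec;
	ts->tv_nsec = ts64.tv_nsec;
}

int main(void)
{
	struct ts32 ts;

	getboottime(&ts);
	printf("boot: %d.%09ld\n", (int)ts.tv_sec, ts.tv_nsec);
	return 0;
}
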
index e08e21e5f601bcec81ea21131c4e58e69c611850..c72851328ca9cc071620cfdef3892ca91db6d96a 100644 (file)
@@ -173,7 +173,7 @@ extern void syscall_unregfunc(void);
                                TP_PROTO(data_proto),                   \
                                TP_ARGS(data_args),                     \
                                TP_CONDITION(cond),,);                  \
-               if (IS_ENABLED(CONFIG_LOCKDEP)) {                       \
+               if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {             \
                        rcu_read_lock_sched_notrace();                  \
                        rcu_dereference_sched(__tracepoint_##name.funcs);\
                        rcu_read_unlock_sched_notrace();                \
index 2232ed16635ae0929c97f62e1c8c1b09109f0649..537d58eea8a084f1b51fdc3c702eb09758962406 100644 (file)
@@ -363,7 +363,6 @@ do {                                                                        \
  */
 #define wait_event_cmd(wq, condition, cmd1, cmd2)                      \
 do {                                                                   \
-       might_sleep();                                                  \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_cmd(wq, condition, cmd1, cmd2);                    \
@@ -990,6 +989,32 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
                                       mode);
 }
 
+/**
+ * wait_on_bit_timeout - wait for a bit to be cleared or a timeout to elapse
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ * @timeout: timeout, in jiffies
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared. This is similar to wait_on_bit(), except it also takes a
+ * timeout parameter.
+ *
+ * The returned value will be zero if the bit was cleared before the
+ * @timeout elapsed, or non-zero if the @timeout elapsed or the process
+ * received a signal and the mode permitted wakeup on that signal.
+ */
+static inline int
+wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
+{
+       might_sleep();
+       if (!test_bit(bit, word))
+               return 0;
+       return out_of_line_wait_on_bit_timeout(word, bit,
+                                              bit_wait_timeout,
+                                              mode, timeout);
+}
+
 /**
  * wait_on_bit_action - wait for a bit to be cleared
  * @word: the word being waited on, a kernel virtual address
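
A hedged usage sketch for the new helper (hypothetical driver code; MYDEV_BUSY and the flags word are made up): sleep until the busy bit clears, giving up after one second.

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/jiffies.h>

#define MYDEV_BUSY	0	/* hypothetical flag bit */

static int mydev_wait_idle(unsigned long *flags)
{
	/* 0 if the bit cleared in time; non-zero on timeout (or on a
	 * signal, had we passed an interruptible mode). */
	return wait_on_bit_timeout(flags, MYDEV_BUSY,
				   TASK_UNINTERRUPTIBLE, HZ);
}
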
index 7ee2df083542365e9d317fa1dbc2bbcfbbe34aa6..dc8fd81412bf319b7ba59c8b12c65c9363db75f6 100644 (file)
@@ -22,9 +22,9 @@ struct flow_keys {
                __be32 ports;
                __be16 port16[2];
        };
-       u16 thoff;
-       u16 n_proto;
-       u8 ip_proto;
+       u16     thoff;
+       __be16  n_proto;
+       u8      ip_proto;
 };
 
 bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
index f7cbd703d15d24edca61cf9e159cb1ce3857cb5b..09cf5aebb28368fbb93c974f14a78c3fa9a6f408 100644 (file)
@@ -181,7 +181,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
        return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
 }
 
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                           const struct ip_options *sopt,
                           __be32 daddr, __be32 saddr,
                           const struct ip_reply_arg *arg,
index 4292929392b0127479c49c5da3bb33f053f4428c..6e416f6d3e3cbe9ed17406b1c504f33023d76509 100644 (file)
@@ -671,6 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }
 
+u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
+                       struct in6_addr *src);
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
 void ipv6_proxy_select_ident(struct sk_buff *skb);
 
 int ip6_dst_hoplimit(struct dst_entry *dst);
@@ -708,7 +711,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
                                        __be32 flowlabel, bool autolabel)
 {
        if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
-               __be32 hash;
+               u32 hash;
 
                hash = skb_get_hash(skb);
 
@@ -718,7 +721,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
                 */
                hash ^= hash >> 12;
 
-               flowlabel = hash & IPV6_FLOWLABEL_MASK;
+               flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
        }
 
        return flowlabel;
index 3ae969e3acf016474413e1209381c60091befe1a..9eaaa788458607004cb5f160e77c38de02da17ec 100644 (file)
@@ -530,6 +530,8 @@ enum nft_chain_type {
 
 int nft_chain_validate_dependency(const struct nft_chain *chain,
                                  enum nft_chain_type type);
+int nft_chain_validate_hooks(const struct nft_chain *chain,
+                             unsigned int hook_flags);
 
 struct nft_stats {
        u64                     bytes;
index 24945cefc4fde6bfaf9c4560080c91b2e3b12d0d..0ffef1a38efcc2f75e78dab09aa5e111f5b8b72f 100644 (file)
@@ -52,6 +52,7 @@ struct netns_ipv4 {
        struct inet_peer_base   *peers;
        struct tcpm_hash_bucket *tcp_metrics_hash;
        unsigned int            tcp_metrics_hash_log;
+       struct sock  * __percpu *tcp_sk;
        struct netns_frags      frags;
 #ifdef CONFIG_NETFILTER
        struct xt_table         *iptable_filter;
index 3d282cbb66bf1015d28f140ed3bc031ac43afaed..c605d305c577074d11bee6f19479dda8a4949ee3 100644 (file)
@@ -79,6 +79,9 @@ struct Qdisc {
        struct netdev_queue     *dev_queue;
 
        struct gnet_stats_rate_est64    rate_est;
+       struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+       struct gnet_stats_queue __percpu *cpu_qstats;
+
        struct Qdisc            *next_sched;
        struct sk_buff          *gso_skb;
        /*
@@ -86,15 +89,9 @@ struct Qdisc {
         */
        unsigned long           state;
        struct sk_buff_head     q;
-       union {
-               struct gnet_stats_basic_packed bstats;
-               struct gnet_stats_basic_cpu __percpu *cpu_bstats;
-       } __packed;
+       struct gnet_stats_basic_packed bstats;
        unsigned int            __state;
-       union {
-               struct gnet_stats_queue qstats;
-               struct gnet_stats_queue __percpu *cpu_qstats;
-       } __packed;
+       struct gnet_stats_queue qstats;
        struct rcu_head         rcu_head;
        int                     padded;
        atomic_t                refcnt;
index f50f29faf76f1fbcc5237de63c76f7c8200f4a6b..9d9111ef43ae305ad60e11c4b848b2b5f0a7eec3 100644 (file)
@@ -834,8 +834,8 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
 void tcp_get_allowed_congestion_control(char *buf, size_t len);
 int tcp_set_allowed_congestion_control(char *allowed);
 int tcp_set_congestion_control(struct sock *sk, const char *name);
-void tcp_slow_start(struct tcp_sock *tp, u32 acked);
-void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
+u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
 
 u32 tcp_reno_ssthresh(struct sock *sk);
 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
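
With the revised prototypes, tcp_slow_start() now returns the ACKs left unconsumed once cwnd reaches ssthresh, and tcp_cong_avoid_ai() takes that remainder. A hedged sketch of a Reno-style cong_avoid hook written against this API (illustrative, not part of the patch; assumes the usual tcp_sk()/tcp_is_cwnd_limited() helpers):

static void demo_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh) {
		/* Returns the ACKs not used up by slow start. */
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* Spend the remainder on additive increase. */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
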
index 0d74f1de99aa89dee233ed408815459e2ad66d5f..65994a19e84055e7b4f4d8cde83a07eb41c1a69a 100644 (file)
@@ -1707,10 +1707,7 @@ static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t
 
 static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
 {
-       size_t copy_sz;
-
-       copy_sz = min_t(size_t, len, udata->outlen);
-       return copy_to_user(udata->outbuf, src, copy_sz) ? -EFAULT : 0;
+       return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
 }
 
 /**
index 2609048c1d442581a2530f79cc2135aa679f8cb1..3a34f6edc2d1ff6bb9280b3bd9b1a203c5393c68 100644 (file)
@@ -286,7 +286,7 @@ struct ak4113 {
        ak4113_write_t *write;
        ak4113_read_t *read;
        void *private_data;
-       unsigned int init:1;
+       atomic_t wq_processing;
        spinlock_t lock;
        unsigned char regmap[AK4113_WRITABLE_REGS];
        struct snd_kcontrol *kctls[AK4113_CONTROLS];
index 52f02a60dba731e500bbac2a355b1bd5d12a676f..069299a88915b6f27506c06471b08a7519590e88 100644 (file)
@@ -168,7 +168,7 @@ struct ak4114 {
        ak4114_write_t * write;
        ak4114_read_t * read;
        void * private_data;
-       unsigned int init: 1;
+       atomic_t wq_processing;
        spinlock_t lock;
        unsigned char regmap[6];
        unsigned char txcsb[5];
index b4fca9aed2a2b00296ce040428b830b93095d562..ac8b333acb4dd721596db8ddf9bf7fcbc7ab6c1d 100644 (file)
@@ -498,6 +498,7 @@ int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
                                unsigned int mask, unsigned int value);
 
 #ifdef CONFIG_SND_SOC_AC97_BUS
+struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec);
 struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
 void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
 
index 13391d288107a6aa4325c34ba16a00450b7af814..0e763576515398be7bcbea3e1d4b50a25201c853 100644 (file)
        { TLB_LOCAL_SHOOTDOWN,          "local shootdown" },            \
        { TLB_LOCAL_MM_SHOOTDOWN,       "local mm shootdown" }
 
-TRACE_EVENT(tlb_flush,
+TRACE_EVENT_CONDITION(tlb_flush,
 
        TP_PROTO(int reason, unsigned long pages),
        TP_ARGS(reason, pages),
 
+       TP_CONDITION(cpu_online(smp_processor_id())),
+
        TP_STRUCT__entry(
                __field(          int, reason)
                __field(unsigned long,  pages)
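
TRACE_EVENT_CONDITION() evaluates TP_CONDITION() before emitting the event, so no trace_tlb_flush() call site needs changing. A hedged sketch of the same shape for a hypothetical event (name and field made up):

TRACE_EVENT_CONDITION(demo_event,

	TP_PROTO(int value),
	TP_ARGS(value),

	/* The event fires only while the condition holds; callers of
	 * trace_demo_event() need no changes. */
	TP_CONDITION(value > 0),

	TP_STRUCT__entry(
		__field(int, value)
	),

	TP_fast_assign(
		__entry->value = value;
	),

	TP_printk("value=%d", __entry->value)
);
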
index 139b5067345b2ecb2daa1881d8b484ad76cf382a..27609dfcce25916120521b23215dd473fab0051a 100644 (file)
@@ -763,7 +763,7 @@ perf_trace_##call(void *__data, proto)                                      \
        struct ftrace_event_call *event_call = __data;                  \
        struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
        struct ftrace_raw_##call *entry;                                \
-       struct pt_regs __regs;                                          \
+       struct pt_regs *__regs;                                         \
        u64 __addr = 0, __count = 1;                                    \
        struct task_struct *__task = NULL;                              \
        struct hlist_head *head;                                        \
@@ -782,18 +782,19 @@ perf_trace_##call(void *__data, proto)                                    \
                             sizeof(u64));                              \
        __entry_size -= sizeof(u32);                                    \
                                                                        \
-       perf_fetch_caller_regs(&__regs);                                \
        entry = perf_trace_buf_prepare(__entry_size,                    \
                        event_call->event.type, &__regs, &rctx);        \
        if (!entry)                                                     \
                return;                                                 \
                                                                        \
+       perf_fetch_caller_regs(__regs);                                 \
+                                                                       \
        tstruct                                                         \
                                                                        \
        { assign; }                                                     \
                                                                        \
        perf_trace_buf_submit(entry, __entry_size, rctx, __addr,        \
-               __count, &__regs, head, __task);                        \
+               __count, __regs, head, __task);                         \
 }
 
 /*
index 4275b961bf60f65ec9d93a2dbe680e66d184b6a2..867cc5084afbfce8ab24cfd87d5f52cda93697de 100644 (file)
@@ -90,7 +90,6 @@ enum {
 };
 
 enum {
-       IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
        IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
        IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
 };
@@ -202,32 +201,6 @@ struct ib_uverbs_query_device_resp {
        __u8  reserved[4];
 };
 
-enum {
-       IB_USER_VERBS_EX_QUERY_DEVICE_ODP =             1ULL << 0,
-};
-
-struct ib_uverbs_ex_query_device {
-       __u32 comp_mask;
-       __u32 reserved;
-};
-
-struct ib_uverbs_odp_caps {
-       __u64 general_caps;
-       struct {
-               __u32 rc_odp_caps;
-               __u32 uc_odp_caps;
-               __u32 ud_odp_caps;
-       } per_transport_caps;
-       __u32 reserved;
-};
-
-struct ib_uverbs_ex_query_device_resp {
-       struct ib_uverbs_query_device_resp base;
-       __u32 comp_mask;
-       __u32 reserved;
-       struct ib_uverbs_odp_caps odp_caps;
-};
-
 struct ib_uverbs_query_port {
        __u64 response;
        __u8  port_num;
index 9afb971497f4c9972b0dcf14071edf9ef81320be..1354ac09b5163a097ea8e0ca3146716cdc051363 100644 (file)
@@ -470,7 +470,6 @@ choice
 config TREE_RCU
        bool "Tree-based hierarchical RCU"
        depends on !PREEMPT && SMP
-       select IRQ_WORK
        help
          This option selects the RCU implementation that is
          designed for very large SMP systems with hundreds or
@@ -480,7 +479,6 @@ config TREE_RCU
 config PREEMPT_RCU
        bool "Preemptible tree-based hierarchical RCU"
        depends on PREEMPT
-       select IRQ_WORK
        help
          This option selects the RCU implementation that is
          designed for very large SMP systems with hundreds or
@@ -501,9 +499,17 @@ config TINY_RCU
 
 endchoice
 
+config SRCU
+       bool
+       help
+         This option selects the sleepable version of RCU. This version
+         permits arbitrary sleeping or blocking within RCU read-side critical
+         sections.
+
 config TASKS_RCU
        bool "Task_based RCU implementation using voluntary context switch"
        default n
+       select SRCU
        help
          This option enables a task-based RCU implementation that uses
          only voluntary context switch (not preemption!), idle, and
@@ -668,9 +674,10 @@ config RCU_BOOST
 
 config RCU_KTHREAD_PRIO
        int "Real-time priority to use for RCU worker threads"
-       range 1 99
-       depends on RCU_BOOST
-       default 1
+       range 1 99 if RCU_BOOST
+       range 0 99 if !RCU_BOOST
+       default 1 if RCU_BOOST
+       default 0 if !RCU_BOOST
        help
          This option specifies the SCHED_FIFO priority value that will be
          assigned to the rcuc/n and rcub/n threads and is also the value
@@ -1595,6 +1602,7 @@ config PERF_EVENTS
        depends on HAVE_PERF_EVENTS
        select ANON_INODES
        select IRQ_WORK
+       select SRCU
        help
          Enable kernel support for various performance events provided
          by software and hardware.
index 61b993767db53e8401dc1223fa6e028f7edafd74..179ada15d08a4df01eb3ff459cb861617bd1f00a 100644 (file)
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
 
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/smp.h>
-#endif
-
 static int kernel_init(void *);
 
 extern void init_IRQ(void);
@@ -351,15 +347,6 @@ __setup("rdinit=", rdinit_setup);
 
 #ifndef CONFIG_SMP
 static const unsigned int setup_max_cpus = NR_CPUS;
-#ifdef CONFIG_X86_LOCAL_APIC
-static void __init smp_init(void)
-{
-       APIC_init_uniprocessor();
-}
-#else
-#define smp_init()     do { } while (0)
-#endif
-
 static inline void setup_nr_cpu_ids(void) { }
 static inline void smp_prepare_cpus(unsigned int maxcpus) { }
 #endif
index 76768ee812b27b7a48e13710ec23326af9b828af..08561f1acd130bd68314e03278402e629b028ba4 100644 (file)
@@ -231,6 +231,10 @@ config RWSEM_SPIN_ON_OWNER
        def_bool y
        depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
 
+config LOCK_SPIN_ON_OWNER
+       def_bool y
+       depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER
+
 config ARCH_USE_QUEUE_RWLOCK
        bool
 
index 5d220234b3ca5aa2feebe4b80ddba1bfd63a8eda..1972b161c61e98fbe3e3ce003744cf1d2e8c5b1c 100644 (file)
@@ -58,22 +58,23 @@ static int cpu_hotplug_disabled;
 
 static struct {
        struct task_struct *active_writer;
-       struct mutex lock; /* Synchronizes accesses to refcount, */
+       /* wait queue to wake up the active_writer */
+       wait_queue_head_t wq;
+       /* ensures that no writer can become active while readers are active */
+       struct mutex lock;
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
-       int refcount;
-       /* And allows lockless put_online_cpus(). */
-       atomic_t puts_pending;
+       atomic_t refcount;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
 #endif
 } cpu_hotplug = {
        .active_writer = NULL,
+       .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-       .refcount = 0,
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
 #endif
@@ -86,15 +87,6 @@ static struct {
 #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
 #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
 
-static void apply_puts_pending(int max)
-{
-       int delta;
-
-       if (atomic_read(&cpu_hotplug.puts_pending) >= max) {
-               delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
-               cpu_hotplug.refcount -= delta;
-       }
-}
 
 void get_online_cpus(void)
 {
@@ -103,8 +95,7 @@ void get_online_cpus(void)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
-       apply_puts_pending(65536);
-       cpu_hotplug.refcount++;
+       atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
 }
 EXPORT_SYMBOL_GPL(get_online_cpus);
@@ -116,8 +107,7 @@ bool try_get_online_cpus(void)
        if (!mutex_trylock(&cpu_hotplug.lock))
                return false;
        cpuhp_lock_acquire_tryread();
-       apply_puts_pending(65536);
-       cpu_hotplug.refcount++;
+       atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
        return true;
 }
@@ -125,20 +115,18 @@ EXPORT_SYMBOL_GPL(try_get_online_cpus);
 
 void put_online_cpus(void)
 {
+       int refcount;
+
        if (cpu_hotplug.active_writer == current)
                return;
-       if (!mutex_trylock(&cpu_hotplug.lock)) {
-               atomic_inc(&cpu_hotplug.puts_pending);
-               cpuhp_lock_release();
-               return;
-       }
 
-       if (WARN_ON(!cpu_hotplug.refcount))
-               cpu_hotplug.refcount++; /* try to fix things up */
+       refcount = atomic_dec_return(&cpu_hotplug.refcount);
+       if (WARN_ON(refcount < 0)) /* try to fix things up */
+               atomic_inc(&cpu_hotplug.refcount);
+
+       if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
+               wake_up(&cpu_hotplug.wq);
 
-       if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
-               wake_up_process(cpu_hotplug.active_writer);
-       mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
 
 }
@@ -168,18 +156,20 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
  */
 void cpu_hotplug_begin(void)
 {
-       cpu_hotplug.active_writer = current;
+       DEFINE_WAIT(wait);
 
+       cpu_hotplug.active_writer = current;
        cpuhp_lock_acquire();
+
        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
-               apply_puts_pending(1);
-               if (likely(!cpu_hotplug.refcount))
-                       break;
-               __set_current_state(TASK_UNINTERRUPTIBLE);
+               prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
+               if (likely(!atomic_read(&cpu_hotplug.refcount)))
+                       break;
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
+       finish_wait(&cpu_hotplug.wq, &wait);
 }
 
 void cpu_hotplug_done(void)
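
The rework above replaces the mutex-protected integer (plus the puts_pending escape hatch) with a plain atomic refcount and a waitqueue the writer sleeps on. A userspace pthread analogue of the resulting reader/writer scheme (a sketch, not kernel code):

#include <pthread.h>
#include <stdatomic.h>

static atomic_int      refcount;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;

static void get_online(void)		/* reader side */
{
	pthread_mutex_lock(&lock);	/* blocks while a writer holds it */
	atomic_fetch_add(&refcount, 1);
	pthread_mutex_unlock(&lock);
}

static void put_online(void)
{
	/* Last reader out wakes a waiting writer. */
	if (atomic_fetch_sub(&refcount, 1) == 1) {
		pthread_mutex_lock(&lock);
		pthread_cond_signal(&wq);
		pthread_mutex_unlock(&lock);
	}
}

static void hotplug_begin(void)		/* writer side */
{
	pthread_mutex_lock(&lock);	/* stop new readers */
	while (atomic_load(&refcount))	/* wait out existing ones */
		pthread_cond_wait(&wq, &lock);
	/* ... mutate state; a hotplug_done() would unlock &lock ... */
}
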
index 882f835a0d859e011848069ed6ee716f3def4dee..7f2fbb8b5069b3258bdd9721c60b850f965953d1 100644 (file)
@@ -872,22 +872,32 @@ void perf_pmu_enable(struct pmu *pmu)
                pmu->pmu_enable(pmu);
 }
 
-static DEFINE_PER_CPU(struct list_head, rotation_list);
+static DEFINE_PER_CPU(struct list_head, active_ctx_list);
 
 /*
- * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
- * because they're strictly cpu affine and rotate_start is called with IRQs
- * disabled, while rotate_context is called from IRQ context.
+ * perf_event_ctx_activate(), perf_event_ctx_deactivate(), and
+ * perf_event_task_tick() are fully serialized because they're strictly cpu
+ * affine and perf_event_ctx{activate,deactivate} are called with IRQs
+ * disabled, while perf_event_task_tick is called from IRQ context.
  */
-static void perf_pmu_rotate_start(struct pmu *pmu)
+static void perf_event_ctx_activate(struct perf_event_context *ctx)
 {
-       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-       struct list_head *head = this_cpu_ptr(&rotation_list);
+       struct list_head *head = this_cpu_ptr(&active_ctx_list);
 
        WARN_ON(!irqs_disabled());
 
-       if (list_empty(&cpuctx->rotation_list))
-               list_add(&cpuctx->rotation_list, head);
+       WARN_ON(!list_empty(&ctx->active_ctx_list));
+
+       list_add(&ctx->active_ctx_list, head);
+}
+
+static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
+{
+       WARN_ON(!irqs_disabled());
+
+       WARN_ON(list_empty(&ctx->active_ctx_list));
+
+       list_del_init(&ctx->active_ctx_list);
 }
 
 static void get_ctx(struct perf_event_context *ctx)
@@ -906,6 +916,84 @@ static void put_ctx(struct perf_event_context *ctx)
        }
 }
 
+/*
+ * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
+ * perf_pmu_migrate_context() we need some magic.
+ *
+ * Those places that change perf_event::ctx will hold both
+ * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
+ *
+ * Lock ordering is by mutex address. There is one other site where
+ * perf_event_context::mutex nests and that is put_event(). But remember that
+ * that is a parent<->child context relation, and migration does not affect
+ * children, therefore these two orderings should not interact.
+ *
+ * The change in perf_event::ctx does not affect children (as claimed above)
+ * because the sys_perf_event_open() case will install a new event and break
+ * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
+ * concerned with cpuctx and that doesn't have children.
+ *
+ * The places that change perf_event::ctx will issue:
+ *
+ *   perf_remove_from_context();
+ *   synchronize_rcu();
+ *   perf_install_in_context();
+ *
+ * to effect the change. The remove_from_context() + synchronize_rcu() should
+ * quiesce the event, after which we can install it in the new location. This
+ * means that only external vectors (perf_fops, prctl) can perturb the event
+ * while in transit. Therefore all such accessors should also acquire
+ * perf_event_context::mutex to serialize against this.
+ *
+ * However, because event->ctx can change while we're waiting to acquire
+ * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
+ * function.
+ *
+ * Lock order:
+ *     task_struct::perf_event_mutex
+ *       perf_event_context::mutex
+ *         perf_event_context::lock
+ *         perf_event::child_mutex;
+ *         perf_event::mmap_mutex
+ *         mmap_sem
+ */
+static struct perf_event_context *
+perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
+{
+       struct perf_event_context *ctx;
+
+again:
+       rcu_read_lock();
+       ctx = ACCESS_ONCE(event->ctx);
+       if (!atomic_inc_not_zero(&ctx->refcount)) {
+               rcu_read_unlock();
+               goto again;
+       }
+       rcu_read_unlock();
+
+       mutex_lock_nested(&ctx->mutex, nesting);
+       if (event->ctx != ctx) {
+               mutex_unlock(&ctx->mutex);
+               put_ctx(ctx);
+               goto again;
+       }
+
+       return ctx;
+}
+
+static inline struct perf_event_context *
+perf_event_ctx_lock(struct perf_event *event)
+{
+       return perf_event_ctx_lock_nested(event, 0);
+}
+
+static void perf_event_ctx_unlock(struct perf_event *event,
+                                 struct perf_event_context *ctx)
+{
+       mutex_unlock(&ctx->mutex);
+       put_ctx(ctx);
+}
+
 /*
  * This must be done under the ctx->lock, such as to serialize against
  * context_equiv(), therefore we cannot call put_ctx() since that might end up
@@ -1155,8 +1243,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
                ctx->nr_branch_stack++;
 
        list_add_rcu(&event->event_entry, &ctx->event_list);
-       if (!ctx->nr_events)
-               perf_pmu_rotate_start(ctx->pmu);
        ctx->nr_events++;
        if (event->attr.inherit_stat)
                ctx->nr_stat++;
@@ -1275,6 +1361,8 @@ static void perf_group_attach(struct perf_event *event)
        if (group_leader == event)
                return;
 
+       WARN_ON_ONCE(group_leader->ctx != event->ctx);
+
        if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
                        !is_software_event(event))
                group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
@@ -1296,6 +1384,10 @@ static void
 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 {
        struct perf_cpu_context *cpuctx;
+
+       WARN_ON_ONCE(event->ctx != ctx);
+       lockdep_assert_held(&ctx->lock);
+
        /*
         * We can have double detach due to exit/hot-unplug + close.
         */
@@ -1380,6 +1472,8 @@ static void perf_group_detach(struct perf_event *event)
 
                /* Inherit group flags from the previous leader */
                sibling->group_flags = event->group_flags;
+
+               WARN_ON_ONCE(sibling->ctx != event->ctx);
        }
 
 out:
@@ -1442,6 +1536,10 @@ event_sched_out(struct perf_event *event,
 {
        u64 tstamp = perf_event_time(event);
        u64 delta;
+
+       WARN_ON_ONCE(event->ctx != ctx);
+       lockdep_assert_held(&ctx->lock);
+
        /*
         * An event which could not be activated because of
         * filter mismatch still needs to have its timings
@@ -1471,7 +1569,8 @@ event_sched_out(struct perf_event *event,
 
        if (!is_software_event(event))
                cpuctx->active_oncpu--;
-       ctx->nr_active--;
+       if (!--ctx->nr_active)
+               perf_event_ctx_deactivate(ctx);
        if (event->attr.freq && event->attr.sample_freq)
                ctx->nr_freq--;
        if (event->attr.exclusive || !cpuctx->active_oncpu)
@@ -1654,7 +1753,7 @@ int __perf_event_disable(void *info)
  * is the current context on this CPU and preemption is disabled,
  * hence we can't get into perf_event_task_sched_out for this context.
  */
-void perf_event_disable(struct perf_event *event)
+static void _perf_event_disable(struct perf_event *event)
 {
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = ctx->task;
@@ -1695,6 +1794,19 @@ retry:
        }
        raw_spin_unlock_irq(&ctx->lock);
 }
+
+/*
+ * Strictly speaking kernel users cannot create groups and therefore this
+ * interface does not need the perf_event_ctx_lock() magic.
+ */
+void perf_event_disable(struct perf_event *event)
+{
+       struct perf_event_context *ctx;
+
+       ctx = perf_event_ctx_lock(event);
+       _perf_event_disable(event);
+       perf_event_ctx_unlock(event, ctx);
+}
 EXPORT_SYMBOL_GPL(perf_event_disable);
 
 static void perf_set_shadow_time(struct perf_event *event,
@@ -1782,7 +1894,8 @@ event_sched_in(struct perf_event *event,
 
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
-       ctx->nr_active++;
+       if (!ctx->nr_active++)
+               perf_event_ctx_activate(ctx);
        if (event->attr.freq && event->attr.sample_freq)
                ctx->nr_freq++;
 
@@ -2158,7 +2271,7 @@ unlock:
  * perf_event_for_each_child or perf_event_for_each as described
  * for perf_event_disable.
  */
-void perf_event_enable(struct perf_event *event)
+static void _perf_event_enable(struct perf_event *event)
 {
        struct perf_event_context *ctx = event->ctx;
        struct task_struct *task = ctx->task;
@@ -2214,9 +2327,21 @@ retry:
 out:
        raw_spin_unlock_irq(&ctx->lock);
 }
+
+/*
+ * See perf_event_disable();
+ */
+void perf_event_enable(struct perf_event *event)
+{
+       struct perf_event_context *ctx;
+
+       ctx = perf_event_ctx_lock(event);
+       _perf_event_enable(event);
+       perf_event_ctx_unlock(event, ctx);
+}
 EXPORT_SYMBOL_GPL(perf_event_enable);
 
-int perf_event_refresh(struct perf_event *event, int refresh)
+static int _perf_event_refresh(struct perf_event *event, int refresh)
 {
        /*
         * not supported on inherited events
@@ -2225,10 +2350,25 @@ int perf_event_refresh(struct perf_event *event, int refresh)
                return -EINVAL;
 
        atomic_add(refresh, &event->event_limit);
-       perf_event_enable(event);
+       _perf_event_enable(event);
 
        return 0;
 }
+
+/*
+ * See perf_event_disable()
+ */
+int perf_event_refresh(struct perf_event *event, int refresh)
+{
+       struct perf_event_context *ctx;
+       int ret;
+
+       ctx = perf_event_ctx_lock(event);
+       ret = _perf_event_refresh(event, refresh);
+       perf_event_ctx_unlock(event, ctx);
+
+       return ret;
+}
 EXPORT_SYMBOL_GPL(perf_event_refresh);
 
 static void ctx_sched_out(struct perf_event_context *ctx,
@@ -2612,12 +2752,6 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 
        perf_pmu_enable(ctx->pmu);
        perf_ctx_unlock(cpuctx, ctx);
-
-       /*
-        * Since these rotations are per-cpu, we need to ensure the
-        * cpu-context we got scheduled on is actually rotating.
-        */
-       perf_pmu_rotate_start(ctx->pmu);
 }
 
 /*
@@ -2905,25 +3039,18 @@ static void rotate_ctx(struct perf_event_context *ctx)
                list_rotate_left(&ctx->flexible_groups);
 }
 
-/*
- * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
- * because they're strictly cpu affine and rotate_start is called with IRQs
- * disabled, while rotate_context is called from IRQ context.
- */
 static int perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
        struct perf_event_context *ctx = NULL;
-       int rotate = 0, remove = 1;
+       int rotate = 0;
 
        if (cpuctx->ctx.nr_events) {
-               remove = 0;
                if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
                        rotate = 1;
        }
 
        ctx = cpuctx->task_ctx;
        if (ctx && ctx->nr_events) {
-               remove = 0;
                if (ctx->nr_events != ctx->nr_active)
                        rotate = 1;
        }
@@ -2947,8 +3074,6 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
        perf_pmu_enable(cpuctx->ctx.pmu);
        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 done:
-       if (remove)
-               list_del_init(&cpuctx->rotation_list);
 
        return rotate;
 }
@@ -2966,9 +3091,8 @@ bool perf_event_can_stop_tick(void)
 
 void perf_event_task_tick(void)
 {
-       struct list_head *head = this_cpu_ptr(&rotation_list);
-       struct perf_cpu_context *cpuctx, *tmp;
-       struct perf_event_context *ctx;
+       struct list_head *head = this_cpu_ptr(&active_ctx_list);
+       struct perf_event_context *ctx, *tmp;
        int throttled;
 
        WARN_ON(!irqs_disabled());
@@ -2976,14 +3100,8 @@ void perf_event_task_tick(void)
        __this_cpu_inc(perf_throttled_seq);
        throttled = __this_cpu_xchg(perf_throttled_count, 0);
 
-       list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
-               ctx = &cpuctx->ctx;
+       list_for_each_entry_safe(ctx, tmp, head, active_ctx_list)
                perf_adjust_freq_unthr_context(ctx, throttled);
-
-               ctx = cpuctx->task_ctx;
-               if (ctx)
-                       perf_adjust_freq_unthr_context(ctx, throttled);
-       }
 }
 
 static int event_enable_on_exec(struct perf_event *event,
@@ -3142,6 +3260,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
 {
        raw_spin_lock_init(&ctx->lock);
        mutex_init(&ctx->mutex);
+       INIT_LIST_HEAD(&ctx->active_ctx_list);
        INIT_LIST_HEAD(&ctx->pinned_groups);
        INIT_LIST_HEAD(&ctx->flexible_groups);
        INIT_LIST_HEAD(&ctx->event_list);
@@ -3421,7 +3540,16 @@ static void perf_remove_from_owner(struct perf_event *event)
        rcu_read_unlock();
 
        if (owner) {
-               mutex_lock(&owner->perf_event_mutex);
+               /*
+                * If we're here through perf_event_exit_task() we're already
+                * holding ctx->mutex which would be an inversion wrt. the
+                * normal lock order.
+                *
+                * However, we can safely take this lock because it's the child
+                * ctx->mutex.
+                */
+               mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
+
                /*
                 * We have to re-check the event->owner field, if it is cleared
                 * we raced with perf_event_exit_task(), acquiring the mutex
@@ -3440,7 +3568,7 @@ static void perf_remove_from_owner(struct perf_event *event)
  */
 static void put_event(struct perf_event *event)
 {
-       struct perf_event_context *ctx = event->ctx;
+       struct perf_event_context *ctx;
 
        if (!atomic_long_dec_and_test(&event->refcount))
                return;
@@ -3448,7 +3576,6 @@ static void put_event(struct perf_event *event)
        if (!is_kernel_event(event))
                perf_remove_from_owner(event);
 
-       WARN_ON_ONCE(ctx->parent_ctx);
        /*
         * There are two ways this annotation is useful:
         *
@@ -3461,7 +3588,8 @@ static void put_event(struct perf_event *event)
         *     the last filedesc died, so there is no possibility
         *     to trigger the AB-BA case.
         */
-       mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
+       ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
+       WARN_ON_ONCE(ctx->parent_ctx);
        perf_remove_from_context(event, true);
        mutex_unlock(&ctx->mutex);
 
@@ -3547,12 +3675,13 @@ static int perf_event_read_group(struct perf_event *event,
                                   u64 read_format, char __user *buf)
 {
        struct perf_event *leader = event->group_leader, *sub;
-       int n = 0, size = 0, ret = -EFAULT;
        struct perf_event_context *ctx = leader->ctx;
-       u64 values[5];
+       int n = 0, size = 0, ret;
        u64 count, enabled, running;
+       u64 values[5];
+
+       lockdep_assert_held(&ctx->mutex);
 
-       mutex_lock(&ctx->mutex);
        count = perf_event_read_value(leader, &enabled, &running);
 
        values[n++] = 1 + leader->nr_siblings;
@@ -3567,7 +3696,7 @@ static int perf_event_read_group(struct perf_event *event,
        size = n * sizeof(u64);
 
        if (copy_to_user(buf, values, size))
-               goto unlock;
+               return -EFAULT;
 
        ret = size;
 
@@ -3581,14 +3710,11 @@ static int perf_event_read_group(struct perf_event *event,
                size = n * sizeof(u64);
 
                if (copy_to_user(buf + ret, values, size)) {
-                       ret = -EFAULT;
-                       goto unlock;
+                       return -EFAULT;
                }
 
                ret += size;
        }
-unlock:
-       mutex_unlock(&ctx->mutex);
 
        return ret;
 }
@@ -3660,8 +3786,14 @@ static ssize_t
 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
        struct perf_event *event = file->private_data;
+       struct perf_event_context *ctx;
+       int ret;
+
+       ctx = perf_event_ctx_lock(event);
+       ret = perf_read_hw(event, buf, count);
+       perf_event_ctx_unlock(event, ctx);
 
-       return perf_read_hw(event, buf, count);
+       return ret;
 }
 
 static unsigned int perf_poll(struct file *file, poll_table *wait)
@@ -3687,7 +3819,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
        return events;
 }
 
-static void perf_event_reset(struct perf_event *event)
+static void _perf_event_reset(struct perf_event *event)
 {
        (void)perf_event_read(event);
        local64_set(&event->count, 0);
@@ -3706,6 +3838,7 @@ static void perf_event_for_each_child(struct perf_event *event,
        struct perf_event *child;
 
        WARN_ON_ONCE(event->ctx->parent_ctx);
+
        mutex_lock(&event->child_mutex);
        func(event);
        list_for_each_entry(child, &event->child_list, child_list)
@@ -3719,14 +3852,13 @@ static void perf_event_for_each(struct perf_event *event,
        struct perf_event_context *ctx = event->ctx;
        struct perf_event *sibling;
 
-       WARN_ON_ONCE(ctx->parent_ctx);
-       mutex_lock(&ctx->mutex);
+       lockdep_assert_held(&ctx->mutex);
+
        event = event->group_leader;
 
        perf_event_for_each_child(event, func);
        list_for_each_entry(sibling, &event->sibling_list, group_entry)
                perf_event_for_each_child(sibling, func);
-       mutex_unlock(&ctx->mutex);
 }
 
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
@@ -3796,25 +3928,24 @@ static int perf_event_set_output(struct perf_event *event,
                                 struct perf_event *output_event);
 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
 
-static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
 {
-       struct perf_event *event = file->private_data;
        void (*func)(struct perf_event *);
        u32 flags = arg;
 
        switch (cmd) {
        case PERF_EVENT_IOC_ENABLE:
-               func = perf_event_enable;
+               func = _perf_event_enable;
                break;
        case PERF_EVENT_IOC_DISABLE:
-               func = perf_event_disable;
+               func = _perf_event_disable;
                break;
        case PERF_EVENT_IOC_RESET:
-               func = perf_event_reset;
+               func = _perf_event_reset;
                break;
 
        case PERF_EVENT_IOC_REFRESH:
-               return perf_event_refresh(event, arg);
+               return _perf_event_refresh(event, arg);
 
        case PERF_EVENT_IOC_PERIOD:
                return perf_event_period(event, (u64 __user *)arg);
@@ -3861,6 +3992,19 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        return 0;
 }
 
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       struct perf_event *event = file->private_data;
+       struct perf_event_context *ctx;
+       long ret;
+
+       ctx = perf_event_ctx_lock(event);
+       ret = _perf_ioctl(event, cmd, arg);
+       perf_event_ctx_unlock(event, ctx);
+
+       return ret;
+}
+
 #ifdef CONFIG_COMPAT
 static long perf_compat_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg)
@@ -3883,11 +4027,15 @@ static long perf_compat_ioctl(struct file *file, unsigned int cmd,
 
 int perf_event_task_enable(void)
 {
+       struct perf_event_context *ctx;
        struct perf_event *event;
 
        mutex_lock(&current->perf_event_mutex);
-       list_for_each_entry(event, &current->perf_event_list, owner_entry)
-               perf_event_for_each_child(event, perf_event_enable);
+       list_for_each_entry(event, &current->perf_event_list, owner_entry) {
+               ctx = perf_event_ctx_lock(event);
+               perf_event_for_each_child(event, _perf_event_enable);
+               perf_event_ctx_unlock(event, ctx);
+       }
        mutex_unlock(&current->perf_event_mutex);
 
        return 0;
@@ -3895,11 +4043,15 @@ int perf_event_task_enable(void)
 
 int perf_event_task_disable(void)
 {
+       struct perf_event_context *ctx;
        struct perf_event *event;
 
        mutex_lock(&current->perf_event_mutex);
-       list_for_each_entry(event, &current->perf_event_list, owner_entry)
-               perf_event_for_each_child(event, perf_event_disable);
+       list_for_each_entry(event, &current->perf_event_list, owner_entry) {
+               ctx = perf_event_ctx_lock(event);
+               perf_event_for_each_child(event, _perf_event_disable);
+               perf_event_ctx_unlock(event, ctx);
+       }
        mutex_unlock(&current->perf_event_mutex);
 
        return 0;
@@ -5889,6 +6041,8 @@ end:
        rcu_read_unlock();
 }
 
+DEFINE_PER_CPU(struct pt_regs, __perf_regs[4]);
+
 int perf_swevent_get_recursion_context(void)
 {
        struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
@@ -5904,21 +6058,30 @@ inline void perf_swevent_put_recursion_context(int rctx)
        put_recursion_context(swhash->recursion, rctx);
 }
 
-void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
+void ___perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
        struct perf_sample_data data;
-       int rctx;
 
-       preempt_disable_notrace();
-       rctx = perf_swevent_get_recursion_context();
-       if (rctx < 0)
+       if (WARN_ON_ONCE(!regs))
                return;
 
        perf_sample_data_init(&data, addr, 0);
-
        do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
+}
+
+void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
+{
+       int rctx;
+
+       preempt_disable_notrace();
+       rctx = perf_swevent_get_recursion_context();
+       if (unlikely(rctx < 0))
+               goto fail;
+
+       ___perf_sw_event(event_id, nr, regs, addr);
 
        perf_swevent_put_recursion_context(rctx);
+fail:
        preempt_enable_notrace();
 }
 
@@ -6776,12 +6939,10 @@ skip_type:
                __perf_event_init_context(&cpuctx->ctx);
                lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
                lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
-               cpuctx->ctx.type = cpu_context;
                cpuctx->ctx.pmu = pmu;
 
                __perf_cpu_hrtimer_init(cpuctx, cpu);
 
-               INIT_LIST_HEAD(&cpuctx->rotation_list);
                cpuctx->unique_pmu = pmu;
        }
 
@@ -6854,6 +7015,20 @@ void perf_pmu_unregister(struct pmu *pmu)
 }
 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
 
+static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
+{
+       int ret;
+
+       if (!try_module_get(pmu->module))
+               return -ENODEV;
+       event->pmu = pmu;
+       ret = pmu->event_init(event);
+       if (ret)
+               module_put(pmu->module);
+
+       return ret;
+}
+
 struct pmu *perf_init_event(struct perf_event *event)
 {
        struct pmu *pmu = NULL;
@@ -6866,24 +7041,14 @@ struct pmu *perf_init_event(struct perf_event *event)
        pmu = idr_find(&pmu_idr, event->attr.type);
        rcu_read_unlock();
        if (pmu) {
-               if (!try_module_get(pmu->module)) {
-                       pmu = ERR_PTR(-ENODEV);
-                       goto unlock;
-               }
-               event->pmu = pmu;
-               ret = pmu->event_init(event);
+               ret = perf_try_init_event(pmu, event);
                if (ret)
                        pmu = ERR_PTR(ret);
                goto unlock;
        }
 
        list_for_each_entry_rcu(pmu, &pmus, entry) {
-               if (!try_module_get(pmu->module)) {
-                       pmu = ERR_PTR(-ENODEV);
-                       goto unlock;
-               }
-               event->pmu = pmu;
-               ret = pmu->event_init(event);
+               ret = perf_try_init_event(pmu, event);
                if (!ret)
                        goto unlock;
 
@@ -7247,6 +7412,15 @@ out:
        return ret;
 }
 
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+       if (b < a)
+               swap(a, b);
+
+       mutex_lock(a);
+       mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
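/*
 * Editor's sketch follows outside the patch: mutex_lock_double() above avoids
 * an AB-BA deadlock by imposing one global acquisition order, lowest lock
 * address first. The same trick in a userspace pthread sketch (illustrative
 * only):
 */

#include <pthread.h>

/* Always take the lower-addressed mutex first; two threads locking the
 * same pair in either order then agree on one global order and cannot
 * deadlock against each other. */
static void lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (b < a) {
		pthread_mutex_t *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

static void unlock_double(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);	/* unlock order doesn't matter */
	pthread_mutex_unlock(b);
}
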
 /**
  * sys_perf_event_open - open a performance event, associate it to a task/cpu
  *
@@ -7262,7 +7436,7 @@ SYSCALL_DEFINE5(perf_event_open,
        struct perf_event *group_leader = NULL, *output_event = NULL;
        struct perf_event *event, *sibling;
        struct perf_event_attr attr;
-       struct perf_event_context *ctx;
+       struct perf_event_context *ctx, *uninitialized_var(gctx);
        struct file *event_file = NULL;
        struct fd group = {NULL, 0};
        struct task_struct *task = NULL;
@@ -7420,7 +7594,19 @@ SYSCALL_DEFINE5(perf_event_open,
                 * task or CPU context:
                 */
                if (move_group) {
-                       if (group_leader->ctx->type != ctx->type)
+                       /*
+                        * Make sure we're both on the same task, or both
+                        * per-cpu events.
+                        */
+                       if (group_leader->ctx->task != ctx->task)
+                               goto err_context;
+
+                       /*
+                        * Make sure both events are for the same CPU;
+                        * grouping events for different CPUs is broken, since
+                        * you can never concurrently schedule them anyhow.
+                        */
+                       if (group_leader->cpu != event->cpu)
                                goto err_context;
                } else {
                        if (group_leader->ctx != ctx)
@@ -7448,43 +7634,68 @@ SYSCALL_DEFINE5(perf_event_open,
        }
 
        if (move_group) {
-               struct perf_event_context *gctx = group_leader->ctx;
-
-               mutex_lock(&gctx->mutex);
-               perf_remove_from_context(group_leader, false);
+               gctx = group_leader->ctx;
 
                /*
-                * Removing from the context ends up with disabled
-                * event. What we want here is event in the initial
-                * startup state, ready to be add into new context.
+                * See perf_event_ctx_lock() for comments on the details
+                * of swizzling perf_event::ctx.
                 */
-               perf_event__state_init(group_leader);
+               mutex_lock_double(&gctx->mutex, &ctx->mutex);
+
+               perf_remove_from_context(group_leader, false);
+
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
                        perf_remove_from_context(sibling, false);
-                       perf_event__state_init(sibling);
                        put_ctx(gctx);
                }
-               mutex_unlock(&gctx->mutex);
-               put_ctx(gctx);
+       } else {
+               mutex_lock(&ctx->mutex);
        }
 
        WARN_ON_ONCE(ctx->parent_ctx);
-       mutex_lock(&ctx->mutex);
 
        if (move_group) {
+               /*
+                * Wait for everybody to stop referencing the events through
+                * the old lists, before installing them on the new lists.
+                */
                synchronize_rcu();
-               perf_install_in_context(ctx, group_leader, group_leader->cpu);
-               get_ctx(ctx);
+
+               /*
+                * Install the group siblings before the group leader.
+                *
+                * Because a group leader will try to install the entire group
+                * (through the sibling list, which is still intact), we can
+                * end up with siblings installed in the wrong context.
+                *
+                * By installing siblings first we NO-OP, because they are not
+                * yet reachable through the group lists.
+                */
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
+                       perf_event__state_init(sibling);
                        perf_install_in_context(ctx, sibling, sibling->cpu);
                        get_ctx(ctx);
                }
+
+               /*
+                * Removing from the context ends up with a disabled
+                * event. What we want here is an event in the initial
+                * startup state, ready to be added into the new context.
+                */
+               perf_event__state_init(group_leader);
+               perf_install_in_context(ctx, group_leader, group_leader->cpu);
+               get_ctx(ctx);
        }
 
        perf_install_in_context(ctx, event, event->cpu);
        perf_unpin_context(ctx);
+
+       if (move_group) {
+               mutex_unlock(&gctx->mutex);
+               put_ctx(gctx);
+       }
        mutex_unlock(&ctx->mutex);
 
        put_online_cpus();
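The move_group path above follows a strict ordering: detach everything from the old context while holding both context mutexes, wait out concurrent readers, then install siblings before the leader so the leader cannot drag a stale sibling list into the wrong context. A toy sketch of just that ordering, with hypothetical helpers standing in for the kernel functions:

#include <stdio.h>

/* Invented stand-ins for the kernel steps; illustrative only. */
static void remove_from_old_ctx(const char *ev) { printf("remove  %s\n", ev); }
static void wait_for_readers(void)              { puts("synchronize_rcu()"); }
static void install_in_new_ctx(const char *ev)  { printf("install %s\n", ev); }

int main(void)
{
        const char *siblings[] = { "sibling0", "sibling1" };
        int i;

        /* 1. Detach the whole group from the old context, with both
         *    context mutexes held (mutex_lock_double()). */
        remove_from_old_ctx("leader");
        for (i = 0; i < 2; i++)
                remove_from_old_ctx(siblings[i]);

        /* 2. Wait until no reader can still see the events on the old
         *    lists. */
        wait_for_readers();

        /* 3. Siblings first: while unreachable through the group lists
         *    their installation is a NO-OP, so the leader cannot drag
         *    them into the wrong context. */
        for (i = 0; i < 2; i++)
                install_in_new_ctx(siblings[i]);

        /* 4. The leader last, once its sibling list is fully in place. */
        install_in_new_ctx("leader");
        return 0;
}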
@@ -7592,7 +7803,11 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
        src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
        dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
 
-       mutex_lock(&src_ctx->mutex);
+       /*
+        * See perf_event_ctx_lock() for comments on the details
+        * of swizzling perf_event::ctx.
+        */
+       mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
        list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
                                 event_entry) {
                perf_remove_from_context(event, false);
@@ -7600,11 +7815,36 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
                put_ctx(src_ctx);
                list_add(&event->migrate_entry, &events);
        }
-       mutex_unlock(&src_ctx->mutex);
 
+       /*
+        * Wait for the events to quiesce before reinstating them.
+        */
        synchronize_rcu();
 
-       mutex_lock(&dst_ctx->mutex);
+       /*
+        * Reinstate events in two passes.
+        *
+        * Skip over group leaders and only install siblings on this first
+        * pass; siblings will not get enabled without a leader, but a
+        * leader will enable its siblings, even if those are still on the old
+        * context.
+        */
+       list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
+               if (event->group_leader == event)
+                       continue;
+
+               list_del(&event->migrate_entry);
+               if (event->state >= PERF_EVENT_STATE_OFF)
+                       event->state = PERF_EVENT_STATE_INACTIVE;
+               account_event_cpu(event, dst_cpu);
+               perf_install_in_context(dst_ctx, event, dst_cpu);
+               get_ctx(dst_ctx);
+       }
+
+       /*
+        * Once all the siblings are set up properly, install the group leaders
+        * to make it go.
+        */
        list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
                list_del(&event->migrate_entry);
                if (event->state >= PERF_EVENT_STATE_OFF)
@@ -7614,6 +7854,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
                get_ctx(dst_ctx);
        }
        mutex_unlock(&dst_ctx->mutex);
+       mutex_unlock(&src_ctx->mutex);
 }
 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
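perf_pmu_migrate_context() now reinstalls in two passes because enabling is driven by leaders: a sibling installed alone stays dormant, while a leader enables its whole sibling list the moment it lands. A self-contained sketch of the two-pass split, with a toy struct ev in place of struct perf_event:

#include <stdio.h>

/* Toy event: it is a group leader when it leads itself. Illustrative
 * only; not the kernel's struct perf_event. */
struct ev {
        const char *name;
        struct ev *leader;
};

int main(void)
{
        struct ev leader = { "leader", &leader };
        struct ev sib    = { "sibling", &leader };
        struct ev *events[] = { &leader, &sib };
        int i;

        /* Pass 1: siblings only. They stay dormant without their
         * leader, so installing them early cannot enable anything. */
        for (i = 0; i < 2; i++)
                if (events[i]->leader != events[i])
                        printf("pass 1: install %s\n", events[i]->name);

        /* Pass 2: leaders. Each one enables its already-installed
         * siblings, so nothing is enabled against the old context. */
        for (i = 0; i < 2; i++)
                if (events[i]->leader == events[i])
                        printf("pass 2: install %s\n", events[i]->name);
        return 0;
}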
 
@@ -7800,14 +8041,19 @@ static void perf_free_event(struct perf_event *event,
 
        put_event(parent);
 
+       raw_spin_lock_irq(&ctx->lock);
        perf_group_detach(event);
        list_del_event(event, ctx);
+       raw_spin_unlock_irq(&ctx->lock);
        free_event(event);
 }
 
 /*
- * free an unexposed, unused context as created by inheritance by
+ * Free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of failure.
+ *
+ * Not all locks are strictly required, but take them anyway to be nice and
+ * help out with the lockdep assertions.
  */
 void perf_event_free_task(struct task_struct *task)
 {
@@ -8126,7 +8372,7 @@ static void __init perf_event_init_all_cpus(void)
        for_each_possible_cpu(cpu) {
                swhash = &per_cpu(swevent_htable, cpu);
                mutex_init(&swhash->hlist_mutex);
-               INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
+               INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
        }
 }
 
@@ -8147,22 +8393,11 @@ static void perf_event_init_cpu(int cpu)
 }
 
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
-static void perf_pmu_rotate_stop(struct pmu *pmu)
-{
-       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-
-       WARN_ON(!irqs_disabled());
-
-       list_del_init(&cpuctx->rotation_list);
-}
-
 static void __perf_event_exit_context(void *__info)
 {
        struct remove_event re = { .detach_group = true };
        struct perf_event_context *ctx = __info;
 
-       perf_pmu_rotate_stop(ctx->pmu);
-
        rcu_read_lock();
        list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
                __perf_remove_from_context(&re);
index 146a5792b1d2aaf9412eaf2610f1f4cd1f05a037..eadb95ce7aace86925b9639399e29feb5be33323 100644 (file)
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/circ_buf.h>
+#include <linux/poll.h>
 
 #include "internal.h"
 
 static void perf_output_wakeup(struct perf_output_handle *handle)
 {
-       atomic_set(&handle->rb->poll, POLL_IN);
+       atomic_set(&handle->rb->poll, POLLIN);
 
        handle->event->pending_wakeup = 1;
        irq_work_queue(&handle->event->pending);
index 63678b573d6135201700db85ede47d5111082a9c..4eeb63de7e54e895506e468acbcd4b3bed4271c6 100644 (file)
@@ -2258,7 +2258,7 @@ static long futex_wait_restart(struct restart_block *restart)
  * if there are waiters then it will block, it does PI, etc. (Due to
  * races the kernel might see a 0 value of the futex too.)
  */
-static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
+static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
                         ktime_t *time, int trylock)
 {
        struct hrtimer_sleeper timeout, *to = NULL;
@@ -2953,11 +2953,11 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
        case FUTEX_WAKE_OP:
                return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
        case FUTEX_LOCK_PI:
-               return futex_lock_pi(uaddr, flags, val, timeout, 0);
+               return futex_lock_pi(uaddr, flags, timeout, 0);
        case FUTEX_UNLOCK_PI:
                return futex_unlock_pi(uaddr, flags);
        case FUTEX_TRYLOCK_PI:
-               return futex_lock_pi(uaddr, flags, 0, timeout, 1);
+               return futex_lock_pi(uaddr, flags, NULL, 1);
        case FUTEX_WAIT_REQUEUE_PI:
                val3 = FUTEX_BITSET_MATCH_ANY;
                return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
index 8541bfdfd232bb4213629f265cbb68a6bfb50c72..4ca8eb1519755ac17e314259fa595d18f76db058 100644 (file)
@@ -1,5 +1,5 @@
 
-obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o
+obj-y += mutex.o semaphore.o rwsem.o
 
 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_lockdep.o = -pg
@@ -14,6 +14,7 @@ ifeq ($(CONFIG_PROC_FS),y)
 obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
 endif
 obj-$(CONFIG_SMP) += spinlock.o
+obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
 obj-$(CONFIG_SMP) += lglock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
index 4d60986fcbee74a4fde3906e0d87fc113c5e8172..d1fe2ba5bac958bc85da8e8868408d8c6c809dc3 100644 (file)
@@ -108,20 +108,4 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
        arch_mcs_spin_unlock_contended(&next->locked);
 }
 
-/*
- * Cancellable version of the MCS lock above.
- *
- * Intended for adaptive spinning of sleeping locks:
- * mutex_lock()/rwsem_down_{read,write}() etc.
- */
-
-struct optimistic_spin_node {
-       struct optimistic_spin_node *next, *prev;
-       int locked; /* 1 if lock acquired */
-       int cpu; /* encoded CPU # value */
-};
-
-extern bool osq_lock(struct optimistic_spin_queue *lock);
-extern void osq_unlock(struct optimistic_spin_queue *lock);
-
 #endif /* __LINUX_MCS_SPINLOCK_H */
index 454195194d4a133f2d600d0d3659b2ee5306b7ad..94674e5919cba54e339addf0c7c7cf2b90f75c27 100644 (file)
@@ -81,7 +81,7 @@ __visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);
  * The mutex must later on be released by the same task that
  * acquired it. Recursive locking is not allowed. The task
  * may not exit without first unlocking the mutex. Also, kernel
- * memory where the mutex resides mutex must not be freed with
+ * memory where the mutex resides must not be freed with
  * the mutex still locked. The mutex must first be initialized
  * (or statically defined) before it can be locked. memset()-ing
  * the mutex to 0 is not allowed.
@@ -147,7 +147,7 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
 }
 
 /*
- * after acquiring lock with fastpath or when we lost out in contested
+ * After acquiring lock with fastpath or when we lost out in contested
  * slowpath, set ctx and wake up any waiters so they can recheck.
  *
  * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
@@ -191,19 +191,32 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
        spin_unlock_mutex(&lock->base.wait_lock, flags);
 }
 
-
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 /*
- * In order to avoid a stampede of mutex spinners from acquiring the mutex
- * more or less simultaneously, the spinners need to acquire a MCS lock
- * first before spinning on the owner field.
+ * After acquiring the lock in the slowpath, set ctx and wake up any
+ * waiters so they can recheck.
  *
+ * Callers must hold the mutex wait_lock.
  */
+static __always_inline void
+ww_mutex_set_context_slowpath(struct ww_mutex *lock,
+                             struct ww_acquire_ctx *ctx)
+{
+       struct mutex_waiter *cur;
 
-/*
- * Mutex spinning code migrated from kernel/sched/core.c
- */
+       ww_mutex_lock_acquired(lock, ctx);
+       lock->ctx = ctx;
+
+       /*
+        * Give any possible sleeping processes the chance to wake up,
+        * so they can recheck if they have to back off.
+        */
+       list_for_each_entry(cur, &lock->base.wait_list, list) {
+               debug_mutex_wake_waiter(&lock->base, cur);
+               wake_up_process(cur->task);
+       }
+}
 
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
 {
        if (lock->owner != owner)
@@ -307,6 +320,11 @@ static bool mutex_optimistic_spin(struct mutex *lock,
        if (!mutex_can_spin_on_owner(lock))
                goto done;
 
+       /*
+        * In order to avoid a stampede of mutex spinners trying to
+        * acquire the mutex all at once, the spinners need to take an
+        * MCS (queued) lock first before spinning on the owner field.
+        */
        if (!osq_lock(&lock->osq))
                goto done;
 
@@ -469,7 +487,7 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
 EXPORT_SYMBOL(ww_mutex_unlock);
 
 static inline int __sched
-__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
 {
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
@@ -557,7 +575,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                }
 
                if (use_ww_ctx && ww_ctx->acquired > 0) {
-                       ret = __mutex_lock_check_stamp(lock, ww_ctx);
+                       ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
                        if (ret)
                                goto err;
                }
@@ -569,6 +587,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }
+       __set_task_state(task, TASK_RUNNING);
+
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
@@ -582,23 +602,7 @@ skip_wait:
 
        if (use_ww_ctx) {
                struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-               struct mutex_waiter *cur;
-
-               /*
-                * This branch gets optimized out for the common case,
-                * and is only important for ww_mutex_lock.
-                */
-               ww_mutex_lock_acquired(ww, ww_ctx);
-               ww->ctx = ww_ctx;
-
-               /*
-                * Give any possible sleeping processes the chance to wake up,
-                * so they can recheck if they have to back off.
-                */
-               list_for_each_entry(cur, &lock->wait_list, list) {
-                       debug_mutex_wake_waiter(lock, cur);
-                       wake_up_process(cur->task);
-               }
+               ww_mutex_set_context_slowpath(ww, ww_ctx);
        }
 
        spin_unlock_mutex(&lock->wait_lock, flags);
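ww_mutex_set_context_slowpath(), now shared with the path above, publishes the new owner's acquire context and then wakes every waiter so each can recheck whether it must back off under the wound/wait protocol. A rough pthreads sketch of that publish-then-wake-all shape; owner_stamp and the function names are invented for illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waiters   = PTHREAD_COND_INITIALIZER;
static unsigned long   owner_stamp;     /* stands in for ww->ctx */

static void set_context_and_wake(unsigned long stamp)
{
        pthread_mutex_lock(&wait_lock);
        owner_stamp = stamp;
        /* Wake everyone: in the wound/wait protocol a waiter whose own
         * stamp is younger than the owner's must back off and retry. */
        pthread_cond_broadcast(&waiters);
        pthread_mutex_unlock(&wait_lock);
}

int main(void)
{
        set_context_and_wake(42);
        printf("owner stamp published: %lu\n", owner_stamp);
        return 0;
}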
similarity index 98%
rename from kernel/locking/mcs_spinlock.c
rename to kernel/locking/osq_lock.c
index 9887a905a7626278d8ccf6da8176c447e2aad988..c112d00341b05773934ecdb2977c0d2aca1d5c11 100644 (file)
@@ -1,8 +1,6 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
-#include "mcs_spinlock.h"
-
-#ifdef CONFIG_SMP
+#include <linux/osq_lock.h>
 
 /*
 * An MCS-like lock especially tailored for optimistic spinning for sleeping
@@ -111,7 +109,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
         * cmpxchg in an attempt to undo our queueing.
         */
 
-       while (!smp_load_acquire(&node->locked)) {
+       while (!ACCESS_ONCE(node->locked)) {
                /*
                 * If we need to reschedule, bail... so we can block.
                 */
@@ -203,6 +201,3 @@ void osq_unlock(struct optimistic_spin_queue *lock)
        if (next)
                ACCESS_ONCE(next->locked) = 1;
 }
-
-#endif
-
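For readers unfamiliar with the lock being moved here: an MCS lock queues waiters in per-waiter nodes so each spins only on its own cacheline, and the OSQ variant adds a cancellation path for optimistic spinning (the hunk above also switches the spin from smp_load_acquire() to ACCESS_ONCE(), relying on later operations for ordering). Below is a minimal, non-cancellable MCS lock in portable C11 atomics; it shows only the queueing idea and is not the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
        _Atomic(struct mcs_node *) next;
        atomic_bool locked;
};

static _Atomic(struct mcs_node *) tail; /* NULL == lock free */

static void mcs_lock(struct mcs_node *node)
{
        struct mcs_node *prev;

        atomic_store(&node->next, NULL);
        atomic_store(&node->locked, false);

        prev = atomic_exchange(&tail, node);    /* join the queue */
        if (!prev)
                return;                         /* queue was empty: ours */

        atomic_store(&prev->next, node);        /* link behind predecessor */
        while (!atomic_load(&node->locked))     /* spin on our own node */
                ;
}

static void mcs_unlock(struct mcs_node *node)
{
        struct mcs_node *next = atomic_load(&node->next);

        if (!next) {
                struct mcs_node *expected = node;

                /* No visible successor: try to swing tail back to NULL. */
                if (atomic_compare_exchange_strong(&tail, &expected, NULL))
                        return;
                /* Someone is mid-enqueue; wait for the link to appear. */
                while (!(next = atomic_load(&node->next)))
                        ;
        }
        atomic_store(&next->locked, true);      /* hand the lock over */
}

int main(void)
{
        struct mcs_node me;

        mcs_lock(&me);
        mcs_unlock(&me);
        return 0;
}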
index 7c98873a30777f131541a36889631c1efea79320..3059bc2f022daa6e4d8d976c39a7d8a8f546d813 100644 (file)
@@ -1130,6 +1130,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
                set_current_state(state);
        }
 
+       __set_current_state(TASK_RUNNING);
        return ret;
 }
 
@@ -1188,10 +1189,9 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
        ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
 
        if (likely(!ret))
+               /* sleep on the mutex */
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
 
-       set_current_state(TASK_RUNNING);
-
        if (unlikely(ret)) {
                remove_waiter(lock, &waiter);
                rt_mutex_handle_deadlock(ret, chwalk, &waiter);
@@ -1626,10 +1626,9 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 
        set_current_state(TASK_INTERRUPTIBLE);
 
+       /* sleep on the mutex */
        ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
 
-       set_current_state(TASK_RUNNING);
-
        if (unlikely(ret))
                remove_waiter(lock, waiter);
 
index 2c93571162cb7573f17a1eb7a0a424e07b061a25..2555ae15ec14c78d6c8f5030fea52daa74b5a5c9 100644 (file)
@@ -154,7 +154,7 @@ void __sched __down_read(struct rw_semaphore *sem)
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }
 
-       tsk->state = TASK_RUNNING;
+       __set_task_state(tsk, TASK_RUNNING);
  out:
        ;
 }
index 7628c3fc37ca30902a6952fd86ffd4387e70eb57..2f7cc4076f50aa0c534c22e527ab3d1f11ce9a66 100644 (file)
@@ -242,8 +242,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
                schedule();
        }
 
-       tsk->state = TASK_RUNNING;
-
+       __set_task_state(tsk, TASK_RUNNING);
        return sem;
 }
 EXPORT_SYMBOL(rwsem_down_read_failed);
index 4803da6eab62f182354707c10f48be35a8b54fb5..ae9fc7cc360ebea6088db4ae0206e452d856214b 100644 (file)
@@ -402,6 +402,7 @@ int raw_notifier_call_chain(struct raw_notifier_head *nh,
 }
 EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
 
+#ifdef CONFIG_SRCU
 /*
  *     SRCU notifier chain routines.    Registration and unregistration
  *     use a mutex, and call_chain is synchronized by SRCU (no locks).
@@ -528,6 +529,8 @@ void srcu_init_notifier_head(struct srcu_notifier_head *nh)
 }
 EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
 
+#endif /* CONFIG_SRCU */
+
 static ATOMIC_NOTIFIER_HEAD(die_chain);
 
 int notrace notify_die(enum die_val val, const char *str,
index 48b28d387c7f77b2e3d36bc751b3e221c7634d67..7e01f78f041778abe405c9115c15e10a77f64d03 100644 (file)
@@ -251,6 +251,7 @@ config APM_EMULATION
 
 config PM_OPP
        bool
+       select SRCU
        ---help---
          SOCs have a standard set of tuples consisting of frequency and
          voltage pairs that the device will support per voltage domain. This
index e6fae503d1bc54519a45ba838fe3d8622e4333f4..50a808424b06af45fdd0fd6ab3ae732e1917680d 100644 (file)
@@ -1,4 +1,5 @@
-obj-y += update.o srcu.o
+obj-y += update.o
+obj-$(CONFIG_SRCU) += srcu.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_TREE_RCU) += tree.o
 obj-$(CONFIG_PREEMPT_RCU) += tree.o
index 07bb02eda844bf16eee540ba01a99beb7c52ac60..80adef7d4c3d01d9ef9ed95c483956d2a858854f 100644 (file)
@@ -137,4 +137,10 @@ int rcu_jiffies_till_stall_check(void);
 
 void rcu_early_boot_tests(void);
 
+/*
+ * This function really isn't for public consumption, but RCU is special in
+ * that context switches can allow the state machine to make progress.
+ */
+extern void resched_cpu(int cpu);
+
 #endif /* __LINUX_RCU_H */
index 4d559baf06e0c7171a7a86acbeed9926fc8c9d7d..30d42aa55d83de9b111bd05945114641409f3596 100644 (file)
@@ -244,7 +244,8 @@ struct rcu_torture_ops {
        int (*readlock)(void);
        void (*read_delay)(struct torture_random_state *rrsp);
        void (*readunlock)(int idx);
-       int (*completed)(void);
+       unsigned long (*started)(void);
+       unsigned long (*completed)(void);
        void (*deferred_free)(struct rcu_torture *p);
        void (*sync)(void);
        void (*exp_sync)(void);
@@ -296,11 +297,6 @@ static void rcu_torture_read_unlock(int idx) __releases(RCU)
        rcu_read_unlock();
 }
 
-static int rcu_torture_completed(void)
-{
-       return rcu_batches_completed();
-}
-
 /*
  * Update callback in the pipe.  This should be invoked after a grace period.
  */
@@ -356,7 +352,7 @@ rcu_torture_cb(struct rcu_head *p)
                cur_ops->deferred_free(rp);
 }
 
-static int rcu_no_completed(void)
+static unsigned long rcu_no_completed(void)
 {
        return 0;
 }
@@ -377,7 +373,8 @@ static struct rcu_torture_ops rcu_ops = {
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,
        .readunlock     = rcu_torture_read_unlock,
-       .completed      = rcu_torture_completed,
+       .started        = rcu_batches_started,
+       .completed      = rcu_batches_completed,
        .deferred_free  = rcu_torture_deferred_free,
        .sync           = synchronize_rcu,
        .exp_sync       = synchronize_rcu_expedited,
@@ -407,11 +404,6 @@ static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
        rcu_read_unlock_bh();
 }
 
-static int rcu_bh_torture_completed(void)
-{
-       return rcu_batches_completed_bh();
-}
-
 static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
 {
        call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
@@ -423,7 +415,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
        .readlock       = rcu_bh_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_bh_torture_read_unlock,
-       .completed      = rcu_bh_torture_completed,
+       .started        = rcu_batches_started_bh,
+       .completed      = rcu_batches_completed_bh,
        .deferred_free  = rcu_bh_torture_deferred_free,
        .sync           = synchronize_rcu_bh,
        .exp_sync       = synchronize_rcu_bh_expedited,
@@ -466,6 +459,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_torture_read_unlock,
+       .started        = rcu_no_completed,
        .completed      = rcu_no_completed,
        .deferred_free  = rcu_busted_torture_deferred_free,
        .sync           = synchronize_rcu_busted,
@@ -510,7 +504,7 @@ static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
        srcu_read_unlock(&srcu_ctl, idx);
 }
 
-static int srcu_torture_completed(void)
+static unsigned long srcu_torture_completed(void)
 {
        return srcu_batches_completed(&srcu_ctl);
 }
@@ -564,6 +558,7 @@ static struct rcu_torture_ops srcu_ops = {
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
+       .started        = NULL,
        .completed      = srcu_torture_completed,
        .deferred_free  = srcu_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
@@ -600,7 +595,8 @@ static struct rcu_torture_ops sched_ops = {
        .readlock       = sched_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = sched_torture_read_unlock,
-       .completed      = rcu_no_completed,
+       .started        = rcu_batches_started_sched,
+       .completed      = rcu_batches_completed_sched,
        .deferred_free  = rcu_sched_torture_deferred_free,
        .sync           = synchronize_sched,
        .exp_sync       = synchronize_sched_expedited,
@@ -638,6 +634,7 @@ static struct rcu_torture_ops tasks_ops = {
        .readlock       = tasks_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = tasks_torture_read_unlock,
+       .started        = rcu_no_completed,
        .completed      = rcu_no_completed,
        .deferred_free  = rcu_tasks_torture_deferred_free,
        .sync           = synchronize_rcu_tasks,
@@ -1015,8 +1012,8 @@ static void rcutorture_trace_dump(void)
 static void rcu_torture_timer(unsigned long unused)
 {
        int idx;
-       int completed;
-       int completed_end;
+       unsigned long started;
+       unsigned long completed;
        static DEFINE_TORTURE_RANDOM(rand);
        static DEFINE_SPINLOCK(rand_lock);
        struct rcu_torture *p;
@@ -1024,7 +1021,10 @@ static void rcu_torture_timer(unsigned long unused)
        unsigned long long ts;
 
        idx = cur_ops->readlock();
-       completed = cur_ops->completed();
+       if (cur_ops->started)
+               started = cur_ops->started();
+       else
+               started = cur_ops->completed();
        ts = rcu_trace_clock_local();
        p = rcu_dereference_check(rcu_torture_current,
                                  rcu_read_lock_bh_held() ||
@@ -1047,14 +1047,16 @@ static void rcu_torture_timer(unsigned long unused)
                /* Should not happen, but... */
                pipe_count = RCU_TORTURE_PIPE_LEN;
        }
-       completed_end = cur_ops->completed();
+       completed = cur_ops->completed();
        if (pipe_count > 1) {
                do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
-                                         completed, completed_end);
+                                         started, completed);
                rcutorture_trace_dump();
        }
        __this_cpu_inc(rcu_torture_count[pipe_count]);
-       completed = completed_end - completed;
+       completed = completed - started;
+       if (cur_ops->started)
+               completed++;
        if (completed > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                completed = RCU_TORTURE_PIPE_LEN;
@@ -1073,8 +1075,8 @@ static void rcu_torture_timer(unsigned long unused)
 static int
 rcu_torture_reader(void *arg)
 {
-       int completed;
-       int completed_end;
+       unsigned long started;
+       unsigned long completed;
        int idx;
        DEFINE_TORTURE_RANDOM(rand);
        struct rcu_torture *p;
@@ -1093,7 +1095,10 @@ rcu_torture_reader(void *arg)
                                mod_timer(&t, jiffies + 1);
                }
                idx = cur_ops->readlock();
-               completed = cur_ops->completed();
+               if (cur_ops->started)
+                       started = cur_ops->started();
+               else
+                       started = cur_ops->completed();
                ts = rcu_trace_clock_local();
                p = rcu_dereference_check(rcu_torture_current,
                                          rcu_read_lock_bh_held() ||
@@ -1114,14 +1119,16 @@ rcu_torture_reader(void *arg)
                        /* Should not happen, but... */
                        pipe_count = RCU_TORTURE_PIPE_LEN;
                }
-               completed_end = cur_ops->completed();
+               completed = cur_ops->completed();
                if (pipe_count > 1) {
                        do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
-                                                 ts, completed, completed_end);
+                                                 ts, started, completed);
                        rcutorture_trace_dump();
                }
                __this_cpu_inc(rcu_torture_count[pipe_count]);
-               completed = completed_end - completed;
+               completed = completed - started;
+               if (cur_ops->started)
+                       completed++;
                if (completed > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        completed = RCU_TORTURE_PIPE_LEN;
@@ -1420,6 +1427,9 @@ static int rcu_torture_barrier(void *arg)
                cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
                if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
                        n_rcu_torture_barrier_error++;
+                       pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
+                              atomic_read(&barrier_cbs_invoked),
+                              n_barrier_cbs);
                        WARN_ON_ONCE(1);
                }
                n_barrier_successes++;
index e037f3eb2f7bf2f44a6de08cdcfe89b05d4d5bca..445bf8ffe3fb27dfc58aa411b476ef105bc33645 100644 (file)
@@ -546,7 +546,7 @@ EXPORT_SYMBOL_GPL(srcu_barrier);
  * Report the number of batches, correlated with, but not necessarily
  * precisely the same as, the number of grace periods that have elapsed.
  */
-long srcu_batches_completed(struct srcu_struct *sp)
+unsigned long srcu_batches_completed(struct srcu_struct *sp)
 {
        return sp->completed;
 }
index 0db5649f88179958d7ab26f982036a70c388a817..cc9ceca7bde1fd5f0036ad841033071e5fec0499 100644 (file)
@@ -47,54 +47,14 @@ static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);
 
-static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-
 #include "tiny_plugin.h"
 
-/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
-static void rcu_idle_enter_common(long long newval)
-{
-       if (newval) {
-               RCU_TRACE(trace_rcu_dyntick(TPS("--="),
-                                           rcu_dynticks_nesting, newval));
-               rcu_dynticks_nesting = newval;
-               return;
-       }
-       RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
-                                   rcu_dynticks_nesting, newval));
-       if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
-               struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
-               RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
-                                           rcu_dynticks_nesting, newval));
-               ftrace_dump(DUMP_ALL);
-               WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
-                         current->pid, current->comm,
-                         idle->pid, idle->comm); /* must be idle task! */
-       }
-       rcu_sched_qs(); /* implies rcu_bh_inc() */
-       barrier();
-       rcu_dynticks_nesting = newval;
-}
-
 /*
  * Enter idle, which is an extended quiescent state if we have fully
- * entered that mode (i.e., if the new value of dynticks_nesting is zero).
+ * entered that mode.
  */
 void rcu_idle_enter(void)
 {
-       unsigned long flags;
-       long long newval;
-
-       local_irq_save(flags);
-       WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
-       if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
-           DYNTICK_TASK_NEST_VALUE)
-               newval = 0;
-       else
-               newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
-       rcu_idle_enter_common(newval);
-       local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_enter);
 
@@ -103,55 +63,14 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter);
  */
 void rcu_irq_exit(void)
 {
-       unsigned long flags;
-       long long newval;
-
-       local_irq_save(flags);
-       newval = rcu_dynticks_nesting - 1;
-       WARN_ON_ONCE(newval < 0);
-       rcu_idle_enter_common(newval);
-       local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_irq_exit);
 
-/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
-static void rcu_idle_exit_common(long long oldval)
-{
-       if (oldval) {
-               RCU_TRACE(trace_rcu_dyntick(TPS("++="),
-                                           oldval, rcu_dynticks_nesting));
-               return;
-       }
-       RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
-       if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
-               struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());
-
-               RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
-                         oldval, rcu_dynticks_nesting));
-               ftrace_dump(DUMP_ALL);
-               WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
-                         current->pid, current->comm,
-                         idle->pid, idle->comm); /* must be idle task! */
-       }
-}
-
 /*
  * Exit idle, so that we are no longer in an extended quiescent state.
  */
 void rcu_idle_exit(void)
 {
-       unsigned long flags;
-       long long oldval;
-
-       local_irq_save(flags);
-       oldval = rcu_dynticks_nesting;
-       WARN_ON_ONCE(rcu_dynticks_nesting < 0);
-       if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
-               rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
-       else
-               rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
-       rcu_idle_exit_common(oldval);
-       local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_idle_exit);
 
@@ -160,15 +79,6 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
  */
 void rcu_irq_enter(void)
 {
-       unsigned long flags;
-       long long oldval;
-
-       local_irq_save(flags);
-       oldval = rcu_dynticks_nesting;
-       rcu_dynticks_nesting++;
-       WARN_ON_ONCE(rcu_dynticks_nesting == 0);
-       rcu_idle_exit_common(oldval);
-       local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(rcu_irq_enter);
 
@@ -179,22 +89,12 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
  */
 bool notrace __rcu_is_watching(void)
 {
-       return rcu_dynticks_nesting;
+       return true;
 }
 EXPORT_SYMBOL(__rcu_is_watching);
 
 #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
-/*
- * Test whether the current CPU was interrupted from idle.  Nested
- * interrupts don't count, we must be running at the first interrupt
- * level.
- */
-static int rcu_is_cpu_rrupt_from_idle(void)
-{
-       return rcu_dynticks_nesting <= 1;
-}
-
 /*
  * Helper function for rcu_sched_qs() and rcu_bh_qs().
  * Also irqs are disabled to avoid confusion due to interrupt handlers
@@ -250,7 +150,7 @@ void rcu_bh_qs(void)
 void rcu_check_callbacks(int user)
 {
        RCU_TRACE(check_cpu_stalls());
-       if (user || rcu_is_cpu_rrupt_from_idle())
+       if (user)
                rcu_sched_qs();
        else if (!in_softirq())
                rcu_bh_qs();
@@ -357,6 +257,11 @@ static void __call_rcu(struct rcu_head *head,
        rcp->curtail = &head->next;
        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
+
+       if (unlikely(is_idle_task(current))) {
+               /* force scheduling for rcu_sched_qs() */
+               resched_cpu(0);
+       }
 }
 
 /*
@@ -383,6 +288,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
 void __init rcu_init(void)
 {
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+       RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk));
+       RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk));
 
        rcu_early_boot_tests();
 }
index 858c5656912724c64e1ea20d5497edc9e8946b7d..f94e209a10d615a5a4f7d379e623918f533ce814 100644 (file)
@@ -145,17 +145,16 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
        rcp->ticks_this_gp++;
        j = jiffies;
        js = ACCESS_ONCE(rcp->jiffies_stall);
-       if (*rcp->curtail && ULONG_CMP_GE(j, js)) {
+       if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
                pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
-                      rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting,
+                      rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
                       jiffies - rcp->gp_start, rcp->qlen);
                dump_stack();
-       }
-       if (*rcp->curtail && ULONG_CMP_GE(j, js))
                ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
                        3 * rcu_jiffies_till_stall_check() + 3;
-       else if (ULONG_CMP_GE(j, js))
+       } else if (ULONG_CMP_GE(j, js)) {
                ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+       }
 }
 
 static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
index 7680fc2750361a91fc8f749a006555899aa92677..48d640ca1a05b8c0f83fe2b217b925a6dec69fa4 100644 (file)
@@ -156,6 +156,10 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
+/* rcuc/rcub kthread realtime priority */
+static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
+module_param(kthread_prio, int, 0644);
+
 /*
  * Track the rcutorture test sequence number and the update version
  * number within a given test.  The rcutorture_testseq is incremented
@@ -215,6 +219,9 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 #endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 };
 
+DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
+EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
+
 /*
  * Let the RCU core know that this CPU has gone through the scheduler,
  * which is a quiescent state.  This is called when the need for a
@@ -284,6 +291,22 @@ void rcu_note_context_switch(void)
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
+/*
+ * Register a quiescent state for all RCU flavors.  If there is an
+ * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
+ * dyntick-idle quiescent state visible to other CPUs (but only for those
+ * RCU flavors in desperate need of a quiescent state, which will normally
+ * be none of them).  Either way, do a lightweight quiescent state for
+ * all RCU flavors.
+ */
+void rcu_all_qs(void)
+{
+       if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+               rcu_momentary_dyntick_idle();
+       this_cpu_inc(rcu_qs_ctr);
+}
+EXPORT_SYMBOL_GPL(rcu_all_qs);
+
 static long blimit = 10;       /* Maximum callbacks per rcu_do_batch. */
 static long qhimark = 10000;   /* If this many pending, ignore blimit. */
 static long qlowmark = 100;    /* Once only this many pending, use blimit. */
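rcu_all_qs() is cheap because rcu_qs_ctr is just a per-CPU counter: the grace-period machinery snapshots it and later treats "the counter moved" as proof the CPU passed through a quiescent point, without any per-event bookkeeping. A single-CPU sketch of the snapshot-and-compare idea, with invented names:

#include <stdio.h>

/* Bump a counter at every known-quiescent point; a grace period records
 * a snapshot and considers the CPU quiesced once the counter has moved.
 * Illustrative stand-in for rcu_qs_ctr / rcu_qs_ctr_snap. */
static unsigned long qs_ctr;

static unsigned long qs_snapshot(void)   { return qs_ctr; }
static void qs_note_quiescent(void)      { qs_ctr++; }
static int qs_passed(unsigned long snap) { return qs_ctr != snap; }

int main(void)
{
        unsigned long snap = qs_snapshot();     /* taken at GP start */

        printf("quiesced? %d\n", qs_passed(snap));      /* 0 */
        qs_note_quiescent();                    /* e.g. rcu_all_qs() */
        printf("quiesced? %d\n", qs_passed(snap));      /* 1 */
        return 0;
}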
@@ -315,18 +338,54 @@ static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(void);
 
 /*
- * Return the number of RCU-sched batches processed thus far for debug & stats.
+ * Return the number of RCU batches started thus far for debug & stats.
+ */
+unsigned long rcu_batches_started(void)
+{
+       return rcu_state_p->gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started);
+
+/*
+ * Return the number of RCU-sched batches started thus far for debug & stats.
+ */
+unsigned long rcu_batches_started_sched(void)
+{
+       return rcu_sched_state.gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
+
+/*
+ * Return the number of RCU BH batches started thus far for debug & stats.
  */
-long rcu_batches_completed_sched(void)
+unsigned long rcu_batches_started_bh(void)
+{
+       return rcu_bh_state.gpnum;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
+
+/*
+ * Return the number of RCU batches completed thus far for debug & stats.
+ */
+unsigned long rcu_batches_completed(void)
+{
+       return rcu_state_p->completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Return the number of RCU-sched batches completed thus far for debug & stats.
+ */
+unsigned long rcu_batches_completed_sched(void)
 {
        return rcu_sched_state.completed;
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
 /*
- * Return the number of RCU BH batches processed thus far for debug & stats.
+ * Return the number of RCU BH batches completed thus far for debug & stats.
  */
-long rcu_batches_completed_bh(void)
+unsigned long rcu_batches_completed_bh(void)
 {
        return rcu_bh_state.completed;
 }
@@ -759,39 +818,71 @@ void rcu_irq_enter(void)
 /**
  * rcu_nmi_enter - inform RCU of entry to NMI context
  *
- * If the CPU was idle with dynamic ticks active, and there is no
- * irq handler running, this updates rdtp->dynticks_nmi to let the
- * RCU grace-period handling know that the CPU is active.
+ * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
+ * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
+ * that the CPU is active.  This implementation permits nested NMIs, as
+ * long as the nesting level does not overflow an int.  (You will probably
+ * run out of stack space first.)
  */
 void rcu_nmi_enter(void)
 {
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+       int incby = 2;
 
-       if (rdtp->dynticks_nmi_nesting == 0 &&
-           (atomic_read(&rdtp->dynticks) & 0x1))
-               return;
-       rdtp->dynticks_nmi_nesting++;
-       smp_mb__before_atomic();  /* Force delay from prior write. */
-       atomic_inc(&rdtp->dynticks);
-       /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
-       smp_mb__after_atomic();  /* See above. */
-       WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+       /* Complain about underflow. */
+       WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
+
+       /*
+        * If idle from RCU viewpoint, atomically increment ->dynticks
+        * to mark non-idle and increment ->dynticks_nmi_nesting by one.
+        * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
+        * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
+        * to be in the outermost NMI handler that interrupted an RCU-idle
+        * period (observation due to Andy Lutomirski).
+        */
+       if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
+               smp_mb__before_atomic();  /* Force delay from prior write. */
+               atomic_inc(&rdtp->dynticks);
+               /* atomic_inc() before later RCU read-side crit sects */
+               smp_mb__after_atomic();  /* See above. */
+               WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+               incby = 1;
+       }
+       rdtp->dynticks_nmi_nesting += incby;
+       barrier();
 }
 
 /**
  * rcu_nmi_exit - inform RCU of exit from NMI context
  *
- * If the CPU was idle with dynamic ticks active, and there is no
- * irq handler running, this updates rdtp->dynticks_nmi to let the
- * RCU grace-period handling know that the CPU is no longer active.
+ * If we are returning from the outermost NMI handler that interrupted an
+ * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
+ * to let the RCU grace-period handling know that the CPU is back to
+ * being RCU-idle.
  */
 void rcu_nmi_exit(void)
 {
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
-       if (rdtp->dynticks_nmi_nesting == 0 ||
-           --rdtp->dynticks_nmi_nesting != 0)
+       /*
+        * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
+        * (We are exiting an NMI handler, so RCU better be paying attention
+        * to us!)
+        */
+       WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
+       WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+
+       /*
+        * If the nesting level is not 1, the CPU wasn't RCU-idle, so
+        * leave it in non-RCU-idle state.
+        */
+       if (rdtp->dynticks_nmi_nesting != 1) {
+               rdtp->dynticks_nmi_nesting -= 2;
                return;
+       }
+
+       /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
+       rdtp->dynticks_nmi_nesting = 0;
        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
        smp_mb__before_atomic();  /* See above. */
        atomic_inc(&rdtp->dynticks);
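The nesting arithmetic above is worth spelling out: entry normally adds two, but adds only one when it also marks the CPU non-idle, so a nesting level of exactly one uniquely identifies the outermost NMI that interrupted an RCU-idle period; exit then subtracts two unless it sees one. A counter-only userspace sketch (`idle` stands in for the ->dynticks parity bit; nothing here is kernel API):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int  nmi_nesting;
static bool idle = true;

static void nmi_enter(void)
{
        int incby = 2;

        if (idle) {
                idle = false;   /* mark non-idle... */
                incby = 1;      /* ...and count 1, so nesting == 1 means
                                 * "outermost NMI that interrupted idle". */
        }
        nmi_nesting += incby;
}

static void nmi_exit(void)
{
        assert(nmi_nesting > 0);
        if (nmi_nesting != 1) {
                nmi_nesting -= 2;       /* still nested: stay non-idle */
                return;
        }
        nmi_nesting = 0;                /* outermost exit: back to idle */
        idle = true;
}

int main(void)
{
        nmi_enter();    /* outermost: nesting == 1 */
        nmi_enter();    /* nested:    nesting == 3 */
        nmi_exit();     /*            nesting == 1 */
        nmi_exit();     /* idle again */
        printf("idle=%d nesting=%d\n", idle, nmi_nesting);
        return 0;
}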
@@ -898,16 +989,13 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
                return 1;
        } else {
+               if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
+                                rdp->mynode->gpnum))
+                       ACCESS_ONCE(rdp->gpwrap) = true;
                return 0;
        }
 }
 
-/*
- * This function really isn't for public consumption, but RCU is special in
- * that context switches can allow the state machine to make progress.
- */
-extern void resched_cpu(int cpu);
-
 /*
  * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks
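The new ->gpwrap logic above guards against the unsigned ->gpnum counters wrapping while a CPU sleeps: if the CPU's snapshot has fallen more than ULONG_MAX/4 grace periods behind its rcu_node, the snapshot can no longer be trusted and is flagged for resync. A sketch of the wrap-safe comparison, in the spirit of the kernel's ULONG_CMP_LT():

#include <limits.h>
#include <stdio.h>

/* Wrap-safe "a < b" for free-running unsigned counters: the subtraction
 * wraps modulo 2^N, and a "negative" difference means a trails b. */
static int ulong_cmp_lt(unsigned long a, unsigned long b)
{
        return ULONG_MAX / 2 < a - b;
}

int main(void)
{
        unsigned long rdp_gpnum = 10;                     /* CPU's snapshot */
        unsigned long rnp_gpnum = 10 + ULONG_MAX / 4 + 1; /* node's counter */

        /* Analogue of the check above: the snapshot trails by more than
         * ULONG_MAX/4 grace periods, so flag ->gpwrap and resync. */
        if (ulong_cmp_lt(rdp_gpnum + ULONG_MAX / 4, rnp_gpnum))
                puts("gpwrap = true");
        return 0;
}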
@@ -1011,6 +1099,22 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
        j1 = rcu_jiffies_till_stall_check();
        ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
        rsp->jiffies_resched = j + j1 / 2;
+       rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
+}
+
+/*
+ * Complain about starvation of grace-period kthread.
+ */
+static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
+{
+       unsigned long gpa;
+       unsigned long j;
+
+       j = jiffies;
+       gpa = ACCESS_ONCE(rsp->gp_activity);
+       if (j - gpa > 2 * HZ)
+               pr_err("%s kthread starved for %ld jiffies!\n",
+                      rsp->name, j - gpa);
 }
 
 /*
@@ -1033,11 +1137,13 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
        }
 }
 
-static void print_other_cpu_stall(struct rcu_state *rsp)
+static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 {
        int cpu;
        long delta;
        unsigned long flags;
+       unsigned long gpa;
+       unsigned long j;
        int ndetected = 0;
        struct rcu_node *rnp = rcu_get_root(rsp);
        long totqlen = 0;
@@ -1075,30 +1181,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }
 
-       /*
-        * Now rat on any tasks that got kicked up to the root rcu_node
-        * due to CPU offlining.
-        */
-       rnp = rcu_get_root(rsp);
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       ndetected += rcu_print_task_stall(rnp);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-
        print_cpu_stall_info_end();
        for_each_possible_cpu(cpu)
                totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
        pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
               smp_processor_id(), (long)(jiffies - rsp->gp_start),
               (long)rsp->gpnum, (long)rsp->completed, totqlen);
-       if (ndetected == 0)
-               pr_err("INFO: Stall ended before state dump start\n");
-       else
+       if (ndetected) {
                rcu_dump_cpu_stacks(rsp);
+       } else {
+               if (ACCESS_ONCE(rsp->gpnum) != gpnum ||
+                   ACCESS_ONCE(rsp->completed) == gpnum) {
+                       pr_err("INFO: Stall ended before state dump start\n");
+               } else {
+                       j = jiffies;
+                       gpa = ACCESS_ONCE(rsp->gp_activity);
+                       pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld\n",
+                              rsp->name, j - gpa, j, gpa,
+                              jiffies_till_next_fqs);
+                       /* In this case, the current CPU might be at fault. */
+                       sched_show_task(current);
+               }
+       }
 
        /* Complain about tasks blocking the grace period. */
-
        rcu_print_detail_task_stall(rsp);
 
+       rcu_check_gp_kthread_starvation(rsp);
+
        force_quiescent_state(rsp);  /* Kick them all. */
 }
 
@@ -1123,6 +1233,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
        pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
                jiffies - rsp->gp_start,
                (long)rsp->gpnum, (long)rsp->completed, totqlen);
+
+       rcu_check_gp_kthread_starvation(rsp);
+
        rcu_dump_cpu_stacks(rsp);
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1193,7 +1306,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
                   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
 
                /* They had a few time units to dump stack, so complain. */
-               print_other_cpu_stall(rsp);
+               print_other_cpu_stall(rsp, gpnum);
        }
 }
 
@@ -1530,7 +1643,8 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
        bool ret;
 
        /* Handle the ends of any preceding grace periods first. */
-       if (rdp->completed == rnp->completed) {
+       if (rdp->completed == rnp->completed &&
+           !unlikely(ACCESS_ONCE(rdp->gpwrap))) {
 
                /* No grace period end, so just accelerate recent callbacks. */
                ret = rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1545,7 +1659,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
        }
 
-       if (rdp->gpnum != rnp->gpnum) {
+       if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) {
                /*
                 * If the current grace period is waiting for this CPU,
                 * set up to detect a quiescent state, otherwise don't
@@ -1554,8 +1668,10 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                rdp->gpnum = rnp->gpnum;
                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
                rdp->passed_quiesce = 0;
+               rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
                rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
                zero_cpu_stall_ticks(rdp);
+               ACCESS_ONCE(rdp->gpwrap) = false;
        }
        return ret;
 }
@@ -1569,7 +1685,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
        local_irq_save(flags);
        rnp = rdp->mynode;
        if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
-            rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */
+            rdp->completed == ACCESS_ONCE(rnp->completed) &&
+            !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */
            !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
                local_irq_restore(flags);
                return;
@@ -1589,6 +1706,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
+       ACCESS_ONCE(rsp->gp_activity) = jiffies;
        rcu_bind_gp_kthread();
        raw_spin_lock_irq(&rnp->lock);
        smp_mb__after_unlock_lock();
@@ -1649,6 +1767,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
                                            rnp->grphi, rnp->qsmask);
                raw_spin_unlock_irq(&rnp->lock);
                cond_resched_rcu_qs();
+               ACCESS_ONCE(rsp->gp_activity) = jiffies;
        }
 
        mutex_unlock(&rsp->onoff_mutex);
@@ -1665,6 +1784,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
        unsigned long maxj;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
+       ACCESS_ONCE(rsp->gp_activity) = jiffies;
        rsp->n_force_qs++;
        if (fqs_state == RCU_SAVE_DYNTICK) {
                /* Collect dyntick-idle snapshots. */
@@ -1703,6 +1823,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
+       ACCESS_ONCE(rsp->gp_activity) = jiffies;
        raw_spin_lock_irq(&rnp->lock);
        smp_mb__after_unlock_lock();
        gp_duration = jiffies - rsp->gp_start;
@@ -1739,6 +1860,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                nocb += rcu_future_gp_cleanup(rsp, rnp);
                raw_spin_unlock_irq(&rnp->lock);
                cond_resched_rcu_qs();
+               ACCESS_ONCE(rsp->gp_activity) = jiffies;
        }
        rnp = rcu_get_root(rsp);
        raw_spin_lock_irq(&rnp->lock);
@@ -1788,6 +1910,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                        if (rcu_gp_init(rsp))
                                break;
                        cond_resched_rcu_qs();
+                       ACCESS_ONCE(rsp->gp_activity) = jiffies;
                        WARN_ON(signal_pending(current));
                        trace_rcu_grace_period(rsp->name,
                                               ACCESS_ONCE(rsp->gpnum),
@@ -1831,9 +1954,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                                       ACCESS_ONCE(rsp->gpnum),
                                                       TPS("fqsend"));
                                cond_resched_rcu_qs();
+                               ACCESS_ONCE(rsp->gp_activity) = jiffies;
                        } else {
                                /* Deal with stray signal. */
                                cond_resched_rcu_qs();
+                               ACCESS_ONCE(rsp->gp_activity) = jiffies;
                                WARN_ON(signal_pending(current));
                                trace_rcu_grace_period(rsp->name,
                                                       ACCESS_ONCE(rsp->gpnum),
@@ -2010,8 +2135,10 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
        rnp = rdp->mynode;
        raw_spin_lock_irqsave(&rnp->lock, flags);
        smp_mb__after_unlock_lock();
-       if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
-           rnp->completed == rnp->gpnum) {
+       if ((rdp->passed_quiesce == 0 &&
+            rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
+           rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
+           rdp->gpwrap) {
 
                /*
                 * The grace period in which this quiescent state was
@@ -2020,6 +2147,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                 * within the current grace period.
                 */
                rdp->passed_quiesce = 0;        /* need qs for new gp. */
+               rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
@@ -2064,7 +2192,8 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
-       if (!rdp->passed_quiesce)
+       if (!rdp->passed_quiesce &&
+           rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
                return;
 
        /*
@@ -2194,6 +2323,46 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
                               TPS("cpuofl"));
 }
 
+/*
+ * All CPUs for the specified rcu_node structure have gone offline,
+ * and all tasks that were preempted within an RCU read-side critical
+ * section while running on one of those CPUs have since exited their RCU
+ * read-side critical section.  Some other CPU is reporting this fact with
+ * the specified rcu_node structure's ->lock held and interrupts disabled.
+ * This function therefore goes up the tree of rcu_node structures,
+ * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
+ * the leaf rcu_node structure's ->qsmaskinit field has already been
+ * updated.
+ *
+ * This function does check that the specified rcu_node structure has
+ * all CPUs offline and no blocked tasks, so it is OK to invoke it
+ * prematurely.  That said, invoking it after the fact will cost you
+ * a needless lock acquisition.  So once it has done its work, don't
+ * invoke it again.
+ */
+static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
+{
+       long mask;
+       struct rcu_node *rnp = rnp_leaf;
+
+       if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
+               return;
+       for (;;) {
+               mask = rnp->grpmask;
+               rnp = rnp->parent;
+               if (!rnp)
+                       break;
+               raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+               smp_mb__after_unlock_lock(); /* GP memory ordering. */
+               rnp->qsmaskinit &= ~mask;
+               if (rnp->qsmaskinit) {
+                       raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                       return;
+               }
+               raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+       }
+}
+
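
rcu_cleanup_dead_rnp() walks toward the root, clearing this subtree's bit in each ancestor's ->qsmaskinit and stopping at the first ancestor that still has other children online (the blocked-tasks check is omitted below). A toy two-level model of that walk, with hypothetical node fields in place of the real rcu_node layout:

	#include <stdio.h>

	struct node {
		struct node *parent;
		unsigned long qsmaskinit;	/* one bit per online child */
		unsigned long grpmask;		/* our bit in parent->qsmaskinit */
	};

	/* Clear our bit upward until an ancestor still has online children. */
	static void cleanup_dead(struct node *leaf)
	{
		unsigned long mask;
		struct node *n = leaf;

		if (n->qsmaskinit)
			return;			/* leaf still has online CPUs */
		for (;;) {
			mask = n->grpmask;
			n = n->parent;
			if (!n)
				break;
			n->qsmaskinit &= ~mask;
			if (n->qsmaskinit)
				return;		/* a sibling is online: stop */
		}
	}

	int main(void)
	{
		struct node root = { .parent = NULL, .qsmaskinit = 0x3 };
		struct node leaf = { .parent = &root, .qsmaskinit = 0,
				     .grpmask = 0x1 };	/* leaf already updated */

		cleanup_dead(&leaf);
		printf("root.qsmaskinit = %#lx\n", root.qsmaskinit);	/* 0x2 */
		return 0;
	}
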
 /*
  * The CPU has been completely removed, and some other CPU is reporting
  * this fact from process context.  Do the remainder of the cleanup,
@@ -2204,8 +2373,6 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 {
        unsigned long flags;
-       unsigned long mask;
-       int need_report = 0;
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
@@ -2219,40 +2386,15 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
        /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
        rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
        rcu_adopt_orphan_cbs(rsp, flags);
+       raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);
 
-       /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
-       mask = rdp->grpmask;    /* rnp->grplo is constant. */
-       do {
-               raw_spin_lock(&rnp->lock);      /* irqs already disabled. */
-               smp_mb__after_unlock_lock();
-               rnp->qsmaskinit &= ~mask;
-               if (rnp->qsmaskinit != 0) {
-                       if (rnp != rdp->mynode)
-                               raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-                       break;
-               }
-               if (rnp == rdp->mynode)
-                       need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
-               else
-                       raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-               mask = rnp->grpmask;
-               rnp = rnp->parent;
-       } while (rnp != NULL);
-
-       /*
-        * We still hold the leaf rcu_node structure lock here, and
-        * irqs are still disabled.  The reason for this subterfuge is
-        * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
-        * held leads to deadlock.
-        */
-       raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
-       rnp = rdp->mynode;
-       if (need_report & RCU_OFL_TASKS_NORM_GP)
-               rcu_report_unblock_qs_rnp(rnp, flags);
-       else
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
-       if (need_report & RCU_OFL_TASKS_EXP_GP)
-               rcu_report_exp_rnp(rsp, rnp, true);
+       /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
+       raw_spin_lock_irqsave(&rnp->lock, flags);
+       smp_mb__after_unlock_lock();    /* Enforce GP memory-order guarantee. */
+       rnp->qsmaskinit &= ~rdp->grpmask;
+       if (rnp->qsmaskinit == 0 && !rcu_preempt_has_tasks(rnp))
+               rcu_cleanup_dead_rnp(rnp);
+       rcu_report_qs_rnp(rdp->grpmask, rsp, rnp, flags); /* Releases rnp->lock. */
        WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
                  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
                  cpu, rdp->qlen, rdp->nxtlist);
@@ -2268,6 +2410,10 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 {
 }
 
+static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
+{
+}
+
 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 {
 }
@@ -2464,12 +2610,6 @@ static void force_qs_rnp(struct rcu_state *rsp,
                }
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }
-       rnp = rcu_get_root(rsp);
-       if (rnp->qsmask == 0) {
-               raw_spin_lock_irqsave(&rnp->lock, flags);
-               smp_mb__after_unlock_lock();
-               rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
-       }
 }
 
 /*
@@ -2569,7 +2709,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  * Schedule RCU callback invocation.  If the specified type of RCU
  * does not support RCU priority boosting, just do a direct call,
  * otherwise wake up the per-CPU kernel kthread.  Note that because we
- * are running on the current CPU with interrupts disabled, the
+ * are running on the current CPU with softirqs disabled, the
  * rcu_cpu_kthread_task cannot disappear out from under us.
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
@@ -3109,9 +3249,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 
        /* Is the RCU core waiting for a quiescent state from this CPU? */
        if (rcu_scheduler_fully_active &&
-           rdp->qs_pending && !rdp->passed_quiesce) {
+           rdp->qs_pending && !rdp->passed_quiesce &&
+           rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
                rdp->n_rp_qs_pending++;
-       } else if (rdp->qs_pending && rdp->passed_quiesce) {
+       } else if (rdp->qs_pending &&
+                  (rdp->passed_quiesce ||
+                   rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
                rdp->n_rp_report_qs++;
                return 1;
        }
@@ -3135,7 +3278,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
        }
 
        /* Has a new RCU grace period started? */
-       if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
+       if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum ||
+           unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */
                rdp->n_rp_gp_started++;
                return 1;
        }
@@ -3318,6 +3462,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
                        } else {
                                _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
                                                   rsp->n_barrier_done);
+                               smp_mb__before_atomic();
                                atomic_inc(&rsp->barrier_cpu_count);
                                __call_rcu(&rdp->barrier_head,
                                           rcu_barrier_callback, rsp, cpu, 0);
@@ -3385,9 +3530,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        /* Set up local state, ensuring consistent view of global state. */
        raw_spin_lock_irqsave(&rnp->lock, flags);
        rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
-       init_callback_list(rdp);
-       rdp->qlen_lazy = 0;
-       ACCESS_ONCE(rdp->qlen) = 0;
        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
        WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
@@ -3444,6 +3586,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
                        rdp->gpnum = rnp->completed;
                        rdp->completed = rnp->completed;
                        rdp->passed_quiesce = 0;
+                       rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
                        rdp->qs_pending = 0;
                        trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
                }
@@ -3535,17 +3678,35 @@ static int rcu_pm_notify(struct notifier_block *self,
 static int __init rcu_spawn_gp_kthread(void)
 {
        unsigned long flags;
+       int kthread_prio_in = kthread_prio;
        struct rcu_node *rnp;
        struct rcu_state *rsp;
+       struct sched_param sp;
        struct task_struct *t;
 
+       /* Force priority into range. */
+       if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
+               kthread_prio = 1;
+       else if (kthread_prio < 0)
+               kthread_prio = 0;
+       else if (kthread_prio > 99)
+               kthread_prio = 99;
+       if (kthread_prio != kthread_prio_in)
+               pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
+                        kthread_prio, kthread_prio_in);
+
        rcu_scheduler_fully_active = 1;
        for_each_rcu_flavor(rsp) {
-               t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
+               t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
                BUG_ON(IS_ERR(t));
                rnp = rcu_get_root(rsp);
                raw_spin_lock_irqsave(&rnp->lock, flags);
                rsp->gp_kthread = t;
+               if (kthread_prio) {
+                       sp.sched_priority = kthread_prio;
+                       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+               }
+               wake_up_process(t);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }
        rcu_spawn_nocb_kthreads();
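
Note the switch from kthread_run() to kthread_create() in this hunk: the GP kthread is now created stopped, so rsp->gp_kthread can be published and the SCHED_FIFO priority applied before the thread ever executes; wake_up_process() then releases it. A hedged stand-alone sketch of the same create-tune-wake idiom (the helper and its names are hypothetical; the scheduler calls are the ones used above):

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/sched.h>

	/*
	 * Hypothetical helper: create the thread stopped, set its
	 * scheduling class while it cannot yet race with us, then
	 * let it go.
	 */
	static struct task_struct *spawn_fifo_kthread(int (*fn)(void *),
						      void *arg, int prio)
	{
		struct sched_param sp = { .sched_priority = prio };
		struct task_struct *t = kthread_create(fn, arg, "demo_kthread");

		if (IS_ERR(t))
			return t;
		if (prio)
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		wake_up_process(t);	/* first runs with prio in place */
		return t;
	}
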
index 8e7b1843896ebcc0fe13ed51da7cea68f14f0a36..119de399eb2f7e532f607e85f2d31c1b9e324541 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/threads.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
-#include <linux/irq_work.h>
 
 /*
  * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ -172,11 +171,6 @@ struct rcu_node {
                                /*  queued on this rcu_node structure that */
                                /*  are blocking the current grace period, */
                                /*  there can be no such task. */
-       struct completion boost_completion;
-                               /* Used to ensure that the rt_mutex used */
-                               /*  to carry out the boosting is fully */
-                               /*  released with no future boostee accesses */
-                               /*  before that rt_mutex is re-initialized. */
        struct rt_mutex boost_mtx;
                                /* Used only for the priority-boosting */
                                /*  side effect, not as a lock. */
@@ -257,9 +251,12 @@ struct rcu_data {
                                        /*  in order to detect GP end. */
        unsigned long   gpnum;          /* Highest gp number that this CPU */
                                        /*  is aware of having started. */
+       unsigned long   rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
+                                       /*  for rcu_all_qs() invocations. */
        bool            passed_quiesce; /* User-mode/idle loop etc. */
        bool            qs_pending;     /* Core waits for quiesc state. */
        bool            beenonline;     /* CPU online at least once. */
+       bool            gpwrap;         /* Possible gpnum/completed wrap. */
        struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
        unsigned long grpmask;          /* Mask to apply to leaf qsmask. */
 #ifdef CONFIG_RCU_CPU_STALL_INFO
@@ -340,14 +337,10 @@ struct rcu_data {
 #ifdef CONFIG_RCU_NOCB_CPU
        struct rcu_head *nocb_head;     /* CBs waiting for kthread. */
        struct rcu_head **nocb_tail;
-       atomic_long_t nocb_q_count;     /* # CBs waiting for kthread */
-       atomic_long_t nocb_q_count_lazy; /*  (approximate). */
+       atomic_long_t nocb_q_count;     /* # CBs waiting for nocb */
+       atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
        struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
        struct rcu_head **nocb_follower_tail;
-       atomic_long_t nocb_follower_count; /* # CBs ready to invoke. */
-       atomic_long_t nocb_follower_count_lazy; /*  (approximate). */
-       int nocb_p_count;               /* # CBs being invoked by kthread */
-       int nocb_p_count_lazy;          /*  (approximate). */
        wait_queue_head_t nocb_wq;      /* For nocb kthreads to sleep on. */
        struct task_struct *nocb_kthread;
        int nocb_defer_wakeup;          /* Defer wakeup of nocb_kthread. */
@@ -356,8 +349,6 @@ struct rcu_data {
        struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
                                        /* CBs waiting for GP. */
        struct rcu_head **nocb_gp_tail;
-       long nocb_gp_count;
-       long nocb_gp_count_lazy;
        bool nocb_leader_sleep;         /* Is the nocb leader thread asleep? */
        struct rcu_data *nocb_next_follower;
                                        /* Next follower in wakeup chain. */
@@ -488,10 +479,14 @@ struct rcu_state {
                                                /*  due to no GP active. */
        unsigned long gp_start;                 /* Time at which GP started, */
                                                /*  but in jiffies. */
+       unsigned long gp_activity;              /* Time of last GP kthread */
+                                               /*  activity in jiffies. */
        unsigned long jiffies_stall;            /* Time at which to check */
                                                /*  for CPU stalls. */
        unsigned long jiffies_resched;          /* Time at which to resched */
                                                /*  a reluctant CPU. */
+       unsigned long n_force_qs_gpstart;       /* Snapshot of n_force_qs at */
+                                               /*  GP start. */
        unsigned long gp_max;                   /* Maximum GP duration in */
                                                /*  jiffies. */
        const char *name;                       /* Name of structure. */
@@ -514,13 +509,6 @@ extern struct list_head rcu_struct_flavors;
 #define for_each_rcu_flavor(rsp) \
        list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
 
-/* Return values for rcu_preempt_offline_tasks(). */
-
-#define RCU_OFL_TASKS_NORM_GP  0x1             /* Tasks blocking normal */
-                                               /*  GP were moved to root. */
-#define RCU_OFL_TASKS_EXP_GP   0x2             /* Tasks blocking expedited */
-                                               /*  GP were moved to root. */
-
 /*
  * RCU implementation internal declarations:
  */
@@ -546,27 +534,16 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
-long rcu_batches_completed(void);
 static void rcu_preempt_note_context_switch(void);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
-static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
-                                     unsigned long flags);
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
-#ifdef CONFIG_HOTPLUG_CPU
-static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                    struct rcu_node *rnp,
-                                    struct rcu_data *rdp);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_preempt_check_callbacks(void);
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU)
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
-                              bool wake);
-#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PREEMPT_RCU) */
 static void __init __rcu_init_preempt(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -622,24 +599,15 @@ static void rcu_dynticks_task_exit(void);
 #endif /* #ifndef RCU_TREE_NONCORE */
 
 #ifdef CONFIG_RCU_TRACE
-#ifdef CONFIG_RCU_NOCB_CPU
-/* Sum up queue lengths for tracing. */
+/* Read out queue lengths for tracing. */
 static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 {
-       *ql = atomic_long_read(&rdp->nocb_q_count) +
-             rdp->nocb_p_count +
-             atomic_long_read(&rdp->nocb_follower_count) +
-             rdp->nocb_p_count + rdp->nocb_gp_count;
-       *qll = atomic_long_read(&rdp->nocb_q_count_lazy) +
-              rdp->nocb_p_count_lazy +
-              atomic_long_read(&rdp->nocb_follower_count_lazy) +
-              rdp->nocb_p_count_lazy + rdp->nocb_gp_count_lazy;
-}
+#ifdef CONFIG_RCU_NOCB_CPU
+       *ql = atomic_long_read(&rdp->nocb_q_count);
+       *qll = atomic_long_read(&rdp->nocb_q_count_lazy);
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
-static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
-{
        *ql = 0;
        *qll = 0;
-}
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+}
 #endif /* #ifdef CONFIG_RCU_TRACE */
index 3ec85cb5d544b8588fd574a80e19bd564079533f..2e850a51bb8fe285179fee76124dbc375851f09a 100644 (file)
 
 #include "../locking/rtmutex_common.h"
 
-/* rcuc/rcub kthread realtime priority */
-static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
-module_param(kthread_prio, int, 0644);
-
 /*
  * Control variables for per-CPU and per-rcu_node kthreads.  These
  * handle all flavors of RCU.
@@ -103,6 +99,8 @@ RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
 static struct rcu_state *rcu_state_p = &rcu_preempt_state;
 
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+                              bool wake);
 
 /*
  * Tell them what RCU they are running.
@@ -113,25 +111,6 @@ static void __init rcu_bootup_announce(void)
        rcu_bootup_announce_oddness();
 }
 
-/*
- * Return the number of RCU-preempt batches processed thus far
- * for debug and statistics.
- */
-static long rcu_batches_completed_preempt(void)
-{
-       return rcu_preempt_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
-
-/*
- * Return the number of RCU batches processed thus far for debug & stats.
- */
-long rcu_batches_completed(void)
-{
-       return rcu_batches_completed_preempt();
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
 /*
  * Record a preemptible-RCU quiescent state for the specified CPU.  Note
  * that this just means that the task currently running on the CPU is
@@ -306,6 +285,15 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
        return np;
 }
 
+/*
+ * Return true if the specified rcu_node structure has tasks that were
+ * preempted within an RCU read-side critical section.
+ */
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
+{
+       return !list_empty(&rnp->blkd_tasks);
+}
+
 /*
  * Handle special cases during rcu_read_unlock(), such as needing to
  * notify RCU core processing or task having blocked during the RCU
@@ -313,9 +301,10 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  */
 void rcu_read_unlock_special(struct task_struct *t)
 {
-       int empty;
-       int empty_exp;
-       int empty_exp_now;
+       bool empty;
+       bool empty_exp;
+       bool empty_norm;
+       bool empty_exp_now;
        unsigned long flags;
        struct list_head *np;
 #ifdef CONFIG_RCU_BOOST
@@ -367,7 +356,8 @@ void rcu_read_unlock_special(struct task_struct *t)
                                break;
                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                }
-               empty = !rcu_preempt_blocked_readers_cgp(rnp);
+               empty = !rcu_preempt_has_tasks(rnp);
+               empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
                empty_exp = !rcu_preempted_readers_exp(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
@@ -386,6 +376,14 @@ void rcu_read_unlock_special(struct task_struct *t)
                drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
+               /*
+                * If this was the last task on the list, go see if we
+                * need to propagate ->qsmaskinit bit clearing up the
+                * rcu_node tree.
+                */
+               if (!empty && !rcu_preempt_has_tasks(rnp))
+                       rcu_cleanup_dead_rnp(rnp);
+
                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
@@ -393,7 +391,7 @@ void rcu_read_unlock_special(struct task_struct *t)
                 * so we must take a snapshot of the expedited state.
                 */
                empty_exp_now = !rcu_preempted_readers_exp(rnp);
-               if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
+               if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
                        trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
                                                         rnp->gpnum,
                                                         0, rnp->qsmask,
@@ -408,10 +406,8 @@ void rcu_read_unlock_special(struct task_struct *t)
 
 #ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
-               if (drop_boost_mutex) {
+               if (drop_boost_mutex)
                        rt_mutex_unlock(&rnp->boost_mtx);
-                       complete(&rnp->boost_completion);
-               }
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
                /*
@@ -519,99 +515,13 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
        WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
-       if (!list_empty(&rnp->blkd_tasks))
+       if (rcu_preempt_has_tasks(rnp))
                rnp->gp_tasks = rnp->blkd_tasks.next;
        WARN_ON_ONCE(rnp->qsmask);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-/*
- * Handle tasklist migration for case in which all CPUs covered by the
- * specified rcu_node have gone offline.  Move them up to the root
- * rcu_node.  The reason for not just moving them to the immediate
- * parent is to remove the need for rcu_read_unlock_special() to
- * make more than two attempts to acquire the target rcu_node's lock.
- * Returns true if there were tasks blocking the current RCU grace
- * period.
- *
- * Returns 1 if there was previously a task blocking the current grace
- * period on the specified rcu_node structure.
- *
- * The caller must hold rnp->lock with irqs disabled.
- */
-static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                    struct rcu_node *rnp,
-                                    struct rcu_data *rdp)
-{
-       struct list_head *lp;
-       struct list_head *lp_root;
-       int retval = 0;
-       struct rcu_node *rnp_root = rcu_get_root(rsp);
-       struct task_struct *t;
-
-       if (rnp == rnp_root) {
-               WARN_ONCE(1, "Last CPU thought to be offlined?");
-               return 0;  /* Shouldn't happen: at least one CPU online. */
-       }
-
-       /* If we are on an internal node, complain bitterly. */
-       WARN_ON_ONCE(rnp != rdp->mynode);
-
-       /*
-        * Move tasks up to root rcu_node.  Don't try to get fancy for
-        * this corner-case operation -- just put this node's tasks
-        * at the head of the root node's list, and update the root node's
-        * ->gp_tasks and ->exp_tasks pointers to those of this node's,
-        * if non-NULL.  This might result in waiting for more tasks than
-        * absolutely necessary, but this is a good performance/complexity
-        * tradeoff.
-        */
-       if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
-               retval |= RCU_OFL_TASKS_NORM_GP;
-       if (rcu_preempted_readers_exp(rnp))
-               retval |= RCU_OFL_TASKS_EXP_GP;
-       lp = &rnp->blkd_tasks;
-       lp_root = &rnp_root->blkd_tasks;
-       while (!list_empty(lp)) {
-               t = list_entry(lp->next, typeof(*t), rcu_node_entry);
-               raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
-               smp_mb__after_unlock_lock();
-               list_del(&t->rcu_node_entry);
-               t->rcu_blocked_node = rnp_root;
-               list_add(&t->rcu_node_entry, lp_root);
-               if (&t->rcu_node_entry == rnp->gp_tasks)
-                       rnp_root->gp_tasks = rnp->gp_tasks;
-               if (&t->rcu_node_entry == rnp->exp_tasks)
-                       rnp_root->exp_tasks = rnp->exp_tasks;
-#ifdef CONFIG_RCU_BOOST
-               if (&t->rcu_node_entry == rnp->boost_tasks)
-                       rnp_root->boost_tasks = rnp->boost_tasks;
-#endif /* #ifdef CONFIG_RCU_BOOST */
-               raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
-       }
-
-       rnp->gp_tasks = NULL;
-       rnp->exp_tasks = NULL;
-#ifdef CONFIG_RCU_BOOST
-       rnp->boost_tasks = NULL;
-       /*
-        * In case root is being boosted and leaf was not.  Make sure
-        * that we boost the tasks blocking the current grace period
-        * in this case.
-        */
-       raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
-       smp_mb__after_unlock_lock();
-       if (rnp_root->boost_tasks != NULL &&
-           rnp_root->boost_tasks != rnp_root->gp_tasks &&
-           rnp_root->boost_tasks != rnp_root->exp_tasks)
-               rnp_root->boost_tasks = rnp_root->gp_tasks;
-       raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
-       return retval;
-}
-
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
@@ -771,7 +681,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
        smp_mb__after_unlock_lock();
-       if (list_empty(&rnp->blkd_tasks)) {
+       if (!rcu_preempt_has_tasks(rnp)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        } else {
                rnp->exp_tasks = rnp->blkd_tasks.next;
@@ -932,15 +842,6 @@ static void __init rcu_bootup_announce(void)
        rcu_bootup_announce_oddness();
 }
 
-/*
- * Return the number of RCU batches processed thus far for debug & stats.
- */
-long rcu_batches_completed(void)
-{
-       return rcu_batches_completed_sched();
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
 /*
  * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
@@ -960,11 +861,12 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-/* Because preemptible RCU does not exist, no quieting of tasks. */
-static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
-       __releases(rnp->lock)
+/*
+ * Because there is no preemptible RCU, there can be no readers blocked.
+ */
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
 {
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       return false;
 }
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -996,23 +898,6 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
        WARN_ON_ONCE(rnp->qsmask);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Because preemptible RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections, and
- * such non-existent tasks cannot possibly have been blocking the current
- * grace period.
- */
-static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
-                                    struct rcu_node *rnp,
-                                    struct rcu_data *rdp)
-{
-       return 0;
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 /*
  * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
@@ -1031,20 +916,6 @@ void synchronize_rcu_expedited(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Because preemptible RCU does not exist, there is never any need to
- * report on tasks preempted in RCU read-side critical sections during
- * expedited RCU grace periods.
- */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
-                              bool wake)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 /*
  * Because preemptible RCU does not exist, rcu_barrier() is just
  * another name for rcu_barrier_sched().
@@ -1080,7 +951,7 @@ void exit_rcu(void)
 
 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 {
-       if (list_empty(&rnp->blkd_tasks))
+       if (!rcu_preempt_has_tasks(rnp))
                rnp->n_balk_blkd_tasks++;
        else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
                rnp->n_balk_exp_gp_tasks++;
@@ -1127,7 +998,8 @@ static int rcu_boost(struct rcu_node *rnp)
        struct task_struct *t;
        struct list_head *tb;
 
-       if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
+       if (ACCESS_ONCE(rnp->exp_tasks) == NULL &&
+           ACCESS_ONCE(rnp->boost_tasks) == NULL)
                return 0;  /* Nothing left to boost. */
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1175,15 +1047,11 @@ static int rcu_boost(struct rcu_node *rnp)
         */
        t = container_of(tb, struct task_struct, rcu_node_entry);
        rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
-       init_completion(&rnp->boost_completion);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        /* Lock only for side effect: boosts task t's priority. */
        rt_mutex_lock(&rnp->boost_mtx);
        rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
 
-       /* Wait for boostee to be done w/boost_mtx before reinitializing. */
-       wait_for_completion(&rnp->boost_completion);
-
        return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
               ACCESS_ONCE(rnp->boost_tasks) != NULL;
 }
@@ -1416,12 +1284,8 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
                if ((mask & 0x1) && cpu != outgoingcpu)
                        cpumask_set_cpu(cpu, cm);
-       if (cpumask_weight(cm) == 0) {
+       if (cpumask_weight(cm) == 0)
                cpumask_setall(cm);
-               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
-                       cpumask_clear_cpu(cpu, cm);
-               WARN_ON_ONCE(cpumask_weight(cm) == 0);
-       }
        set_cpus_allowed_ptr(t, cm);
        free_cpumask_var(cm);
 }
@@ -1446,12 +1310,8 @@ static void __init rcu_spawn_boost_kthreads(void)
        for_each_possible_cpu(cpu)
                per_cpu(rcu_cpu_has_work, cpu) = 0;
        BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
-       rnp = rcu_get_root(rcu_state_p);
-       (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
-       if (NUM_RCU_NODES > 1) {
-               rcu_for_each_leaf_node(rcu_state_p, rnp)
-                       (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
-       }
+       rcu_for_each_leaf_node(rcu_state_p, rnp)
+               (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
 }
 
 static void rcu_prepare_kthreads(int cpu)
@@ -1605,7 +1465,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
                 * completed since we last checked and there are
                 * callbacks not yet ready to invoke.
                 */
-               if (rdp->completed != rnp->completed &&
+               if ((rdp->completed != rnp->completed ||
+                    unlikely(ACCESS_ONCE(rdp->gpwrap))) &&
                    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
                        note_gp_changes(rsp, rdp);
 
@@ -1898,11 +1759,12 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
                ticks_value = rsp->gpnum - rdp->gpnum;
        }
        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
-       pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
+       pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u fqs=%ld %s\n",
               cpu, ticks_value, ticks_title,
               atomic_read(&rdtp->dynticks) & 0xfff,
               rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
+              ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
               fast_no_hz);
 }
 
@@ -2056,9 +1918,26 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
 static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
 {
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+       unsigned long ret;
+#ifdef CONFIG_PROVE_RCU
        struct rcu_head *rhp;
+#endif /* #ifdef CONFIG_PROVE_RCU */
 
-       /* No-CBs CPUs might have callbacks on any of three lists. */
+       /*
+        * Check count of all no-CBs callbacks awaiting invocation.
+        * There needs to be a barrier before this function is called,
+        * one associated with a prior determination that no more
+        * callbacks would be posted.  In the worst case, the first
+        * barrier in _rcu_barrier() suffices (but the caller cannot
+        * necessarily rely on this, and it is no substitute for the caller
+        * getting the concurrency design right!).  There must also be
+        * a barrier between the following load and the posting of a callback
+        * (if a callback is in fact needed).  This is associated with an
+        * atomic_inc() in the caller.
+        */
+       ret = atomic_long_read(&rdp->nocb_q_count);
+
+#ifdef CONFIG_PROVE_RCU
        rhp = ACCESS_ONCE(rdp->nocb_head);
        if (!rhp)
                rhp = ACCESS_ONCE(rdp->nocb_gp_head);
@@ -2072,8 +1951,9 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
                       cpu, rhp->func);
                WARN_ON_ONCE(1);
        }
+#endif /* #ifdef CONFIG_PROVE_RCU */
 
-       return !!rhp;
+       return !!ret;
 }
 
 /*
@@ -2095,9 +1975,10 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
        struct task_struct *t;
 
        /* Enqueue the callback on the nocb list and update counts. */
+       atomic_long_add(rhcount, &rdp->nocb_q_count);
+       /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
        old_rhpp = xchg(&rdp->nocb_tail, rhtp);
        ACCESS_ONCE(*old_rhpp) = rhp;
-       atomic_long_add(rhcount, &rdp->nocb_q_count);
        atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
        smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
 
@@ -2288,9 +2169,6 @@ wait_again:
                /* Move callbacks to wait-for-GP list, which is empty. */
                ACCESS_ONCE(rdp->nocb_head) = NULL;
                rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
-               rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
-               rdp->nocb_gp_count_lazy =
-                       atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
                gotcbs = true;
        }
 
@@ -2338,9 +2216,6 @@ wait_again:
                /* Append callbacks to follower's "done" list. */
                tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
                *tail = rdp->nocb_gp_head;
-               atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
-               atomic_long_add(rdp->nocb_gp_count_lazy,
-                               &rdp->nocb_follower_count_lazy);
                smp_mb__after_atomic(); /* Store *tail before wakeup. */
                if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
                        /*
@@ -2415,13 +2290,11 @@ static int rcu_nocb_kthread(void *arg)
                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
                ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
                tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
-               c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
-               cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
-               rdp->nocb_p_count += c;
-               rdp->nocb_p_count_lazy += cl;
 
                /* Each pass through the following loop invokes a callback. */
-               trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
+               trace_rcu_batch_start(rdp->rsp->name,
+                                     atomic_long_read(&rdp->nocb_q_count_lazy),
+                                     atomic_long_read(&rdp->nocb_q_count), -1);
                c = cl = 0;
                while (list) {
                        next = list->next;
@@ -2443,9 +2316,9 @@ static int rcu_nocb_kthread(void *arg)
                        list = next;
                }
                trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
-               ACCESS_ONCE(rdp->nocb_p_count) = rdp->nocb_p_count - c;
-               ACCESS_ONCE(rdp->nocb_p_count_lazy) =
-                                               rdp->nocb_p_count_lazy - cl;
+               smp_mb__before_atomic();  /* _add after CB invocation. */
+               atomic_long_add(-c, &rdp->nocb_q_count);
+               atomic_long_add(-cl, &rdp->nocb_q_count_lazy);
                rdp->n_nocbs_invoked += c;
        }
        return 0;
index 5cdc62e1beeb635a36ee87098a7f38110a651382..fbb6240509ea7768210e989a63d7ee007a793297 100644 (file)
@@ -46,6 +46,8 @@
 #define RCU_TREE_NONCORE
 #include "tree.h"
 
+DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
+
 static int r_open(struct inode *inode, struct file *file,
                                        const struct seq_operations *op)
 {
@@ -115,11 +117,13 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 
        if (!rdp->beenonline)
                return;
-       seq_printf(m, "%3d%cc=%ld g=%ld pq=%d qp=%d",
+       seq_printf(m, "%3d%cc=%ld g=%ld pq=%d/%d qp=%d",
                   rdp->cpu,
                   cpu_is_offline(rdp->cpu) ? '!' : ' ',
                   ulong2long(rdp->completed), ulong2long(rdp->gpnum),
-                  rdp->passed_quiesce, rdp->qs_pending);
+                  rdp->passed_quiesce,
+                  rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
+                  rdp->qs_pending);
        seq_printf(m, " dt=%d/%llx/%d df=%lu",
                   atomic_read(&rdp->dynticks->dynticks),
                   rdp->dynticks->dynticks_nesting,
index 607f852b4d04ab3b70379bf42528aa40e5f588c7..7052d3fd4e7bd87a29bd144cbca1086621040251 100644 (file)
@@ -268,6 +268,15 @@ bool try_wait_for_completion(struct completion *x)
        unsigned long flags;
        int ret = 1;
 
+       /*
+        * Since x->done is consumed under the lock only in the
+        * non-blocking (success) case, peek at x->done without
+        * taking the lock first, so the would-block case can
+        * return early.
+        */
+       if (!ACCESS_ONCE(x->done))
+               return 0;
+
        spin_lock_irqsave(&x->wait.lock, flags);
        if (!x->done)
                ret = 0;
@@ -288,13 +297,6 @@ EXPORT_SYMBOL(try_wait_for_completion);
  */
 bool completion_done(struct completion *x)
 {
-       unsigned long flags;
-       int ret = 1;
-
-       spin_lock_irqsave(&x->wait.lock, flags);
-       if (!x->done)
-               ret = 0;
-       spin_unlock_irqrestore(&x->wait.lock, flags);
-       return ret;
+       return !!ACCESS_ONCE(x->done);
 }
 EXPORT_SYMBOL(completion_done);
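
Taken together, the two completion changes make the read side lock-free: completion_done() is now a plain ACCESS_ONCE() load, and try_wait_for_completion() bails out early without the lock when nothing has completed; the spinlock is still taken to actually consume a completion, pairing with the locked update in complete(). A minimal usage sketch of this API (the callers are hypothetical, the calls are real):

	#include <linux/completion.h>
	#include <linux/printk.h>

	static DECLARE_COMPLETION(demo_done);	/* hypothetical completion */

	static void demo_producer(void)
	{
		/* ... finish the work the waiter cares about ... */
		complete(&demo_done);	/* ->done++, wake one waiter */
	}

	static void demo_poll(void)
	{
		if (!completion_done(&demo_done))
			return;		/* lock-free: nothing done yet */
		if (try_wait_for_completion(&demo_done))
			pr_info("consumed a completion without sleeping\n");
	}
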
index c0accc00566eb774a022870635c60e63da6ee198..1f37fe7f77a45de9a1055f7884d5c61f567e1174 100644 (file)
@@ -119,7 +119,9 @@ void update_rq_clock(struct rq *rq)
 {
        s64 delta;
 
-       if (rq->skip_clock_update > 0)
+       lockdep_assert_held(&rq->lock);
+
+       if (rq->clock_skip_update & RQCF_ACT_SKIP)
                return;
 
        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
@@ -490,6 +492,11 @@ static __init void init_hrtick(void)
  */
 void hrtick_start(struct rq *rq, u64 delay)
 {
+       /*
+        * Don't schedule slices shorter than 10000ns; that just
+        * doesn't make sense. Rely on vruntime for fairness.
+        */
+       delay = max_t(u64, delay, 10000LL);
        __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
                        HRTIMER_MODE_REL_PINNED, 0);
 }
@@ -1046,7 +1053,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
         * this case, we can save a useless back to back clock update.
         */
        if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-               rq->skip_clock_update = 1;
+               rq_clock_skip_update(rq, true);
 }
 
 #ifdef CONFIG_SMP
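
The old rq->skip_clock_update tristate becomes a small flag word across these hunks: rq_clock_skip_update(rq, true) above sets a request bit, and the "<<= 1" in the __schedule() hunk further down promotes that request into the bit update_rq_clock() honors, so a stale request can no longer suppress a clock update outside the schedule path. A sketch of the bit dance, assuming the conventional flag values (RQCF_REQ_SKIP = 0x01, RQCF_ACT_SKIP = 0x02; both values are assumptions here):

	#include <stdio.h>

	#define RQCF_REQ_SKIP 0x01	/* request: skip next clock update */
	#define RQCF_ACT_SKIP 0x02	/* active: honored in __schedule() */

	int main(void)
	{
		unsigned int clock_skip_update = 0;

		clock_skip_update |= RQCF_REQ_SKIP;	/* rq_clock_skip_update() */
		clock_skip_update <<= 1;		/* __schedule(): REQ -> ACT */
		printf("update_rq_clock() skips: %s\n",
		       (clock_skip_update & RQCF_ACT_SKIP) ? "yes" : "no");
		return 0;
	}
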
@@ -1082,7 +1089,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                if (p->sched_class->migrate_task_rq)
                        p->sched_class->migrate_task_rq(p, new_cpu);
                p->se.nr_migrations++;
-               perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+               perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
        }
 
        __set_task_cpu(p, new_cpu);
@@ -1814,6 +1821,10 @@ void __dl_clear_params(struct task_struct *p)
        dl_se->dl_period = 0;
        dl_se->flags = 0;
        dl_se->dl_bw = 0;
+
+       dl_se->dl_throttled = 0;
+       dl_se->dl_new = 1;
+       dl_se->dl_yielded = 0;
 }
 
 /*
@@ -1832,6 +1843,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
        p->se.prev_sum_exec_runtime     = 0;
        p->se.nr_migrations             = 0;
        p->se.vruntime                  = 0;
+#ifdef CONFIG_SMP
+       p->se.avg.decay_count           = 0;
+#endif
        INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_SCHEDSTATS
@@ -1839,7 +1853,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #endif
 
        RB_CLEAR_NODE(&p->dl.rb_node);
-       hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       init_dl_task_timer(&p->dl);
        __dl_clear_params(p);
 
        INIT_LIST_HEAD(&p->rt.run_list);
@@ -2049,6 +2063,9 @@ static inline int dl_bw_cpus(int i)
  * allocated bandwidth to reflect the new situation.
  *
  * This function is called while holding p's rq->lock.
+ *
+ * XXX we should delay bw change until the task's 0-lag point, see
+ * __setparam_dl().
  */
 static int dl_overflow(struct task_struct *p, int policy,
                       const struct sched_attr *attr)
@@ -2748,6 +2765,10 @@ again:
  *          - explicit schedule() call
  *          - return from syscall or exception to user-space
  *          - return from interrupt-handler to user-space
+ *
+ * WARNING: all callers must re-check need_resched() afterward and reschedule
+ * accordingly in case an event triggered the need for rescheduling (such as
+ * an interrupt waking up a task) while preemption was disabled in __schedule().
  */
 static void __sched __schedule(void)
 {
@@ -2756,7 +2777,6 @@ static void __sched __schedule(void)
        struct rq *rq;
        int cpu;
 
-need_resched:
        preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
@@ -2776,6 +2796,8 @@ need_resched:
        smp_mb__before_spinlock();
        raw_spin_lock_irq(&rq->lock);
 
+       rq->clock_skip_update <<= 1; /* promote REQ to ACT */
+
        switch_count = &prev->nivcsw;
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -2800,13 +2822,13 @@ need_resched:
                switch_count = &prev->nvcsw;
        }
 
-       if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
+       if (task_on_rq_queued(prev))
                update_rq_clock(rq);
 
        next = pick_next_task(rq, prev);
        clear_tsk_need_resched(prev);
        clear_preempt_need_resched();
-       rq->skip_clock_update = 0;
+       rq->clock_skip_update = 0;
 
        if (likely(prev != next)) {
                rq->nr_switches++;
@@ -2821,8 +2843,6 @@ need_resched:
        post_schedule(rq);
 
        sched_preempt_enable_no_resched();
-       if (need_resched())
-               goto need_resched;
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -2842,7 +2862,9 @@ asmlinkage __visible void __sched schedule(void)
        struct task_struct *tsk = current;
 
        sched_submit_work(tsk);
-       __schedule();
+       do {
+               __schedule();
+       } while (need_resched());
 }
 EXPORT_SYMBOL(schedule);
 
@@ -2877,6 +2899,21 @@ void __sched schedule_preempt_disabled(void)
        preempt_disable();
 }
 
+static void preempt_schedule_common(void)
+{
+       do {
+               __preempt_count_add(PREEMPT_ACTIVE);
+               __schedule();
+               __preempt_count_sub(PREEMPT_ACTIVE);
+
+               /*
+                * Check again in case we missed a preemption opportunity
+                * between schedule and now.
+                */
+               barrier();
+       } while (need_resched());
+}
+
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
@@ -2892,17 +2929,7 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
        if (likely(!preemptible()))
                return;
 
-       do {
-               __preempt_count_add(PREEMPT_ACTIVE);
-               __schedule();
-               __preempt_count_sub(PREEMPT_ACTIVE);
-
-               /*
-                * Check again in case we missed a preemption opportunity
-                * between schedule and now.
-                */
-               barrier();
-       } while (need_resched());
+       preempt_schedule_common();
 }
 NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
@@ -3251,15 +3278,31 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 {
        struct sched_dl_entity *dl_se = &p->dl;
 
-       init_dl_task_timer(dl_se);
        dl_se->dl_runtime = attr->sched_runtime;
        dl_se->dl_deadline = attr->sched_deadline;
        dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
        dl_se->flags = attr->sched_flags;
        dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
-       dl_se->dl_throttled = 0;
-       dl_se->dl_new = 1;
-       dl_se->dl_yielded = 0;
+
+       /*
+        * Changing the parameters of a task is 'tricky' and we're not doing
+        * the correct thing -- also see task_dead_dl() and switched_from_dl().
+        *
+        * What we SHOULD do is delay the bandwidth release until the 0-lag
+        * point. This would include retaining the task_struct until that time
+        * and change dl_overflow() to not immediately decrement the current
+        * amount.
+        *
+        * Instead we retain the current runtime/deadline and let the new
+        * parameters take effect after the current reservation period lapses.
+        * This is safe (albeit pessimistic) because the 0-lag point is always
+        * before the current scheduling deadline.
+        *
+        * We can still have temporary overloads because we do not delay the
+        * change in bandwidth until that time; so admission control is
+        * not on the safe side. It does however guarantee tasks will never
+        * consume more than promised.
+        */
 }
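
For reference, the "0-lag point" that this comment and the nearby XXX notes keep deferring to has a closed form in CBS theory; as a sketch of the accounting (a standard CBS result, not text from this patch), with remaining runtime q, absolute deadline d, and reserved bandwidth U:

	\[
	  t_{0\text{-lag}} \;=\; d \;-\; \frac{q}{U},
	  \qquad U \;=\; \frac{\mathrm{dl\_runtime}}{\mathrm{dl\_period}}
	\]

Since q >= 0 and U > 0, t_{0-lag} <= d always holds: letting the old reservation run out to its current deadline is never earlier than the 0-lag point, which is exactly the "safe (albeit pessimistic)" claim above.
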
 
 /*
@@ -3382,6 +3425,20 @@ static bool check_same_owner(struct task_struct *p)
        return match;
 }
 
+static bool dl_param_changed(struct task_struct *p,
+               const struct sched_attr *attr)
+{
+       struct sched_dl_entity *dl_se = &p->dl;
+
+       if (dl_se->dl_runtime != attr->sched_runtime ||
+               dl_se->dl_deadline != attr->sched_deadline ||
+               dl_se->dl_period != attr->sched_period ||
+               dl_se->flags != attr->sched_flags)
+               return true;
+
+       return false;
+}
+
 static int __sched_setscheduler(struct task_struct *p,
                                const struct sched_attr *attr,
                                bool user)
@@ -3510,7 +3567,7 @@ recheck:
                        goto change;
                if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
                        goto change;
-               if (dl_policy(policy))
+               if (dl_policy(policy) && dl_param_changed(p, attr))
                        goto change;
 
                p->sched_reset_on_fork = reset_on_fork;
@@ -4202,17 +4259,10 @@ SYSCALL_DEFINE0(sched_yield)
        return 0;
 }
 
-static void __cond_resched(void)
-{
-       __preempt_count_add(PREEMPT_ACTIVE);
-       __schedule();
-       __preempt_count_sub(PREEMPT_ACTIVE);
-}
-
 int __sched _cond_resched(void)
 {
        if (should_resched()) {
-               __cond_resched();
+               preempt_schedule_common();
                return 1;
        }
        return 0;
@@ -4237,7 +4287,7 @@ int __cond_resched_lock(spinlock_t *lock)
        if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
                if (resched)
-                       __cond_resched();
+                       preempt_schedule_common();
                else
                        cpu_relax();
                ret = 1;
@@ -4253,7 +4303,7 @@ int __sched __cond_resched_softirq(void)
 
        if (should_resched()) {
                local_bh_enable();
-               __cond_resched();
+               preempt_schedule_common();
                local_bh_disable();
                return 1;
        }
@@ -4508,9 +4558,10 @@ void sched_show_task(struct task_struct *p)
 {
        unsigned long free = 0;
        int ppid;
-       unsigned state;
+       unsigned long state = p->state;
 
-       state = p->state ? __ffs(p->state) + 1 : 0;
+       if (state)
+               state = __ffs(state) + 1;
        printk(KERN_INFO "%-15.15s %c", p->comm,
                state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
@@ -4642,6 +4693,9 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
        struct dl_bw *cur_dl_b;
        unsigned long flags;
 
+       if (!cpumask_weight(cur))
+               return ret;
+
        rcu_read_lock_sched();
        cur_dl_b = dl_bw_of(cpumask_any(cur));
        trial_cpus = cpumask_weight(trial);
@@ -4740,7 +4794,7 @@ static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
 
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
-       if (p->sched_class && p->sched_class->set_cpus_allowed)
+       if (p->sched_class->set_cpus_allowed)
                p->sched_class->set_cpus_allowed(p, new_mask);
 
        cpumask_copy(&p->cpus_allowed, new_mask);
@@ -7249,6 +7303,11 @@ void __init sched_init(void)
        atomic_inc(&init_mm.mm_count);
        enter_lazy_tlb(&init_mm, current);
 
+       /*
+        * During early bootup we pretend to be a normal task:
+        */
+       current->sched_class = &fair_sched_class;
+
        /*
         * Make us the idle thread. Technically, schedule() should not be
         * called from this thread, however somewhere below it might be,
@@ -7259,11 +7318,6 @@ void __init sched_init(void)
 
        calc_load_update = jiffies + LOAD_FREQ;
 
-       /*
-        * During early bootup we pretend to be a normal task:
-        */
-       current->sched_class = &fair_sched_class;
-
 #ifdef CONFIG_SMP
        zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
        /* May be allocated at isolcpus cmdline parse time */
@@ -7292,13 +7346,12 @@ void __might_sleep(const char *file, int line, int preempt_offset)
         * since we will exit with TASK_RUNNING make sure we enter with it,
         * otherwise we will destroy state.
         */
-       if (WARN_ONCE(current->state != TASK_RUNNING,
+       WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
                        "do not call blocking ops when !TASK_RUNNING; "
                        "state=%lx set at [<%p>] %pS\n",
                        current->state,
                        (void *)current->task_state_change,
-                       (void *)current->task_state_change))
-               __set_current_state(TASK_RUNNING);
+                       (void *)current->task_state_change);
 
        ___might_sleep(file, line, preempt_offset);
 }
@@ -7325,6 +7378,9 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
                        in_atomic(), irqs_disabled(),
                        current->pid, current->comm);
 
+       if (task_stack_end_corrupted(current))
+               printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
+
        debug_show_held_locks(current);
        if (irqs_disabled())
                print_irqtrace_events(current);
index 539ca3ce071b2858f437cd0d0265d473093be9e2..c6acb07466bb82b1143af4aba1da5e483f628e4f 100644 (file)
@@ -107,7 +107,8 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
        int best_cpu = -1;
        const struct sched_dl_entity *dl_se = &p->dl;
 
-       if (later_mask && cpumask_and(later_mask, later_mask, cp->free_cpus)) {
+       if (later_mask &&
+           cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
                best_cpu = cpumask_any(later_mask);
                goto out;
        } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
@@ -185,6 +186,26 @@ out:
        raw_spin_unlock_irqrestore(&cp->lock, flags);
 }
 
+/*
+ * cpudl_set_freecpu - Set the cpudl.free_cpus
+ * @cp: the cpudl max-heap context
+ * @cpu: rd attached cpu
+ */
+void cpudl_set_freecpu(struct cpudl *cp, int cpu)
+{
+       cpumask_set_cpu(cpu, cp->free_cpus);
+}
+
+/*
+ * cpudl_clear_freecpu - Clear the cpudl.free_cpus
+ * @cp: the cpudl max-heap context
+ * @cpu: rd attached cpu
+ */
+void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
+{
+       cpumask_clear_cpu(cpu, cp->free_cpus);
+}
+
 /*
  * cpudl_init - initialize the cpudl structure
  * @cp: the cpudl max-heap context
@@ -203,7 +224,7 @@ int cpudl_init(struct cpudl *cp)
        if (!cp->elements)
                return -ENOMEM;
 
-       if (!alloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
+       if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
                kfree(cp->elements);
                return -ENOMEM;
        }
@@ -211,8 +232,6 @@ int cpudl_init(struct cpudl *cp)
        for_each_possible_cpu(i)
                cp->elements[i].idx = IDX_INVALID;
 
-       cpumask_setall(cp->free_cpus);
-
        return 0;
 }
 
index 020039bd1326852126480fa5e5038f58d26ca18c..1a0a6ef2fbe1be030e32895571a1d267e26579e4 100644 (file)
@@ -24,6 +24,8 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
               struct cpumask *later_mask);
 void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid);
 int cpudl_init(struct cpudl *cp);
+void cpudl_set_freecpu(struct cpudl *cp, int cpu);
+void cpudl_clear_freecpu(struct cpudl *cp, int cpu);
 void cpudl_cleanup(struct cpudl *cp);
 #endif /* CONFIG_SMP */
 
index b52092f2636d50e8a816b2e7e20a648b00d6bb70..a027799ae130d3623ff4351f08c3cf456979bfbc 100644 (file)
@@ -350,6 +350,11 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }
+
+       if (dl_se->dl_yielded)
+               dl_se->dl_yielded = 0;
+       if (dl_se->dl_throttled)
+               dl_se->dl_throttled = 0;
 }
 
 /*
@@ -536,23 +541,19 @@ again:
 
        sched_clock_tick();
        update_rq_clock(rq);
-       dl_se->dl_throttled = 0;
-       dl_se->dl_yielded = 0;
-       if (task_on_rq_queued(p)) {
-               enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
-               if (dl_task(rq->curr))
-                       check_preempt_curr_dl(rq, p, 0);
-               else
-                       resched_curr(rq);
+       enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
+       if (dl_task(rq->curr))
+               check_preempt_curr_dl(rq, p, 0);
+       else
+               resched_curr(rq);
 #ifdef CONFIG_SMP
-               /*
-                * Queueing this task back might have overloaded rq,
-                * check if we need to kick someone away.
-                */
-               if (has_pushable_dl_tasks(rq))
-                       push_dl_task(rq);
+       /*
+        * Queueing this task back might have overloaded rq,
+        * check if we need to kick someone away.
+        */
+       if (has_pushable_dl_tasks(rq))
+               push_dl_task(rq);
 #endif
-       }
 unlock:
        raw_spin_unlock(&rq->lock);
 
@@ -613,10 +614,9 @@ static void update_curr_dl(struct rq *rq)
 
        dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
        if (dl_runtime_exceeded(rq, dl_se)) {
+               dl_se->dl_throttled = 1;
                __dequeue_task_dl(rq, curr, 0);
-               if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
-                       dl_se->dl_throttled = 1;
-               else
+               if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
 
                if (!is_leftmost(curr, &rq->dl))
@@ -853,7 +853,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
         * its rq, the bandwidth timer callback (which clearly has not
         * run yet) will take care of this.
         */
-       if (p->dl.dl_throttled)
+       if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
                return;
 
        enqueue_dl_entity(&p->dl, pi_se, flags);
@@ -1073,7 +1073,13 @@ static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
 {
        update_curr_dl(rq);
 
-       if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
+       /*
+        * Even when we have runtime, update_curr_dl() might have resulted in us
+        * not being the leftmost task anymore. In that case NEED_RESCHED will
+        * be set and schedule() will start a new hrtick for the next task.
+        */
+       if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
+           is_leftmost(p, &rq->dl))
                start_hrtick_dl(rq, p);
 }
 
@@ -1094,6 +1100,7 @@ static void task_dead_dl(struct task_struct *p)
         * Since we are TASK_DEAD we won't slip out of the domain!
         */
        raw_spin_lock_irq(&dl_b->lock);
+       /* XXX we should retain the bw until 0-lag */
        dl_b->total_bw -= p->dl.dl_bw;
        raw_spin_unlock_irq(&dl_b->lock);
 
@@ -1165,9 +1172,6 @@ static int find_later_rq(struct task_struct *task)
         * We have to consider system topology and task affinity
         * first, then we can look for a suitable cpu.
         */
-       cpumask_copy(later_mask, task_rq(task)->rd->span);
-       cpumask_and(later_mask, later_mask, cpu_active_mask);
-       cpumask_and(later_mask, later_mask, &task->cpus_allowed);
        best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
                        task, later_mask);
        if (best_cpu == -1)
@@ -1562,6 +1566,7 @@ static void rq_online_dl(struct rq *rq)
        if (rq->dl.overloaded)
                dl_set_overload(rq);
 
+       cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
        if (rq->dl.dl_nr_running > 0)
                cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
 }
@@ -1573,6 +1578,7 @@ static void rq_offline_dl(struct rq *rq)
                dl_clear_overload(rq);
 
        cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
+       cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
 }
 
 void init_sched_dl_class(void)
@@ -1614,8 +1620,8 @@ static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
 
 static void switched_from_dl(struct rq *rq, struct task_struct *p)
 {
+       /* XXX we should retain the bw until 0-lag */
        cancel_dl_timer(rq, p);
-
        __dl_clear_params(p);
 
        /*
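
The ENQUEUE_REPLENISH gate added to enqueue_task_dl() is what ties the hunks above together: once an entity is throttled, only its bandwidth timer may put it back on the runqueue. A condensed sketch of the resulting flow (function names as in the hunks; surrounding declarations omitted):

	/* update_curr_dl(): budget exhausted */
	dl_se->dl_throttled = 1;              /* mark before dequeueing */
	__dequeue_task_dl(rq, curr, 0);
	if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
		enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

	/* dl_task_timer(): replenishment timer fired */
	enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);    /* bypasses the gate */

	/* enqueue_task_dl(): any other enqueue of a throttled task */
	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
		return;                               /* wait for the timer */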
index 92cc52001e74d1f9298c13cf75c6b5c3501c6379..8baaf858d25c49921eaa3d9a83235b9f0d2b8c6c 100644 (file)
@@ -305,6 +305,7 @@ do {                                                                        \
        PN(next_balance);
        SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
        PN(clock);
+       PN(clock_task);
        P(cpu_load[0]);
        P(cpu_load[1]);
        P(cpu_load[2]);
index 40667cbf371ba9e8732e6c30940cc146752ee0c3..7ce18f3c097ac4779eb4cf6ed0ad14ac1beb3eb5 100644 (file)
@@ -676,7 +676,6 @@ void init_task_runnable_average(struct task_struct *p)
 {
        u32 slice;
 
-       p->se.avg.decay_count = 0;
        slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
        p->se.avg.runnable_avg_sum = slice;
        p->se.avg.runnable_avg_period = slice;
@@ -1730,7 +1729,7 @@ static int preferred_group_nid(struct task_struct *p, int nid)
        nodes = node_online_map;
        for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
                unsigned long max_faults = 0;
-               nodemask_t max_group;
+               nodemask_t max_group = NODE_MASK_NONE;
                int a, b;
 
                /* Are there nodes at this distance from each other? */
@@ -2574,11 +2573,11 @@ static inline u64 __synchronize_entity_decay(struct sched_entity *se)
        u64 decays = atomic64_read(&cfs_rq->decay_counter);
 
        decays -= se->avg.decay_count;
+       se->avg.decay_count = 0;
        if (!decays)
                return 0;
 
        se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
-       se->avg.decay_count = 0;
 
        return decays;
 }
@@ -5157,7 +5156,7 @@ static void yield_task_fair(struct rq *rq)
                 * so we don't do microscopic update in schedule()
                 * and double the fastpath cost.
                 */
-                rq->skip_clock_update = 1;
+               rq_clock_skip_update(rq, true);
        }
 
        set_skip_buddy(se);
@@ -5949,8 +5948,8 @@ static unsigned long scale_rt_capacity(int cpu)
         */
        age_stamp = ACCESS_ONCE(rq->age_stamp);
        avg = ACCESS_ONCE(rq->rt_avg);
+       delta = __rq_clock_broken(rq) - age_stamp;
 
-       delta = rq_clock(rq) - age_stamp;
        if (unlikely(delta < 0))
                delta = 0;
 
index c47fce75e66648b25e71da3d3ecdca8ec4448227..aaf1c1d5cf5d275d418cfc121f270d5a7b0f04e0 100644 (file)
@@ -47,7 +47,8 @@ static inline int cpu_idle_poll(void)
        rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
-       while (!tif_need_resched())
+       while (!tif_need_resched() &&
+               (cpu_idle_force_poll || tick_check_broadcast_expired()))
                cpu_relax();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        rcu_idle_exit();
index ee15f5a0d1c1896c1cc6edf2d202c3f1652c87f8..f4d4b077eba0a67a5c55e6a04dee8f6ce78f322c 100644 (file)
@@ -831,11 +831,14 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                                enqueue = 1;
 
                                /*
-                                * Force a clock update if the CPU was idle,
-                                * lest wakeup -> unthrottle time accumulate.
+                                * When we're idle and a woken (rt) task is
+                                * throttled check_preempt_curr() will set
+                                * skip_update and the time between the wakeup
+                                * and this unthrottle will get accounted as
+                                * 'runtime'.
                                 */
                                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-                                       rq->skip_clock_update = -1;
+                                       rq_clock_skip_update(rq, false);
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
@@ -1337,7 +1340,12 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
             curr->prio <= p->prio)) {
                int target = find_lowest_rq(p);
 
-               if (target != -1)
+               /*
+                * Don't bother moving it if the destination CPU is
+                * not running a lower priority task.
+                */
+               if (target != -1 &&
+                   p->prio < cpu_rq(target)->rt.highest_prio.curr)
                        cpu = target;
        }
        rcu_read_unlock();
@@ -1614,6 +1622,16 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 
                lowest_rq = cpu_rq(cpu);
 
+               if (lowest_rq->rt.highest_prio.curr <= task->prio) {
+                       /*
+                        * Target rq has tasks of equal or higher priority,
+                        * retrying does not release any lock and is unlikely
+                        * to yield a different result.
+                        */
+                       lowest_rq = NULL;
+                       break;
+               }
+
                /* if the prio of this runqueue changed, try again */
                if (double_lock_balance(rq, lowest_rq)) {
                        /*
index 9a2a45c970e7dcbc0c146c027acc1bab713ff4ee..0870db23d79cb3c0578b4f4b3450f5dad02c929e 100644 (file)
@@ -558,8 +558,6 @@ struct rq {
 #ifdef CONFIG_NO_HZ_FULL
        unsigned long last_sched_tick;
 #endif
-       int skip_clock_update;
-
        /* capture load from *all* tasks on this cpu: */
        struct load_weight load;
        unsigned long nr_load_updates;
@@ -588,6 +586,7 @@ struct rq {
        unsigned long next_balance;
        struct mm_struct *prev_mm;
 
+       unsigned int clock_skip_update;
        u64 clock;
        u64 clock_task;
 
@@ -687,16 +686,35 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 #define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
 #define raw_rq()               raw_cpu_ptr(&runqueues)
 
+static inline u64 __rq_clock_broken(struct rq *rq)
+{
+       return ACCESS_ONCE(rq->clock);
+}
+
 static inline u64 rq_clock(struct rq *rq)
 {
+       lockdep_assert_held(&rq->lock);
        return rq->clock;
 }
 
 static inline u64 rq_clock_task(struct rq *rq)
 {
+       lockdep_assert_held(&rq->lock);
        return rq->clock_task;
 }
 
+#define RQCF_REQ_SKIP  0x01
+#define RQCF_ACT_SKIP  0x02
+
+static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+{
+       lockdep_assert_held(&rq->lock);
+       if (skip)
+               rq->clock_skip_update |= RQCF_REQ_SKIP;
+       else
+               rq->clock_skip_update &= ~RQCF_REQ_SKIP;
+}
+
 #ifdef CONFIG_NUMA
 enum numa_topology_type {
        NUMA_DIRECT,
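
Only the request side of the skip protocol is visible in these hunks. The consuming side presumably lives in update_rq_clock() and __schedule(), roughly along these lines (an assumption based on the RQCF_* flag values; not part of this diff):

	void update_rq_clock(struct rq *rq)
	{
		s64 delta;

		lockdep_assert_held(&rq->lock);
		if (rq->clock_skip_update & RQCF_ACT_SKIP)
			return;		/* honour one requested skip */

		delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
		rq->clock += delta;
		update_rq_clock_task(rq, delta);
	}

	/* __schedule(): promote a request into an acted-upon skip */
	rq->clock_skip_update <<= 1;	/* RQCF_REQ_SKIP (0x01) -> RQCF_ACT_SKIP (0x02) */
	update_rq_clock(rq);
	/* pick the next task, then drop the skip state */
	rq->clock_skip_update = 0;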
index f032fb5284e3a340108359a0a17eccfa3b20e115..40190f28db3590140cb903d3f596883c61faaa74 100644 (file)
@@ -280,6 +280,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
        unsigned int cpu;
        int ret = 0;
 
+       get_online_cpus();
        mutex_lock(&smpboot_threads_lock);
        for_each_online_cpu(cpu) {
                ret = __smpboot_create_thread(plug_thread, cpu);
@@ -292,6 +293,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
        list_add(&plug_thread->list, &hotplug_threads);
 out:
        mutex_unlock(&smpboot_threads_lock);
+       put_online_cpus();
        return ret;
 }
 EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
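
The get_online_cpus()/put_online_cpus() bracket pins CPU hotplug for the duration of the loop, so for_each_online_cpu() cannot race against a CPU appearing or disappearing while the per-cpu threads are being created. The general shape of the idiom (do_per_cpu_setup() is a hypothetical stand-in):

	get_online_cpus();              /* hold off hotplug transitions */
	for_each_online_cpu(cpu)
		do_per_cpu_setup(cpu);  /* hypothetical: online set is frozen */
	put_online_cpus();              /* let hotplug proceed again */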
index 501baa9ac1be3ac37b1a7f7e1204aea2d5456971..479e4436f787646c92c42e0dbe2d940b366fbf60 100644 (file)
@@ -114,8 +114,12 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);
 
-       if (preempt_count() == cnt)
+       if (preempt_count() == cnt) {
+#ifdef CONFIG_DEBUG_PREEMPT
+               current->preempt_disable_ip = get_parent_ip(CALLER_ADDR1);
+#endif
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
+       }
 }
 EXPORT_SYMBOL(__local_bh_disable_ip);
 #endif /* CONFIG_TRACE_IRQFLAGS */
@@ -656,9 +660,8 @@ static void run_ksoftirqd(unsigned int cpu)
                 * in the task stack here.
                 */
                __do_softirq();
-               rcu_note_context_switch();
                local_irq_enable();
-               cond_resched();
+               cond_resched_rcu_qs();
                return;
        }
        local_irq_enable();
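
cond_resched_rcu_qs() replaces the explicit rcu_note_context_switch() + cond_resched() pair with a helper that reports a quiescent state even when no reschedule actually happens. In kernels of this era it is defined roughly as follows (quoted from memory, so treat it as an assumption):

	#define cond_resched_rcu_qs()					\
	do {								\
		if (!cond_resched())					\
			rcu_note_voluntary_context_switch(current);	\
	} while (0)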
index 37e50aadd471195bebb3dc32c9ad026dbbcf2ab7..3f5e183c3d9727934eee93ba74038cab5cf19cd5 100644 (file)
@@ -122,7 +122,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
        mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai);
        boot = ktime_add(mono, off_boot);
        xtim = ktime_add(mono, off_real);
-       tai = ktime_add(xtim, off_tai);
+       tai = ktime_add(mono, off_tai);
 
        base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
        base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
@@ -266,7 +266,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 /*
  * Divide a ktime value by a nanosecond value
  */
-u64 ktime_divns(const ktime_t kt, s64 div)
+u64 __ktime_divns(const ktime_t kt, s64 div)
 {
        u64 dclc;
        int sft = 0;
@@ -282,7 +282,7 @@ u64 ktime_divns(const ktime_t kt, s64 div)
 
        return dclc;
 }
-EXPORT_SYMBOL_GPL(ktime_divns);
+EXPORT_SYMBOL_GPL(__ktime_divns);
 #endif /* BITS_PER_LONG >= 64 */
 
 /*
@@ -440,6 +440,37 @@ static inline void debug_deactivate(struct hrtimer *timer)
        trace_hrtimer_cancel(timer);
 }
 
+#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+{
+       struct hrtimer_clock_base *base = cpu_base->clock_base;
+       ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
+       int i;
+
+       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
+               struct timerqueue_node *next;
+               struct hrtimer *timer;
+
+               next = timerqueue_getnext(&base->active);
+               if (!next)
+                       continue;
+
+               timer = container_of(next, struct hrtimer, node);
+               expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+               if (expires.tv64 < expires_next.tv64)
+                       expires_next = expires;
+       }
+       /*
+        * clock_was_set() might have changed base->offset of any of
+        * the clock bases so the result might be negative. Fix it up
+        * to prevent a false positive in clockevents_program_event().
+        */
+       if (expires_next.tv64 < 0)
+               expires_next.tv64 = 0;
+       return expires_next;
+}
+#endif
+
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -488,32 +519,7 @@ static inline int hrtimer_hres_active(void)
 static void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
-       int i;
-       struct hrtimer_clock_base *base = cpu_base->clock_base;
-       ktime_t expires, expires_next;
-
-       expires_next.tv64 = KTIME_MAX;
-
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
-               struct hrtimer *timer;
-               struct timerqueue_node *next;
-
-               next = timerqueue_getnext(&base->active);
-               if (!next)
-                       continue;
-               timer = container_of(next, struct hrtimer, node);
-
-               expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-               /*
-                * clock_was_set() has changed base->offset so the
-                * result might be negative. Fix it up to prevent a
-                * false positive in clockevents_program_event()
-                */
-               if (expires.tv64 < 0)
-                       expires.tv64 = 0;
-               if (expires.tv64 < expires_next.tv64)
-                       expires_next = expires;
-       }
+       ktime_t expires_next = __hrtimer_get_next_event(cpu_base);
 
        if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
                return;
@@ -586,6 +592,15 @@ static int hrtimer_reprogram(struct hrtimer *timer,
        if (expires.tv64 >= cpu_base->expires_next.tv64)
                return 0;
 
+       /*
+        * When the target cpu of the timer is currently executing
+        * hrtimer_interrupt(), then we do not touch the clock event
+        * device. hrtimer_interrupt() will reevaluate all clock bases
+        * before reprogramming the device.
+        */
+       if (cpu_base->in_hrtirq)
+               return 0;
+
        /*
         * If a hang was detected in the last timer interrupt then we
         * do not schedule a timer which is earlier than the expiry
@@ -1104,29 +1119,14 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
 ktime_t hrtimer_get_next_event(void)
 {
        struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
-       struct hrtimer_clock_base *base = cpu_base->clock_base;
-       ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
+       ktime_t mindelta = { .tv64 = KTIME_MAX };
        unsigned long flags;
-       int i;
 
        raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
-       if (!hrtimer_hres_active()) {
-               for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
-                       struct hrtimer *timer;
-                       struct timerqueue_node *next;
-
-                       next = timerqueue_getnext(&base->active);
-                       if (!next)
-                               continue;
-
-                       timer = container_of(next, struct hrtimer, node);
-                       delta.tv64 = hrtimer_get_expires_tv64(timer);
-                       delta = ktime_sub(delta, base->get_time());
-                       if (delta.tv64 < mindelta.tv64)
-                               mindelta.tv64 = delta.tv64;
-               }
-       }
+       if (!hrtimer_hres_active())
+               mindelta = ktime_sub(__hrtimer_get_next_event(cpu_base),
+                                    ktime_get());
 
        raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
@@ -1253,7 +1253,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
        raw_spin_lock(&cpu_base->lock);
        entry_time = now = hrtimer_update_base(cpu_base);
 retry:
-       expires_next.tv64 = KTIME_MAX;
+       cpu_base->in_hrtirq = 1;
        /*
         * We set expires_next to KTIME_MAX here with cpu_base->lock
         * held to prevent that a timer is enqueued in our queue via
@@ -1291,28 +1291,20 @@ retry:
                         * are right-of a not yet expired timer, because that
                         * timer will have to trigger a wakeup anyway.
                         */
-
-                       if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
-                               ktime_t expires;
-
-                               expires = ktime_sub(hrtimer_get_expires(timer),
-                                                   base->offset);
-                               if (expires.tv64 < 0)
-                                       expires.tv64 = KTIME_MAX;
-                               if (expires.tv64 < expires_next.tv64)
-                                       expires_next = expires;
+                       if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
                                break;
-                       }
 
                        __run_hrtimer(timer, &basenow);
                }
        }
-
+       /* Reevaluate the clock bases for the next expiry */
+       expires_next = __hrtimer_get_next_event(cpu_base);
        /*
         * Store the new expiry value so the migration code can verify
         * against it.
         */
        cpu_base->expires_next = expires_next;
+       cpu_base->in_hrtirq = 0;
        raw_spin_unlock(&cpu_base->lock);
 
        /* Reprogramming necessary ? */
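
The in_hrtirq handshake consulted by hrtimer_reprogram() above depends on a flag in struct hrtimer_cpu_base that this excerpt never shows being added; presumably a bitfield of this shape (an assumption):

	struct hrtimer_cpu_base {
		/* existing members unchanged */
		unsigned int	in_hrtirq : 1;	/* hrtimer_interrupt() is running */
	};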
index 28bf91c60a0b412d1c97911659c30d735ba40d78..4b585e0fdd22e16288f688baa1051395836461d5 100644 (file)
@@ -488,13 +488,13 @@ static void sync_cmos_clock(struct work_struct *work)
 
        getnstimeofday64(&now);
        if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
-               struct timespec adjust = timespec64_to_timespec(now);
+               struct timespec64 adjust = now;
 
                fail = -ENODEV;
                if (persistent_clock_is_local)
                        adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
-               fail = update_persistent_clock(adjust);
+               fail = update_persistent_clock(timespec64_to_timespec(adjust));
 #endif
 #ifdef CONFIG_RTC_SYSTOHC
                if (fail == -ENODEV)
index 6a931852082f83a0c9c139a0b34d98e3d1483119..b124af25980031f3346bfcf61f664f5129c49ece 100644 (file)
@@ -1659,24 +1659,24 @@ out:
 }
 
 /**
- * getboottime - Return the real time of system boot.
- * @ts:                pointer to the timespec to be set
+ * getboottime64 - Return the real time of system boot.
+ * @ts:                pointer to the timespec64 to be set
  *
- * Returns the wall-time of boot in a timespec.
+ * Returns the wall-time of boot in a timespec64.
  *
  * This is based on the wall_to_monotonic offset and the total suspend
  * time. Calls to settimeofday will affect the value returned (which
  * basically means that however wrong your real time clock is at boot time,
  * you get the right time here).
  */
-void getboottime(struct timespec *ts)
+void getboottime64(struct timespec64 *ts)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
        ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
 
-       *ts = ktime_to_timespec(t);
+       *ts = ktime_to_timespec64(t);
 }
-EXPORT_SYMBOL_GPL(getboottime);
+EXPORT_SYMBOL_GPL(getboottime64);
 
 unsigned long get_seconds(void)
 {
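
Existing callers of the 32-bit getboottime() presumably keep working through a thin inline wrapper over the new 64-bit primitive, something like (an assumption; the wrapper itself is not in this hunk):

	static inline void getboottime(struct timespec *ts)
	{
		struct timespec64 ts64;

		getboottime64(&ts64);
		*ts = timespec64_to_timespec(ts64);
	}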
index 4b9c114ee9de87d10edd154239c7cb18d237af00..6fa484de2ba1811ada052f451d1bc147e7241068 100644 (file)
@@ -261,7 +261,7 @@ void perf_trace_del(struct perf_event *p_event, int flags)
 }
 
 void *perf_trace_buf_prepare(int size, unsigned short type,
-                            struct pt_regs *regs, int *rctxp)
+                            struct pt_regs **regs, int *rctxp)
 {
        struct trace_entry *entry;
        unsigned long flags;
@@ -280,6 +280,8 @@ void *perf_trace_buf_prepare(int size, unsigned short type,
        if (*rctxp < 0)
                return NULL;
 
+       if (regs)
+               *regs = this_cpu_ptr(&__perf_regs[*rctxp]);
        raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);
 
        /* zero the dead bytes from align to not leak stack to user */
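
perf_trace_buf_prepare() now optionally hands back a per-recursion-context pt_regs buffer instead of consuming a caller-supplied one, which is why the kprobe, syscall and uprobe call sites below all pass NULL. A caller that does want regs would do roughly this (a sketch; perf_fetch_caller_regs() is the usual way to fill such a buffer):

	struct pt_regs *regs;
	void *entry;
	int rctx;

	entry = perf_trace_buf_prepare(size, type, &regs, &rctx);
	if (!entry)
		return;
	perf_fetch_caller_regs(regs);	/* populate the borrowed buffer */
	/* fill entry, then submit via perf_trace_buf_submit() */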
index 5edb518be3458a01096351f0562fb84ce5f9b231..296079ae658300123e157d3265fe9e0f5a352bcf 100644 (file)
@@ -1148,7 +1148,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
 
-       entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
+       entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
        if (!entry)
                return;
 
@@ -1179,7 +1179,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
        size = ALIGN(__size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);
 
-       entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
+       entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
        if (!entry)
                return;
 
index c6ee36fcbf9071a22e4b86d7ba4b3da3bd047a32..f97f6e3a676ce35b1606b58fc9da694e38d83204 100644 (file)
@@ -574,7 +574,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
        size -= sizeof(u32);
 
        rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
-                               sys_data->enter_event->event.type, regs, &rctx);
+                               sys_data->enter_event->event.type, NULL, &rctx);
        if (!rec)
                return;
 
@@ -647,7 +647,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
        size -= sizeof(u32);
 
        rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
-                               sys_data->exit_event->event.type, regs, &rctx);
+                               sys_data->exit_event->event.type, NULL, &rctx);
        if (!rec)
                return;
 
index 8520acc34b185f7cb05fcbc4a2a30685dce0ef13..b11441321e7a473a6e7086f28d791dcd44b8a6f3 100644 (file)
@@ -1111,7 +1111,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
        if (hlist_empty(head))
                goto out;
 
-       entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
+       entry = perf_trace_buf_prepare(size, call->event.type, NULL, &rctx);
        if (!entry)
                goto out;
 
index 5f2ce616c0462db9b9055528110268385b2b653e..a2ca213c71ca8d2e0a93051851477db8cacb5ac1 100644 (file)
@@ -1215,6 +1215,7 @@ config RCU_TORTURE_TEST
        tristate "torture tests for RCU"
        depends on DEBUG_KERNEL
        select TORTURE_TEST
+       select SRCU
        default n
        help
          This option provides a kernel module that runs torture tests
@@ -1257,7 +1258,7 @@ config RCU_CPU_STALL_TIMEOUT
 config RCU_CPU_STALL_INFO
        bool "Print additional diagnostics on RCU CPU stall"
        depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL
-       default n
+       default y
        help
          For each stalled CPU that is aware of the current RCU grace
          period, print out additional per-CPU diagnostic information
index 129775eb6de63a6155cea830df17b6c2476eb024..8b39e86dbab5ea2a1afad17f6a15368f846916d6 100644 (file)
@@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
 EXPORT_SYMBOL(csum_partial_copy);
 
 #ifndef csum_tcpudp_nofold
+static inline u32 from64to32(u64 x)
+{
+       /* add up 32-bit and 32-bit for 32+c bit */
+       x = (x & 0xffffffff) + (x >> 32);
+       /* add up carry.. */
+       x = (x & 0xffffffff) + (x >> 32);
+       return (u32)x;
+}
+
 __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                        unsigned short len,
                        unsigned short proto,
@@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 #else
        s += (proto + len) << 8;
 #endif
-       s += (s >> 32);
-       return (__force __wsum)s;
+       return (__force __wsum)from64to32(s);
 }
 EXPORT_SYMBOL(csum_tcpudp_nofold);
 #endif
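
The point of folding twice is that the first addition can itself carry out of bit 31; the old single "s += (s >> 32)" could lose that carry on large 64-bit sums. A standalone demonstration (plain userspace C, stdint types standing in for the kernel's u32/u64):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t from64to32(uint64_t x)
	{
		x = (x & 0xffffffff) + (x >> 32);	/* 32 bits + carry */
		x = (x & 0xffffffff) + (x >> 32);	/* fold the carry back in */
		return (uint32_t)x;
	}

	int main(void)
	{
		uint64_t s = 0xffffffffffffffffULL;

		/* Two folds: 0xffffffff + 0xffffffff = 0x1fffffffe,
		 * then 0xfffffffe + 1 = 0xffffffff. The old shift-add
		 * returned 0xfffffffe here, dropping the carry. */
		printf("%08x\n", (unsigned)from64to32(s));	/* ffffffff */
		return 0;
	}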
index 1d1ae6b078fdd9121abbd01409f01437bb67e1e8..4395b12869c832dba4a62747658f2238dd1257f7 100644 (file)
@@ -325,6 +325,7 @@ config VIRT_TO_BUS
 
 config MMU_NOTIFIER
        bool
+       select SRCU
 
 config KSM
        bool "Enable KSM for page merging"
index a900759cc8075fc8b0da9a37ebf6f93de34d8d10..8dd50ce6326fd50540b24fe6e93d12546b20594a 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -296,7 +296,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
                        return -ENOMEM;
                if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                        return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
-               if (ret & VM_FAULT_SIGBUS)
+               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                        return -EFAULT;
                BUG();
        }
@@ -571,7 +571,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
                        return -ENOMEM;
                if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                        return -EHWPOISON;
-               if (ret & VM_FAULT_SIGBUS)
+               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                        return -EFAULT;
                BUG();
        }
index d247efab5073abfaeb9c11e33fb87a2fbb8ebdb0..15647fb0394fabc54b10206bf35590aa69b5830c 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
                else
                        ret = VM_FAULT_WRITE;
                put_page(page);
-       } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
+       } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
        /*
         * We must loop because handle_mm_fault() may back out if there's
         * any difficulty e.g. if pte accessed bit gets updated concurrently.
index 683b4782019b2c32626645155bb8175f3bf3a4a5..2f6893c2f01b6c9649992910d902b94c49ae8ece 100644 (file)
@@ -5773,7 +5773,7 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
  * mem_cgroup_migrate - migrate a charge to another page
  * @oldpage: currently charged page
  * @newpage: page to transfer the charge to
- * @lrucare: both pages might be on the LRU already
+ * @lrucare: either or both pages might be on the LRU already
  *
  * Migrate the charge from @oldpage to @newpage.
  *
index 54f3a9b0095600749fda793d7e6067bd2c7f997d..2c3536cc6c6327c9c3e58eddf75c264ddaa11911 100644 (file)
@@ -2632,7 +2632,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        /* Check if we need to add a guard page to the stack */
        if (check_stack_guard_page(vma, address) < 0)
-               return VM_FAULT_SIGBUS;
+               return VM_FAULT_SIGSEGV;
 
        /* Use the zero-page for reads */
        if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
index b51eadf6d9528fa69ea80ad6d1c994763656e8da..28bd8c4dff6feb3b0194118781a4d1267d42bded 100644 (file)
@@ -59,6 +59,7 @@
 #endif
 
 void *high_memory;
+EXPORT_SYMBOL(high_memory);
 struct page *mem_map;
 unsigned long max_mapnr;
 unsigned long highest_memmap_pfn;
index ad83195521f2da08e136cc8674d3b37c54e3ab86..b264bda46e1be6601f35f0c2080eb00056a2435c 100644 (file)
@@ -199,7 +199,10 @@ int walk_page_range(unsigned long addr, unsigned long end,
                         */
                        if ((vma->vm_start <= addr) &&
                            (vma->vm_flags & VM_PFNMAP)) {
-                               next = vma->vm_end;
+                               if (walk->pte_hole)
+                                       err = walk->pte_hole(addr, next, walk);
+                               if (err)
+                                       break;
                                pgd = pgd_offset(walk->mm, next);
                                continue;
                        }
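
After this change a VM_PFNMAP region is reported through the walker's ->pte_hole callback rather than silently skipped. A walker that wants to account for such holes could pass a callback like this (hypothetical example built on this kernel's struct mm_walk):

	static int count_hole_pages(unsigned long addr, unsigned long end,
				    struct mm_walk *walk)
	{
		unsigned long *count = walk->private;

		*count += (end - addr) >> PAGE_SHIFT;	/* pages in the hole */
		return 0;	/* non-zero would abort walk_page_range() */
	}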
index 73ba1df7c8ba1bcf17f0ef2ee0930c13db56730e..993e6ba689ccd442aa33e42489e71f0e558d1943 100644 (file)
@@ -1013,7 +1013,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                 */
                oldpage = newpage;
        } else {
-               mem_cgroup_migrate(oldpage, newpage, false);
+               mem_cgroup_migrate(oldpage, newpage, true);
                lru_cache_add_anon(newpage);
                *pagep = newpage;
        }
index b0330aecbf974e10ec8f64d3e93b012be5d689be..3244aead09267dd77b0392a0aac7afa462593305 100644 (file)
@@ -265,22 +265,12 @@ out:
        data[NFT_REG_VERDICT].verdict = NF_DROP;
 }
 
-static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain)
+static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
+                                     const struct nft_expr *expr,
+                                     const struct nft_data **data)
 {
-       struct nft_base_chain *basechain;
-
-       if (chain->flags & NFT_BASE_CHAIN) {
-               basechain = nft_base_chain(chain);
-
-               switch (basechain->ops[0].hooknum) {
-               case NF_BR_PRE_ROUTING:
-               case NF_BR_LOCAL_IN:
-                       break;
-               default:
-                       return -EOPNOTSUPP;
-               }
-       }
-       return 0;
+       return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
+                                                   (1 << NF_BR_LOCAL_IN));
 }
 
 static int nft_reject_bridge_init(const struct nft_ctx *ctx,
@@ -290,7 +280,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx,
        struct nft_reject *priv = nft_expr_priv(expr);
        int icmp_code, err;
 
-       err = nft_reject_bridge_validate_hooks(ctx->chain);
+       err = nft_reject_bridge_validate(ctx, expr, NULL);
        if (err < 0)
                return err;
 
@@ -341,13 +331,6 @@ nla_put_failure:
        return -1;
 }
 
-static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
-                                     const struct nft_expr *expr,
-                                     const struct nft_data **data)
-{
-       return nft_reject_bridge_validate_hooks(ctx->chain);
-}
-
 static struct nft_expr_type nft_reject_bridge_type;
 static const struct nft_expr_ops nft_reject_bridge_ops = {
        .type           = &nft_reject_bridge_type,
index 4589ff67bfa95f2ab5ce6c6ccccd48f970a21807..67a4a36febd1a6bf554a36b4690272777cb008a5 100644 (file)
@@ -470,7 +470,6 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
        ASSERT_RTNL();
        caifdev = netdev_priv(dev);
        caif_netlink_parms(data, &caifdev->conn_req);
-       dev_net_set(caifdev->netdev, src_net);
 
        ret = register_netdevice(dev);
        if (ret)
index 171420e75b03e5b9a020e6d417b72efbed5d5aba..7fe82929f5094ee37a49dccd9df3a3e2b63a64c7 100644 (file)
@@ -2352,7 +2352,6 @@ EXPORT_SYMBOL(skb_checksum_help);
 
 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 {
-       unsigned int vlan_depth = skb->mac_len;
        __be16 type = skb->protocol;
 
        /* Tunnel gso handlers can set protocol to ethernet. */
@@ -2366,35 +2365,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
                type = eth->h_proto;
        }
 
-       /* if skb->protocol is 802.1Q/AD then the header should already be
-        * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
-        * ETH_HLEN otherwise
-        */
-       if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
-               if (vlan_depth) {
-                       if (WARN_ON(vlan_depth < VLAN_HLEN))
-                               return 0;
-                       vlan_depth -= VLAN_HLEN;
-               } else {
-                       vlan_depth = ETH_HLEN;
-               }
-               do {
-                       struct vlan_hdr *vh;
-
-                       if (unlikely(!pskb_may_pull(skb,
-                                                   vlan_depth + VLAN_HLEN)))
-                               return 0;
-
-                       vh = (struct vlan_hdr *)(skb->data + vlan_depth);
-                       type = vh->h_vlan_encapsulated_proto;
-                       vlan_depth += VLAN_HLEN;
-               } while (type == htons(ETH_P_8021Q) ||
-                        type == htons(ETH_P_8021AD));
-       }
-
-       *depth = vlan_depth;
-
-       return type;
+       return __vlan_get_protocol(skb, type, depth);
 }
 
 /**
@@ -5323,7 +5294,7 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
-void netdev_adjacent_add_links(struct net_device *dev)
+static void netdev_adjacent_add_links(struct net_device *dev)
 {
        struct netdev_adjacent *iter;
 
@@ -5348,7 +5319,7 @@ void netdev_adjacent_add_links(struct net_device *dev)
        }
 }
 
-void netdev_adjacent_del_links(struct net_device *dev)
+static void netdev_adjacent_del_links(struct net_device *dev)
 {
        struct netdev_adjacent *iter;
 
@@ -6656,7 +6627,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
        if (!queue)
                return NULL;
        netdev_init_one_queue(dev, queue, NULL);
-       queue->qdisc = &noop_qdisc;
+       RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
        queue->qdisc_sleeping = &noop_qdisc;
        rcu_assign_pointer(dev->ingress_queue, queue);
 #endif
index 9cf6fe9ddc0c99e189916dee672d16e6c4efe19a..446cbaf811857171c96a01e3529a790b77cf0b8f 100644 (file)
@@ -2895,12 +2895,16 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
                        goto errout;
        }
 
+       if (!skb->len)
+               goto errout;
+
        rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
        return 0;
 errout:
        WARN_ON(err == -EMSGSIZE);
        kfree_skb(skb);
-       rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+       if (err)
+               rtnl_set_sk_err(net, RTNLGRP_LINK, err);
        return err;
 }
 
index b50861b22b6bea036b1a99ddf141d7ed2d6cf6cd..c373c0708d9799b0f17a969412efde7623455dbd 100644 (file)
@@ -1506,23 +1506,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
 /*
  *     Generic function to send a packet as reply to another packet.
  *     Used to send some TCP resets/acks so far.
- *
- *     Use a fake percpu inet socket to avoid false sharing and contention.
  */
-static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
-       .sk = {
-               .__sk_common = {
-                       .skc_refcnt = ATOMIC_INIT(1),
-               },
-               .sk_wmem_alloc  = ATOMIC_INIT(1),
-               .sk_allocation  = GFP_ATOMIC,
-               .sk_flags       = (1UL << SOCK_USE_WRITE_QUEUE),
-       },
-       .pmtudisc       = IP_PMTUDISC_WANT,
-       .uc_ttl         = -1,
-};
-
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                           const struct ip_options *sopt,
                           __be32 daddr, __be32 saddr,
                           const struct ip_reply_arg *arg,
@@ -1532,9 +1517,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
        struct ipcm_cookie ipc;
        struct flowi4 fl4;
        struct rtable *rt = skb_rtable(skb);
+       struct net *net = sock_net(sk);
        struct sk_buff *nskb;
-       struct sock *sk;
-       struct inet_sock *inet;
        int err;
 
        if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
@@ -1565,15 +1549,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
        if (IS_ERR(rt))
                return;
 
-       inet = &get_cpu_var(unicast_sock);
+       inet_sk(sk)->tos = arg->tos;
 
-       inet->tos = arg->tos;
-       sk = &inet->sk;
        sk->sk_priority = skb->priority;
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
-       sock_net_set(sk, net);
-       __skb_queue_head_init(&sk->sk_write_queue);
        sk->sk_sndbuf = sysctl_wmem_default;
        err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
                             len, 0, &ipc, &rt, MSG_DONTWAIT);
@@ -1589,13 +1569,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
                          arg->csumoffset) = csum_fold(csum_add(nskb->csum,
                                                                arg->csum));
                nskb->ip_summed = CHECKSUM_NONE;
-               skb_orphan(nskb);
                skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
                ip_push_pending_frames(sk, &fl4);
        }
 out:
-       put_cpu_var(unicast_sock);
-
        ip_rt_put(rt);
 }
 
index d58dd0ec3e5302c2862c8fe53bfd43ca05a3e669..52e1f2bf0ca2ff2fe3d85086465068da4bbb43f2 100644 (file)
@@ -966,6 +966,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
        if (dst->dev->mtu < mtu)
                return;
 
+       if (rt->rt_pmtu && rt->rt_pmtu < mtu)
+               return;
+
        if (mtu < ip_rt_min_pmtu)
                mtu = ip_rt_min_pmtu;
 
index bb395d46a3898136afe615c73ac311fd2832f6f1..c037644eafb7caadcb196b1c8b676bbc42abdb93 100644 (file)
@@ -150,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                tcp_slow_start(tp, acked);
        else {
                bictcp_update(ca, tp->snd_cwnd);
-               tcp_cong_avoid_ai(tp, ca->cnt);
+               tcp_cong_avoid_ai(tp, ca->cnt, 1);
        }
 }
 
index 27ead0dd16bc7e444e96781ff01b10c444678396..8670e68e2ce67a9c6a8d8185d31ea8a49f81f183 100644 (file)
@@ -291,26 +291,32 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
  * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
  * returns the leftover acks to adjust cwnd in congestion avoidance mode.
  */
-void tcp_slow_start(struct tcp_sock *tp, u32 acked)
+u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
        u32 cwnd = tp->snd_cwnd + acked;
 
        if (cwnd > tp->snd_ssthresh)
                cwnd = tp->snd_ssthresh + 1;
+       acked -= cwnd - tp->snd_cwnd;
        tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
+
+       return acked;
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
-/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
-void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
+/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
+ * for every packet that was ACKed.
+ */
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
 {
+       tp->snd_cwnd_cnt += acked;
        if (tp->snd_cwnd_cnt >= w) {
-               if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-                       tp->snd_cwnd++;
-               tp->snd_cwnd_cnt = 0;
-       } else {
-               tp->snd_cwnd_cnt++;
+               u32 delta = tp->snd_cwnd_cnt / w;
+
+               tp->snd_cwnd_cnt -= delta * w;
+               tp->snd_cwnd += delta;
        }
+       tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
 
@@ -329,11 +335,13 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                return;
 
        /* In "safe" area, increase. */
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
-               tcp_slow_start(tp, acked);
+       if (tp->snd_cwnd <= tp->snd_ssthresh) {
+               acked = tcp_slow_start(tp, acked);
+               if (!acked)
+                       return;
+       }
        /* In dangerous area, increase slowly. */
-       else
-               tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+       tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
 }
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
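
The reworked helpers let byte-counting stretch ACKs first burn through slow start and then feed the leftover ACK count into congestion avoidance, where cwnd can now grow by whole multiples per call. A standalone model of the new tcp_cong_avoid_ai() arithmetic (userspace C; field names mirror the kernel's tcp_sock):

	#include <stdint.h>
	#include <stdio.h>

	struct tp { uint32_t snd_cwnd, snd_cwnd_cnt, snd_cwnd_clamp; };

	static void cong_avoid_ai(struct tp *tp, uint32_t w, uint32_t acked)
	{
		tp->snd_cwnd_cnt += acked;	/* credit every ACKed packet */
		if (tp->snd_cwnd_cnt >= w) {
			uint32_t delta = tp->snd_cwnd_cnt / w;

			tp->snd_cwnd_cnt -= delta * w;
			tp->snd_cwnd += delta;	/* may grow by more than 1 */
		}
		if (tp->snd_cwnd > tp->snd_cwnd_clamp)
			tp->snd_cwnd = tp->snd_cwnd_clamp;
	}

	int main(void)
	{
		struct tp tp = { .snd_cwnd = 10, .snd_cwnd_cnt = 8,
				 .snd_cwnd_clamp = 100 };

		/* One stretch ACK covering 25 packets, w == cwnd == 10:
		 * 8 + 25 = 33 credits -> cwnd += 3, 3 credits left over. */
		cong_avoid_ai(&tp, 10, 25);
		printf("cwnd=%u cnt=%u\n", tp.snd_cwnd, tp.snd_cwnd_cnt);
		return 0;
	}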
 
index 6b6002416a73950d493661ea1459870f49917efc..4b276d1ed9807057986bd3b050e2e901bf1afec0 100644 (file)
@@ -93,9 +93,7 @@ struct bictcp {
        u32     epoch_start;    /* beginning of an epoch */
        u32     ack_cnt;        /* number of acks */
        u32     tcp_cwnd;       /* estimated tcp cwnd */
-#define ACK_RATIO_SHIFT        4
-#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)
-       u16     delayed_ack;    /* estimate the ratio of Packets/ACKs << 4 */
+       u16     unused;
        u8      sample_cnt;     /* number of samples to decide curr_rtt */
        u8      found;          /* the exit point is found? */
        u32     round_start;    /* beginning of each round */
@@ -114,7 +112,6 @@ static inline void bictcp_reset(struct bictcp *ca)
        ca->bic_K = 0;
        ca->delay_min = 0;
        ca->epoch_start = 0;
-       ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
        ca->ack_cnt = 0;
        ca->tcp_cwnd = 0;
        ca->found = 0;
@@ -205,23 +202,30 @@ static u32 cubic_root(u64 a)
 /*
  * Compute congestion window to use.
  */
-static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
+static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
 {
        u32 delta, bic_target, max_cnt;
        u64 offs, t;
 
-       ca->ack_cnt++;  /* count the number of ACKs */
+       ca->ack_cnt += acked;   /* count the number of ACKed packets */
 
        if (ca->last_cwnd == cwnd &&
            (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
                return;
 
+       /* The CUBIC function can update ca->cnt at most once per jiffy.
+        * On all cwnd reduction events, ca->epoch_start is set to 0,
+        * which will force a recalculation of ca->cnt.
+        */
+       if (ca->epoch_start && tcp_time_stamp == ca->last_time)
+               goto tcp_friendliness;
+
        ca->last_cwnd = cwnd;
        ca->last_time = tcp_time_stamp;
 
        if (ca->epoch_start == 0) {
                ca->epoch_start = tcp_time_stamp;       /* record beginning */
-               ca->ack_cnt = 1;                        /* start counting */
+               ca->ack_cnt = acked;                    /* start counting */
+               ca->tcp_cwnd = cwnd;                    /* sync with cubic */
 
                if (ca->last_max_cwnd <= cwnd) {
@@ -283,6 +287,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
        if (ca->last_max_cwnd == 0 && ca->cnt > 20)
                ca->cnt = 20;   /* increase cwnd 5% per RTT */
 
+tcp_friendliness:
        /* TCP Friendly */
        if (tcp_friendliness) {
                u32 scale = beta_scale;
@@ -301,7 +306,6 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
                }
        }
 
-       ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
        if (ca->cnt == 0)                       /* cannot be zero */
                ca->cnt = 1;
 }
@@ -317,11 +321,12 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (tp->snd_cwnd <= tp->snd_ssthresh) {
                if (hystart && after(ack, ca->end_seq))
                        bictcp_hystart_reset(sk);
-               tcp_slow_start(tp, acked);
-       } else {
-               bictcp_update(ca, tp->snd_cwnd);
-               tcp_cong_avoid_ai(tp, ca->cnt);
+               acked = tcp_slow_start(tp, acked);
+               if (!acked)
+                       return;
        }
+       bictcp_update(ca, tp->snd_cwnd, acked);
+       tcp_cong_avoid_ai(tp, ca->cnt, acked);
 }
 
 static u32 bictcp_recalc_ssthresh(struct sock *sk)
@@ -411,20 +416,10 @@ static void hystart_update(struct sock *sk, u32 delay)
  */
 static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 {
-       const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
        struct bictcp *ca = inet_csk_ca(sk);
        u32 delay;
 
-       if (icsk->icsk_ca_state == TCP_CA_Open) {
-               u32 ratio = ca->delayed_ack;
-
-               ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
-               ratio += cnt;
-
-               ca->delayed_ack = clamp(ratio, 1U, ACK_RATIO_LIMIT);
-       }
-
        /* Some calls are for duplicates without timestamps */
        if (rtt_us < 0)
                return;
index a3f72d7fc06c07c43e1c00b67970eaee074e4593..d22f54482babf8bbd41972596d01326e4b06f060 100644 (file)
@@ -683,7 +683,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
                arg.bound_dev_if = sk->sk_bound_dev_if;
 
        arg.tos = ip_hdr(skb)->tos;
-       ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+       ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+                             skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);
 
@@ -767,7 +768,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
        if (oif)
                arg.bound_dev_if = oif;
        arg.tos = tos;
-       ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+       ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+                             skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);
 
@@ -2428,14 +2430,39 @@ struct proto tcp_prot = {
 };
 EXPORT_SYMBOL(tcp_prot);
 
+static void __net_exit tcp_sk_exit(struct net *net)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
+       free_percpu(net->ipv4.tcp_sk);
+}
+
 static int __net_init tcp_sk_init(struct net *net)
 {
+       int res, cpu;
+
+       net->ipv4.tcp_sk = alloc_percpu(struct sock *);
+       if (!net->ipv4.tcp_sk)
+               return -ENOMEM;
+
+       for_each_possible_cpu(cpu) {
+               struct sock *sk;
+
+               res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
+                                          IPPROTO_TCP, net);
+               if (res)
+                       goto fail;
+               *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
+       }
        net->ipv4.sysctl_tcp_ecn = 2;
        return 0;
-}
 
-static void __net_exit tcp_sk_exit(struct net *net)
-{
+fail:
+       tcp_sk_exit(net);
+
+       return res;
 }
 
 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
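
The per-cpu control sockets allocated in tcp_sk_init() need a home in the per-namespace ipv4 state; the companion change (not shown in this excerpt) presumably adds a member of roughly this shape to struct netns_ipv4:

	struct netns_ipv4 {
		/* existing members unchanged */
		struct sock * __percpu	*tcp_sk;   /* per-cpu reply sockets */
	};

This is what lets the ip_output.c hunk above drop the shared fake unicast_sock: every CPU now replies through its own kernel socket, so ip_send_unicast_reply() can write the tos, priority and bound-device fields without cross-CPU contention.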
index 6824afb65d9335532fe2bf61edce82cab8c3fd9c..333bcb2415ffca51e06f3042ae3d94b8e21c0725 100644 (file)
@@ -25,7 +25,8 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (tp->snd_cwnd <= tp->snd_ssthresh)
                tcp_slow_start(tp, acked);
        else
-               tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
+               tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+                                 1);
 }
 
 static u32 tcp_scalable_ssthresh(struct sock *sk)
index a4d2d2d88dcae7c00cf4db83d8b13ce6b143b3b4..112151eeee45bff0c37ac92d78d165ba92bd4d0a 100644 (file)
@@ -159,7 +159,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                                /* In the "non-congestive state", increase cwnd
                                 *  every rtt.
                                 */
-                               tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+                               tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
                        } else {
                                /* In the "congestive state", increase cwnd
                                 * every other rtt.
index cd72732185989b41d1a3f9167eacbf44c235cc01..17d35662930d054fb6fb379a2cddb9600e6b75e3 100644 (file)
@@ -92,7 +92,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
        } else {
                /* Reno */
-               tcp_cong_avoid_ai(tp, tp->snd_cwnd);
+               tcp_cong_avoid_ai(tp, tp->snd_cwnd, 1);
        }
 
        /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.
index 13cda4c6313bad7ecca5b20b5be7c8ac28bcf1ab..01ccc28a686f8cd6bc2b6c69b6d48548b627fbdc 100644 (file)
@@ -417,7 +417,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                if (code == ICMPV6_HDR_FIELD)
                        teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
 
-               if (teli && teli == info - 2) {
+               if (teli && teli == be32_to_cpu(info) - 2) {
                        tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
                        if (tel->encap_limit == 0) {
                                net_warn_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
@@ -429,7 +429,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                }
                break;
        case ICMPV6_PKT_TOOBIG:
-               mtu = info - offset;
+               mtu = be32_to_cpu(info) - offset;
                if (mtu < IPV6_MIN_MTU)
                        mtu = IPV6_MIN_MTU;
                t->dev->mtu = mtu;
index ce69a12ae48c29276871dacb30eebea086b291a3..d28f2a2efb32e4c06eadc45dc8167bb2cb766d32 100644 (file)
@@ -537,20 +537,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        skb_copy_secmark(to, from);
 }
 
-static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
-{
-       static u32 ip6_idents_hashrnd __read_mostly;
-       u32 hash, id;
-
-       net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
-
-       hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
-       hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
-
-       id = ip_idents_reserve(hash, 1);
-       fhdr->identification = htonl(id);
-}
-
 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 {
        struct sk_buff *frag;
index 97f41a3e68d98b89d6b8f643780f8a883bd46f55..54520a0bd5e3b5feac3d4bc4221fac77ab990031 100644 (file)
@@ -9,6 +9,24 @@
 #include <net/addrconf.h>
 #include <net/secure_seq.h>
 
+u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, struct in6_addr *src)
+{
+       u32 hash, id;
+
+       hash = __ipv6_addr_jhash(dst, hashrnd);
+       hash = __ipv6_addr_jhash(src, hash);
+
+       /* Treat an id of 0 as unset; if we get 0 back from ip_idents_reserve,
+        * set the high order bit instead, thus minimizing possible future
+        * collisions.
+        */
+       id = ip_idents_reserve(hash, 1);
+       if (unlikely(!id))
+               id = 1 << 31;
+
+       return id;
+}
+
 /* This function exists only for tap drivers that must support broken
  * clients requesting UFO without specifying an IPv6 fragment ID.
  *
@@ -22,7 +40,7 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
        static u32 ip6_proxy_idents_hashrnd __read_mostly;
        struct in6_addr buf[2];
        struct in6_addr *addrs;
-       u32 hash, id;
+       u32 id;
 
        addrs = skb_header_pointer(skb,
                                   skb_network_offset(skb) +
@@ -34,14 +52,25 @@ void ipv6_proxy_select_ident(struct sk_buff *skb)
        net_get_random_once(&ip6_proxy_idents_hashrnd,
                            sizeof(ip6_proxy_idents_hashrnd));
 
-       hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
-       hash = __ipv6_addr_jhash(&addrs[0], hash);
-
-       id = ip_idents_reserve(hash, 1);
-       skb_shinfo(skb)->ip6_frag_id = htonl(id);
+       id = __ipv6_select_ident(ip6_proxy_idents_hashrnd,
+                                &addrs[1], &addrs[0]);
+       skb_shinfo(skb)->ip6_frag_id = id;
 }
 EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
 
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+{
+       static u32 ip6_idents_hashrnd __read_mostly;
+       u32 id;
+
+       net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
+
+       id = __ipv6_select_ident(ip6_idents_hashrnd, &rt->rt6i_dst.addr,
+                                &rt->rt6i_src.addr);
+       fhdr->identification = htonl(id);
+}
+EXPORT_SYMBOL(ipv6_select_ident);
+
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 {
        u16 offset = sizeof(struct ipv6hdr);
index 213546bd6d5de5eeccfabe45c6dd4d575b475266..cdbfe5af6187c5c604462b70b26a268f4041e289 100644 (file)
@@ -1506,12 +1506,12 @@ static bool ipip6_netlink_encap_parms(struct nlattr *data[],
 
        if (data[IFLA_IPTUN_ENCAP_SPORT]) {
                ret = true;
-               ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]);
+               ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
        }
 
        if (data[IFLA_IPTUN_ENCAP_DPORT]) {
                ret = true;
-               ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]);
+               ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
        }
 
        return ret;
@@ -1707,9 +1707,9 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
        if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
                        tunnel->encap.type) ||
-           nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT,
+           nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT,
                        tunnel->encap.sport) ||
-           nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
+           nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT,
                        tunnel->encap.dport) ||
            nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
                        tunnel->encap.flags))
index b6aa8ed182579614d3738f107e839eb2e82e5371..a56276996b72b3f5d43121d6bf93422a58eefe62 100644 (file)
@@ -52,6 +52,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 
                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
 
+               /* Set the IPv6 fragment id if not set yet */
+               if (!skb_shinfo(skb)->ip6_frag_id)
+                       ipv6_proxy_select_ident(skb);
+
                segs = NULL;
                goto out;
        }
@@ -108,7 +112,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
                fptr->nexthdr = nexthdr;
                fptr->reserved = 0;
-               fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+               if (skb_shinfo(skb)->ip6_frag_id)
+                       fptr->identification = skb_shinfo(skb)->ip6_frag_id;
+               else
+                       ipv6_select_ident(fptr,
+                                         (struct rt6_info *)skb_dst(skb));
 
                /* Fragment the skb. ipv6 header and the remaining fields of the
                 * fragment header are updated in ipv6_gso_segment()
index 990decba1fe418e36e59a1f081fcf0e47188da29..b87ca32efa0b4e6edc7f251c2c32c4ba3b55659c 100644 (file)
@@ -659,16 +659,24 @@ static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
        return err;
 }
 
-static int ip_vs_route_me_harder(int af, struct sk_buff *skb)
+static int ip_vs_route_me_harder(int af, struct sk_buff *skb,
+                                unsigned int hooknum)
 {
+       if (!sysctl_snat_reroute(skb))
+               return 0;
+       /* Reroute replies only to remote clients (FORWARD and LOCAL_OUT) */
+       if (NF_INET_LOCAL_IN == hooknum)
+               return 0;
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6) {
-               if (sysctl_snat_reroute(skb) && ip6_route_me_harder(skb) != 0)
+               struct dst_entry *dst = skb_dst(skb);
+
+               if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
+                   ip6_route_me_harder(skb) != 0)
                        return 1;
        } else
 #endif
-               if ((sysctl_snat_reroute(skb) ||
-                    skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
+               if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
                    ip_route_me_harder(skb, RTN_LOCAL) != 0)
                        return 1;
 
@@ -791,7 +799,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
                                union nf_inet_addr *snet,
                                __u8 protocol, struct ip_vs_conn *cp,
                                struct ip_vs_protocol *pp,
-                               unsigned int offset, unsigned int ihl)
+                               unsigned int offset, unsigned int ihl,
+                               unsigned int hooknum)
 {
        unsigned int verdict = NF_DROP;
 
@@ -821,7 +830,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
 #endif
                ip_vs_nat_icmp(skb, pp, cp, 1);
 
-       if (ip_vs_route_me_harder(af, skb))
+       if (ip_vs_route_me_harder(af, skb, hooknum))
                goto out;
 
        /* do the statistics and put it back */
@@ -916,7 +925,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
 
        snet.ip = iph->saddr;
        return handle_response_icmp(AF_INET, skb, &snet, cih->protocol, cp,
-                                   pp, ciph.len, ihl);
+                                   pp, ciph.len, ihl, hooknum);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -981,7 +990,8 @@ static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related,
        snet.in6 = ciph.saddr.in6;
        writable = ciph.len;
        return handle_response_icmp(AF_INET6, skb, &snet, ciph.protocol, cp,
-                                   pp, writable, sizeof(struct ipv6hdr));
+                                   pp, writable, sizeof(struct ipv6hdr),
+                                   hooknum);
 }
 #endif
 
@@ -1040,7 +1050,8 @@ static inline bool is_new_conn(const struct sk_buff *skb,
  */
 static unsigned int
 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
-               struct ip_vs_conn *cp, struct ip_vs_iphdr *iph)
+               struct ip_vs_conn *cp, struct ip_vs_iphdr *iph,
+               unsigned int hooknum)
 {
        struct ip_vs_protocol *pp = pd->pp;
 
@@ -1078,7 +1089,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
         * if it came from this machine itself.  So re-compute
         * the routing information.
         */
-       if (ip_vs_route_me_harder(af, skb))
+       if (ip_vs_route_me_harder(af, skb, hooknum))
                goto drop;
 
        IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
@@ -1181,7 +1192,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
        cp = pp->conn_out_get(af, skb, &iph, 0);
 
        if (likely(cp))
-               return handle_response(af, skb, pd, cp, &iph);
+               return handle_response(af, skb, pd, cp, &iph, hooknum);
        if (sysctl_nat_icmp_send(net) &&
            (pp->protocol == IPPROTO_TCP ||
             pp->protocol == IPPROTO_UDP ||
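
Threading hooknum through makes the reroute decision hook-aware: replies delivered to the local machine (LOCAL_IN) are never rerouted, only replies heading back to remote clients from FORWARD or LOCAL_OUT are. A compilable sketch of just that gate (the enum mirrors the kernel's NF_INET_* numbering; the sysctl is reduced to a flag):

    #include <stdio.h>

    enum { PRE_ROUTING, LOCAL_IN, FORWARD, LOCAL_OUT, POST_ROUTING };

    static int should_reroute(int snat_reroute, unsigned int hooknum)
    {
            if (!snat_reroute)
                    return 0;
            if (hooknum == LOCAL_IN)        /* reply stays on this host */
                    return 0;
            return 1;                       /* FORWARD / LOCAL_OUT: re-route */
    }

    int main(void)
    {
            printf("LOCAL_IN:  %d\n", should_reroute(1, LOCAL_IN));
            printf("FORWARD:   %d\n", should_reroute(1, FORWARD));
            printf("LOCAL_OUT: %d\n", should_reroute(1, LOCAL_OUT));
            return 0;
    }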
index 3b3ddb4fb9ee122a5b6d3a39450be38a64d6f614..1ff04bcd487154ecea5b6fc4514b799f162eeddb 100644 (file)
@@ -1134,9 +1134,11 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
        /* Restore old counters on this cpu, no problem. Per-cpu statistics
         * are not exposed to userspace.
         */
+       preempt_disable();
        stats = this_cpu_ptr(newstats);
        stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
        stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+       preempt_enable();
 
        return newstats;
 }
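
A this_cpu_ptr() result is only trustworthy while the task cannot migrate, which is what the added preempt_disable()/preempt_enable() pair guarantees; without it the pointer may name a CPU the task is no longer running on, racing with that CPU's own updates and tripping the smp_processor_id() debug check. There is no exact userspace equivalent, but this Linux/glibc sketch shows the underlying hazard of caching a "current CPU" value across a possible migration point:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            int before = sched_getcpu();    /* like computing this_cpu_ptr() */
            /* ... without preemption disabled, a reschedule can land here ... */
            int after = sched_getcpu();

            printf("before=%d after=%d%s\n", before, after,
                   before == after ? "" : "  (migrated)");
            return 0;
    }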
@@ -1262,8 +1264,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
                trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN,
                                        sizeof(struct nft_trans_chain));
-               if (trans == NULL)
+               if (trans == NULL) {
+                       free_percpu(stats);
                        return -ENOMEM;
+               }
 
                nft_trans_chain_stats(trans) = stats;
                nft_trans_chain_update(trans) = true;
@@ -1319,8 +1323,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                hookfn = type->hooks[hooknum];
 
                basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
-               if (basechain == NULL)
+               if (basechain == NULL) {
+                       module_put(type->owner);
                        return -ENOMEM;
+               }
 
                if (nla[NFTA_CHAIN_COUNTERS]) {
                        stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
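
The two hunks above plug the same class of leak: every resource acquired before a failing step (the per-cpu stats, the module reference) must be released on that step's error path. A minimal sketch of the unwind rule, with plain malloc/free standing in for free_percpu() and module_put():

    #include <stdlib.h>

    struct res { int dummy; };

    static int setup(void)
    {
            struct res *stats = malloc(sizeof(*stats));
            if (!stats)
                    return -1;

            struct res *trans = malloc(sizeof(*trans));
            if (!trans) {
                    free(stats);    /* the release the patch adds */
                    return -1;
            }

            free(trans);
            free(stats);
            return 0;
    }

    int main(void) { return setup(); }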
@@ -3753,6 +3759,24 @@ int nft_chain_validate_dependency(const struct nft_chain *chain,
 }
 EXPORT_SYMBOL_GPL(nft_chain_validate_dependency);
 
+int nft_chain_validate_hooks(const struct nft_chain *chain,
+                            unsigned int hook_flags)
+{
+       struct nft_base_chain *basechain;
+
+       if (chain->flags & NFT_BASE_CHAIN) {
+               basechain = nft_base_chain(chain);
+
+               if ((1 << basechain->ops[0].hooknum) & hook_flags)
+                       return 0;
+
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_chain_validate_hooks);
+
 /*
  * Loop detection - walk through the ruleset beginning at the destination chain
  * of a new jump until either the source chain is reached (loop) or all
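
nft_chain_validate_hooks() lets NAT-ish expressions restrict which base-chain hooks they may be attached to, expressed as a bitmask of allowed hook numbers; non-base chains always pass. A compilable sketch of the bit test (hook numbering mirrors the kernel's NF_INET_* enum):

    #include <stdio.h>

    enum { PRE_ROUTING, LOCAL_IN, FORWARD, LOCAL_OUT, POST_ROUTING };

    static int validate_hooks(unsigned int chain_hook, unsigned int hook_flags)
    {
            return ((1u << chain_hook) & hook_flags) ? 0 : -1;  /* -EOPNOTSUPP */
    }

    int main(void)
    {
            unsigned int masq_ok = 1u << POST_ROUTING;  /* as in nft_masq_validate() */

            printf("postrouting: %d\n", validate_hooks(POST_ROUTING, masq_ok));
            printf("prerouting:  %d\n", validate_hooks(PRE_ROUTING, masq_ok));
            return 0;
    }

The nft_masq, nft_nat and nft_redir hunks that follow all hoist their dependency checks into exported *_validate() helpers built on this, so the same rules run both at expression init time and whenever the ruleset is re-validated.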
index d1ffd5eb3a9b5b86495b53adb5b8bc27c17921df..9aea747b43eab7b91b3b458cd0a5b89f2d9e6927 100644 (file)
@@ -21,6 +21,21 @@ const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_masq_policy);
 
+int nft_masq_validate(const struct nft_ctx *ctx,
+                     const struct nft_expr *expr,
+                     const struct nft_data **data)
+{
+       int err;
+
+       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+       if (err < 0)
+               return err;
+
+       return nft_chain_validate_hooks(ctx->chain,
+                                       (1 << NF_INET_POST_ROUTING));
+}
+EXPORT_SYMBOL_GPL(nft_masq_validate);
+
 int nft_masq_init(const struct nft_ctx *ctx,
                  const struct nft_expr *expr,
                  const struct nlattr * const tb[])
@@ -28,8 +43,8 @@ int nft_masq_init(const struct nft_ctx *ctx,
        struct nft_masq *priv = nft_expr_priv(expr);
        int err;
 
-       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-       if (err < 0)
+       err = nft_masq_validate(ctx, expr, NULL);
+       if (err)
                return err;
 
        if (tb[NFTA_MASQ_FLAGS] == NULL)
@@ -60,12 +75,5 @@ nla_put_failure:
 }
 EXPORT_SYMBOL_GPL(nft_masq_dump);
 
-int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
-                     const struct nft_data **data)
-{
-       return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-EXPORT_SYMBOL_GPL(nft_masq_validate);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
index aff54fb1c8a09fdb99fd4caab0959a80f28ffc37..a0837c6c9283dc90b043750ffe1d7217c95ab239 100644 (file)
@@ -88,17 +88,40 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
        [NFTA_NAT_FLAGS]         = { .type = NLA_U32 },
 };
 
-static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
-                       const struct nlattr * const tb[])
+static int nft_nat_validate(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr,
+                           const struct nft_data **data)
 {
        struct nft_nat *priv = nft_expr_priv(expr);
-       u32 family;
        int err;
 
        err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
        if (err < 0)
                return err;
 
+       switch (priv->type) {
+       case NFT_NAT_SNAT:
+               err = nft_chain_validate_hooks(ctx->chain,
+                                              (1 << NF_INET_POST_ROUTING) |
+                                              (1 << NF_INET_LOCAL_IN));
+               break;
+       case NFT_NAT_DNAT:
+               err = nft_chain_validate_hooks(ctx->chain,
+                                              (1 << NF_INET_PRE_ROUTING) |
+                                              (1 << NF_INET_LOCAL_OUT));
+               break;
+       }
+
+       return err;
+}
+
+static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                       const struct nlattr * const tb[])
+{
+       struct nft_nat *priv = nft_expr_priv(expr);
+       u32 family;
+       int err;
+
        if (tb[NFTA_NAT_TYPE] == NULL ||
            (tb[NFTA_NAT_REG_ADDR_MIN] == NULL &&
             tb[NFTA_NAT_REG_PROTO_MIN] == NULL))
@@ -115,6 +138,10 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
                return -EINVAL;
        }
 
+       err = nft_nat_validate(ctx, expr, NULL);
+       if (err < 0)
+               return err;
+
        if (tb[NFTA_NAT_FAMILY] == NULL)
                return -EINVAL;
 
@@ -219,13 +246,6 @@ nla_put_failure:
        return -1;
 }
 
-static int nft_nat_validate(const struct nft_ctx *ctx,
-                           const struct nft_expr *expr,
-                           const struct nft_data **data)
-{
-       return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-
 static struct nft_expr_type nft_nat_type;
 static const struct nft_expr_ops nft_nat_ops = {
        .type           = &nft_nat_type,
index 9e8093f283113117ac7618c742624742e618ed3a..d7e9e93a4e90f498f7a002c33840c928a3ab17e2 100644 (file)
@@ -23,6 +23,22 @@ const struct nla_policy nft_redir_policy[NFTA_REDIR_MAX + 1] = {
 };
 EXPORT_SYMBOL_GPL(nft_redir_policy);
 
+int nft_redir_validate(const struct nft_ctx *ctx,
+                      const struct nft_expr *expr,
+                      const struct nft_data **data)
+{
+       int err;
+
+       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+       if (err < 0)
+               return err;
+
+       return nft_chain_validate_hooks(ctx->chain,
+                                       (1 << NF_INET_PRE_ROUTING) |
+                                       (1 << NF_INET_LOCAL_OUT));
+}
+EXPORT_SYMBOL_GPL(nft_redir_validate);
+
 int nft_redir_init(const struct nft_ctx *ctx,
                   const struct nft_expr *expr,
                   const struct nlattr * const tb[])
@@ -30,7 +46,7 @@ int nft_redir_init(const struct nft_ctx *ctx,
        struct nft_redir *priv = nft_expr_priv(expr);
        int err;
 
-       err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
+       err = nft_redir_validate(ctx, expr, NULL);
        if (err < 0)
                return err;
 
@@ -88,12 +104,5 @@ nla_put_failure:
 }
 EXPORT_SYMBOL_GPL(nft_redir_dump);
 
-int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
-                      const struct nft_data **data)
-{
-       return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
-}
-EXPORT_SYMBOL_GPL(nft_redir_validate);
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
index 02fdde28dada498c1715b18ffa166d72ac6807d6..75532efa51cd6d54389366b0f4cc7a4de34e0fbc 100644 (file)
@@ -1438,7 +1438,7 @@ static void netlink_undo_bind(int group, long unsigned int groups,
 
        for (undo = 0; undo < group; undo++)
                if (test_bit(undo, &groups))
-                       nlk->netlink_unbind(sock_net(sk), undo);
+                       nlk->netlink_unbind(sock_net(sk), undo + 1);
 }
 
 static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1476,7 +1476,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                for (group = 0; group < nlk->ngroups; group++) {
                        if (!test_bit(group, &groups))
                                continue;
-                       err = nlk->netlink_bind(net, group);
+                       err = nlk->netlink_bind(net, group + 1);
                        if (!err)
                                continue;
                        netlink_undo_bind(group, groups, sk);
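
Netlink multicast groups are numbered from 1, while the bitmask that tracks subscriptions is indexed from 0; the missing "+ 1" meant the per-protocol bind/unbind callbacks were told about the wrong group. A sketch of the mapping:

    #include <stdio.h>

    int main(void)
    {
            unsigned long groups = (1UL << 0) | (1UL << 2);  /* groups 1 and 3 */
            unsigned int bit;

            for (bit = 0; bit < 8 * sizeof(groups); bit++) {
                    if (groups & (1UL << bit))
                            printf("bit %u -> group %u\n", bit, bit + 1);
            }
            return 0;
    }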
index c3b0cd43eb56689e395581c4757402bad531e271..c173f69e1479bfaf643b9e5c69f4c9a151b18c67 100644 (file)
@@ -71,14 +71,14 @@ static struct ctl_table rds_sysctl_rds_table[] = {
        {
                .procname       = "max_unacked_packets",
                .data           = &rds_sysctl_max_unacked_packets,
-               .maxlen         = sizeof(unsigned long),
+               .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
        {
                .procname       = "max_unacked_bytes",
                .data           = &rds_sysctl_max_unacked_bytes,
-               .maxlen         = sizeof(unsigned long),
+               .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
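
proc_dointvec treats the buffer as an array of maxlen / sizeof(int) ints, so declaring maxlen as sizeof(unsigned long) for what the handler parses as ints reads and writes past the variable on 64-bit hosts. A sketch of the size mismatch the patch removes (assuming the backing sysctl variables are ints, which is what proc_dointvec requires):

    #include <stdio.h>

    int main(void)
    {
            printf("sizeof(int)           = %zu\n", sizeof(int));
            printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
            printf("ints parsed with the old maxlen: %zu\n",
                   sizeof(unsigned long) / sizeof(int));
            return 0;
    }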
index aad6a679fb135e9c6492a1b1cd3c1b638d823453..baef987fe2c036ae61f7108455ce1d828ec40e6c 100644 (file)
@@ -556,8 +556,9 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
 }
 EXPORT_SYMBOL(tcf_exts_change);
 
-#define tcf_exts_first_act(ext) \
-               list_first_entry(&(exts)->actions, struct tc_action, list)
+#define tcf_exts_first_act(ext)                                        \
+       list_first_entry_or_null(&(exts)->actions,              \
+                                struct tc_action, list)
 
 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
 {
@@ -603,7 +604,7 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
        struct tc_action *a = tcf_exts_first_act(exts);
-       if (tcf_action_copy_stats(skb, a, 1) < 0)
+       if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
                return -1;
 #endif
        return 0;
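
On an empty action list the old macro handed back the list head reinterpreted as a struct tc_action, so tcf_action_copy_stats() dereferenced garbage; list_first_entry_or_null() plus the NULL check closes that. (Note the macro parameter is still spelled "ext" while the body expands "exts"; it only works because every caller passes a variable literally named exts.) A userspace sketch of the _or_null semantics on a circular list:

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };
    struct tc_action { int kind; struct list_head list; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* An empty circular list points at itself; return NULL instead of the
     * head masquerading as an entry. */
    #define first_entry_or_null(head, type, member) \
            ((head)->next != (head) ? \
             container_of((head)->next, type, member) : NULL)

    int main(void)
    {
            struct list_head head = { &head, &head };       /* empty list */
            struct tc_action *a = first_entry_or_null(&head, struct tc_action, list);

            printf("empty list -> %s\n", a ? "entry" : "NULL");
            return 0;
    }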
index 9b05924cc386ecc2cdb9816be27e439637fb37b3..333cd94ba381ff62f9512d6b95474a2aecc3ea88 100644 (file)
@@ -670,8 +670,14 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
 
-       if (tb[TCA_FQ_QUANTUM])
-               q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
+       if (tb[TCA_FQ_QUANTUM]) {
+               u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
+
+               if (quantum > 0)
+                       q->quantum = quantum;
+               else
+                       err = -EINVAL;
+       }
 
        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
index e49e231cef529ecaf4bf2bc3e6a168b8f8b6fc06..06320c8c1c8660cdd4e1a56968353169236fb2df 100644 (file)
@@ -2608,7 +2608,7 @@ do_addr_param:
 
                addr_param = param.v + sizeof(sctp_addip_param_t);
 
-               af = sctp_get_af_specific(param_type2af(param.p->type));
+               af = sctp_get_af_specific(param_type2af(addr_param->p.type));
                if (af == NULL)
                        break;
 
index 8eb779b9d77f2ad56c7ccdc5db081bc4b0a05028..604e718d68d35ecb305b3ad71ae774a848ea5a02 100644 (file)
@@ -5,6 +5,7 @@ config SECURITY_TOMOYO
        select SECURITYFS
        select SECURITY_PATH
        select SECURITY_NETWORK
+       select SRCU
        default n
        help
          This selects TOMOYO Linux, pathname-based access control.
index ec667f158f192c4467e020164ecd4adf30a5ce04..5d905d90d504c0d588b62bb856e7314e23488ec0 100644 (file)
@@ -81,36 +81,6 @@ struct snd_seq_dummy_port {
 
 static int my_client = -1;
 
-/*
- * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
- * to subscribers.
- * Note: this callback is called only after all subscribers are removed.
- */
-static int
-dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
-{
-       struct snd_seq_dummy_port *p;
-       int i;
-       struct snd_seq_event ev;
-
-       p = private_data;
-       memset(&ev, 0, sizeof(ev));
-       if (p->duplex)
-               ev.source.port = p->connect;
-       else
-               ev.source.port = p->port;
-       ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
-       ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
-       for (i = 0; i < 16; i++) {
-               ev.data.control.channel = i;
-               ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
-               snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
-               ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
-               snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
-       }
-       return 0;
-}
-
 /*
  * event input callback - just redirect events to subscribers
  */
@@ -175,7 +145,6 @@ create_port(int idx, int type)
                | SNDRV_SEQ_PORT_TYPE_PORT;
        memset(&pcb, 0, sizeof(pcb));
        pcb.owner = THIS_MODULE;
-       pcb.unuse = dummy_unuse;
        pcb.event_input = dummy_input;
        pcb.private_free = dummy_free;
        pcb.private_data = rec;
index 1a3a6fa27158b0096de715c8e322ec0cddeb62ec..c6bba99a90b285a939865eb57bd97de3d7646d31 100644 (file)
@@ -56,8 +56,7 @@ static inline unsigned char reg_read(struct ak4113 *ak4113, unsigned char reg)
 
 static void snd_ak4113_free(struct ak4113 *chip)
 {
-       chip->init = 1; /* don't schedule new work */
-       mb();
+       atomic_inc(&chip->wq_processing);       /* don't schedule new work */
        cancel_delayed_work_sync(&chip->work);
        kfree(chip);
 }
@@ -89,6 +88,7 @@ int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read,
        chip->write = write;
        chip->private_data = private_data;
        INIT_DELAYED_WORK(&chip->work, ak4113_stats);
+       atomic_set(&chip->wq_processing, 0);
 
        for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++)
                chip->regmap[reg] = pgm[reg];
@@ -139,13 +139,11 @@ static void ak4113_init_regs(struct ak4113 *chip)
 
 void snd_ak4113_reinit(struct ak4113 *chip)
 {
-       chip->init = 1;
-       mb();
-       flush_delayed_work(&chip->work);
+       if (atomic_inc_return(&chip->wq_processing) == 1)
+               cancel_delayed_work_sync(&chip->work);
        ak4113_init_regs(chip);
        /* bring up statistics / event queueing */
-       chip->init = 0;
-       if (chip->kctls[0])
+       if (atomic_dec_and_test(&chip->wq_processing))
                schedule_delayed_work(&chip->work, HZ / 10);
 }
 EXPORT_SYMBOL_GPL(snd_ak4113_reinit);
@@ -632,8 +630,9 @@ static void ak4113_stats(struct work_struct *work)
 {
        struct ak4113 *chip = container_of(work, struct ak4113, work.work);
 
-       if (!chip->init)
+       if (atomic_inc_return(&chip->wq_processing) == 1)
                snd_ak4113_check_rate_and_errors(chip, chip->check_flags);
 
-       schedule_delayed_work(&chip->work, HZ / 10);
+       if (atomic_dec_and_test(&chip->wq_processing))
+               schedule_delayed_work(&chip->work, HZ / 10);
 }
index c7f56339415d31052812e1c3d6066c275298411e..b70e6eccbd035372e48098188f32aa99e83b4ea5 100644 (file)
@@ -66,8 +66,7 @@ static void reg_dump(struct ak4114 *ak4114)
 
 static void snd_ak4114_free(struct ak4114 *chip)
 {
-       chip->init = 1; /* don't schedule new work */
-       mb();
+       atomic_inc(&chip->wq_processing);       /* don't schedule new work */
        cancel_delayed_work_sync(&chip->work);
        kfree(chip);
 }
@@ -100,6 +99,7 @@ int snd_ak4114_create(struct snd_card *card,
        chip->write = write;
        chip->private_data = private_data;
        INIT_DELAYED_WORK(&chip->work, ak4114_stats);
+       atomic_set(&chip->wq_processing, 0);
 
        for (reg = 0; reg < 6; reg++)
                chip->regmap[reg] = pgm[reg];
@@ -152,13 +152,11 @@ static void ak4114_init_regs(struct ak4114 *chip)
 
 void snd_ak4114_reinit(struct ak4114 *chip)
 {
-       chip->init = 1;
-       mb();
-       flush_delayed_work(&chip->work);
+       if (atomic_inc_return(&chip->wq_processing) == 1)
+               cancel_delayed_work_sync(&chip->work);
        ak4114_init_regs(chip);
        /* bring up statistics / event queueing */
-       chip->init = 0;
-       if (chip->kctls[0])
+       if (atomic_dec_and_test(&chip->wq_processing))
                schedule_delayed_work(&chip->work, HZ / 10);
 }
 
@@ -612,10 +610,10 @@ static void ak4114_stats(struct work_struct *work)
 {
        struct ak4114 *chip = container_of(work, struct ak4114, work.work);
 
-       if (!chip->init)
+       if (atomic_inc_return(&chip->wq_processing) == 1)
                snd_ak4114_check_rate_and_errors(chip, chip->check_flags);
-
-       schedule_delayed_work(&chip->work, HZ / 10);
+       if (atomic_dec_and_test(&chip->wq_processing))
+               schedule_delayed_work(&chip->work, HZ / 10);
 }
 
 EXPORT_SYMBOL(snd_ak4114_create);
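
Both the ak4113 and ak4114 hunks replace the racy init flag plus mb() with a wq_processing nesting counter: the first holder to raise it from zero cancels the periodic work, and only the last holder to drop it back to zero re-arms it, so reinit and the work item can no longer reschedule each other concurrently. A C11 sketch of the counter protocol:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int wq_processing;

    static void reinit(void)
    {
            /* atomic_fetch_add returns the old value; old 0 means first in */
            if (atomic_fetch_add(&wq_processing, 1) == 0)
                    puts("first holder: cancel pending work");

            puts("... reprogram registers ...");

            /* old 1 means last out: safe to re-arm the work */
            if (atomic_fetch_sub(&wq_processing, 1) == 1)
                    puts("last holder: reschedule work");
    }

    int main(void) { reinit(); return 0; }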
index 7752860f7230e2821ac906715c06007543e84499..4c23381727a1ed1c53406b596ff04d9cd2e68577 100644 (file)
@@ -240,6 +240,8 @@ static int axi_i2s_probe(struct platform_device *pdev)
        if (ret)
                goto err_clk_disable;
 
+       return 0;
+
 err_clk_disable:
        clk_disable_unprepare(i2s->clk);
        return ret;
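
A one-line fix for a classic fall-through bug: without an explicit return before the error label, a successful probe ran straight into its own cleanup code and disabled the clock it had just enabled. A minimal reproduction of the shape:

    #include <stdio.h>

    static int probe(int fail)
    {
            if (fail)
                    goto err_clk_disable;

            return 0;       /* the line the patch adds */

    err_clk_disable:
            puts("cleanup: clk_disable_unprepare()");
            return -1;
    }

    int main(void)
    {
            printf("probe(0) = %d\n", probe(0));
            printf("probe(1) = %d\n", probe(1));
            return 0;
    }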
index 99ff35e2a25d012ba052b00fef7c2d1963a8e5c3..35e44e463cfe580f538e7c1d7524fd3b9cc4249f 100644 (file)
@@ -348,7 +348,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
        struct atmel_pcm_dma_params *dma_params;
        int dir, channels, bits;
        u32 tfmr, rfmr, tcmr, rcmr;
-       int start_event;
        int ret;
        int fslen, fslen_ext;
 
@@ -457,19 +456,10 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
                 * The SSC transmit clock is obtained from the BCLK signal
                 * on the TK line, and the SSC receive clock is
                 * generated from the transmit clock.
-                *
-                *  For single channel data, one sample is transferred
-                * on the falling edge of the LRC clock.
-                * For two channel data, one sample is
-                * transferred on both edges of the LRC clock.
                 */
-               start_event = ((channels == 1)
-                               ? SSC_START_FALLING_RF
-                               : SSC_START_EDGE_RF);
-
                rcmr =    SSC_BF(RCMR_PERIOD, 0)
                        | SSC_BF(RCMR_STTDLY, START_DELAY)
-                       | SSC_BF(RCMR_START, start_event)
+                       | SSC_BF(RCMR_START, SSC_START_FALLING_RF)
                        | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
                        | SSC_BF(RCMR_CKO, SSC_CKO_NONE)
                        | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
@@ -478,14 +468,14 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
                rfmr =    SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
                        | SSC_BF(RFMR_FSOS, SSC_FSOS_NONE)
                        | SSC_BF(RFMR_FSLEN, 0)
-                       | SSC_BF(RFMR_DATNB, 0)
+                       | SSC_BF(RFMR_DATNB, (channels - 1))
                        | SSC_BIT(RFMR_MSBF)
                        | SSC_BF(RFMR_LOOP, 0)
                        | SSC_BF(RFMR_DATLEN, (bits - 1));
 
                tcmr =    SSC_BF(TCMR_PERIOD, 0)
                        | SSC_BF(TCMR_STTDLY, START_DELAY)
-                       | SSC_BF(TCMR_START, start_event)
+                       | SSC_BF(TCMR_START, SSC_START_FALLING_RF)
                        | SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
                        | SSC_BF(TCMR_CKO, SSC_CKO_NONE)
                        | SSC_BF(TCMR_CKS, ssc->clk_from_rk_pin ?
@@ -495,7 +485,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
                        | SSC_BF(TFMR_FSDEN, 0)
                        | SSC_BF(TFMR_FSOS, SSC_FSOS_NONE)
                        | SSC_BF(TFMR_FSLEN, 0)
-                       | SSC_BF(TFMR_DATNB, 0)
+                       | SSC_BF(TFMR_DATNB, (channels - 1))
                        | SSC_BIT(TFMR_MSBF)
                        | SSC_BF(TFMR_DATDEF, 0)
                        | SSC_BF(TFMR_DATLEN, (bits - 1));
@@ -512,7 +502,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
                rcmr =    SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
                        | SSC_BF(RCMR_STTDLY, 1)
                        | SSC_BF(RCMR_START, SSC_START_RISING_RF)
-                       | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
+                       | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
                        | SSC_BF(RCMR_CKO, SSC_CKO_NONE)
                        | SSC_BF(RCMR_CKS, SSC_CKS_DIV);
 
@@ -527,7 +517,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
                tcmr =    SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
                        | SSC_BF(TCMR_STTDLY, 1)
                        | SSC_BF(TCMR_START, SSC_START_RISING_RF)
-                       | SSC_BF(TCMR_CKI, SSC_CKI_RISING)
+                       | SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
                        | SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS)
                        | SSC_BF(TCMR_CKS, SSC_CKS_DIV);
 
@@ -556,7 +546,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
                rcmr =    SSC_BF(RCMR_PERIOD, 0)
                        | SSC_BF(RCMR_STTDLY, START_DELAY)
                        | SSC_BF(RCMR_START, SSC_START_RISING_RF)
-                       | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
+                       | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
                        | SSC_BF(RCMR_CKO, SSC_CKO_NONE)
                        | SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
                                           SSC_CKS_PIN : SSC_CKS_CLOCK);
index e5f2fb884bf34688bc42add58e0b93cc5b863a9b..30c673cdc12ed409a720836d139cd48b7ed5d134 100644 (file)
@@ -188,8 +188,8 @@ static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0);
 static const char * const pcm512x_dsp_program_texts[] = {
        "FIR interpolation with de-emphasis",
        "Low latency IIR with de-emphasis",
-       "Fixed process flow",
        "High attenuation with de-emphasis",
+       "Fixed process flow",
        "Ringing-less low latency FIR",
 };
 
index 2cd4fe463102d4532458e1ed3402abd276be8fc1..1d1c7f8a9af27329a10dacf22c8752093dba91f8 100644 (file)
@@ -861,10 +861,8 @@ static int rt286_hw_params(struct snd_pcm_substream *substream,
                RT286_I2S_CTRL1, 0x0018, d_len_code << 3);
        dev_dbg(codec->dev, "format val = 0x%x\n", val);
 
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val);
-       else
-               snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
+       snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val);
+       snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
 
        return 0;
 }
index c3f2decd643c9ba7eb57eec8a644091bc440376d..1ff726c292491eaf14f09ad235e779b65bca0041 100644 (file)
@@ -2124,6 +2124,7 @@ MODULE_DEVICE_TABLE(of, rt5640_of_match);
 static struct acpi_device_id rt5640_acpi_match[] = {
        { "INT33CA", 0 },
        { "10EC5640", 0 },
+       { "10EC5642", 0 },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match);
index c0fbe18814398b03ebfbfaabdc535424185dd3fa..918ada9738b0431e9d47d0dc5155cd42d9e2fb2a 100644 (file)
@@ -2083,10 +2083,14 @@ static int rt5677_set_pll1_event(struct snd_soc_dapm_widget *w,
        struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
        switch (event) {
-       case SND_SOC_DAPM_POST_PMU:
+       case SND_SOC_DAPM_PRE_PMU:
                regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x2);
+               break;
+
+       case SND_SOC_DAPM_POST_PMU:
                regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x0);
                break;
+
        default:
                return 0;
        }
@@ -2101,10 +2105,14 @@ static int rt5677_set_pll2_event(struct snd_soc_dapm_widget *w,
        struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
        switch (event) {
-       case SND_SOC_DAPM_POST_PMU:
+       case SND_SOC_DAPM_PRE_PMU:
                regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x2);
+               break;
+
+       case SND_SOC_DAPM_POST_PMU:
                regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x0);
                break;
+
        default:
                return 0;
        }
@@ -2212,9 +2220,11 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w,
 
 static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT,
-               0, rt5677_set_pll1_event, SND_SOC_DAPM_POST_PMU),
+               0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU |
+               SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_SUPPLY("PLL2", RT5677_PWR_ANLG2, RT5677_PWR_PLL2_BIT,
-               0, rt5677_set_pll2_event, SND_SOC_DAPM_POST_PMU),
+               0, rt5677_set_pll2_event, SND_SOC_DAPM_PRE_PMU |
+               SND_SOC_DAPM_POST_PMU),
 
        /* Input Side */
        /* micbias */
index 29cf7ce610f4707feeaba22425daedbe50491ba4..aa98be32bb60cfdb37ed150b4cffabf7dd4ee071 100644 (file)
@@ -483,21 +483,21 @@ static int sgtl5000_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        /* setting i2s data format */
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_DSP_A:
-               i2sctl |= SGTL5000_I2S_MODE_PCM;
+               i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
                break;
        case SND_SOC_DAIFMT_DSP_B:
-               i2sctl |= SGTL5000_I2S_MODE_PCM;
+               i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
                i2sctl |= SGTL5000_I2S_LRALIGN;
                break;
        case SND_SOC_DAIFMT_I2S:
-               i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
+               i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
                break;
        case SND_SOC_DAIFMT_RIGHT_J:
-               i2sctl |= SGTL5000_I2S_MODE_RJ;
+               i2sctl |= SGTL5000_I2S_MODE_RJ << SGTL5000_I2S_MODE_SHIFT;
                i2sctl |= SGTL5000_I2S_LRPOL;
                break;
        case SND_SOC_DAIFMT_LEFT_J:
-               i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
+               i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
                i2sctl |= SGTL5000_I2S_LRALIGN;
                break;
        default:
@@ -1462,6 +1462,9 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
        if (ret)
                return ret;
 
+       /* Need 8 clocks before I2C accesses */
+       udelay(1);
+
        /* read chip information */
        ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, &reg);
        if (ret)
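
The I2S mode values were being OR'd into the low bits of the control word instead of being shifted into the MODE field first, so every format programmed unrelated bits. A sketch of the difference (shift position and value here are placeholders for illustration, not the real SGTL5000 register layout):

    #include <stdio.h>

    #define I2S_MODE_SHIFT  5       /* placeholder field position */
    #define I2S_MODE_PCM    0x2     /* placeholder field value */

    int main(void)
    {
            unsigned int ctl;

            ctl = I2S_MODE_PCM;                     /* old code: bits 0-1 */
            printf("unshifted: 0x%02x\n", ctl);

            ctl = I2S_MODE_PCM << I2S_MODE_SHIFT;   /* fixed: lands in MODE */
            printf("shifted:   0x%02x\n", ctl);
            return 0;
    }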
index b7ebce054b4ebe673ceeb23fa73118a1c9c74998..dd222b10ce132bfdfd15aecc06f19fca41e1bd7f 100644 (file)
@@ -1046,7 +1046,7 @@ static int aic3x_prepare(struct snd_pcm_substream *substream,
                delay += aic3x->tdm_delay;
 
        /* Configure data delay */
-       snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, aic3x->tdm_delay);
+       snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, delay);
 
        return 0;
 }
index 1d1205702d2324bcef0b41d9682a49549bc4f6e0..9f2dced046de4b0ade76fcb8f088237f9443a643 100644 (file)
@@ -254,6 +254,7 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
        struct ts3a227e *ts3a227e;
        struct device *dev = &i2c->dev;
        int ret;
+       unsigned int acc_reg;
 
        ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL);
        if (ts3a227e == NULL)
@@ -283,6 +284,11 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
                           INTB_DISABLE | ADC_COMPLETE_INT_DISABLE,
                           ADC_COMPLETE_INT_DISABLE);
 
+       /* Read jack status because chip might not trigger interrupt at boot. */
+       regmap_read(ts3a227e->regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg);
+       ts3a227e_new_jack_state(ts3a227e, acc_reg);
+       ts3a227e_jack_report(ts3a227e);
+
        return 0;
 }
 
index b9211b42f6e919f0603eabda95ca332eea71dfb6..b115ed815db97329bb9d6a4c6d7b50faf42f5f40 100644 (file)
@@ -717,6 +717,8 @@ static int wm8731_i2c_probe(struct i2c_client *i2c,
        if (wm8731 == NULL)
                return -ENOMEM;
 
+       mutex_init(&wm8731->lock);
+
        wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap);
        if (IS_ERR(wm8731->regmap)) {
                ret = PTR_ERR(wm8731->regmap);
index 4d2d2b1380d59d2b8e1043acbdd29d314eef1b97..75b87c5c0f046286f41ceace869091f6323d9f4c 100644 (file)
@@ -1076,10 +1076,13 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
        { "Right Capture PGA", NULL, "Right Capture Mux" },
        { "Right Capture PGA", NULL, "Right Capture Inverting Mux" },
 
-       { "AIFOUTL", "Left",  "ADCL" },
-       { "AIFOUTL", "Right", "ADCR" },
-       { "AIFOUTR", "Left",  "ADCL" },
-       { "AIFOUTR", "Right", "ADCR" },
+       { "AIFOUTL Mux", "Left", "ADCL" },
+       { "AIFOUTL Mux", "Right", "ADCR" },
+       { "AIFOUTR Mux", "Left", "ADCL" },
+       { "AIFOUTR Mux", "Right", "ADCR" },
+
+       { "AIFOUTL", NULL, "AIFOUTL Mux" },
+       { "AIFOUTR", NULL, "AIFOUTR Mux" },
 
        { "ADCL", NULL, "CLK_DSP" },
        { "ADCL", NULL, "Left Capture PGA" },
@@ -1089,12 +1092,16 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
 };
 
 static const struct snd_soc_dapm_route dac_intercon[] = {
-       { "DACL", "Right", "AIFINR" },
-       { "DACL", "Left",  "AIFINL" },
+       { "DACL Mux", "Left", "AIFINL" },
+       { "DACL Mux", "Right", "AIFINR" },
+
+       { "DACR Mux", "Left", "AIFINL" },
+       { "DACR Mux", "Right", "AIFINR" },
+
+       { "DACL", NULL, "DACL Mux" },
        { "DACL", NULL, "CLK_DSP" },
 
-       { "DACR", "Right", "AIFINR" },
-       { "DACR", "Left",  "AIFINL" },
+       { "DACR", NULL, "DACR Mux" },
        { "DACR", NULL, "CLK_DSP" },
 
        { "Charge pump", NULL, "SYSCLK" },
index 031a1ae71d943f2782d08f911d24c79d38d078ab..a96eb497a3796e9b67b539eae255a316af52864b 100644 (file)
@@ -556,7 +556,7 @@ static struct {
        { 22050, 2 },
        { 24000, 2 },
        { 16000, 3 },
-       { 11250, 4 },
+       { 11025, 4 },
        { 12000, 4 },
        {  8000, 5 },
 };
index 3eddb18fefd156eaf1acd3b050b106b3d32ff3b5..5cc457ef8894f7c69ae7c4981ab7e2d6d08d135b 100644 (file)
@@ -344,23 +344,27 @@ static int wm9705_soc_probe(struct snd_soc_codec *codec)
        struct snd_ac97 *ac97;
        int ret = 0;
 
-       ac97 = snd_soc_new_ac97_codec(codec);
+       ac97 = snd_soc_alloc_ac97_codec(codec);
        if (IS_ERR(ac97)) {
                ret = PTR_ERR(ac97);
                dev_err(codec->dev, "Failed to register AC97 codec\n");
                return ret;
        }
 
-       snd_soc_codec_set_drvdata(codec, ac97);
-
        ret = wm9705_reset(codec);
        if (ret)
-               goto reset_err;
+               goto err_put_device;
+
+       ret = device_add(&ac97->dev);
+       if (ret)
+               goto err_put_device;
+
+       snd_soc_codec_set_drvdata(codec, ac97);
 
        return 0;
 
-reset_err:
-       snd_soc_free_ac97_codec(ac97);
+err_put_device:
+       put_device(&ac97->dev);
        return ret;
 }
 
index e04643d2bb2412e334112cbb6ef15c8942cbf558..9517571e820d9576b9505be863a73f3c02116cb8 100644 (file)
@@ -666,7 +666,7 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
        struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
        int ret = 0;
 
-       wm9712->ac97 = snd_soc_new_ac97_codec(codec);
+       wm9712->ac97 = snd_soc_alloc_ac97_codec(codec);
        if (IS_ERR(wm9712->ac97)) {
                ret = PTR_ERR(wm9712->ac97);
                dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
@@ -675,15 +675,19 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec)
 
        ret = wm9712_reset(codec, 0);
        if (ret < 0)
-               goto reset_err;
+               goto err_put_device;
+
+       ret = device_add(&wm9712->ac97->dev);
+       if (ret)
+               goto err_put_device;
 
        /* set alc mux to none */
        ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000);
 
        return 0;
 
-reset_err:
-       snd_soc_free_ac97_codec(wm9712->ac97);
+err_put_device:
+       put_device(&wm9712->ac97->dev);
        return ret;
 }
 
index 71b9d5b0734d22c54284172184fede22fc4e5c48..6ab1122a3872dedeefe0460ac065a2f675e231a1 100644 (file)
@@ -1225,7 +1225,7 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
        struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
        int ret = 0, reg;
 
-       wm9713->ac97 = snd_soc_new_ac97_codec(codec);
+       wm9713->ac97 = snd_soc_alloc_ac97_codec(codec);
        if (IS_ERR(wm9713->ac97))
                return PTR_ERR(wm9713->ac97);
 
@@ -1234,7 +1234,11 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
        wm9713_reset(codec, 0);
        ret = wm9713_reset(codec, 1);
        if (ret < 0)
-               goto reset_err;
+               goto err_put_device;
+
+       ret = device_add(&wm9713->ac97->dev);
+       if (ret)
+               goto err_put_device;
 
        /* unmute the adc - move to kcontrol */
        reg = ac97_read(codec, AC97_CD) & 0x7fff;
@@ -1242,8 +1246,8 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
 
        return 0;
 
-reset_err:
-       snd_soc_free_ac97_codec(wm9713->ac97);
+err_put_device:
+       put_device(&wm9713->ac97->dev);
        return ret;
 }
 
index 91a550f4a10dc7e0156efd8389fc3ba90bb15955..5e793bbb6b02be5f4a628f52755c09564c4a72fd 100644 (file)
 #define ESAI_xCCR_xFP_MASK     (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT)
 #define ESAI_xCCR_xFP(v)       ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK)
 #define ESAI_xCCR_xDC_SHIFT     9
-#define ESAI_xCCR_xDC_WIDTH    4
+#define ESAI_xCCR_xDC_WIDTH    5
 #define ESAI_xCCR_xDC_MASK     (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT)
 #define ESAI_xCCR_xDC(v)       ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK)
 #define ESAI_xCCR_xPSR_SHIFT   8
index a65f17d57ffb44733b7858364678581308dea040..059496ed9ad76c4771ccafbe593a1e0c971f9420 100644 (file)
@@ -1362,9 +1362,9 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        }
 
        ssi_private->irq = platform_get_irq(pdev, 0);
-       if (!ssi_private->irq) {
+       if (ssi_private->irq < 0) {
                dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
-               return -ENXIO;
+               return ssi_private->irq;
        }
 
        /* Are the RX and the TX clocks locked? */
index 4caacb05a62324e404811e45768ec17eed0973ea..cd146d4fa8054fb0980abdda8a9fc506c1c4ef98 100644 (file)
@@ -257,6 +257,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
        if (ret)
                goto clk_fail;
        data->card.num_links = 1;
+       data->card.owner = THIS_MODULE;
        data->card.dai_link = &data->dai;
        data->card.dapm_widgets = imx_wm8962_dapm_widgets;
        data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets);
index fb9240fdc9b70095d364e3e90e14c16653ca4aff..7fe3009b1c43c63c055c4f9ea8f2b25376ad5c7b 100644 (file)
@@ -452,9 +452,8 @@ static int asoc_simple_card_parse_of(struct device_node *node,
 }
 
 /* Decrease the reference count of the device nodes */
-static int asoc_simple_card_unref(struct platform_device *pdev)
+static int asoc_simple_card_unref(struct snd_soc_card *card)
 {
-       struct snd_soc_card *card = platform_get_drvdata(pdev);
        struct snd_soc_dai_link *dai_link;
        int num_links;
 
@@ -556,7 +555,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
                return ret;
 
 err:
-       asoc_simple_card_unref(pdev);
+       asoc_simple_card_unref(&priv->snd_card);
        return ret;
 }
 
@@ -572,7 +571,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev)
                snd_soc_jack_free_gpios(&simple_card_mic_jack, 1,
                                        &simple_card_mic_jack_gpio);
 
-       return asoc_simple_card_unref(pdev);
+       return asoc_simple_card_unref(card);
 }
 
 static const struct of_device_id asoc_simple_of_match[] = {
index ef2e8b5766a1b92df8879541888b4761f1c2a09f..b3f9489794a6acad776fc2cf7670b6e12ab06bf6 100644 (file)
@@ -706,6 +706,7 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
        struct list_head *block_list)
 {
        struct sst_mem_block *block, *tmp;
+       struct sst_block_allocator ba_tmp = *ba;
        u32 end = ba->offset + ba->size, block_end;
        int err;
 
@@ -730,9 +731,9 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
                if (ba->offset >= block->offset && ba->offset < block_end) {
 
                        /* align ba to block boundary */
-                       ba->size -= block_end - ba->offset;
-                       ba->offset = block_end;
-                       err = block_alloc_contiguous(dsp, ba, block_list);
+                       ba_tmp.size -= block_end - ba->offset;
+                       ba_tmp.offset = block_end;
+                       err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
                        if (err < 0)
                                return -ENOMEM;
 
@@ -767,10 +768,10 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
                        list_move(&block->list, &dsp->used_block_list);
                        list_add(&block->module_list, block_list);
                        /* align ba to block boundary */
-                       ba->size -= block_end - ba->offset;
-                       ba->offset = block_end;
+                       ba_tmp.size -= block_end - ba->offset;
+                       ba_tmp.offset = block_end;
 
-                       err = block_alloc_contiguous(dsp, ba, block_list);
+                       err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
                        if (err < 0)
                                return -ENOMEM;
 
index 3f8c48231364c6c7b9510da4eb9d73c6e291cace..8156cc1accb79aec1c21751d51cd1dd2817bc48c 100644 (file)
@@ -651,11 +651,11 @@ static void hsw_notification_work(struct work_struct *work)
        }
 
        /* tell DSP that notification has been handled */
-       sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IPCD,
+       sst_dsp_shim_update_bits(hsw->dsp, SST_IPCD,
                SST_IPCD_BUSY | SST_IPCD_DONE, SST_IPCD_DONE);
 
        /* unmask busy interrupt */
-       sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0);
+       sst_dsp_shim_update_bits(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0);
 }
 
 static struct ipc_message *reply_find_msg(struct sst_hsw *hsw, u32 header)
@@ -1228,6 +1228,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
        struct sst_dsp *sst = hsw->dsp;
        unsigned long flags;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to free, ignore it.\n");
+               return 0;
+       }
+
        /* don't free DSP streams that are not committed */
        if (!stream->commited)
                goto out;
@@ -1415,6 +1420,16 @@ int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
        u32 header;
        int ret;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to commit, ignore it.\n");
+               return 0;
+       }
+
+       if (stream->commited) {
+               dev_warn(hsw->dev, "warning: stream is already committed, ignore it.\n");
+               return 0;
+       }
+
        trace_ipc_request("stream alloc", stream->host_id);
 
        header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM);
@@ -1519,6 +1534,11 @@ int sst_hsw_stream_pause(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
 {
        int ret;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to pause, ignore it.\n");
+               return 0;
+       }
+
        trace_ipc_request("stream pause", stream->reply.stream_hw_id);
 
        ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE,
@@ -1535,6 +1555,11 @@ int sst_hsw_stream_resume(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
 {
        int ret;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to resume, ignore it.\n");
+               return 0;
+       }
+
        trace_ipc_request("stream resume", stream->reply.stream_hw_id);
 
        ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME,
@@ -1550,6 +1575,11 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
 {
        int ret, tries = 10;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to reset, ignore it.\n");
+               return 0;
+       }
+
        /* don't reset streams that are not committed */
        if (!stream->commited)
                return 0;
index 2ac72eb5e75d82e1a6e8e11e4b176984692215d3..b3360139c41a905ba9575574d42b7477dd0cbc26 100644 (file)
@@ -350,7 +350,7 @@ static struct sst_machines sst_acpi_bytcr[] = {
 
 /* Cherryview-based platforms: CherryTrail and Braswell */
 static struct sst_machines sst_acpi_chv[] = {
-       {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "fw_sst_22a8.bin",
+       {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "intel/fw_sst_22a8.bin",
                                                &chv_platform_data },
        {},
 };
index 8b79cafab1e2299af486c343565e68a162060c72..c7eb9dd67f608c47ffa93a97c1824e3dae3c867b 100644 (file)
@@ -434,7 +434,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
        case SND_SOC_DAIFMT_CBM_CFS:
                /* McBSP slave. FS clock as output */
                regs->srgr2     |= FSGM;
-               regs->pcr0      |= FSXM;
+               regs->pcr0      |= FSXM | FSRM;
                break;
        case SND_SOC_DAIFMT_CBM_CFM:
                /* McBSP slave */
index 13d8507333b8f8507679350aadc12df277d35249..dcc26eda0539b470f37382dacdc78622a8da0a95 100644 (file)
@@ -335,6 +335,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
                            SNDRV_PCM_FMTBIT_S24_LE),
        },
        .ops = &rockchip_i2s_dai_ops,
+       .symmetric_rates = 1,
 };
 
 static const struct snd_soc_component_driver rockchip_i2s_component = {
index 2e10e9a383768a5ed5132fb182b8ef3753c39540..08d7259bbaabad727709281234905011e990f07d 100644 (file)
@@ -48,15 +48,18 @@ static void soc_ac97_device_release(struct device *dev)
 }
 
 /**
- * snd_soc_new_ac97_codec - initailise AC97 device
- * @codec: audio codec
+ * snd_soc_alloc_ac97_codec() - Allocate a new AC'97 device
+ * @codec: The CODEC for which to create the AC'97 device
  *
- * Initialises AC97 codec resources for use by ad-hoc devices only.
+ * Allocates a new snd_ac97 device and initializes it, but does not yet register
+ * it. The caller is responsible for either calling device_add(&ac97->dev) to
+ * register the device, or calling put_device(&ac97->dev) to free it.
+ *
+ * Returns: A snd_ac97 device or a PTR_ERR in case of an error.
  */
-struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
+struct snd_ac97 *snd_soc_alloc_ac97_codec(struct snd_soc_codec *codec)
 {
        struct snd_ac97 *ac97;
-       int ret;
 
        ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
        if (ac97 == NULL)
@@ -73,7 +76,28 @@ struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
                     codec->component.card->snd_card->number, 0,
                     codec->component.name);
 
-       ret = device_register(&ac97->dev);
+       device_initialize(&ac97->dev);
+
+       return ac97;
+}
+EXPORT_SYMBOL(snd_soc_alloc_ac97_codec);
+
+/**
+ * snd_soc_new_ac97_codec - initialise AC97 device
+ * @codec: audio codec
+ *
+ * Initialises AC97 codec resources for use by ad-hoc devices only.
+ */
+struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
+{
+       struct snd_ac97 *ac97;
+       int ret;
+
+       ac97 = snd_soc_alloc_ac97_codec(codec);
+       if (IS_ERR(ac97))
+               return ac97;
+
+       ret = device_add(&ac97->dev);
        if (ret) {
                put_device(&ac97->dev);
                return ERR_PTR(ret);
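
Splitting allocation from registration is the standard two-phase device pattern: device_initialize() first, device_add() only after early setup (here the codec reset) succeeds, and put_device() to drop the reference on any failure, registered or not. The wm9705/wm9712/wm9713 probes above are all converted to exactly this shape. A userspace sketch of the ownership rules, with stand-in helpers:

    #include <stdlib.h>

    struct dev { int registered; };

    static struct dev *dev_alloc(void) { return calloc(1, sizeof(struct dev)); }
    static int dev_add(struct dev *d)  { d->registered = 1; return 0; }
    static void dev_put(struct dev *d) { free(d); }  /* final ref drop */

    static int codec_probe(int reset_fails)
    {
            struct dev *d = dev_alloc();

            if (!d)
                    return -1;
            if (reset_fails) {
                    dev_put(d);     /* never registered: just drop the ref */
                    return -1;
            }
            if (dev_add(d)) {
                    dev_put(d);
                    return -1;
            }
            return 0;
    }

    int main(void) { return codec_probe(0); }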
index 590a82f01d0bdc2cc2fa39b89c006a2ec8b2b589..025c38fbe3c03fea08db0b365e472902c5f110fc 100644 (file)
@@ -659,7 +659,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
                        rtd->dai_link->stream_name);
 
                ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
-                               1, 0, &be_pcm);
+                               rtd->dai_link->dpcm_playback,
+                               rtd->dai_link->dpcm_capture, &be_pcm);
                if (ret < 0) {
                        dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
                                rtd->dai_link->name);
@@ -668,8 +669,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
 
                rtd->pcm = be_pcm;
                rtd->fe_compr = 1;
-               be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
-               be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
+               if (rtd->dai_link->dpcm_playback)
+                       be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
+               else if (rtd->dai_link->dpcm_capture)
+                       be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
                memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
        } else
                memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
index 86ea2d7b88451c219dacad848e60564fb1a64ecf..d2b18e88707151551e3f23b4d8f66df6a526b895 100644 (file)
@@ -1,3 +1,4 @@
+#define _GNU_SOURCE
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -98,3 +99,45 @@ char *debugfs_mount(const char *mountpoint)
 out:
        return debugfs_mountpoint;
 }
+
+int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename)
+{
+       char sbuf[128];
+
+       switch (err) {
+       case ENOENT:
+               if (debugfs_found) {
+                       snprintf(buf, size,
+                                "Error:\tFile %s/%s not found.\n"
+                                "Hint:\tPerhaps this kernel is missing some CONFIG_ setting to enable this feature?\n",
+                                debugfs_mountpoint, filename);
+                       break;
+               }
+               snprintf(buf, size, "%s",
+                        "Error:\tUnable to find debugfs\n"
+                        "Hint:\tWas your kernel compiled with debugfs support?\n"
+                        "Hint:\tIs the debugfs filesystem mounted?\n"
+                        "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
+               break;
+       case EACCES:
+               snprintf(buf, size,
+                        "Error:\tNo permissions to read %s/%s\n"
+                        "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
+                        debugfs_mountpoint, filename, debugfs_mountpoint);
+               break;
+       default:
+               snprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
+               break;
+       }
+
+       return 0;
+}
+
+int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name)
+{
+       char path[PATH_MAX];
+
+       snprintf(path, PATH_MAX, "tracing/events/%s/%s", sys, name ?: "*");
+
+       return debugfs__strerror_open(err, buf, size, path);
+}
index f19d3df9609dd2c94e96a949b9a232183be98532..0739881a98977cfcf70f84098b1e876b4a592f67 100644 (file)
@@ -26,4 +26,7 @@ char *debugfs_mount(const char *mountpoint);
 
 extern char debugfs_mountpoint[];
 
+int debugfs__strerror_open(int err, char *buf, size_t size, const char *filename);
+int debugfs__strerror_open_tp(int err, char *buf, size_t size, const char *sys, const char *name);
+
 #endif /* __API_DEBUGFS_H__ */
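
A likely usage shape for the new helpers (a hypothetical caller, not part of this patch; it builds against tools/lib/api, and the tracepoint names are arbitrary examples): turn an open() errno into a message with actionable hints instead of a bare strerror().

    #include <errno.h>
    #include <stdio.h>
    #include "debugfs.h"    /* tools/lib/api/fs/debugfs.h */

    int main(void)
    {
            char buf[1024];

            debugfs__strerror_open_tp(ENOENT, buf, sizeof(buf),
                                      "sched", "sched_switch");
            printf("%s\n", buf);
            return 0;
    }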
diff --git a/tools/lib/lockdep/.gitignore b/tools/lib/lockdep/.gitignore
new file mode 100644 (file)
index 0000000..cc0e7a9
--- /dev/null
@@ -0,0 +1 @@
+liblockdep.so.*
index 52f9279c6c13097344dd5e273ab10631c49f00ec..4b866c54f624bd2adca1b21b241b2937a89494e5 100644 (file)
@@ -104,7 +104,7 @@ N           =
 
 export Q VERBOSE
 
-INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES)
+INCLUDES = -I. -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES)
 
 # Set compile option CFLAGS if not set elsewhere
 CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
index cf3a44bf1ec3f5d65b5bf3fe128ee178834714ef..afe20ed9fac83e81d1887b86ad5cf01dd1726bad 100644 (file)
@@ -32,6 +32,7 @@
 #include <stdint.h>
 #include <limits.h>
 
+#include <netinet/ip6.h>
 #include "event-parse.h"
 #include "event-utils.h"
 
@@ -4149,6 +4150,324 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
        trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
 }
 
+static void print_ip4_addr(struct trace_seq *s, char i, unsigned char *buf)
+{
+       const char *fmt;
+
+       if (i == 'i')
+               fmt = "%03d.%03d.%03d.%03d";
+       else
+               fmt = "%d.%d.%d.%d";
+
+       trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3]);
+}
+
+static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
+{
+       return ((unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
+               (unsigned long)(a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0UL;
+}
+
+static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr)
+{
+       return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE);
+}
+
+static void print_ip6c_addr(struct trace_seq *s, unsigned char *addr)
+{
+       int i, j, range;
+       unsigned char zerolength[8];
+       int longest = 1;
+       int colonpos = -1;
+       uint16_t word;
+       uint8_t hi, lo;
+       bool needcolon = false;
+       bool useIPv4;
+       struct in6_addr in6;
+
+       memcpy(&in6, addr, sizeof(struct in6_addr));
+
+       useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
+
+       memset(zerolength, 0, sizeof(zerolength));
+
+       if (useIPv4)
+               range = 6;
+       else
+               range = 8;
+
+       /* find position of longest 0 run */
+       for (i = 0; i < range; i++) {
+               for (j = i; j < range; j++) {
+                       if (in6.s6_addr16[j] != 0)
+                               break;
+                       zerolength[i]++;
+               }
+       }
+       for (i = 0; i < range; i++) {
+               if (zerolength[i] > longest) {
+                       longest = zerolength[i];
+                       colonpos = i;
+               }
+       }
+       if (longest == 1)               /* don't compress a single 0 */
+               colonpos = -1;
+
+       /* emit address */
+       for (i = 0; i < range; i++) {
+               if (i == colonpos) {
+                       if (needcolon || i == 0)
+                               trace_seq_printf(s, ":");
+                       trace_seq_printf(s, ":");
+                       needcolon = false;
+                       i += longest - 1;
+                       continue;
+               }
+               if (needcolon) {
+                       trace_seq_printf(s, ":");
+                       needcolon = false;
+               }
+               /* hex u16 without leading 0s */
+               word = ntohs(in6.s6_addr16[i]);
+               hi = word >> 8;
+               lo = word & 0xff;
+               if (hi)
+                       trace_seq_printf(s, "%x%02x", hi, lo);
+               else
+                       trace_seq_printf(s, "%x", lo);
+
+               needcolon = true;
+       }
+
+       if (useIPv4) {
+               if (needcolon)
+                       trace_seq_printf(s, ":");
+               print_ip4_addr(s, 'I', &in6.s6_addr[12]);
+       }
+
+       return;
+}
+
+static void print_ip6_addr(struct trace_seq *s, char i, unsigned char *buf)
+{
+       int j;
+
+       for (j = 0; j < 16; j += 2) {
+               trace_seq_printf(s, "%02x%02x", buf[j], buf[j+1]);
+               if (i == 'I' && j < 14)
+                       trace_seq_printf(s, ":");
+       }
+}
+
+/*
+ * %pi4   print an IPv4 address with leading zeros
+ * %pI4   print an IPv4 address without leading zeros
+ * %pi6   print an IPv6 address without colons
+ * %pI6   print an IPv6 address with colons
+ * %pI6c  print an IPv6 address in compressed form with colons
+ * %pISpc print an IP address based on sockaddr; p adds port.
+ */
+static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
+                         void *data, int size, struct event_format *event,
+                         struct print_arg *arg)
+{
+       unsigned char *buf;
+
+       if (arg->type == PRINT_FUNC) {
+               process_defined_func(s, data, size, event, arg);
+               return 0;
+       }
+
+       if (arg->type != PRINT_FIELD) {
+               trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
+               return 0;
+       }
+
+       if (!arg->field.field) {
+               arg->field.field =
+                       pevent_find_any_field(event, arg->field.name);
+               if (!arg->field.field) {
+                       do_warning("%s: field %s not found",
+                                  __func__, arg->field.name);
+                       return 0;
+               }
+       }
+
+       buf = data + arg->field.field->offset;
+
+       if (arg->field.field->size != 4) {
+               trace_seq_printf(s, "INVALIDIPv4");
+               return 0;
+       }
+       print_ip4_addr(s, i, buf);
+
+       return 0;
+}
+
+static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
+                         void *data, int size, struct event_format *event,
+                         struct print_arg *arg)
+{
+       char have_c = 0;
+       unsigned char *buf;
+       int rc = 0;
+
+       /* pI6c */
+       if (i == 'I' && *ptr == 'c') {
+               have_c = 1;
+               ptr++;
+               rc++;
+       }
+
+       if (arg->type == PRINT_FUNC) {
+               process_defined_func(s, data, size, event, arg);
+               return rc;
+       }
+
+       if (arg->type != PRINT_FIELD) {
+               trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
+               return rc;
+       }
+
+       if (!arg->field.field) {
+               arg->field.field =
+                       pevent_find_any_field(event, arg->field.name);
+               if (!arg->field.field) {
+                       do_warning("%s: field %s not found",
+                                  __func__, arg->field.name);
+                       return rc;
+               }
+       }
+
+       buf = data + arg->field.field->offset;
+
+       if (arg->field.field->size != 16) {
+               trace_seq_printf(s, "INVALIDIPv6");
+               return rc;
+       }
+
+       if (have_c)
+               print_ip6c_addr(s, buf);
+       else
+               print_ip6_addr(s, i, buf);
+
+       return rc;
+}
+
+static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
+                         void *data, int size, struct event_format *event,
+                         struct print_arg *arg)
+{
+       char have_c = 0, have_p = 0;
+       unsigned char *buf;
+       struct sockaddr_storage *sa;
+       int rc = 0;
+
+       /* pISpc */
+       if (i == 'I') {
+               if (*ptr == 'p') {
+                       have_p = 1;
+                       ptr++;
+                       rc++;
+               }
+               if (*ptr == 'c') {
+                       have_c = 1;
+                       ptr++;
+                       rc++;
+               }
+       }
+
+       if (arg->type == PRINT_FUNC) {
+               process_defined_func(s, data, size, event, arg);
+               return rc;
+       }
+
+       if (arg->type != PRINT_FIELD) {
+               trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
+               return rc;
+       }
+
+       if (!arg->field.field) {
+               arg->field.field =
+                       pevent_find_any_field(event, arg->field.name);
+               if (!arg->field.field) {
+                       do_warning("%s: field %s not found",
+                                  __func__, arg->field.name);
+                       return rc;
+               }
+       }
+
+       sa = (struct sockaddr_storage *) (data + arg->field.field->offset);
+
+       if (sa->ss_family == AF_INET) {
+               struct sockaddr_in *sa4 = (struct sockaddr_in *) sa;
+
+               if (arg->field.field->size < sizeof(struct sockaddr_in)) {
+                       trace_seq_printf(s, "INVALIDIPv4");
+                       return rc;
+               }
+
+               print_ip4_addr(s, i, (unsigned char *) &sa4->sin_addr);
+               if (have_p)
+                       trace_seq_printf(s, ":%d", ntohs(sa4->sin_port));
+
+
+       } else if (sa->ss_family == AF_INET6) {
+               struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) sa;
+
+               if (arg->field.field->size < sizeof(struct sockaddr_in6)) {
+                       trace_seq_printf(s, "INVALIDIPv6");
+                       return rc;
+               }
+
+               if (have_p)
+                       trace_seq_printf(s, "[");
+
+               buf = (unsigned char *) &sa6->sin6_addr;
+               if (have_c)
+                       print_ip6c_addr(s, buf);
+               else
+                       print_ip6_addr(s, i, buf);
+
+               if (have_p)
+                       trace_seq_printf(s, "]:%d", ntohs(sa6->sin6_port));
+       }
+
+       return rc;
+}
+
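+/*
+ * Returns the number of format characters consumed after the '%p', so
+ * the caller can advance its format pointer past 'i4', 'I6c', 'ISpc', ...
+ */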
+static int print_ip_arg(struct trace_seq *s, const char *ptr,
+                       void *data, int size, struct event_format *event,
+                       struct print_arg *arg)
+{
+       char i = *ptr;  /* 'i' or 'I' */
+       char ver;
+       int rc = 0;
+
+       ptr++;
+       rc++;
+
+       ver = *ptr;
+       ptr++;
+       rc++;
+
+       switch (ver) {
+       case '4':
+               rc += print_ipv4_arg(s, ptr, i, data, size, event, arg);
+               break;
+       case '6':
+               rc += print_ipv6_arg(s, ptr, i, data, size, event, arg);
+               break;
+       case 'S':
+               rc += print_ipsa_arg(s, ptr, i, data, size, event, arg);
+               break;
+       default:
+               return 0;
+       }
+
+       return rc;
+}
+
 static int is_printable_array(char *p, unsigned int len)
 {
        unsigned int i;
@@ -4337,6 +4656,15 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
                                        ptr++;
                                        arg = arg->next;
                                        break;
+                               } else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') {
+                                       int n;
+
+                                       n = print_ip_arg(s, ptr+1, data, size, event, arg);
+                                       if (n > 0) {
+                                               ptr += n;
+                                               arg = arg->next;
+                                               break;
+                                       }
                                }
 
                                /* fall through */
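
As a quick illustration (not part of the commit), the sketch below mirrors the
zero-run compression that print_ip6c_addr() applies for %pI6c, exercised
standalone on a host-order word array; the function and variable names here
are made up for the example:

	#include <stdio.h>
	#include <stdint.h>

	static void print_ip6_compressed(const uint16_t *words)
	{
		int zerolength[8] = { 0 };
		int longest = 1, colonpos = -1;
		int i, j, needcolon = 0;

		/* find position of longest 0 run */
		for (i = 0; i < 8; i++)
			for (j = i; j < 8 && words[j] == 0; j++)
				zerolength[i]++;
		for (i = 0; i < 8; i++) {
			if (zerolength[i] > longest) {
				longest = zerolength[i];
				colonpos = i;
			}
		}
		if (longest == 1)	/* don't compress a single 0 */
			colonpos = -1;

		for (i = 0; i < 8; i++) {
			if (i == colonpos) {	/* emit "::", skip the run */
				if (needcolon || i == 0)
					printf(":");
				printf(":");
				needcolon = 0;
				i += longest - 1;
				continue;
			}
			if (needcolon)
				printf(":");
			printf("%x", words[i]);	/* hex u16, no leading 0s */
			needcolon = 1;
		}
		printf("\n");
	}

	int main(void)
	{
		uint16_t addr[8] = { 0x2001, 0x0db8, 0, 0, 0, 0, 0, 0x0001 };

		print_ip6_compressed(addr);	/* prints "2001:db8::1" */
		return 0;
	}

Compiled and run, this prints "2001:db8::1", matching what the kernel's %pI6c
emits for 2001:0db8::1.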
index fd77d81ea748663284bc55db4db8fa5c20fc2a28..0294c57b1f5ed631534672e6888d280e5c7bc5f3 100644 (file)
@@ -38,7 +38,7 @@ OPTIONS
 --remove=::
         Remove specified file from the cache.
 -M::
---missing=:: 
+--missing=::
        List missing build ids in the cache for the specified file.
 -u::
 --update::
index cbb4f743d9211db4e01ed636aec2fe711978614d..3e2aec94f806b5425eca4f961a01e1be077b8901 100644 (file)
@@ -89,6 +89,19 @@ raw encoding of 0x1A8 can be used:
 You should refer to the processor specific documentation for getting these
 details. Some of them are referenced in the SEE ALSO section below.
 
+PARAMETERIZED EVENTS
+--------------------
+
+Some PMU events listed by 'perf-list' will be displayed with '?' in them. For
+example:
+
+  hv_gpci/dtbp_ptitc,phys_processor_idx=?/
+
+This means that when such an event is specified, a value for '?' must
+also be supplied. For example:
+
+  perf stat -C 0 -e 'hv_gpci/dtbp_ptitc,phys_processor_idx=0x2/' ...
+
 OPTIONS
 -------
 
index 1d78a4064da48218b90b15f8495c3626cf614b2c..43310d8661fedfbee4e24f8b803acdd91fe49d56 100644 (file)
@@ -12,11 +12,12 @@ SYNOPSIS
 
 DESCRIPTION
 -----------
-"perf mem -t <TYPE> record" runs a command and gathers memory operation data
+"perf mem record" runs a command and gathers memory operation data
 from it into perf.data. Perf record options are accepted and are passed through.
 
-"perf mem -t <TYPE> report" displays the result. It invokes perf report with the
-right set of options to display a memory access profile.
+"perf mem report" displays the result. It invokes perf report with the
+right set of options to display a memory access profile. By default, loads
+and stores are sampled. Use the -t option to limit to loads or stores.
 
 Note that on Intel systems the memory latency reported is the use-latency,
 not the pure load (or store) latency. Use latency includes any pipeline
@@ -29,7 +30,7 @@ OPTIONS
 
 -t::
 --type=::
-       Select the memory operation type: load or store (default: load)
+       Select the memory operation type: load or store (default: load,store)
 
 -D::
 --dump-raw-samples=::
index af9a54ece0245fe9c340a0172438e18883b536d3..31e977459c519d933473d7a46d4bc1b14263ef7e 100644 (file)
@@ -33,12 +33,27 @@ OPTIONS
         - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
          hexadecimal event descriptor.
 
-        - a hardware breakpoint event in the form of '\mem:addr[:access]'
+       - a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where
+         'param1', 'param2', etc. are defined as formats for the PMU in
+         /sys/bus/event_source/devices/<pmu>/format/*.
+
+       - a symbolically formed event like 'pmu/config=M,config1=N,config2=K/'
+         where M, N, K are numbers (in decimal, hex, octal format).
+         Acceptable values for each of 'config', 'config1' and 'config2'
+         parameters are defined by corresponding entries in
+         /sys/bus/event_source/devices/<pmu>/format/*
+
+        - a hardware breakpoint event in the form of '\mem:addr[/len][:access]'
           where addr is the address in memory you want to break on.
           Access is the memory access type (read, write, execute); it can
-          be passed as follows: '\mem:addr[:[r][w][x]]'.
+          be passed as follows: '\mem:addr[:[r][w][x]]'. len is the range,
+          in bytes from the specified addr, that the breakpoint will cover.
           If you want to profile read-write accesses in 0x1000, just set
           'mem:0x1000:rw'.
+          If you want to profile write accesses in [0x1000, 0x1008), just set
+          'mem:0x1000/8:w'.
 
 --filter=<filter>::
         Event filter.
index 21494806c0abf419bfe95eb8be1a0bb5d83330e4..a21eec05bc42f8c2e0702e62d9eae965c9855221 100644 (file)
@@ -125,46 +125,46 @@ OPTIONS
        is equivalent to:
 
                perf script -f trace:<fields> -f sw:<fields> -f hw:<fields>
-    
+
        i.e., the specified fields apply to all event types if the type string
        is not given.
-    
+
        The arguments are processed in the order received. A later usage can
        reset a prior request. e.g.:
-    
+
                -f trace: -f comm,tid,time,ip,sym
-    
+
        The first -f suppresses trace events (field list is ""), but then the
        second invocation sets the fields to comm,tid,time,ip,sym. In this case a
        warning is given to the user:
-    
+
                "Overriding previous field request for all events."
-    
+
        Alternatively, consider the order:
-    
+
                -f comm,tid,time,ip,sym -f trace:
-    
+
        The first -f sets the fields for all events and the second -f
        suppresses trace events. The user is given a warning message about
        the override, and the result of the above is that only S/W and H/W
        events are displayed with the given fields.
-    
+
        For the 'wildcard' option, if a user-selected field is invalid for an
        event type, a message is displayed telling the user that the option
        is ignored for that type. For example:
-    
+
                $ perf script -f comm,tid,trace
                'trace' not valid for hardware events. Ignoring.
                'trace' not valid for software events. Ignoring.
-    
+
        Alternatively, if the type is given and an invalid field is
        specified, it is an error. For example:
-    
+
         perf script -v -f sw:comm,tid,trace
         'trace' not valid for software events.
-    
+
        At this point usage is displayed, and perf-script exits.
-    
+
        Finally, a user may not set fields to none for all event types.
        i.e., -f "" is not allowed.
 
index 29ee857c09c6a88e2aaeeda589a953840783e3b8..04e150d83e7da6fefaa0af14151b26458f6a01a9 100644 (file)
@@ -25,10 +25,22 @@ OPTIONS
 
 -e::
 --event=::
-       Select the PMU event. Selection can be a symbolic event name
-       (use 'perf list' to list all events) or a raw PMU
-       event (eventsel+umask) in the form of rNNN where NNN is a
-        hexadecimal event descriptor.
+       Select the PMU event. Selection can be:
+
+       - a symbolic event name (use 'perf list' to list all events)
+
+       - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
+         hexadecimal event descriptor.
+
+       - a symbolically formed event like 'pmu/param1=0x3,param2/' where
+         param1 and param2 are defined as formats for the PMU in
+         /sys/bus/event_source/devices/<pmu>/format/*
+
+       - a symbolically formed event like 'pmu/config=M,config1=N,config2=K/'
+         where M, N, K are numbers (in decimal, hex, octal format).
+         Acceptable values for each of 'config', 'config1' and 'config2'
+         parameters are defined by corresponding entries in
+         /sys/bus/event_source/devices/<pmu>/format/*
 
 -i::
 --no-inherit::
index 71f2844cf97f89a0c9ffef38c94d61b756203895..7ed22ff1e1acd7cc86a97376076b135dc20642b7 100644 (file)
@@ -68,4 +68,17 @@ futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wak
                 val, opflags);
 }
 
+#ifndef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
+#include <pthread.h>
+static inline int pthread_attr_setaffinity_np(pthread_attr_t *attr,
+                                             size_t cpusetsize,
+                                             cpu_set_t *cpuset)
+{
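+       /* A no-op fallback for C libraries lacking this GNU extension;
+        * the self-assignments just silence unused-parameter warnings.
+        */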
+       attr = attr;
+       cpusetsize = cpusetsize;
+       cpuset = cpuset;
+       return 0;
+}
+#endif
+
 #endif /* _FUTEX_H */
index 77d5cae54c6ac3dbed34267fd542303b4253079c..50e6b66aea1ff9c68a1dd02074e98c0b83d481d8 100644 (file)
@@ -236,10 +236,10 @@ static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
                if (errno == ENOENT)
                        return false;
 
-               pr_warning("Problems with %s file, consider removing it from the cache\n", 
+               pr_warning("Problems with %s file, consider removing it from the cache\n",
                           filename);
        } else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) {
-               pr_warning("Problems with %s file, consider removing it from the cache\n", 
+               pr_warning("Problems with %s file, consider removing it from the cache\n",
                           filename);
        }
 
index 1fd96c13f1998a4048cbc5ab3eef1df35a5f80b8..74aada554b128ff1f8927d6e340bc0686cc5be2c 100644 (file)
@@ -390,6 +390,15 @@ static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
        }
 }
 
+static struct data__file *fmt_to_data_file(struct perf_hpp_fmt *fmt)
+{
+       struct diff_hpp_fmt *dfmt = container_of(fmt, struct diff_hpp_fmt, fmt);
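+       /* dfmt is the ->idx'th element of the data__file's fmt[] array;
+        * step back to fmt[0], then recover the enclosing data__file.
+        */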
+       void *ptr = dfmt - dfmt->idx;
+       struct data__file *d = container_of(ptr, struct data__file, fmt);
+
+       return d;
+}
+
 static struct hist_entry*
 get_pair_data(struct hist_entry *he, struct data__file *d)
 {
@@ -407,8 +416,7 @@ get_pair_data(struct hist_entry *he, struct data__file *d)
 static struct hist_entry*
 get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt)
 {
-       void *ptr = dfmt - dfmt->idx;
-       struct data__file *d = container_of(ptr, struct data__file, fmt);
+       struct data__file *d = fmt_to_data_file(&dfmt->fmt);
 
        return get_pair_data(he, d);
 }
@@ -430,7 +438,7 @@ static void hists__baseline_only(struct hists *hists)
                next = rb_next(&he->rb_node_in);
                if (!hist_entry__next_pair(he)) {
                        rb_erase(&he->rb_node_in, root);
-                       hist_entry__free(he);
+                       hist_entry__delete(he);
                }
        }
 }
@@ -448,26 +456,30 @@ static void hists__precompute(struct hists *hists)
        next = rb_first(root);
        while (next != NULL) {
                struct hist_entry *he, *pair;
+               struct data__file *d;
+               int i;
 
                he   = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&he->rb_node_in);
 
-               pair = get_pair_data(he, &data__files[sort_compute]);
-               if (!pair)
-                       continue;
+               data__for_each_file_new(i, d) {
+                       pair = get_pair_data(he, d);
+                       if (!pair)
+                               continue;
 
-               switch (compute) {
-               case COMPUTE_DELTA:
-                       compute_delta(he, pair);
-                       break;
-               case COMPUTE_RATIO:
-                       compute_ratio(he, pair);
-                       break;
-               case COMPUTE_WEIGHTED_DIFF:
-                       compute_wdiff(he, pair);
-                       break;
-               default:
-                       BUG_ON(1);
+                       switch (compute) {
+                       case COMPUTE_DELTA:
+                               compute_delta(he, pair);
+                               break;
+                       case COMPUTE_RATIO:
+                               compute_ratio(he, pair);
+                               break;
+                       case COMPUTE_WEIGHTED_DIFF:
+                               compute_wdiff(he, pair);
+                               break;
+                       default:
+                               BUG_ON(1);
+                       }
                }
        }
 }
@@ -517,7 +529,7 @@ __hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
 
 static int64_t
 hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
-                       int c)
+                       int c, int sort_idx)
 {
        bool pairs_left  = hist_entry__has_pairs(left);
        bool pairs_right = hist_entry__has_pairs(right);
@@ -529,8 +541,8 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
        if (!pairs_left || !pairs_right)
                return pairs_left ? -1 : 1;
 
-       p_left  = get_pair_data(left,  &data__files[sort_compute]);
-       p_right = get_pair_data(right, &data__files[sort_compute]);
+       p_left  = get_pair_data(left,  &data__files[sort_idx]);
+       p_right = get_pair_data(right, &data__files[sort_idx]);
 
        if (!p_left && !p_right)
                return 0;
@@ -546,90 +558,102 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
 }
 
 static int64_t
-hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
+hist_entry__cmp_compute_idx(struct hist_entry *left, struct hist_entry *right,
+                           int c, int sort_idx)
+{
+       struct hist_entry *p_right, *p_left;
+
+       p_left  = get_pair_data(left,  &data__files[sort_idx]);
+       p_right = get_pair_data(right, &data__files[sort_idx]);
+
+       if (!p_left && !p_right)
+               return 0;
+
+       if (!p_left || !p_right)
+               return p_left ? -1 : 1;
+
+       if (c != COMPUTE_DELTA) {
+               /*
+                * The delta can be computed without the baseline, but
+                * others are not.  Put those entries which have no
+                * values below.
+                */
+               if (left->dummy && right->dummy)
+                       return 0;
+
+               if (left->dummy || right->dummy)
+                       return left->dummy ? 1 : -1;
+       }
+
+       return __hist_entry__cmp_compute(p_left, p_right, c);
+}
+
+static int64_t
+hist_entry__cmp_nop(struct perf_hpp_fmt *fmt __maybe_unused,
+                   struct hist_entry *left __maybe_unused,
                    struct hist_entry *right __maybe_unused)
 {
        return 0;
 }
 
 static int64_t
-hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
+hist_entry__cmp_baseline(struct perf_hpp_fmt *fmt __maybe_unused,
+                        struct hist_entry *left, struct hist_entry *right)
 {
-       if (sort_compute)
-               return 0;
-
        if (left->stat.period == right->stat.period)
                return 0;
        return left->stat.period > right->stat.period ? 1 : -1;
 }
 
 static int64_t
-hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
+hist_entry__cmp_delta(struct perf_hpp_fmt *fmt,
+                     struct hist_entry *left, struct hist_entry *right)
 {
-       return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
+       struct data__file *d = fmt_to_data_file(fmt);
+
+       return hist_entry__cmp_compute(right, left, COMPUTE_DELTA, d->idx);
 }
 
 static int64_t
-hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
+hist_entry__cmp_ratio(struct perf_hpp_fmt *fmt,
+                     struct hist_entry *left, struct hist_entry *right)
 {
-       return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
+       struct data__file *d = fmt_to_data_file(fmt);
+
+       return hist_entry__cmp_compute(right, left, COMPUTE_RATIO, d->idx);
 }
 
 static int64_t
-hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
+hist_entry__cmp_wdiff(struct perf_hpp_fmt *fmt,
+                     struct hist_entry *left, struct hist_entry *right)
 {
-       return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
+       struct data__file *d = fmt_to_data_file(fmt);
+
+       return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF, d->idx);
 }
 
-static void insert_hist_entry_by_compute(struct rb_root *root,
-                                        struct hist_entry *he,
-                                        int c)
+static int64_t
+hist_entry__cmp_delta_idx(struct perf_hpp_fmt *fmt __maybe_unused,
+                         struct hist_entry *left, struct hist_entry *right)
 {
-       struct rb_node **p = &root->rb_node;
-       struct rb_node *parent = NULL;
-       struct hist_entry *iter;
-
-       while (*p != NULL) {
-               parent = *p;
-               iter = rb_entry(parent, struct hist_entry, rb_node);
-               if (hist_entry__cmp_compute(he, iter, c) < 0)
-                       p = &(*p)->rb_left;
-               else
-                       p = &(*p)->rb_right;
-       }
-
-       rb_link_node(&he->rb_node, parent, p);
-       rb_insert_color(&he->rb_node, root);
+       return hist_entry__cmp_compute_idx(right, left, COMPUTE_DELTA,
+                                          sort_compute);
 }
 
-static void hists__compute_resort(struct hists *hists)
+static int64_t
+hist_entry__cmp_ratio_idx(struct perf_hpp_fmt *fmt __maybe_unused,
+                         struct hist_entry *left, struct hist_entry *right)
 {
-       struct rb_root *root;
-       struct rb_node *next;
-
-       if (sort__need_collapse)
-               root = &hists->entries_collapsed;
-       else
-               root = hists->entries_in;
-
-       hists->entries = RB_ROOT;
-       next = rb_first(root);
-
-       hists__reset_stats(hists);
-       hists__reset_col_len(hists);
-
-       while (next != NULL) {
-               struct hist_entry *he;
-
-               he = rb_entry(next, struct hist_entry, rb_node_in);
-               next = rb_next(&he->rb_node_in);
-
-               insert_hist_entry_by_compute(&hists->entries, he, compute);
-               hists__inc_stats(hists, he);
+       return hist_entry__cmp_compute_idx(right, left, COMPUTE_RATIO,
+                                          sort_compute);
+}
 
-               if (!he->filtered)
-                       hists__calc_col_len(hists, he);
-       }
+static int64_t
+hist_entry__cmp_wdiff_idx(struct perf_hpp_fmt *fmt __maybe_unused,
+                         struct hist_entry *left, struct hist_entry *right)
+{
+       return hist_entry__cmp_compute_idx(right, left, COMPUTE_WEIGHTED_DIFF,
+                                          sort_compute);
 }
 
 static void hists__process(struct hists *hists)
@@ -637,12 +661,8 @@ static void hists__process(struct hists *hists)
        if (show_baseline_only)
                hists__baseline_only(hists);
 
-       if (sort_compute) {
-               hists__precompute(hists);
-               hists__compute_resort(hists);
-       } else {
-               hists__output_resort(hists, NULL);
-       }
+       hists__precompute(hists);
+       hists__output_resort(hists, NULL);
 
        hists__fprintf(hists, true, 0, 0, 0, stdout);
 }
@@ -841,7 +861,7 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
        char pfmt[20] = " ";
 
        if (!pair)
-               goto dummy_print;
+               goto no_print;
 
        switch (comparison_method) {
        case COMPUTE_DELTA:
@@ -850,8 +870,6 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
                else
                        diff = compute_delta(he, pair);
 
-               if (fabs(diff) < 0.01)
-                       goto dummy_print;
                scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1);
                return percent_color_snprintf(hpp->buf, hpp->size,
                                        pfmt, diff);
@@ -882,6 +900,9 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
                BUG_ON(1);
        }
 dummy_print:
+       return scnprintf(hpp->buf, hpp->size, "%*s",
+                       dfmt->header_width, "N/A");
+no_print:
        return scnprintf(hpp->buf, hpp->size, "%*s",
                        dfmt->header_width, pfmt);
 }
@@ -932,14 +953,15 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
                else
                        diff = compute_delta(he, pair);
 
-               if (fabs(diff) >= 0.01)
-                       scnprintf(buf, size, "%+4.2F%%", diff);
+               scnprintf(buf, size, "%+4.2F%%", diff);
                break;
 
        case PERF_HPP_DIFF__RATIO:
                /* No point for ratio number if we are dummy.. */
-               if (he->dummy)
+               if (he->dummy) {
+                       scnprintf(buf, size, "N/A");
                        break;
+               }
 
                if (pair->diff.computed)
                        ratio = pair->diff.period_ratio;
@@ -952,8 +974,10 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
 
        case PERF_HPP_DIFF__WEIGHTED_DIFF:
                /* No point for wdiff number if we are dummy.. */
-               if (he->dummy)
+               if (he->dummy) {
+                       scnprintf(buf, size, "N/A");
                        break;
+               }
 
                if (pair->diff.computed)
                        wdiff = pair->diff.wdiff;
@@ -1105,9 +1129,10 @@ static void data__hpp_register(struct data__file *d, int idx)
        perf_hpp__register_sort_field(fmt);
 }
 
-static void ui_init(void)
+static int ui_init(void)
 {
        struct data__file *d;
+       struct perf_hpp_fmt *fmt;
        int i;
 
        data__for_each_file(i, d) {
@@ -1137,6 +1162,46 @@ static void ui_init(void)
                        data__hpp_register(d, i ? PERF_HPP_DIFF__PERIOD :
                                                  PERF_HPP_DIFF__PERIOD_BASELINE);
        }
+
+       if (!sort_compute)
+               return 0;
+
+       /*
+        * Prepend an fmt to sort on columns at 'sort_compute' first.
+        * This fmt is added only to the sort list but not to the
+        * output fields list.
+        *
+        * Note that this column (data) can be compared twice - once
+        * for this 'sort_compute' fmt and again for the normal
+        * diff_hpp_fmt.  But it shouldn't be a problem, as most entries
+        * will be sorted out on the first try or by the baseline, and
+        * comparing is not a costly operation.
+        */
+       fmt = zalloc(sizeof(*fmt));
+       if (fmt == NULL) {
+               pr_err("Memory allocation failed\n");
+               return -1;
+       }
+
+       fmt->cmp      = hist_entry__cmp_nop;
+       fmt->collapse = hist_entry__cmp_nop;
+
+       switch (compute) {
+       case COMPUTE_DELTA:
+               fmt->sort = hist_entry__cmp_delta_idx;
+               break;
+       case COMPUTE_RATIO:
+               fmt->sort = hist_entry__cmp_ratio_idx;
+               break;
+       case COMPUTE_WEIGHTED_DIFF:
+               fmt->sort = hist_entry__cmp_wdiff_idx;
+               break;
+       default:
+               BUG_ON(1);
+       }
+
+       list_add(&fmt->sort_list, &perf_hpp__sort_list);
+       return 0;
 }
 
 static int data_init(int argc, const char **argv)
@@ -1202,7 +1267,8 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
        if (data_init(argc, argv) < 0)
                return -1;
 
-       ui_init();
+       if (ui_init() < 0)
+               return -1;
 
        sort__mode = SORT_MODE__DIFF;
 
index 84df2deed988ab4e0427b5d34aa7e435ce96a893..a13641e066f5608363c7eb1575f7e3414ae6863a 100644 (file)
@@ -343,6 +343,7 @@ static int __cmd_inject(struct perf_inject *inject)
        int ret = -EINVAL;
        struct perf_session *session = inject->session;
        struct perf_data_file *file_out = &inject->output;
+       int fd = perf_data_file__fd(file_out);
 
        signal(SIGINT, sig_handler);
 
@@ -376,7 +377,7 @@ static int __cmd_inject(struct perf_inject *inject)
        }
 
        if (!file_out->is_pipe)
-               lseek(file_out->fd, session->header.data_offset, SEEK_SET);
+               lseek(fd, session->header.data_offset, SEEK_SET);
 
        ret = perf_session__process_events(session, &inject->tool);
 
@@ -385,7 +386,7 @@ static int __cmd_inject(struct perf_inject *inject)
                        perf_header__set_feat(&session->header,
                                              HEADER_BUILD_ID);
                session->header.data_size = inject->bytes_written;
-               perf_session__write_header(session, session->evlist, file_out->fd, true);
+               perf_session__write_header(session, session->evlist, fd, true);
        }
 
        return ret;
index 24db6ffe2957450d17a6a6465441317b81f7f49e..9b5663950a4dd1b11573f9c10da92ac229bbb2f4 100644 (file)
@@ -7,44 +7,47 @@
 #include "util/session.h"
 #include "util/data.h"
 
-#define MEM_OPERATION_LOAD     "load"
-#define MEM_OPERATION_STORE    "store"
-
-static const char      *mem_operation          = MEM_OPERATION_LOAD;
+#define MEM_OPERATION_LOAD     0x1
+#define MEM_OPERATION_STORE    0x2
 
 struct perf_mem {
        struct perf_tool        tool;
        char const              *input_name;
        bool                    hide_unresolved;
        bool                    dump_raw;
+       int                     operation;
        const char              *cpu_list;
        DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 };
 
-static int __cmd_record(int argc, const char **argv)
+static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
 {
        int rec_argc, i = 0, j;
        const char **rec_argv;
-       char event[64];
        int ret;
 
-       rec_argc = argc + 4;
+       rec_argc = argc + 7; /* max number of arguments */
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
        if (!rec_argv)
                return -1;
 
-       rec_argv[i++] = strdup("record");
-       if (!strcmp(mem_operation, MEM_OPERATION_LOAD))
-               rec_argv[i++] = strdup("-W");
-       rec_argv[i++] = strdup("-d");
-       rec_argv[i++] = strdup("-e");
+       rec_argv[i++] = "record";
 
-       if (strcmp(mem_operation, MEM_OPERATION_LOAD))
-               sprintf(event, "cpu/mem-stores/pp");
-       else
-               sprintf(event, "cpu/mem-loads/pp");
+       if (mem->operation & MEM_OPERATION_LOAD)
+               rec_argv[i++] = "-W";
+
+       rec_argv[i++] = "-d";
+
+       if (mem->operation & MEM_OPERATION_LOAD) {
+               rec_argv[i++] = "-e";
+               rec_argv[i++] = "cpu/mem-loads/pp";
+       }
+
+       if (mem->operation & MEM_OPERATION_STORE) {
+               rec_argv[i++] = "-e";
+               rec_argv[i++] = "cpu/mem-stores/pp";
+       }
 
-       rec_argv[i++] = strdup(event);
        for (j = 1; j < argc; j++, i++)
                rec_argv[i] = argv[j];
 
@@ -162,17 +165,17 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem)
        if (!rep_argv)
                return -1;
 
-       rep_argv[i++] = strdup("report");
-       rep_argv[i++] = strdup("--mem-mode");
-       rep_argv[i++] = strdup("-n"); /* display number of samples */
+       rep_argv[i++] = "report";
+       rep_argv[i++] = "--mem-mode";
+       rep_argv[i++] = "-n"; /* display number of samples */
 
        /*
         * there is no weight (cost) associated with stores, so don't print
         * the column
         */
-       if (strcmp(mem_operation, MEM_OPERATION_LOAD))
-               rep_argv[i++] = strdup("--sort=mem,sym,dso,symbol_daddr,"
-                                      "dso_daddr,tlb,locked");
+       if (!(mem->operation & MEM_OPERATION_LOAD))
+               rep_argv[i++] = "--sort=mem,sym,dso,symbol_daddr,"
+                               "dso_daddr,tlb,locked";
 
        for (j = 1; j < argc; j++, i++)
                rep_argv[i] = argv[j];
@@ -182,6 +185,75 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem)
        return ret;
 }
 
+struct mem_mode {
+       const char *name;
+       int mode;
+};
+
+#define MEM_OPT(n, m) \
+       { .name = n, .mode = (m) }
+
+#define MEM_END { .name = NULL }
+
+static const struct mem_mode mem_modes[] = {
+       MEM_OPT("load", MEM_OPERATION_LOAD),
+       MEM_OPT("store", MEM_OPERATION_STORE),
+       MEM_END
+};
+
+static int
+parse_mem_ops(const struct option *opt, const char *str, int unset)
+{
+       int *mode = (int *)opt->value;
+       const struct mem_mode *m;
+       char *s, *os = NULL, *p;
+       int ret = -1;
+
+       if (unset)
+               return 0;
+
+       /* str may be NULL in case no arg is passed to -t */
+       if (str) {
+               /* because str is read-only */
+               s = os = strdup(str);
+               if (!s)
+                       return -1;
+
+               /* reset mode */
+               *mode = 0;
+
+               for (;;) {
+                       p = strchr(s, ',');
+                       if (p)
+                               *p = '\0';
+
+                       for (m = mem_modes; m->name; m++) {
+                               if (!strcasecmp(s, m->name))
+                                       break;
+                       }
+                       if (!m->name) {
+                               fprintf(stderr, "unknown sampling op %s,"
+                                           " check man page\n", s);
+                               goto error;
+                       }
+
+                       *mode |= m->mode;
+
+                       if (!p)
+                               break;
+
+                       s = p + 1;
+               }
+       }
+       ret = 0;
+
+       if (*mode == 0)
+               *mode = MEM_OPERATION_LOAD;
+error:
+       free(os);
+       return ret;
+}
+
 int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
 {
        struct stat st;
@@ -197,10 +269,15 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
                        .ordered_events = true,
                },
                .input_name              = "perf.data",
+               /*
+                * default to both load and store sampling
+                */
+               .operation               = MEM_OPERATION_LOAD | MEM_OPERATION_STORE,
        };
        const struct option mem_options[] = {
-       OPT_STRING('t', "type", &mem_operation,
-                  "type", "memory operations(load/store)"),
+       OPT_CALLBACK('t', "type", &mem.operation,
+                  "type", "memory operations(load,store) Default load,store",
+                   parse_mem_ops),
        OPT_BOOLEAN('D', "dump-raw-samples", &mem.dump_raw,
                    "dump raw samples in ASCII"),
        OPT_BOOLEAN('U', "hide-unresolved", &mem.hide_unresolved,
@@ -225,7 +302,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
        argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands,
                                        mem_usage, PARSE_OPT_STOP_AT_NON_OPTION);
 
-       if (!argc || !(strncmp(argv[0], "rec", 3) || mem_operation))
+       if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation))
                usage_with_options(mem_usage, mem_options);
 
        if (!mem.input_name || !strlen(mem.input_name)) {
@@ -236,7 +313,7 @@ int cmd_mem(int argc, const char **argv, const char *prefix __maybe_unused)
        }
 
        if (!strncmp(argv[0], "rec", 3))
-               return __cmd_record(argc, argv);
+               return __cmd_record(argc, argv, &mem);
        else if (!strncmp(argv[0], "rep", 3))
                return report_events(argc, argv, &mem);
        else
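
With the default operation mask (load and store), the argv assembled in
__cmd_record() above is equivalent to running
'perf record -W -d -e cpu/mem-loads/pp -e cpu/mem-stores/pp <command>'.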
index 8648c6d3003ddb3d64c54142ece0f56b1e30e813..404ab34340523f934abc76fcfc6053907708e4fc 100644 (file)
@@ -190,16 +190,30 @@ out:
        return rc;
 }
 
+static int process_sample_event(struct perf_tool *tool,
+                               union perf_event *event,
+                               struct perf_sample *sample,
+                               struct perf_evsel *evsel,
+                               struct machine *machine)
+{
+       struct record *rec = container_of(tool, struct record, tool);
+
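+       /* count the samples seen while re-reading events for build-id
+        * processing; reported in the final summary line */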
+       rec->samples++;
+
+       return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
+}
+
 static int process_buildids(struct record *rec)
 {
        struct perf_data_file *file  = &rec->file;
        struct perf_session *session = rec->session;
-       u64 start = session->header.data_offset;
 
-       u64 size = lseek(file->fd, 0, SEEK_CUR);
+       u64 size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
        if (size == 0)
                return 0;
 
+       file->size = size;
+
        /*
         * During this process, it'll load kernel map and replace the
         * dso->long_name to a real pathname it found.  In this case
@@ -211,9 +225,7 @@ static int process_buildids(struct record *rec)
         */
        symbol_conf.ignore_vmlinux_buildid = true;
 
-       return __perf_session__process_events(session, start,
-                                             size - start,
-                                             size, &build_id__mark_dso_hit_ops);
+       return perf_session__process_events(session, &rec->tool);
 }
 
 static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
@@ -322,6 +334,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        struct perf_data_file *file = &rec->file;
        struct perf_session *session;
        bool disabled = false, draining = false;
+       int fd;
 
        rec->progname = argv[0];
 
@@ -336,6 +349,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                return -1;
        }
 
+       fd = perf_data_file__fd(file);
        rec->session = session;
 
        record__init_features(rec);
@@ -360,12 +374,11 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
 
        if (file->is_pipe) {
-               err = perf_header__write_pipe(file->fd);
+               err = perf_header__write_pipe(fd);
                if (err < 0)
                        goto out_child;
        } else {
-               err = perf_session__write_header(session, rec->evlist,
-                                                file->fd, false);
+               err = perf_session__write_header(session, rec->evlist, fd, false);
                if (err < 0)
                        goto out_child;
        }
@@ -397,7 +410,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                         * return this more properly and also
                         * propagate errors that now are calling die()
                         */
-                       err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
+                       err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
                                                                  process_synthesized_event);
                        if (err <= 0) {
                                pr_err("Couldn't record tracing data.\n");
@@ -504,19 +517,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                goto out_child;
        }
 
-       if (!quiet) {
+       if (!quiet)
                fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
 
-               /*
-                * Approximate RIP event size: 24 bytes.
-                */
-               fprintf(stderr,
-                       "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
-                       (double)rec->bytes_written / 1024.0 / 1024.0,
-                       file->path,
-                       rec->bytes_written / 24);
-       }
-
 out_child:
        if (forks) {
                int exit_status;
@@ -535,13 +538,29 @@ out_child:
        } else
                status = err;
 
+       /* this will be recalculated during process_buildids() */
+       rec->samples = 0;
+
        if (!err && !file->is_pipe) {
                rec->session->header.data_size += rec->bytes_written;
 
                if (!rec->no_buildid)
                        process_buildids(rec);
-               perf_session__write_header(rec->session, rec->evlist,
-                                          file->fd, true);
+               perf_session__write_header(rec->session, rec->evlist, fd, true);
+       }
+
+       if (!err && !quiet) {
+               char samples[128];
+
+               if (rec->samples)
+                       scnprintf(samples, sizeof(samples),
+                                 " (%" PRIu64 " samples)", rec->samples);
+               else
+                       samples[0] = '\0';
+
+               fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s ]\n",
+                       perf_data_file__size(file) / 1024.0 / 1024.0,
+                       file->path, samples);
        }
 
 out_delete_session:
@@ -720,6 +739,13 @@ static struct record record = {
                        .default_per_cpu = true,
                },
        },
+       .tool = {
+               .sample         = process_sample_event,
+               .fork           = perf_event__process_fork,
+               .comm           = perf_event__process_comm,
+               .mmap           = perf_event__process_mmap,
+               .mmap2          = perf_event__process_mmap2,
+       },
 };
 
 #define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "
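
Net effect of the record changes: the build-id pass now runs through record's
own tool with the sample handler above, so the closing
'[ perf record: Captured and wrote ... ]' line can report the exact sample
count and on-disk size instead of the old bytes/24 estimate.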
index 072ae8ad67fc1d258354b621a3ae7b2833deba0c..2f91094e228b6010527c84b66e1172dd616819be 100644 (file)
@@ -86,17 +86,6 @@ static int report__config(const char *var, const char *value, void *cb)
        return perf_default_config(var, value, cb);
 }
 
-static void report__inc_stats(struct report *rep, struct hist_entry *he)
-{
-       /*
-        * The @he is either of a newly created one or an existing one
-        * merging current sample.  We only want to count a new one so
-        * checking ->nr_events being 1.
-        */
-       if (he->stat.nr_events == 1)
-               rep->nr_entries++;
-}
-
 static int hist_iter__report_callback(struct hist_entry_iter *iter,
                                      struct addr_location *al, bool single,
                                      void *arg)
@@ -108,8 +97,6 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
        struct mem_info *mi;
        struct branch_info *bi;
 
-       report__inc_stats(rep, he);
-
        if (!ui__has_annotation())
                return 0;
 
@@ -499,6 +486,9 @@ static int __cmd_report(struct report *rep)
 
        report__warn_kptr_restrict(rep);
 
+       evlist__for_each(session->evlist, pos)
+               rep->nr_entries += evsel__hists(pos)->nr_entries;
+
        if (use_browser == 0) {
                if (verbose > 3)
                        perf_session__fprintf(session, stdout);
index 89108637638140d44365274b1cd017d9cbdd09a3..e598e4e98170fd30a708eed4c8a4629df2b43758 100644 (file)
@@ -1730,7 +1730,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
                    "detailed run - start a lot of events"),
        OPT_BOOLEAN('S', "sync", &sync_run,
                    "call sync() before starting a run"),
-       OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL, 
+       OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
                           "print large numbers with thousands\' separators",
                           stat__set_big_num),
        OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
index 616f0fcb47010abf68ac7e4a9e256fa559af4499..c4c7eac69de46405a6aec8c35aa6e9508c4d249a 100644 (file)
@@ -165,7 +165,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
                    err ? "[unknown]" : uts.release, perf_version_string);
        if (use_browser <= 0)
                sleep(5);
-       
+
        map->erange_warned = true;
 }
 
index badfabc6a01f6f1a90d1988abc5ac6178021bb08..7e935f1083ec64b8ea23b0d870a1241c759b724c 100644 (file)
@@ -929,66 +929,66 @@ static struct syscall_fmt {
          .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
        { .name     = "clock_gettime",  .errmsg = true, STRARRAY(0, clk_id, clockid), },
        { .name     = "close",      .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
        { .name     = "connect",    .errmsg = true, },
        { .name     = "dup",        .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "dup2",       .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "dup3",       .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "epoll_ctl",  .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
        { .name     = "eventfd2",   .errmsg = true,
          .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
        { .name     = "faccessat",  .errmsg = true,
          .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
        { .name     = "fadvise64",  .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "fallocate",  .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "fchdir",     .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "fchmod",     .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "fchmodat",   .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
        { .name     = "fchown",     .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "fchownat",   .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
        { .name     = "fcntl",      .errmsg = true,
          .arg_scnprintf = { [0] = SCA_FD, /* fd */
                             [1] = SCA_STRARRAY, /* cmd */ },
          .arg_parm      = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
        { .name     = "fdatasync",  .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "flock",      .errmsg = true,
          .arg_scnprintf = { [0] = SCA_FD, /* fd */
                             [1] = SCA_FLOCK, /* cmd */ }, },
        { .name     = "fsetxattr",  .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "fstat",      .errmsg = true, .alias = "newfstat",
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "fstatat",    .errmsg = true, .alias = "newfstatat",
-         .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
        { .name     = "fstatfs",    .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "fsync",    .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "ftruncate", .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "futex",      .errmsg = true,
          .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
        { .name     = "futimesat", .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
        { .name     = "getdents",   .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "getdents64", .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "getitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
        { .name     = "getrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
        { .name     = "ioctl",      .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */
 #if defined(__i386__) || defined(__x86_64__)
 /*
  * FIXME: Make this available to all arches.
@@ -1002,7 +1002,7 @@ static struct syscall_fmt {
        { .name     = "kill",       .errmsg = true,
          .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
        { .name     = "linkat",     .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
        { .name     = "lseek",      .errmsg = true,
          .arg_scnprintf = { [0] = SCA_FD, /* fd */
                             [2] = SCA_STRARRAY, /* whence */ },
@@ -1012,9 +1012,9 @@ static struct syscall_fmt {
          .arg_scnprintf = { [0] = SCA_HEX,      /* start */
                             [2] = SCA_MADV_BHV, /* behavior */ }, },
        { .name     = "mkdirat",    .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
        { .name     = "mknodat",    .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
        { .name     = "mlock",      .errmsg = true,
          .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
        { .name     = "mlockall",   .errmsg = true,
@@ -1036,9 +1036,9 @@ static struct syscall_fmt {
        { .name     = "munmap",     .errmsg = true,
          .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
        { .name     = "name_to_handle_at", .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
        { .name     = "newfstatat", .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
        { .name     = "open",       .errmsg = true,
          .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
        { .name     = "open_by_handle_at", .errmsg = true,
@@ -1052,20 +1052,20 @@ static struct syscall_fmt {
        { .name     = "poll",       .errmsg = true, .timeout = true, },
        { .name     = "ppoll",      .errmsg = true, .timeout = true, },
        { .name     = "pread",      .errmsg = true, .alias = "pread64",
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "preadv",     .errmsg = true, .alias = "pread",
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "prlimit64",  .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
        { .name     = "pwrite",     .errmsg = true, .alias = "pwrite64",
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "pwritev",    .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "read",       .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "readlinkat", .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
        { .name     = "readv",      .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "recvfrom",   .errmsg = true,
          .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
        { .name     = "recvmmsg",   .errmsg = true,
@@ -1073,7 +1073,7 @@ static struct syscall_fmt {
        { .name     = "recvmsg",    .errmsg = true,
          .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
        { .name     = "renameat",   .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
        { .name     = "rt_sigaction", .errmsg = true,
          .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
        { .name     = "rt_sigprocmask",  .errmsg = true, STRARRAY(0, how, sighow), },
@@ -1091,7 +1091,7 @@ static struct syscall_fmt {
        { .name     = "setitimer",  .errmsg = true, STRARRAY(0, which, itimers), },
        { .name     = "setrlimit",  .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
        { .name     = "shutdown",   .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "socket",     .errmsg = true,
          .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
                             [1] = SCA_SK_TYPE, /* type */ },
@@ -1102,7 +1102,7 @@ static struct syscall_fmt {
          .arg_parm      = { [0] = &strarray__socket_families, /* family */ }, },
        { .name     = "stat",       .errmsg = true, .alias = "newstat", },
        { .name     = "symlinkat",  .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
        { .name     = "tgkill",     .errmsg = true,
          .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
        { .name     = "tkill",      .errmsg = true,
@@ -1113,9 +1113,9 @@ static struct syscall_fmt {
        { .name     = "utimensat",  .errmsg = true,
          .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
        { .name     = "write",      .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
        { .name     = "writev",     .errmsg = true,
-         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 
+         .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
 };
 
 static int syscall_fmt__cmp(const void *name, const void *fmtp)
@@ -1191,7 +1191,7 @@ static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
 
        if (thread__priv(thread) == NULL)
                thread__set_priv(thread, thread_trace__new());
-               
+
        if (thread__priv(thread) == NULL)
                goto fail;
 
@@ -2056,23 +2056,24 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
        if (trace->trace_syscalls &&
            perf_evlist__add_syscall_newtp(evlist, trace__sys_enter,
                                           trace__sys_exit))
-               goto out_error_tp;
+               goto out_error_raw_syscalls;
 
        if (trace->trace_syscalls)
                perf_evlist__add_vfs_getname(evlist);
 
        if ((trace->trace_pgfaults & TRACE_PFMAJ) &&
-           perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ))
-               goto out_error_tp;
+           perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ)) {
+               goto out_error_mem;
+       }
 
        if ((trace->trace_pgfaults & TRACE_PFMIN) &&
            perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN))
-               goto out_error_tp;
+               goto out_error_mem;
 
        if (trace->sched &&
-               perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
-                               trace__sched_stat_runtime))
-               goto out_error_tp;
+           perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
+                                  trace__sched_stat_runtime))
+               goto out_error_sched_stat_runtime;
 
        err = perf_evlist__create_maps(evlist, &trace->opts.target);
        if (err < 0) {
@@ -2202,8 +2203,12 @@ out:
 {
        char errbuf[BUFSIZ];
 
-out_error_tp:
-       perf_evlist__strerror_tp(evlist, errno, errbuf, sizeof(errbuf));
+out_error_sched_stat_runtime:
+       debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "sched", "sched_stat_runtime");
+       goto out_error;
+
+out_error_raw_syscalls:
+       debugfs__strerror_open_tp(errno, errbuf, sizeof(errbuf), "raw_syscalls", "sys_(enter|exit)");
        goto out_error;
 
 out_error_mmap:
@@ -2217,6 +2222,9 @@ out_error:
        fprintf(trace->output, "%s\n", errbuf);
        goto out_delete_evlist;
 }
+out_error_mem:
+       fprintf(trace->output, "Not enough memory to run!\n");
+       goto out_delete_evlist;
 }
 
 static int trace__replay(struct trace *trace)
index 648e31ff4021c2e11520ab8b00e6f89213d324a9..cc224080b52560d5b37ed2035b436215a13ea095 100644 (file)
@@ -198,6 +198,7 @@ CORE_FEATURE_TESTS =                        \
        libpython-version               \
        libslang                        \
        libunwind                       \
+       pthread-attr-setaffinity-np     \
        stackprotector-all              \
        timerfd                         \
        libdw-dwarf-unwind              \
@@ -226,6 +227,7 @@ VF_FEATURE_TESTS =                  \
        libelf-getphdrnum               \
        libelf-mmap                     \
        libpython-version               \
+       pthread-attr-setaffinity-np     \
        stackprotector-all              \
        timerfd                         \
        libunwind-debug-frame           \
@@ -301,6 +303,10 @@ ifeq ($(feature-sync-compare-and-swap), 1)
   CFLAGS += -DHAVE_SYNC_COMPARE_AND_SWAP_SUPPORT
 endif
 
+ifeq ($(feature-pthread-attr-setaffinity-np), 1)
+  CFLAGS += -DHAVE_PTHREAD_ATTR_SETAFFINITY_NP
+endif
+
 ifndef NO_BIONIC
   $(call feature_check,bionic)
   ifeq ($(feature-bionic), 1)
index 53f19b5dbc37b3f991eb3afe9a8b09be814f4297..42ac05aaf8ac1a8bf004c1e664aa10a1853111bc 100644 (file)
@@ -25,6 +25,7 @@ FILES=                                        \
        test-libslang.bin               \
        test-libunwind.bin              \
        test-libunwind-debug-frame.bin  \
+       test-pthread-attr-setaffinity-np.bin    \
        test-stackprotector-all.bin     \
        test-timerfd.bin                \
        test-libdw-dwarf-unwind.bin     \
@@ -47,6 +48,9 @@ test-all.bin:
 test-hello.bin:
        $(BUILD)
 
+test-pthread-attr-setaffinity-np.bin:
+       $(BUILD) -Werror -lpthread
+
 test-stackprotector-all.bin:
        $(BUILD) -Werror -fstack-protector-all
 
index 652e0098eba6ef7a050ebed5530c6ac757712024..6d4d093239222a07d51ca81f4ca77d2d76f87bb1 100644 (file)
 # include "test-zlib.c"
 #undef main
 
+#define main main_test_pthread_attr_setaffinity_np
+# include "test-pthread_attr_setaffinity_np.c"
+#undef main
+
 int main(int argc, char *argv[])
 {
        main_test_libpython();
@@ -121,6 +125,7 @@ int main(int argc, char *argv[])
        main_test_libdw_dwarf_unwind();
        main_test_sync_compare_and_swap(argc, argv);
        main_test_zlib();
+       main_test_pthread_attr_setaffinity_np();
 
        return 0;
 }
diff --git a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c
new file mode 100644 (file)
index 0000000..0a0d3ec
--- /dev/null
@@ -0,0 +1,14 @@
+#include <stdint.h>
+#include <pthread.h>
+
+int main(void)
+{
+       int ret = 0;
+       pthread_attr_t thread_attr;
+
+       pthread_attr_init(&thread_attr);
+       /* don't care about exact args, just the API itself in libpthread */
+       ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL);
+
+       return ret;
+}
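
The probe above only checks "does this compile and link against libpthread with -Werror"; when it succeeds, the Makefile changes earlier add -DHAVE_PTHREAD_ATTR_SETAFFINITY_NP to CFLAGS. A rough manual equivalent, with the compiler invocation inferred from the $(BUILD) rule and the echoed line purely illustrative:

    cc -Werror test-pthread-attr-setaffinity-np.c -lpthread \
        -o test-pthread-attr-setaffinity-np.bin \
        && echo "feature-pthread-attr-setaffinity-np: 1"
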
index 790ceba6ad3f4a4102a1affa81a637ef774d7d43..28431d1bbcf5768de83f93165f8bf9599ad0f087 100644 (file)
@@ -5,7 +5,10 @@
  *     ANY CHANGES MADE HERE WILL BE LOST! 
  *
  */
-
+#include <stdbool.h>
+#ifndef HAS_BOOL
+# define HAS_BOOL 1
+#endif
 #line 1 "Context.xs"
 /*
  * Context.xs.  XS interfaces for perf script.
index c9b4b6269b514dc6e1e88608fbb9f177f2cc561e..1091bd47adfd7a99a6d50146d3c0ba7d3deadce7 100644 (file)
@@ -104,7 +104,6 @@ class Event(dict):
                 continue
             if not self.compare_data(self[t], other[t]):
                log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
-                
 
 # Test file description needs to have following sections:
 # [config]
index 8d110dec393ee1a42f78cb1b440ea9d19f825e1c..18619966454c572a0f3c0a1b2330818fc6e1ffe4 100644 (file)
@@ -140,7 +140,7 @@ static void del_hist_entries(struct hists *hists)
                he = rb_entry(node, struct hist_entry, rb_node);
                rb_erase(node, root_out);
                rb_erase(&he->rb_node_in, root_in);
-               hist_entry__free(he);
+               hist_entry__delete(he);
        }
 }
 
index f5547610da0200b70c0bdc1a006adaee925eba73..b52c9faea22450ed4092d67acdb1eb15ce15c6a8 100644 (file)
@@ -106,7 +106,7 @@ static void del_hist_entries(struct hists *hists)
                he = rb_entry(node, struct hist_entry, rb_node);
                rb_erase(node, root_out);
                rb_erase(&he->rb_node_in, root_in);
-               hist_entry__free(he);
+               hist_entry__delete(he);
        }
 }
 
index 69a71ff84e01813a4369bb94bc106c0878f4c320..75709d2b17b477b9320c0d9f69e1fc31e0e5579e 100644 (file)
@@ -222,7 +222,6 @@ tarpkg:
        @cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \
        echo "- $@: $$cmd" && echo $$cmd > $@ && \
        ( eval $$cmd ) >> $@ 2>&1
-       
 
 all: $(run) $(run_O) tarpkg
        @echo OK
index 7f2f51f93619c8bef0b491e2bfcc53ea28e6ae4d..1cdab0ce00e2e4b291750a4dbaeef0229a599733 100644 (file)
@@ -1145,6 +1145,49 @@ static int test__pinned_group(struct perf_evlist *evlist)
        return 0;
 }
 
+static int test__checkevent_breakpoint_len(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+       TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
+                                        evsel->attr.bp_type);
+       TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_1 ==
+                                       evsel->attr.bp_len);
+
+       return 0;
+}
+
+static int test__checkevent_breakpoint_len_w(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+       TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+       TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
+       TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
+       TEST_ASSERT_VAL("wrong bp_type", HW_BREAKPOINT_W ==
+                                        evsel->attr.bp_type);
+       TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_2 ==
+                                       evsel->attr.bp_len);
+
+       return 0;
+}
+
+static int
+test__checkevent_breakpoint_len_rw_modifier(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+       TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+       TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+       TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+       TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+
+       return test__checkevent_breakpoint_rw(evlist);
+}
+
 static int count_tracepoints(void)
 {
        char events_path[PATH_MAX];
@@ -1420,6 +1463,21 @@ static struct evlist_test test__events[] = {
                .check = test__pinned_group,
                .id    = 41,
        },
+       {
+               .name  = "mem:0/1",
+               .check = test__checkevent_breakpoint_len,
+               .id    = 42,
+       },
+       {
+               .name  = "mem:0/2:w",
+               .check = test__checkevent_breakpoint_len_w,
+               .id    = 43,
+       },
+       {
+               .name  = "mem:0/4:rw:u",
+               .check = test__checkevent_breakpoint_len_rw_modifier,
+               .id    = 44
+       },
 #if defined(__s390x__)
        {
                .name  = "kvm-s390:kvm_s390_create_vm",
@@ -1471,7 +1529,7 @@ static int test_event(struct evlist_test *e)
        } else {
                ret = e->check(evlist);
        }
-       
+
        perf_evlist__delete(evlist);
 
        return ret;
index 4908c648a59783fa0dee6f78055a1ec454f50fed..30c02181e78b228449fa5263f7b0b4c9e33a54d8 100644 (file)
@@ -110,7 +110,7 @@ static bool samples_same(const struct perf_sample *s1,
 
        if (type & PERF_SAMPLE_STACK_USER) {
                COMP(user_stack.size);
-               if (memcmp(s1->user_stack.data, s1->user_stack.data,
+               if (memcmp(s1->user_stack.data, s2->user_stack.data,
                           s1->user_stack.size)) {
                        pr_debug("Samples differ at 'user_stack'\n");
                        return false;
index 1e0a2fd80115aba0654008d87a26fb3bf53f6177..9d32e3c0cfeedfe4cc60f62babad3fd3e451bf3c 100644 (file)
@@ -517,7 +517,7 @@ static bool annotate_browser__jump(struct annotate_browser *browser)
        }
 
        annotate_browser__set_top(browser, dl, idx);
-       
+
        return true;
 }
 
@@ -867,7 +867,6 @@ static void annotate_browser__mark_jump_targets(struct annotate_browser *browser
 
                ++browser->nr_jumps;
        }
-               
 }
 
 static inline int width_jumps(int n)
index 482adae3cc44a50889bb2278b323a3b6871197c6..25d608394d746fcb7435517915bed787f20aa674 100644 (file)
@@ -285,7 +285,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,                             \
 }
 
 #define __HPP_SORT_FN(_type, _field)                                           \
-static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b)   \
+static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,      \
+                                struct hist_entry *a, struct hist_entry *b)    \
 {                                                                              \
        return __hpp__sort(a, b, he_get_##_field);                              \
 }
@@ -312,7 +313,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,                             \
 }
 
 #define __HPP_SORT_ACC_FN(_type, _field)                                       \
-static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b)   \
+static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,      \
+                                struct hist_entry *a, struct hist_entry *b)    \
 {                                                                              \
        return __hpp__sort_acc(a, b, he_get_acc_##_field);                      \
 }
@@ -331,7 +333,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,                             \
 }
 
 #define __HPP_SORT_RAW_FN(_type, _field)                                       \
-static int64_t hpp__sort_##_type(struct hist_entry *a, struct hist_entry *b)   \
+static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,      \
+                                struct hist_entry *a, struct hist_entry *b)    \
 {                                                                              \
        return __hpp__sort(a, b, he_get_raw_##_field);                          \
 }
@@ -361,7 +364,8 @@ HPP_PERCENT_ACC_FNS(overhead_acc, period)
 HPP_RAW_FNS(samples, nr_events)
 HPP_RAW_FNS(period, period)
 
-static int64_t hpp__nop_cmp(struct hist_entry *a __maybe_unused,
+static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+                           struct hist_entry *a __maybe_unused,
                            struct hist_entry *b __maybe_unused)
 {
        return 0;
index f34f89eb607cba5653e86801eff01c453e458903..717d39d3052b8080ff90af9e6cbb715dbd15f203 100644 (file)
@@ -4,12 +4,12 @@
 #include <linux/types.h>
 
 void ui_progress__finish(void);
+
 struct ui_progress {
        const char *title;
        u64 curr, next, step, total;
 };
+
 void ui_progress__init(struct ui_progress *p, u64 total, const char *title);
 void ui_progress__update(struct ui_progress *p, u64 adv);
 
index 1c8b9afd5d6e723127ade5700e52a07eeee2cf8d..88f5143a59811521dad080453fc69aab9a55e01f 100644 (file)
@@ -9,6 +9,7 @@
 #include "../libslang.h"
 
 char ui_helpline__last_msg[1024];
+bool tui_helpline__set;
 
 static void tui_helpline__pop(void)
 {
@@ -35,6 +36,8 @@ static int tui_helpline__show(const char *format, va_list ap)
                        sizeof(ui_helpline__last_msg) - backlog, format, ap);
        backlog += ret;
 
+       tui_helpline__set = true;
+
        if (ui_helpline__last_msg[backlog - 1] == '\n') {
                ui_helpline__puts(ui_helpline__last_msg);
                SLsmg_refresh();
index 3c38f25b1695cdd289808d0d9f5ea858f06fc5db..b77e1d7713637c711e144886c9914fe02cb110ca 100644 (file)
@@ -17,6 +17,7 @@
 static volatile int ui__need_resize;
 
 extern struct perf_error_ops perf_tui_eops;
+extern bool tui_helpline__set;
 
 extern void hist_browser__init_hpp(void);
 
@@ -159,7 +160,7 @@ out:
 
 void ui__exit(bool wait_for_ok)
 {
-       if (wait_for_ok)
+       if (wait_for_ok && tui_helpline__set)
                ui__question_window("Fatal Error",
                                    ui_helpline__last_msg,
                                    "Press any key...", 0);
index 79999ceaf2be08e5f4880e853d467bf59874340c..61bf9128e1f28ce40d3d694ef5d9de8bb9cda1f8 100644 (file)
@@ -177,14 +177,17 @@ static int lock__parse(struct ins_operands *ops)
                goto out_free_ops;
 
        ops->locked.ins = ins__find(name);
+       free(name);
+
        if (ops->locked.ins == NULL)
                goto out_free_ops;
 
        if (!ops->locked.ins->ops)
                return 0;
 
-       if (ops->locked.ins->ops->parse)
-               ops->locked.ins->ops->parse(ops->locked.ops);
+       if (ops->locked.ins->ops->parse &&
+           ops->locked.ins->ops->parse(ops->locked.ops) < 0)
+               goto out_free_ops;
 
        return 0;
 
@@ -208,6 +211,13 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
 
 static void lock__delete(struct ins_operands *ops)
 {
+       struct ins *ins = ops->locked.ins;
+
+       if (ins && ins->ops->free)
+               ins->ops->free(ops->locked.ops);
+       else
+               ins__delete(ops->locked.ops);
+
        zfree(&ops->locked.ops);
        zfree(&ops->target.raw);
        zfree(&ops->target.name);
@@ -229,7 +239,7 @@ static int mov__parse(struct ins_operands *ops)
        *s = '\0';
        ops->source.raw = strdup(ops->raw);
        *s = ',';
-       
+
        if (ops->source.raw == NULL)
                return -1;
 
@@ -531,8 +541,8 @@ static void disasm_line__init_ins(struct disasm_line *dl)
        if (!dl->ins->ops)
                return;
 
-       if (dl->ins->ops->parse)
-               dl->ins->ops->parse(&dl->ops);
+       if (dl->ins->ops->parse && dl->ins->ops->parse(&dl->ops) < 0)
+               dl->ins = NULL;
 }
 
 static int disasm_line__parse(char *line, char **namep, char **rawp)
index f4654183d391a4051a970719229d47b2a9027108..55355b3d4f854477e70d84e8bed87a2e6c13962a 100644 (file)
@@ -5,132 +5,6 @@
 
 int perf_use_color_default = -1;
 
-static int parse_color(const char *name, int len)
-{
-       static const char * const color_names[] = {
-               "normal", "black", "red", "green", "yellow",
-               "blue", "magenta", "cyan", "white"
-       };
-       char *end;
-       int i;
-
-       for (i = 0; i < (int)ARRAY_SIZE(color_names); i++) {
-               const char *str = color_names[i];
-               if (!strncasecmp(name, str, len) && !str[len])
-                       return i - 1;
-       }
-       i = strtol(name, &end, 10);
-       if (end - name == len && i >= -1 && i <= 255)
-               return i;
-       return -2;
-}
-
-static int parse_attr(const char *name, int len)
-{
-       static const int attr_values[] = { 1, 2, 4, 5, 7 };
-       static const char * const attr_names[] = {
-               "bold", "dim", "ul", "blink", "reverse"
-       };
-       unsigned int i;
-
-       for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
-               const char *str = attr_names[i];
-               if (!strncasecmp(name, str, len) && !str[len])
-                       return attr_values[i];
-       }
-       return -1;
-}
-
-void color_parse(const char *value, const char *var, char *dst)
-{
-       color_parse_mem(value, strlen(value), var, dst);
-}
-
-void color_parse_mem(const char *value, int value_len, const char *var,
-               char *dst)
-{
-       const char *ptr = value;
-       int len = value_len;
-       int attr = -1;
-       int fg = -2;
-       int bg = -2;
-
-       if (!strncasecmp(value, "reset", len)) {
-               strcpy(dst, PERF_COLOR_RESET);
-               return;
-       }
-
-       /* [fg [bg]] [attr] */
-       while (len > 0) {
-               const char *word = ptr;
-               int val, wordlen = 0;
-
-               while (len > 0 && !isspace(word[wordlen])) {
-                       wordlen++;
-                       len--;
-               }
-
-               ptr = word + wordlen;
-               while (len > 0 && isspace(*ptr)) {
-                       ptr++;
-                       len--;
-               }
-
-               val = parse_color(word, wordlen);
-               if (val >= -1) {
-                       if (fg == -2) {
-                               fg = val;
-                               continue;
-                       }
-                       if (bg == -2) {
-                               bg = val;
-                               continue;
-                       }
-                       goto bad;
-               }
-               val = parse_attr(word, wordlen);
-               if (val < 0 || attr != -1)
-                       goto bad;
-               attr = val;
-       }
-
-       if (attr >= 0 || fg >= 0 || bg >= 0) {
-               int sep = 0;
-
-               *dst++ = '\033';
-               *dst++ = '[';
-               if (attr >= 0) {
-                       *dst++ = '0' + attr;
-                       sep++;
-               }
-               if (fg >= 0) {
-                       if (sep++)
-                               *dst++ = ';';
-                       if (fg < 8) {
-                               *dst++ = '3';
-                               *dst++ = '0' + fg;
-                       } else {
-                               dst += sprintf(dst, "38;5;%d", fg);
-                       }
-               }
-               if (bg >= 0) {
-                       if (sep++)
-                               *dst++ = ';';
-                       if (bg < 8) {
-                               *dst++ = '4';
-                               *dst++ = '0' + bg;
-                       } else {
-                               dst += sprintf(dst, "48;5;%d", bg);
-                       }
-               }
-               *dst++ = 'm';
-       }
-       *dst = 0;
-       return;
-bad:
-       die("bad color value '%.*s' for variable '%s'", value_len, value, var);
-}
-
 int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty)
 {
        if (value) {
index 0a594b8a0c26ab2e0753221ea0ae03e2b63e640e..38146f922c541fe107edc7acc1a3a87c325142ad 100644 (file)
@@ -30,8 +30,6 @@ extern int perf_use_color_default;
 int perf_color_default_config(const char *var, const char *value, void *cb);
 
 int perf_config_colorbool(const char *var, const char *value, int stdout_is_tty);
-void color_parse(const char *value, const char *var, char *dst);
-void color_parse_mem(const char *value, int len, const char *var, char *dst);
 int color_vsnprintf(char *bf, size_t size, const char *color,
                    const char *fmt, va_list args);
 int color_vfprintf(FILE *fp, const char *color, const char *fmt, va_list args);
index 45be944d450adfcfaf9c0dff3a0c68296769027a..c2f7d3b90966a66fb5d628e933d92fb83325a582 100644 (file)
@@ -532,12 +532,8 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
                        break;
 
                cache_offset = offset & DSO__DATA_CACHE_MASK;
-               ret = -EINVAL;
 
-               if (-1 == lseek(dso->data.fd, cache_offset, SEEK_SET))
-                       break;
-
-               ret = read(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE);
+               ret = pread(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE, cache_offset);
                if (ret <= 0)
                        break;
 
index 3782c82c6e44b579895dc0b48f034f856706c8d4..ced92841ff97d75f2768fae0edba8e5971c93968 100644 (file)
@@ -139,6 +139,7 @@ struct dso {
                u32              status_seen;
                size_t           file_size;
                struct list_head open_entry;
+               u64              frame_offset;
        } data;
 
        union { /* Tool specific area */
index cbab1fb77b1d6c4efb8565144256e6f1c5a6d540..28b8ce86bf120d9f635ab5bcc83032083ebfac0f 100644 (file)
@@ -1436,33 +1436,6 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
        return printed + fprintf(fp, "\n");
 }
 
-int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
-                            int err, char *buf, size_t size)
-{
-       char sbuf[128];
-
-       switch (err) {
-       case ENOENT:
-               scnprintf(buf, size, "%s",
-                         "Error:\tUnable to find debugfs\n"
-                         "Hint:\tWas your kernel was compiled with debugfs support?\n"
-                         "Hint:\tIs the debugfs filesystem mounted?\n"
-                         "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
-               break;
-       case EACCES:
-               scnprintf(buf, size,
-                         "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
-                         "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
-                         debugfs_mountpoint, debugfs_mountpoint);
-               break;
-       default:
-               scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
-               break;
-       }
-
-       return 0;
-}
-
 int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
                               int err, char *buf, size_t size)
 {
index 0ba93f67ab946839fb3576d6285cf08cafc4810c..c94a9e03ecf15744800d4a6bc68cca28ca70259e 100644 (file)
@@ -183,7 +183,6 @@ static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
 
 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
 
-int perf_evlist__strerror_tp(struct perf_evlist *evlist, int err, char *buf, size_t size);
 int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
 int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);
 
index 1e90c8557ede152b52ff5cf3f0baa443dbdbad3e..ea51a90e20a0e9daa1a3f57c7dcf289b83299061 100644 (file)
@@ -709,6 +709,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
        if (opts->sample_weight)
                perf_evsel__set_sample_bit(evsel, WEIGHT);
 
+       attr->task  = track;
        attr->mmap  = track;
        attr->mmap2 = track && !perf_missing_features.mmap2;
        attr->comm  = track;
@@ -797,6 +798,9 @@ int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
 
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
+       if (ncpus == 0 || nthreads == 0)
+               return 0;
+
        if (evsel->system_wide)
                nthreads = 1;
 
index b20e40c74468d13f951c8e3415e7bd8db225ee53..1f407f7352a7fd2bab67ad13e63e5fcd053495ba 100644 (file)
@@ -2237,6 +2237,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
         * - unique number to identify actual perf.data files
         * - encode endianness of file
         */
+       ph->version = PERF_HEADER_VERSION_2;
 
        /* check magic number with one endianness */
        if (magic == __perf_magic2)
@@ -2247,7 +2248,6 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
                return -1;
 
        ph->needs_swap = true;
-       ph->version = PERF_HEADER_VERSION_2;
 
        return 0;
 }
index 182395546ddca63d919886f4b49896fbdd46e3e2..70b48a65064cbc85fd30d6777eb1a9ca25ca0098 100644 (file)
@@ -241,6 +241,20 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
        return he->stat.period == 0;
 }
 
+static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
+{
+       rb_erase(&he->rb_node, &hists->entries);
+
+       if (sort__need_collapse)
+               rb_erase(&he->rb_node_in, &hists->entries_collapsed);
+
+       --hists->nr_entries;
+       if (!he->filtered)
+               --hists->nr_non_filtered_entries;
+
+       hist_entry__delete(he);
+}
+
 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
 {
        struct rb_node *next = rb_first(&hists->entries);
@@ -258,16 +272,7 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
-                       rb_erase(&n->rb_node, &hists->entries);
-
-                       if (sort__need_collapse)
-                               rb_erase(&n->rb_node_in, &hists->entries_collapsed);
-
-                       --hists->nr_entries;
-                       if (!n->filtered)
-                               --hists->nr_non_filtered_entries;
-
-                       hist_entry__free(n);
+                       hists__delete_entry(hists, n);
                }
        }
 }
@@ -281,16 +286,7 @@ void hists__delete_entries(struct hists *hists)
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
 
-               rb_erase(&n->rb_node, &hists->entries);
-
-               if (sort__need_collapse)
-                       rb_erase(&n->rb_node_in, &hists->entries_collapsed);
-
-               --hists->nr_entries;
-               if (!n->filtered)
-                       --hists->nr_non_filtered_entries;
-
-               hist_entry__free(n);
+               hists__delete_entry(hists, n);
        }
 }
 
@@ -433,6 +429,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
        if (!he)
                return NULL;
 
+       hists->nr_entries++;
+
        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
 out:
@@ -915,7 +913,7 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
                if (perf_hpp__should_skip(fmt))
                        continue;
 
-               cmp = fmt->cmp(left, right);
+               cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }
@@ -933,7 +931,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
                if (perf_hpp__should_skip(fmt))
                        continue;
 
-               cmp = fmt->collapse(left, right);
+               cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }
@@ -941,7 +939,7 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
        return cmp;
 }
 
-void hist_entry__free(struct hist_entry *he)
+void hist_entry__delete(struct hist_entry *he)
 {
        zfree(&he->branch_info);
        zfree(&he->mem_info);
@@ -981,7 +979,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                                iter->callchain,
                                                he->callchain);
                        }
-                       hist_entry__free(he);
+                       hist_entry__delete(he);
                        return false;
                }
 
@@ -1063,7 +1061,7 @@ static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
                if (perf_hpp__should_skip(fmt))
                        continue;
 
-               cmp = fmt->sort(a, b);
+               cmp = fmt->sort(fmt, a, b);
                if (cmp)
                        break;
        }
index 46bd50344f853f8f55f43bc23cd95f8459e53cab..2b690d02890707f2b916220c1402df3b8f774e98 100644 (file)
@@ -119,7 +119,7 @@ int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
 int hist_entry__transaction_len(void);
 int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
                              struct hists *hists);
-void hist_entry__free(struct hist_entry *);
+void hist_entry__delete(struct hist_entry *he);
 
 void hists__output_resort(struct hists *hists, struct ui_progress *prog);
 void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
@@ -195,9 +195,12 @@ struct perf_hpp_fmt {
                     struct hist_entry *he);
        int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
                     struct hist_entry *he);
-       int64_t (*cmp)(struct hist_entry *a, struct hist_entry *b);
-       int64_t (*collapse)(struct hist_entry *a, struct hist_entry *b);
-       int64_t (*sort)(struct hist_entry *a, struct hist_entry *b);
+       int64_t (*cmp)(struct perf_hpp_fmt *fmt,
+                      struct hist_entry *a, struct hist_entry *b);
+       int64_t (*collapse)(struct perf_hpp_fmt *fmt,
+                           struct hist_entry *a, struct hist_entry *b);
+       int64_t (*sort)(struct perf_hpp_fmt *fmt,
+                       struct hist_entry *a, struct hist_entry *b);
 
        struct list_head list;
        struct list_head sort_list;
index 6951a9d42339ee089c67dd068622f41c64f4670b..0e42438b1e593c0e6369186d84cc1ccad76a0093 100644 (file)
@@ -116,6 +116,22 @@ struct thread;
 #define map__for_each_symbol(map, pos, n)      \
        dso__for_each_symbol(map->dso, pos, n, map->type)
 
+/* map__for_each_symbol_by_name - iterate over the symbols in the given map
+ *                                that have the given name
+ *
+ * @map: the 'struct map *' in which symbols are iterated
+ * @sym_name: the symbol name
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @filter: to use when loading the DSO
+ */
+#define __map__for_each_symbol_by_name(map, sym_name, pos, filter)     \
+       for (pos = map__find_symbol_by_name(map, sym_name, filter);     \
+            pos && strcmp(pos->name, sym_name) == 0;           \
+            pos = symbol__next_by_name(pos))
+
+#define map__for_each_symbol_by_name(map, sym_name, pos)               \
+       __map__for_each_symbol_by_name(map, sym_name, (pos), NULL)
+
 typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
 
 void map__init(struct map *map, enum map_type type,
index 77b43fe43d55732c6d9ffeef50dbb66e309fca63..7f8ec6ce2823c4652e04f11618af1b5cfcb4b952 100644 (file)
@@ -526,7 +526,7 @@ do {                                        \
 }
 
 int parse_events_add_breakpoint(struct list_head *list, int *idx,
-                               void *ptr, char *type)
+                               void *ptr, char *type, u64 len)
 {
        struct perf_event_attr attr;
 
@@ -536,14 +536,15 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx,
        if (parse_breakpoint_type(type, &attr))
                return -EINVAL;
 
-       /*
-        * We should find a nice way to override the access length
-        * Provide some defaults for now
-        */
-       if (attr.bp_type == HW_BREAKPOINT_X)
-               attr.bp_len = sizeof(long);
-       else
-               attr.bp_len = HW_BREAKPOINT_LEN_4;
+       /* Provide some defaults if len is not specified */
+       if (!len) {
+               if (attr.bp_type == HW_BREAKPOINT_X)
+                       len = sizeof(long);
+               else
+                       len = HW_BREAKPOINT_LEN_4;
+       }
+
+       attr.bp_len = len;
 
        attr.type = PERF_TYPE_BREAKPOINT;
        attr.sample_period = 1;
@@ -1121,7 +1122,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
                return;
 
        for_each_subsystem(sys_dir, sys_dirent, sys_next) {
-               if (subsys_glob != NULL && 
+               if (subsys_glob != NULL &&
                    !strglobmatch(sys_dirent.d_name, subsys_glob))
                        continue;
 
@@ -1132,7 +1133,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
                        continue;
 
                for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
-                       if (event_glob != NULL && 
+                       if (event_glob != NULL &&
                            !strglobmatch(evt_dirent.d_name, event_glob))
                                continue;
 
@@ -1305,7 +1306,7 @@ static void print_symbol_events(const char *event_glob, unsigned type,
 
        for (i = 0; i < max; i++, syms++) {
 
-               if (event_glob != NULL && 
+               if (event_glob != NULL &&
                    !(strglobmatch(syms->symbol, event_glob) ||
                      (syms->alias && strglobmatch(syms->alias, event_glob))))
                        continue;
@@ -1366,7 +1367,7 @@ void print_events(const char *event_glob, bool name_only)
                printf("\n");
 
                printf("  %-50s [%s]\n",
-                      "mem:<addr>[:access]",
+                      "mem:<addr>[/len][:access]",
                        event_type_descriptors[PERF_TYPE_BREAKPOINT]);
                printf("\n");
        }
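
The '/len' field shown above pairs with the lexer and grammar changes below and with the new "mem:0/4:rw:u" style tests earlier in this series. A hedged usage sketch (address and workload invented for illustration):

    # Count read/write accesses to a 4-byte object at 0x601040,
    # user space only, using the new mem:<addr>[/len][:access] syntax.
    perf stat -e mem:0x601040/4:rw:u -- ./some_workload
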
index db2cf78ff0f3c70c0fb3d6563942b02c6dba2efa..ff6e1fa4111ec7dca08c4e90da7ef14984e602cb 100644 (file)
@@ -71,6 +71,7 @@ struct parse_events_term {
        int type_val;
        int type_term;
        struct list_head list;
+       bool used;
 };
 
 struct parse_events_evlist {
@@ -104,7 +105,7 @@ int parse_events_add_numeric(struct list_head *list, int *idx,
 int parse_events_add_cache(struct list_head *list, int *idx,
                           char *type, char *op_result1, char *op_result2);
 int parse_events_add_breakpoint(struct list_head *list, int *idx,
-                               void *ptr, char *type);
+                               void *ptr, char *type, u64 len);
 int parse_events_add_pmu(struct list_head *list, int *idx,
                         char *pmu , struct list_head *head_config);
 enum perf_pmu_event_symbol_type
index 906630bbf8eb95de9d34a0a8ef36e74f6a8efda5..94eacb6c1ef71e46d0de1bdf2df002c29bbbe118 100644 (file)
@@ -159,6 +159,7 @@ branch_type         { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
 <mem>{
 {modifier_bp}          { return str(yyscanner, PE_MODIFIER_BP); }
 :                      { return ':'; }
+"/"                    { return '/'; }
 {num_dec}              { return value(yyscanner, 10); }
 {num_hex}              { return value(yyscanner, 16); }
        /*
index 93c4c9fbc922d360843db69a91afae3438f90eec..72def077dbbfda149dfe893135dd3940ca2ed648 100644 (file)
@@ -326,6 +326,28 @@ PE_NAME_CACHE_TYPE
 }
 
 event_legacy_mem:
+PE_PREFIX_MEM PE_VALUE '/' PE_VALUE ':' PE_MODIFIER_BP sep_dc
+{
+       struct parse_events_evlist *data = _data;
+       struct list_head *list;
+
+       ALLOC_LIST(list);
+       ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
+                                            (void *) $2, $6, $4));
+       $$ = list;
+}
+|
+PE_PREFIX_MEM PE_VALUE '/' PE_VALUE sep_dc
+{
+       struct parse_events_evlist *data = _data;
+       struct list_head *list;
+
+       ALLOC_LIST(list);
+       ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
+                                            (void *) $2, NULL, $4));
+       $$ = list;
+}
+|
 PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
 {
        struct parse_events_evlist *data = _data;
@@ -333,7 +355,7 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
 
        ALLOC_LIST(list);
        ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
-                                            (void *) $2, $4));
+                                            (void *) $2, $4, 0));
        $$ = list;
 }
 |
@@ -344,7 +366,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc
 
        ALLOC_LIST(list);
        ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
-                                            (void *) $2, NULL));
+                                            (void *) $2, NULL, 0));
        $$ = list;
 }
 
index f62dee7bd924b30696da6205fece4980a7b13e24..4a015f77e2b5bb6ff7f6ab3e8c22ec42bf1b8b67 100644 (file)
@@ -46,7 +46,7 @@ static int get_value(struct parse_opt_ctx_t *p,
                return opterror(opt, "is not usable", flags);
 
        if (opt->flags & PARSE_OPT_EXCLUSIVE) {
-               if (p->excl_opt) {
+               if (p->excl_opt && p->excl_opt != opt) {
                        char msg[128];
 
                        if (((flags & OPT_SHORT) && p->excl_opt->short_name) ||
index 5c9c4947cfb43f08522baa88aef0d16b5bdc4ee8..48411674da0f9cef6c87ba74a08bec513b112d6c 100644 (file)
@@ -550,6 +550,35 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
        }
 }
 
+/*
+ * Term is a string term, and might be a param-term. Try to look up its value
+ * in the remaining terms.
+ * - We have a term like "base-or-format-term=param-term",
+ * - We need to find the value supplied for "param-term" (with param-term named
+ *   in a config string) later on in the term list.
+ */
+static int pmu_resolve_param_term(struct parse_events_term *term,
+                                 struct list_head *head_terms,
+                                 __u64 *value)
+{
+       struct parse_events_term *t;
+
+       list_for_each_entry(t, head_terms, list) {
+               if (t->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
+                       if (!strcmp(t->config, term->config)) {
+                               t->used = true;
+                               *value = t->val.num;
+                               return 0;
+                       }
+               }
+       }
+
+       if (verbose)
+               printf("Required parameter '%s' not specified\n", term->config);
+
+       return -1;
+}
+
 /*
  * Setup one of config[12] attr members based on the
  * user input data - term parameter.
@@ -557,25 +586,33 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
 static int pmu_config_term(struct list_head *formats,
                           struct perf_event_attr *attr,
                           struct parse_events_term *term,
+                          struct list_head *head_terms,
                           bool zero)
 {
        struct perf_pmu_format *format;
        __u64 *vp;
+       __u64 val;
+
+       /*
+        * If this is a parameter we've already used for parameterized-eval,
+        * skip it in normal eval.
+        */
+       if (term->used)
+               return 0;
 
        /*
-        * Support only for hardcoded and numnerial terms.
         * Hardcoded terms should be already in, so nothing
         * to be done for them.
         */
        if (parse_events__is_hardcoded_term(term))
                return 0;
 
-       if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM)
-               return -EINVAL;
-
        format = pmu_find_format(formats, term->config);
-       if (!format)
+       if (!format) {
+               if (verbose)
+                       printf("Invalid event/parameter '%s'\n", term->config);
                return -EINVAL;
+       }
 
        switch (format->value) {
        case PERF_PMU_FORMAT_VALUE_CONFIG:
@@ -592,11 +629,25 @@ static int pmu_config_term(struct list_head *formats,
        }
 
        /*
-        * XXX If we ever decide to go with string values for
-        * non-hardcoded terms, here's the place to translate
-        * them into value.
+        * Either directly use a numeric term, or try to translate string terms
+        * using event parameters.
         */
-       pmu_format_value(format->bits, term->val.num, vp, zero);
+       if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
+               val = term->val.num;
+       else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
+               if (strcmp(term->val.str, "?")) {
+                       if (verbose)
+                               pr_info("Invalid sysfs entry %s=%s\n",
+                                               term->config, term->val.str);
+                       return -EINVAL;
+               }
+
+               if (pmu_resolve_param_term(term, head_terms, &val))
+                       return -EINVAL;
+       } else
+               return -EINVAL;
+
+       pmu_format_value(format->bits, val, vp, zero);
        return 0;
 }
 
@@ -607,9 +658,10 @@ int perf_pmu__config_terms(struct list_head *formats,
 {
        struct parse_events_term *term;
 
-       list_for_each_entry(term, head_terms, list)
-               if (pmu_config_term(formats, attr, term, zero))
+       list_for_each_entry(term, head_terms, list) {
+               if (pmu_config_term(formats, attr, term, head_terms, zero))
                        return -EINVAL;
+       }
 
        return 0;
 }
@@ -767,10 +819,36 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
                set_bit(b, bits);
 }
 
+static int sub_non_neg(int a, int b)
+{
+       if (b > a)
+               return 0;
+       return a - b;
+}
+
 static char *format_alias(char *buf, int len, struct perf_pmu *pmu,
                          struct perf_pmu_alias *alias)
 {
-       snprintf(buf, len, "%s/%s/", pmu->name, alias->name);
+       struct parse_events_term *term;
+       int used = snprintf(buf, len, "%s/%s", pmu->name, alias->name);
+
+       list_for_each_entry(term, &alias->terms, list) {
+               if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
+                       used += snprintf(buf + used, sub_non_neg(len, used),
+                                       ",%s=%s", term->config,
+                                       term->val.str);
+       }
+
+       if (sub_non_neg(len, used) > 0) {
+               buf[used] = '/';
+               used++;
+       }
+       if (sub_non_neg(len, used) > 0) {
+               buf[used] = '\0';
+               used++;
+       } else
+               buf[len - 1] = '\0';
+
        return buf;
 }
 
index 94a717bf007de77658032dcb03dcc6db818b451b..919937eb0be2b643e93bfe7fc983e7131074deeb 100644 (file)
@@ -446,7 +446,7 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
        }
 
        for (i = 0; i < ntevs; i++) {
-               if (tevs[i].point.address) {
+               if (tevs[i].point.address && !tevs[i].point.retprobe) {
                        tmp = strdup(reloc_sym->name);
                        if (!tmp)
                                return -ENOMEM;
@@ -2193,18 +2193,17 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
        return ret;
 }
 
-static char *looking_function_name;
-static int num_matched_functions;
-
-static int probe_function_filter(struct map *map __maybe_unused,
-                                     struct symbol *sym)
+static int find_probe_functions(struct map *map, char *name)
 {
-       if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) &&
-           strcmp(looking_function_name, sym->name) == 0) {
-               num_matched_functions++;
-               return 0;
+       int found = 0;
+       struct symbol *sym;
+
+       map__for_each_symbol_by_name(map, name, sym) {
+               if (sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL)
+                       found++;
        }
-       return 1;
+
+       return found;
 }
 
 #define strdup_or_goto(str, label)     \
@@ -2222,10 +2221,10 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
        struct kmap *kmap = NULL;
        struct ref_reloc_sym *reloc_sym = NULL;
        struct symbol *sym;
-       struct rb_node *nd;
        struct probe_trace_event *tev;
        struct perf_probe_point *pp = &pev->point;
        struct probe_trace_point *tp;
+       int num_matched_functions;
        int ret, i;
 
        /* Init maps of given executable or kernel */
@@ -2242,10 +2241,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
         * Load matched symbols: Since the different local symbols may have
         * same name but different addresses, this lists all the symbols.
         */
-       num_matched_functions = 0;
-       looking_function_name = pp->function;
-       ret = map__load(map, probe_function_filter);
-       if (ret || num_matched_functions == 0) {
+       num_matched_functions = find_probe_functions(map, pp->function);
+       if (num_matched_functions == 0) {
                pr_err("Failed to find symbol %s in %s\n", pp->function,
                        target ? : "kernel");
                ret = -ENOENT;
@@ -2257,7 +2254,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
                goto out;
        }
 
-       if (!pev->uprobes) {
+       if (!pev->uprobes && !pp->retprobe) {
                kmap = map__kmap(map);
                reloc_sym = kmap->ref_reloc_sym;
                if (!reloc_sym) {
@@ -2275,7 +2272,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
        }
 
        ret = 0;
-       map__for_each_symbol(map, sym, nd) {
+
+       map__for_each_symbol_by_name(map, pp->function, sym) {
                tev = (*tevs) + ret;
                tp = &tev->point;
                if (ret == num_matched_functions) {
index 3dda85ca50c1d25bc81ff2457f9d624d774f754b..d906d0ad5d40a34b49955ad9571130b1f93c7dc3 100644 (file)
@@ -768,7 +768,7 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
                        Py_DECREF(file);
                        goto free_list;
                }
-                       
+
                Py_DECREF(file);
        }
 
index d808a328f4dca03cd255edef292271ff9a4cf3f9..0c815a40a6e86bbedf16e3893e2c00c5827419e0 100644 (file)
@@ -89,7 +89,7 @@ static void handler_call_die(const char *handler_name)
 
 /*
  * Insert val into into the dictionary and decrement the reference counter.
- * This is necessary for dictionaries since PyDict_SetItemString() does not 
+ * This is necessary for dictionaries since PyDict_SetItemString() does not
  * steal a reference, as opposed to PyTuple_SetItem().
  */
 static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val)
index 5f0e05a76c05ab00a267b27701ca37728aee8c54..0baf75f12b7c4a663544cb6f65b6c001c3ae890e 100644 (file)
@@ -274,7 +274,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
        if (tool->id_index == NULL)
                tool->id_index = process_id_index_stub;
 }
+
 static void swap_sample_id_all(union perf_event *event, void *data)
 {
        void *end = (void *) event + event->header.size;
@@ -1251,9 +1251,9 @@ fetch_mmaped_event(struct perf_session *session,
 #define NUM_MMAPS 128
 #endif
 
-int __perf_session__process_events(struct perf_session *session,
-                                  u64 data_offset, u64 data_size,
-                                  u64 file_size, struct perf_tool *tool)
+static int __perf_session__process_events(struct perf_session *session,
+                                         u64 data_offset, u64 data_size,
+                                         u64 file_size, struct perf_tool *tool)
 {
        int fd = perf_data_file__fd(session->file);
        u64 head, page_offset, file_offset, file_pos, size;
index dc26ebf60fe421050b58eafa9c8fb29d49fa3f0a..6d663dc76404395ad6231fcf93bd881b9978fab8 100644 (file)
@@ -49,9 +49,6 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
                             union perf_event **event_ptr,
                             struct perf_sample *sample);
 
-int __perf_session__process_events(struct perf_session *session,
-                                  u64 data_offset, u64 data_size, u64 size,
-                                  struct perf_tool *tool);
 int perf_session__process_events(struct perf_session *session,
                                 struct perf_tool *tool);
 
index 9139dda9f9a37afd7ae16921928e224604950cf5..7a39c1ed8d37c16cfe24d9dc0a908166658903a4 100644 (file)
@@ -1304,6 +1304,37 @@ static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
        return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
 }
 
+static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
+                              struct hist_entry *a, struct hist_entry *b)
+{
+       struct hpp_sort_entry *hse;
+
+       hse = container_of(fmt, struct hpp_sort_entry, hpp);
+       return hse->se->se_cmp(a, b);
+}
+
+static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
+                                   struct hist_entry *a, struct hist_entry *b)
+{
+       struct hpp_sort_entry *hse;
+       int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
+
+       hse = container_of(fmt, struct hpp_sort_entry, hpp);
+       collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
+       return collapse_fn(a, b);
+}
+
+static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
+                               struct hist_entry *a, struct hist_entry *b)
+{
+       struct hpp_sort_entry *hse;
+       int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
+
+       hse = container_of(fmt, struct hpp_sort_entry, hpp);
+       sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
+       return sort_fn(a, b);
+}
+
 static struct hpp_sort_entry *
 __sort_dimension__alloc_hpp(struct sort_dimension *sd)
 {
@@ -1322,9 +1353,9 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd)
        hse->hpp.entry = __sort__hpp_entry;
        hse->hpp.color = NULL;
 
-       hse->hpp.cmp = sd->entry->se_cmp;
-       hse->hpp.collapse = sd->entry->se_collapse ? : sd->entry->se_cmp;
-       hse->hpp.sort = sd->entry->se_sort ? : hse->hpp.collapse;
+       hse->hpp.cmp = __sort__hpp_cmp;
+       hse->hpp.collapse = __sort__hpp_collapse;
+       hse->hpp.sort = __sort__hpp_sort;
 
        INIT_LIST_HEAD(&hse->hpp.list);
        INIT_LIST_HEAD(&hse->hpp.sort_list);
index 06fcd1bf98b6034e39ef9af8dd0b4111d9208535..b24f9d8727a894ccae13353abad0b951ec1b0916 100644 (file)
@@ -574,13 +574,16 @@ static int decompress_kmodule(struct dso *dso, const char *name,
        const char *ext = strrchr(name, '.');
        char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
 
-       if ((type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
-            type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP) ||
-           type != dso->symtab_type)
+       if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
+           type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
+           type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
                return -1;
 
-       if (!ext || !is_supported_compression(ext + 1))
-               return -1;
+       if (!ext || !is_supported_compression(ext + 1)) {
+               ext = strrchr(dso->name, '.');
+               if (!ext || !is_supported_compression(ext + 1))
+                       return -1;
+       }
 
        fd = mkstemp(tmpbuf);
        if (fd < 0)
index c24c5b83156cd92ec5ba5c457153e43706b81424..a69066865a555f7b02c50606f3d53f4759a1eecc 100644 (file)
@@ -396,6 +396,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
                                            const char *name)
 {
        struct rb_node *n;
+       struct symbol_name_rb_node *s;
 
        if (symbols == NULL)
                return NULL;
@@ -403,7 +404,6 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
        n = symbols->rb_node;
 
        while (n) {
-               struct symbol_name_rb_node *s;
                int cmp;
 
                s = rb_entry(n, struct symbol_name_rb_node, rb_node);
@@ -414,10 +414,24 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
                else if (cmp > 0)
                        n = n->rb_right;
                else
-                       return &s->sym;
+                       break;
        }
 
-       return NULL;
+       if (n == NULL)
+               return NULL;
+
+       /* return first symbol that has same name (if any) */
+       for (n = rb_prev(n); n; n = rb_prev(n)) {
+               struct symbol_name_rb_node *tmp;
+
+               tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
+               if (strcmp(tmp->sym.name, s->sym.name))
+                       break;
+
+               s = tmp;
+       }
+
+       return &s->sym;
 }
 
 struct symbol *dso__find_symbol(struct dso *dso,
@@ -436,6 +450,17 @@ struct symbol *dso__next_symbol(struct symbol *sym)
        return symbols__next(sym);
 }
 
+struct symbol *symbol__next_by_name(struct symbol *sym)
+{
+       struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
+       struct rb_node *n = rb_next(&s->rb_node);
+
+       return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
+}
+
+ /*
+  * Returns the first symbol that matches @name.
+  */
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
                                        const char *name)
 {
@@ -660,7 +685,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
        struct machine *machine = kmaps->machine;
        struct map *curr_map = map;
        struct symbol *pos;
-       int count = 0, moved = 0;       
+       int count = 0, moved = 0;
        struct rb_root *root = &dso->symbols[map->type];
        struct rb_node *next = rb_first(root);
        int kernel_range = 0;
index 9d602e9c6f590f73eb9413f53bbbceeada64b20d..1650dcb3a67bc3fddff40c93c6fdf6a1122d185a 100644 (file)
@@ -231,6 +231,7 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
                                u64 addr);
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
                                        const char *name);
+struct symbol *symbol__next_by_name(struct symbol *sym);
 
 struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
 struct symbol *dso__next_symbol(struct symbol *sym);
index 6edf535f65c23428b4982fb651ba3df997d55dfb..e3c40a520a253c73cbad3f4e04558f006508cdb7 100644 (file)
@@ -266,14 +266,17 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
                                     u64 *fde_count)
 {
        int ret = -EINVAL, fd;
-       u64 offset;
+       u64 offset = dso->data.frame_offset;
 
-       fd = dso__data_fd(dso, machine);
-       if (fd < 0)
-               return -EINVAL;
+       if (offset == 0) {
+               fd = dso__data_fd(dso, machine);
+               if (fd < 0)
+                       return -EINVAL;
 
-       /* Check the .eh_frame section for unwinding info */
-       offset = elf_section_offset(fd, ".eh_frame_hdr");
+       /* Check the .eh_frame_hdr section for unwinding info */
+               offset = elf_section_offset(fd, ".eh_frame_hdr");
+               dso->data.frame_offset = offset;
+       }
 
        if (offset)
                ret = unwind_spec_ehframe(dso, machine, offset,
@@ -287,14 +290,20 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
 static int read_unwind_spec_debug_frame(struct dso *dso,
                                        struct machine *machine, u64 *offset)
 {
-       int fd = dso__data_fd(dso, machine);
+       int fd;
+       u64 ofs = dso->data.frame_offset;
 
-       if (fd < 0)
-               return -EINVAL;
+       if (ofs == 0) {
+               fd = dso__data_fd(dso, machine);
+               if (fd < 0)
+                       return -EINVAL;
 
-       /* Check the .debug_frame section for unwinding info */
-       *offset = elf_section_offset(fd, ".debug_frame");
+               /* Check the .debug_frame section for unwinding info */
+               ofs = elf_section_offset(fd, ".debug_frame");
+               dso->data.frame_offset = ofs;
+       }
 
+       *offset = ofs;
        if (*offset)
                return 0;
 
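
Both hunks above apply the same memoization: the first successful lookup stores the section offset in dso->data.frame_offset, so later unwinds skip dso__data_fd() and elf_section_offset(). A condensed sketch of the pattern; note that offset 0 doubles as the "not yet probed" sentinel, so a DSO lacking the section is re-probed on every call:

static u64 cached_frame_offset(struct dso *dso, struct machine *machine,
			       const char *section)
{
	u64 offset = dso->data.frame_offset;

	if (offset == 0) {			/* not probed yet (or absent) */
		int fd = dso__data_fd(dso, machine);

		if (fd < 0)
			return 0;
		offset = elf_section_offset(fd, section);
		dso->data.frame_offset = offset;	/* cache the result */
	}
	return offset;
}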
index abe14b7f36e987189419bb45d10594189e336e26..bb99cde3f5f97a216cd85b4654ea0da3cc2b048b 100755 (executable)
@@ -24,7 +24,7 @@
 
 ncpus=`grep '^processor' /proc/cpuinfo | wc -l`
 idlecpus=`mpstat | tail -1 | \
-       awk -v ncpus=$ncpus '{ print ncpus * ($7 + $12) / 100 }'`
+       awk -v ncpus=$ncpus '{ print ncpus * ($7 + $NF) / 100 }'`
 awk -v ncpus=$ncpus -v idlecpus=$idlecpus < /dev/null '
 BEGIN {
        cpus2use = idlecpus;
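
Using $NF instead of $12 keeps the estimate correct across mpstat versions, where %idle is always the last column but its index varies. Restated as arithmetic, assuming the seventh field is %iowait as in common mpstat layouts, with illustrative numbers:

/* idlecpus = ncpus * (%iowait + %idle) / 100; e.g. on 8 CPUs reporting
 * 2.5 %iowait and 90.0 %idle: 8 * (2.5 + 90.0) / 100 = 7.4 idle CPUs. */
static double idlecpus(int ncpus, double pct_iowait, double pct_idle)
{
	return ncpus * (pct_iowait + pct_idle) / 100.0;
}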
index d6cc07fc137fc35c78329586081a1135e5b5b850..559e01ac86be9731cf9040d682132869f42d3ec9 100755 (executable)
@@ -30,6 +30,7 @@ else
        echo Unreadable results directory: $i
        exit 1
 fi
+. tools/testing/selftests/rcutorture/bin/functions.sh
 
 configfile=`echo $i | sed -e 's/^.*\///'`
 ngps=`grep ver: $i/console.log 2> /dev/null | tail -1 | sed -e 's/^.* ver: //' -e 's/ .*$//'`
@@ -48,4 +49,21 @@ else
                title="$title ($ngpsps per second)"
        fi
        echo $title
+       nclosecalls=`grep --binary-files=text 'torture: Reader Batch' $i/console.log | tail -1 | awk '{for (i=NF-8;i<=NF;i++) sum+=$i; } END {print sum}'`
+       if test -z "$nclosecalls"
+       then
+               exit 0
+       fi
+       if test "$nclosecalls" -eq 0
+       then
+               exit 0
+       fi
+       # Compute number of close calls per tenth of an hour
+       nclosecalls10=`awk -v nclosecalls=$nclosecalls -v dur=$dur 'BEGIN { print int(nclosecalls * 36000 / dur) }' < /dev/null`
+       if test $nclosecalls10 -gt 5 -a $nclosecalls -gt 1
+       then
+               print_bug $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
+       else
+               print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
+       fi
 fi
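
The new triage sums the last nine fields of the final 'torture: Reader Batch' console line, scales the count by 36000/dur (the factor is verbatim from the script; dur is in seconds, as the minute conversion shows), and only then chooses between print_bug and print_warning. The decision, as a C sketch:

/* 1 => print_bug, 0 => print_warning; the thresholds and the 36000
 * scale factor are taken verbatim from the script above. */
static int close_calls_are_a_bug(long nclosecalls, long dur_seconds)
{
	long nclosecalls10 = nclosecalls * 36000 / dur_seconds;

	return nclosecalls10 > 5 && nclosecalls > 1;
}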
index 8ca9f21f2efcba8ab113585192d9cf285ebca77a..5236e073919d2e508e6073d201fa157670a8688e 100755 (executable)
@@ -8,9 +8,9 @@
 #
 # Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
 #
-# qemu-args defaults to "-nographic", along with arguments specifying the
-#                      number of CPUs and other options generated from
-#                      the underlying CPU architecture.
+# qemu-args defaults to "-enable-kvm -soundhw pcspk -nographic", along with
+#                      arguments specifying the number of CPUs and other
+#                      options generated from the underlying CPU architecture.
 # boot_args defaults to value returned by the per_version_boot_params
 #                      shell function.
 #
@@ -138,7 +138,7 @@ then
 fi
 
 # Generate -smp qemu argument.
-qemu_args="-nographic $qemu_args"
+qemu_args="-enable-kvm -soundhw pcspk -nographic $qemu_args"
 cpu_count=`configNR_CPUS.sh $config_template`
 cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
 vcpus=`identify_qemu_vcpus`
@@ -168,6 +168,7 @@ then
        touch $resdir/buildonly
        exit 0
 fi
+echo "NOTE: $QEMU either did not run or was interactive" > $builddir/console.log
 echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
 ( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
 qemu_pid=$!
index 499d1e598e425c390e5ebac5293a72cebddb01b2..a6b57622c2e589c67f36455f49c853a5203640ab 100755 (executable)
 #
 # Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 
-T=$1
+F=$1
 title=$2
+T=/tmp/parse-build.sh.$$
+trap 'rm -rf $T' 0
+mkdir $T
 
 . functions.sh
 
-if grep -q CC < $T
+if grep -q CC < $F
 then
        :
 else
@@ -39,18 +42,21 @@ else
        exit 1
 fi
 
-if grep -q "error:" < $T
+if grep -q "error:" < $F
 then
        print_bug $title build errors:
-       grep "error:" < $T
+       grep "error:" < $F
        exit 2
 fi
-exit 0
 
-if egrep -q "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T
+grep warning: < $F > $T/warnings
+grep "include/linux/*rcu*\.h:" $T/warnings > $T/hwarnings
+grep "kernel/rcu/[^/]*:" $T/warnings > $T/cwarnings
+cat $T/hwarnings $T/cwarnings > $T/rcuwarnings
+if test -s $T/rcuwarnings
 then
        print_warning $title build warnings:
-       egrep "rcu[^/]*\.c.*warning:|rcu.*\.h.*warning:" < $T
+       cat $T/rcuwarnings
        exit 2
 fi
 exit 0
index f962ba4cf68b6a06121b1d6f2c11ce8ed8df63ee..d8f35cf116be2ca6fb5095caec9ef25daa90e2ac 100755 (executable)
@@ -36,7 +36,7 @@ if grep -Pq '\x00' < $file
 then
        print_warning Console output contains nul bytes, old qemu still running?
 fi
-egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
+egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $T
 if test -s $T
 then
        print_warning Assertion failure in $file $title